##########################################################################
# Grady Model Description
# -----------------------
#
# Currently Grady incorporates six models on top of the existing Django models
# like User and Group. The fields should be self-explanatory.
#
# ExamType
# --------
#
# Holds information about the exam a student takes part in: a module
# reference, the total score, and the score needed to pass.
#
# SubmissionType
# --------------
#
# This model mostly holds meta information about the kind of task that was
# presented to the student. It serves as a foreign key for the submissions that
# are of this type. This model is currently NOT exposed directly in a view.
#
# Student
# -------
#
# Mostly wraps a User model and adds some data describing the student account.
#
# Test
# ----
#
# Stores the outcome of a single automated check (compiler run, unit test,
# etc.) for one submission.
#
# Submission
# ----------
#
# This table holds the basic information about a student's submission,
# including processed data such as compiler output.
#
# The classmethod assign_tutor creates feedback for a submission and assigns
# a tutor to it permanently (unless the feedback is deleted by a reviewer or
# reassigned). There cannot be more than ONE feedback per Submission.
#
# Feedback
# --------
#
# Feedback is the most complex model. It holds information about the origin
# and status of the feedback, as well as tutor annotations that may not be
# visible to students. Several methods control how feedback is passed along
# between tutors.
#
# For details on these methods see below.
#
##########################################################################


from collections import OrderedDict
from random import randrange, sample
from string import ascii_lowercase

from django.contrib.auth.models import User
from django.db import models
from django.db.models import Value as V
from django.db.models import (BooleanField, Case, Count, F, IntegerField, Q,
                              Sum, When)
from django.db.models.functions import Coalesce

SLUG_LENGTH = 16


def random_slug():
    # sample() draws without replacement, so a slug never repeats a letter
    return ''.join(sample(ascii_lowercase, SLUG_LENGTH))


def random_matrikel_no():
    # eight digits starting with a 2, matching the field's max_length of 8
    return str(2 * 10**7 + randrange(10**7))


def get_annotated_tutor_list():
    return User.objects\
        .annotate(Count('feedback_list'))\
        .filter(groups__name='Tutors')\
        .order_by('-feedback_list__count')
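
# Example (hypothetical shell use): tutors ordered by how much feedback they
# have written, busiest first.
#
#   >>> for tutor in get_annotated_tutor_list():
#   ...     print(tutor.username, tutor.feedback_list__count)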


class ExamType(models.Model):

    class Meta:
        verbose_name = "ExamType"
        verbose_name_plural = "ExamTypes"

    def __str__(self):
        return self.module_reference

    module_reference = models.CharField(max_length=50, unique=True)
    total_score      = models.PositiveIntegerField()
    pass_score       = models.PositiveIntegerField()
    pass_only        = models.BooleanField(default=False)


class SubmissionType(models.Model):
    # Fields
    name        = models.CharField(max_length=50, unique=True)
    full_score  = models.PositiveIntegerField(default=0)
    description = models.TextField()
    solution    = models.TextField()
    slug        = models.SlugField(
        editable=False, unique=True, default=random_slug)

    def __str__(self):
        return self.name

    class Meta:
        verbose_name        = "SubmissionType"
        verbose_name_plural = "SubmissionType Set"

    @classmethod
    def get_annotated_feedback_count(cls):
        """ Annotates submission lists with counts

        count both
            * number of submission per submission type
            * count of received feedback per submission type
            *
        Alternative with case
            Count(Case(
                When(submissions__feedback_list__origin=Feedback.MANUAL,
                    then=Value(1)), output_field=IntegerField())
            )

        Returns:
            annotated queryset
        """
        return cls.objects\
            .annotate( # to display only manual
                feedback_count=Count(
                    Case(
                        When(
                                Q(submissions__feedback__isnull=False) &
                                Q(submissions__feedback__status=Feedback.ACCEPTED),
                            then=V(1)), output_field=IntegerField(),
                    )
                )
            ).annotate(
                submission_count=Count('submissions')
            ).annotate(
                percentage=(F('feedback_count') * 100 / F('submission_count'))
            ).all().order_by('name')
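
    # Example (hypothetical shell use): per-type correction progress, e.g. to
    # render a progress table.
    #
    #   >>> for t in SubmissionType.get_annotated_feedback_count():
    #   ...     print(t.name, t.feedback_count, t.submission_count, t.percentage)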


class Student(models.Model):
    # Fields
    has_logged_in   = models.BooleanField(default=False)
    exam            = models.ForeignKey('ExamType', related_name='students', null=True)
    name            = models.CharField(max_length=50, default="__no_name__")
    matrikel_no     = models.CharField(
        unique=True, max_length=8, default=random_matrikel_no)
    user = models.OneToOneField(
        User, on_delete=models.CASCADE,
        related_name='student',
        limit_choices_to={'groups__name': 'Students'},
    )

    def score_per_submission(self):
        # key by the type's name so both branches return the same shape
        if self.submissions.exists():
            return OrderedDict(
                (s.type.name, s.feedback.score if hasattr(s, 'feedback') else 0)
                for s in self.submissions.all()
            )
        return OrderedDict(
            (t.name, 0) for t in SubmissionType.objects.all()
        )

    @classmethod
    def get_overall_score_annotated_submission_list(cls):
        return cls.objects.annotate(
            overall_score=Coalesce(Sum('submissions__feedback__score'), V(0)),
        ).annotate(
            done=Case(
                When(exam__pass_score__lt=F('overall_score'), then=V(1)),
                default=V(0),
                output_field=BooleanField()
            )
        )
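
    # Example (hypothetical shell use): total score and pass state per student.
    #
    #   >>> for s in Student.get_overall_score_annotated_submission_list():
    #   ...     print(s.user.username, s.overall_score, s.done)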

    def disable(self):
        self.has_logged_in = True
        self.save()

    def __str__(self):
        return self.user.username

    class Meta:
        verbose_name        = "Student"
        verbose_name_plural = "Student Set"


class Test(models.Model):

    name       = models.CharField(max_length=30)
    label      = models.CharField(max_length=50)
    annotation = models.TextField()
    submission = models.ForeignKey(
        'Submission',
        related_name='tests',
        on_delete=models.CASCADE,
    )

    class Meta:
        verbose_name        = "Test"
        verbose_name_plural = "Tests"
        unique_together     = (('submission', 'name'),)

    def __str__(self):
        return f'{self.name} {self.label}'


class Submission(models.Model):

    # Fields
    seen_by_student = models.BooleanField(default=False)
    text            = models.TextField(blank=True)
    pre_corrections = models.TextField(blank=True)
    slug            = models.SlugField(
        editable=False,
        unique=True,
        default=random_slug)
    type            = models.ForeignKey(
        SubmissionType,
        related_name='submissions')
    student         = models.ForeignKey(
        Student,
        on_delete=models.CASCADE,
        related_name='submissions')

    class Meta:
        verbose_name        = "Submission"
        verbose_name_plural = "Submission Set"
        unique_together     = (('type', 'student'),)
        ordering            = ('type__name',)

    def __str__(self):
        return "Submission of type '{}' from Student '{}'".format(
            self.type,
            self.student
        )

    @classmethod
    def assign_tutor(cls, tutor, slug=None) -> bool:
        """Assigns a tutor to a submission

        A submission is not assigned to the specified tutor in the case
            1. the tutor already has a feedback in progress
            2. there is no more feedback to give

        Arguments:
            tutor {User} -- the tutor that should be assigned

        Returns:
            True if something was assigned, false if not
        """

        # Get a submission from the submission set
        unfinished = Feedback.tutor_unfinished_feedback(tutor)
        if unfinished:
            return False

        candidates = cls.objects.filter(
            (
                Q(feedback__isnull=True)
                | Q(feedback__origin=Feedback.DID_NOT_COMPILE)
                | Q(feedback__origin=Feedback.COULD_NOT_LINK)
                | Q(feedback__origin=Feedback.FAILED_UNIT_TESTS)
            )
            & ~Q(feedback__of_tutor=tutor)
        )

        # we want a submission of a specific type
        if slug:
            candidates = candidates.filter(type__slug=slug)

        # we couldn't find any submission to correct
        if not candidates:
            return False

        submission = candidates[0]
        feedback = submission.feedback if hasattr(
            submission, 'feedback') else Feedback()
        feedback.origin = Feedback.MANUAL
        feedback.status = Feedback.EDITABLE
        feedback.of_tutor = tutor
        feedback.of_submission = submission
        feedback.save()
        return True
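
    # Example (hypothetical shell use): pulling work until nothing is left.
    #
    #   >>> tutor = User.objects.get(username='tutor01')  # assumed account
    #   >>> Submission.assign_tutor(tutor)
    #   True
    #   >>> Submission.assign_tutor(tutor)  # unfinished feedback blocks a new one
    #   False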


class Feedback(models.Model):
    # Fields
    text        = models.TextField()
    score       = models.PositiveIntegerField(default=0)
    created     = models.DateTimeField(auto_now_add=True)
    modified    = models.DateTimeField(auto_now=True)

    slug = models.SlugField(
        editable=False,
        unique=True,
        default=random_slug)
    of_submission = models.OneToOneField(
        Submission,
        related_name='feedback',
        blank=False, null=False)
    of_tutor = models.ForeignKey(
        User, related_name='feedback_list',)
    of_reviewer = models.ForeignKey(
        User,
        related_name='reviewed_submissions',
        blank=True, null=True)

    # what is the current status of our feedback
    (
        EDITABLE,
        OPEN,
        NEEDS_REVIEW,
        ACCEPTED,
    ) = range(4) # this order matters
    STATUS = (
        (EDITABLE,      'editable'),
        (OPEN,          'request reassignment'),
        (NEEDS_REVIEW,  'request review'),
        (ACCEPTED,      'accepted'),
    )
    status = models.IntegerField(
        choices=STATUS,
        default=EDITABLE,
    )
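
    # The ascending numeric order of STATUS (see "this order matters" above)
    # presumably allows range filters, e.g. (hypothetical):
    #
    #   >>> Feedback.objects.filter(status__lt=Feedback.ACCEPTED)  # not yet done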

    # how was this feedback created
    (
        WAS_EMPTY,
        FAILED_UNIT_TESTS,
        DID_NOT_COMPILE,
        COULD_NOT_LINK,
        MANUAL,
    ) = range(5)
    ORIGIN = (
        (WAS_EMPTY,         'was empty'),
        (FAILED_UNIT_TESTS, 'failed unit tests'),
        (DID_NOT_COMPILE,   'did not compile'),
        (COULD_NOT_LINK,    'could not link'),
        (MANUAL,            'created by a human. yak!'),
    )
    origin = models.IntegerField(
        choices=ORIGIN,
        default=MANUAL,
    )

    class Meta:
        verbose_name        = "Feedback"
        verbose_name_plural = "Feedback Set"

    def __str__(self):
        return 'Feedback for {}'.format(self.of_submission)

    def is_full_score(self):
        return self.of_submission.type.full_score == self.score

    def get_full_score(self):
        return self.of_submission.type.full_score

    @classmethod
    def get_open_feedback(cls, user):
        return cls.objects.filter(
            Q(status=Feedback.OPEN) &
            ~Q(of_tutor=user)  # you shall not request your own feedback
        )

    @classmethod
    def tutor_unfinished_feedback(cls, user):
        """Gets only the feedback that is assigned and not accepted. A tutor
        should have only one feedback assigned that is not accepted

        Arguments:
            user {User} -- the tutor who formed the request

        Returns:
            Feedback -- the feedback or none if no feedback was assigned
        """
        tutor_feedback = cls.objects.filter(
            Q(of_tutor=user), Q(status=Feedback.EDITABLE),
        )
        return tutor_feedback[0] if tutor_feedback else None

    @classmethod
    def tutor_assigned_feedback(cls, user):
        """Gets all feedback that is assigned to the tutor, regardless of
        status.

        Returns:
            QuerySet -- all feedback that has been assigned to this tutor
        """
        return cls.objects.filter(of_tutor=user)

    def finalize_feedback(self, user):
        """Marks feedback as accepted (reviewed).

        This makes it uneditable by the tutor.

        Arguments:
            user {User} -- the reviewer accepting the feedback
        """
        self.status = Feedback.ACCEPTED
        self.of_reviewer = user
        self.save()

    def unfinalize_feedback(self):
        """Reverts a previously accepted feedback.

        This makes it editable by the tutor again.
        """
        self.status = Feedback.EDITABLE  # reopen for the tutor
        self.origin = Feedback.MANUAL
        self.of_reviewer = None
        self.save()

    def reassign_to_tutor(self, user):
        """When a tutor does not want to correct a task, they can pass it
        along to another tutor, who accepts the request.

        Args:
            user: the tutor to whom the feedback should be assigned
        """
        assert self.status == Feedback.OPEN
        self.of_tutor = user
        self.status = Feedback.EDITABLE
        self.save()
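
# Example (hypothetical shell use): a typical feedback lifecycle. The accounts
# tutor, other_tutor and reviewer are assumed fixtures.
#
#   >>> Submission.assign_tutor(tutor)                # tutor pulls a submission
#   >>> fb = Feedback.tutor_unfinished_feedback(tutor)
#   >>> fb.status = Feedback.OPEN; fb.save()          # tutor gives it up
#   >>> fb.reassign_to_tutor(other_tutor)             # another tutor takes over
#   >>> fb.status = Feedback.NEEDS_REVIEW; fb.save()  # done, ready for review
#   >>> fb.finalize_feedback(reviewer)                # reviewer accepts it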