From 2b68a07e4d3581c2761a67ea348be4d3601541e1 Mon Sep 17 00:00:00 2001 From: janmax <mail-github@jmx.io> Date: Fri, 14 Jul 2017 18:35:18 +0200 Subject: [PATCH] Added docstrings for model classes describing attributes --- core/models.py | 371 ++++++++++++++++++++++++++++++++++++------------- 1 file changed, 271 insertions(+), 100 deletions(-) diff --git a/core/models.py b/core/models.py index 802b5fe3..100e6f1a 100644 --- a/core/models.py +++ b/core/models.py @@ -1,45 +1,10 @@ -########################################################################## -# Grady Model Description -# ----------------------- -# -# Currently Grady incorporates four models on top of the existing django models -# like User and Group. The fields should be self explanatory. -# -# SubmissionType -# -------------- -# -# This model mostly holds meta information about the kind of task that was -# presented to the student. It serves as a foreign key for the submissions that -# are of this type. This model is currently NOT exposed directly in a view. -# -# Student -# ------- -# -# Mostly wraps a User model and adds some data to it describing the user account -# -# Submission -# ---------- -# -# This table holds the basic information about a submission of a student -# including all kinds of processed information, like compiler output, etc. -# -# With the method assign_tutor feedback for a submission can be created and a -# tutor will be assigned to this feedback permanently (unless deleted by a -# reviewer or if it gets reassigned). There cannot be more than ONE feedback per -# Submission. -# -# Feedback -# -------- -# -# Feedback is the most complicated model. It holds -# information about origin, and status of the current feedback, as well as -# tutor annotations that may not be visible to students. Several methods control -# how feedback is passed along between tutors. -# -# For details on these methods see below. 
-# -########################################################################## +''' +Grady Model Description +----------------------- +See docstrings of the individual models for information on the setup of the +database. +''' from collections import OrderedDict from random import randrange, sample @@ -52,31 +17,65 @@ from django.db.models import (BooleanField, Case, Count, F, IntegerField, Q, Sum, When) from django.db.models.functions import Coalesce -SLUG_LENGTH = 16 +def random_slug(slug_length: int=16) -> str: + """Used for all the slug fields in the application instead of relying on + the primary keys. They are not cryptographically secure since random is + used. -def random_slug(): - return ''.join(sample(ascii_lowercase, SLUG_LENGTH)) + Returns: + str: a random string of lowercase ACSII letter + """ + return ''.join(sample(ascii_lowercase, slug_length)) -def random_matrikel_no(): - return str(2e7 + randrange(1e8)) +def random_matrikel_no() -> str: + """Use as a default value for student's matriculation number. + + Returns: + str: an eight digit number that starts with a 2 + """ + return str(2000_0000 + randrange(1000_0000)) def get_annotated_tutor_list(): + """All tutor accounts are annotate with a field that includes the number of + feedback that tutor has collaborated in. + + Returns: + TYPE: the annotated QuerySet + """ return User.objects\ - .annotate(Count('feedback_list'))\ .filter(groups__name='Tutors')\ + .annotate(Count('feedback_list'))\ .order_by('-feedback_list__count') class ExamType(models.Model): - + """A model that contains information about the module a submission can + belong to. The information is not needed and is currently, just used to + detect if students already have enough points to pass an exam. + + It is NOT + intended to use this for including different exams regarding submissions + types. 
+ + Attributes + ---------- + module_reference : CharField + a unique reference that identifies a module within the university + pass_only : BooleanField + True if no grade is given + pass_score : PositiveIntegerField + minimum score for (just) passing + total_score : PositiveIntegerField + maximum score for the exam (currently never used anywhere) + """ class Meta: verbose_name = "ExamType" verbose_name_plural = "ExamTypes" - def __str__(self): + def __str__(self) -> str: return self.module_reference module_reference = models.CharField(max_length=50, unique=True) @@ -86,7 +85,26 @@ class ExamType(models.Model): class SubmissionType(models.Model): - # Fields + """This model mostly holds meta information about the kind of task that was + presented to the student. It serves as a foreign key for the submissions + that are of this type. This model is currently NOT exposed directly in a + view. + + Attributes + ---------- + description : TextField + The task description the student had to fulfill. The content may be HTML + formatted. + full_score : PositiveIntegerField + Maximum score one can get on this task + name : CharField + The original title of the exam. This is widely used as an identifier by + the preprocessing scripts. + slug : SlugField + unique TODO: is this needed? 
+ solution : TextField + A sample solution or a correction guideline + """ name = models.CharField(max_length=50, unique=True) full_score = models.PositiveIntegerField(default=0) description = models.TextField() @@ -94,7 +112,7 @@ class SubmissionType(models.Model): slug = models.SlugField( editable=False, unique=True, default=random_slug) - def __str__(self): + def __str__(self) -> str: return self.name class Meta: @@ -105,26 +123,23 @@ class SubmissionType(models.Model): def get_annotated_feedback_count(cls): """ Annotates submission lists with counts - count both - * number of submission per submission type - * count of received feedback per submission type - * - Alternative with case - Count(Case( - When(submissions__feedback_list__origin=Feedback.MANUAL, - then=Value(1)), output_field=IntegerField()) - ) + The following fields are annotated: + * number of submissions per submission type + * count of received *accepted* feedback per submission type + * and finally the progress on each submission type as percentage + + The QuerySet that is return is ordered by name lexicographically. Returns: - annotated queryset + The annotated QuerySet as described above """ return cls.objects\ .annotate( # to display only manual feedback_count=Count( Case( When( - Q(submissions__feedback__isnull=False) & - Q(submissions__feedback__status=Feedback.ACCEPTED), + Q(submissions__feedback__isnull=False) & + Q(submissions__feedback__status=Feedback.ACCEPTED), then=V(1)), output_field=IntegerField(), ) ) @@ -132,35 +147,76 @@ class SubmissionType(models.Model): submission_count=Count('submissions') ).annotate( percentage=(F('feedback_count') * 100 / F('submission_count')) - ).all().order_by('name') + ).order_by('name') class Student(models.Model): - # Fields + """The student model includes all information of a student, that we got + from the E-Learning output, along with some useful classmethods that provide + specially annotated QuerySets. 
+ + Information like email (if given), and the username are stored in the + associated user model. + + Attributes + ---------- + exam : ForeignKey + Which module the student wants to be graded in + has_logged_in : BooleanField + Login is permitted once. If this is set the user can not log in. + matrikel_no : CharField + The matriculation number of the student + name : CharField + The students full real name + user : UserModel + The django auth user that makes a student authenticates with. + """ has_logged_in = models.BooleanField(default=False) - exam = models.ForeignKey('ExamType', related_name='students', null=True) name = models.CharField(max_length=50, default="__no_name__") matrikel_no = models.CharField( unique=True, max_length=8, default=random_matrikel_no) - user = models.OneToOneField( + user = models.OneToOneField( User, on_delete=models.CASCADE, related_name='student', limit_choices_to={'groups__name': 'Students'}, ) + exam = models.ForeignKey( + 'ExamType', + on_delete=models.SET_NULL, + related_name='students', + null=True) def score_per_submission(self): + """ TODO: get rid of it and use an annotation. + + Returns: + TYPE: Description + """ if self.submissions.all(): return OrderedDict({ - s.type : s.feedback.score if hasattr(s, 'feedback') else 0 + s.type: s.feedback.score if hasattr(s, 'feedback') else 0 for s in self.submissions.all() }) else: return OrderedDict({ - t.name : 0 for t in SubmissionType.objects.all() + t.name: 0 for t in SubmissionType.objects.all() }) @classmethod def get_overall_score_annotated_submission_list(cls): + """Can be used to quickly annotate a user with the necessary information + on the overall score of a student and if he does not need any more + correction. + + A student is done if + * module type was pass_only and student has enough points + * every submission got accepted feedback + + Returns + ------- + QuerySet + the annotated QuerySet as described above. 
+ """ return cls.objects.annotate( overall_score=Coalesce(Sum('submissions__feedback__score'), V(0)), ).annotate( @@ -172,10 +228,13 @@ class Student(models.Model): ) def disable(self): + """The student won't be able to login in anymore, but his current + session can be continued until s/he logs out. + """ self.has_logged_in = True self.save() - def __str__(self): + def __str__(self) -> str: return self.user.username class Meta: @@ -184,7 +243,21 @@ class Student(models.Model): class Test(models.Model): - + """Tests contain information that has been generated by automated tests, + and directly belongs to a submission. Often certain Feedback was already + given by information provided by these tests. + + Attributes + ---------- + annotation : TextField + All the output of the test (e.g. compiler output) + label : CharField + Indicates SUCCES or FAILURE + name : CharField + The name of the test that was performed + submission : ForeignKey + The submission the tests where generated on + """ name = models.CharField(max_length=30) label = models.CharField(max_length=50) annotation = models.TextField() @@ -199,22 +272,42 @@ class Test(models.Model): verbose_name_plural = "Tests" unique_together = (('submission', 'name'),) - def __str__(self): + def __str__(self) -> str: return f'{self.name} {self.label}' class Submission(models.Model): - + """The answer of a student to a specific question. Holds the answer and + very often serves as ForeignKey. + + With the method assign_tutor feedback for a submission can be created and a + tutor will be assigned to this feedback permanently (unless deleted by a + reviewer or if it gets reassigned). There cannot be more than ONE feedback + per Submission. + + Attributes + ---------- + seen_by_student : BooleanField + True if the student saw his accepted feedback. 
+ slug : SlugField + Slug for identification in domains + student : ForeignKey + The student who caused all of this + text : TextField + The code/text submitted by the student + type : ForeignKey + Relation to the type containing meta information + """ # Fields seen_by_student = models.BooleanField(default=False) text = models.TextField(blank=True) - pre_corrections = models.TextField(blank=True) slug = models.SlugField( editable=False, unique=True, default=random_slug) type = models.ForeignKey( SubmissionType, + on_delete=models.PROTECT, related_name='submissions') student = models.ForeignKey( Student, @@ -227,7 +320,7 @@ class Submission(models.Model): unique_together = (('type', 'student'),) ordering = ('type__name',) - def __str__(self): + def __str__(self) -> str: return "Submission of type '{}' from Student '{}'".format( self.type, self.student @@ -241,11 +334,20 @@ class Submission(models.Model): 1. the tutor already has a feedback in progress 2. there is no more feedback to give - Arguments: - tutor {User} -- the tutor that should be assigned + Parameters + ---------- + tutor : User object + The tutor that a submission should be assigned to. + slug : None, optional + If a slug for a submission is given the belonging Feedback is + assigned to the tutor. If this submission had feedback before, + the tutor that worked on it is unassigned. + + Returns + ------- + bool + Returns True only if feedback was actually assigned, otherwise False. - Returns: - True if something was assigned, false if not """ # Get a submission from the submission set @@ -283,7 +385,42 @@ class Submission(models.Model): class Feedback(models.Model): - # Fields + """ + Attributes + ---------- + created : DateTimeField + When the feedback was initially created + modified : DateTimeField + The last time this feedback was modified + of_reviewer : ForeignKey + The reviewer that accepted/corrected a feedback + of_submission : OneToOneField + The submission this feedback belongs to. 
It finally determines how many + points a student receives for his submission. + of_tutor : ForeignKey + The tutor/reviewer who last edited the feedback + ORIGIN : TYPE + TODO: document the ORIGIN choices (autodoc placeholder was left in) + origin : IntegerField + Of whom was this feedback originally created. See below for the choices + score : PositiveIntegerField + A score that has been assigned to the submission. Is final if it was + accepted. + slug : SlugField + The slug for identification in urls + STATUS : TYPE + TODO: document the STATUS choices (autodoc placeholder was left in) + status : PositiveIntegerField + The status roughly determines which state a feedback is in. A just + initiated submission is editable. Based on the status feedback is + presented to different types of users. Students may see feedback only + if it has been accepted, while reviewers have access at any time. + text : TextField + Detailed description by the tutor about what went wrong or what did not. + Every line in the feedback should correspond with a line in the + student's submission, maybe with additional comments appended. 
+ + """ text = models.TextField() score = models.PositiveIntegerField(default=0) created = models.DateTimeField(auto_now_add=True) @@ -295,15 +432,23 @@ class Feedback(models.Model): default=random_slug) of_submission = models.OneToOneField( Submission, + on_delete=models.CASCADE, related_name='feedback', unique=True, - blank=False, null=False) + blank=False, + null=False) of_tutor = models.ForeignKey( - User, related_name='feedback_list',) + User, + on_delete=models.SET_NULL, + related_name='feedback_list', + blank=True, + null=True) of_reviewer = models.ForeignKey( User, + on_delete=models.SET_NULL, related_name='reviewed_submissions', - blank=True, null=True) + blank=True, + null=True) # what is the current status of our feedback ( @@ -347,7 +492,7 @@ class Feedback(models.Model): verbose_name = "Feedback" verbose_name_plural = "Feedback Set" - def __str__(self): + def __str__(self) -> str: return 'Feedback for {}'.format(self.of_submission) def is_full_score(self): @@ -358,22 +503,40 @@ class Feedback(models.Model): @classmethod def get_open_feedback(cls, user): + """For a user, returns the feedback that is up for reassignment that + does not belong to the user. + + Parameters + ---------- + user : User object + The user for which feedback should not be returned. Often the user + that is currently searching for a task someone else does not want to + do. + + Returns + ------- + QuerySet + All feedback objects that are open for reassignment that do not + belong to the user + """ return cls.objects.filter( Q(status=Feedback.OPEN) & ~Q(of_tutor=user) # you shall not request your own feedback ) - @classmethod def tutor_unfinished_feedback(cls, user): """Gets only the feedback that is assigned and not accepted. 
A tutor should have only one feedback assigned that is not accepted. - Arguments: - user {User} -- the tutor who formed the request + Parameters + ---------- + user : User object + The tutor who formed the request - Returns: - Feedback -- the feedback or none if no feedback was assigned + Returns + ------- + The feedback, or None if no feedback was assigned """ tutor_feedback = cls.objects.filter( Q(of_tutor=user), Q(status=Feedback.EDITABLE), @@ -381,22 +544,28 @@ class Feedback(models.Model): return tutor_feedback[0] if tutor_feedback else None def tutor_assigned_feedback(cls, user): - """ Gets all feedback that is assigned to the tutor including + """Gets all feedback that is assigned to the tutor including all status cases. - Returns: - [list] -- a QuerySet of tasks that have been assigned to this tutor + Returns + ------- + a QuerySet of tasks that have been assigned to this tutor + + Parameters + ---------- + user : User object + The user for which the feedback should be returned """ tutor_feedback = cls.objects.filter(of_tutor=user) return tutor_feedback def finalize_feedback(self, user): - """ Used to mark feedback as accepted (reviewed) - - This makes it uneditable by the tutor + """Used to mark feedback as accepted (reviewed). - Arguments: - user {[type]} -- [description] + Parameters + ---------- + user : User object + The tutor/reviewer that marks some feedback as accepted """ self.status = Feedback.ACCEPTED self.of_reviewer = user @@ -413,11 +582,13 @@ class Feedback(models.Model): self.save() def reassign_to_tutor(self, user): - """ When a tutor does not want to correct some task they can pass it + """When a tutor does not want to correct some task they can pass it along to another tutor who will accept the request. - Args: - user: The user to which to feedback should be assigned to + Parameters + ---------- + user : User object + The user to which the feedback should be assigned """ assert self.status == Feedback.OPEN self.of_tutor = user -- GitLab