diff --git a/core/models.py b/core/models.py
index 6801e33b57afcd821a58b8b0ddaac94956c5a884..31880da0ddbec12922118cfc5246d57e17989253 100644
--- a/core/models.py
+++ b/core/models.py
@@ -65,9 +65,9 @@ def random_matrikel_no():
 
 def get_annotated_tutor_list():
     return User.objects\
-        .annotate(Count('corrected_submissions'))\
+        .annotate(Count('feedback_list'))\
         .filter(groups__name='Tutors')\
-        .order_by('-corrected_submissions__count')
+        .order_by('-feedback_list__count')
 
 
 class ExamType(models.Model):
@@ -257,6 +257,7 @@ class Submission(models.Model):
                 Q(feedback__isnull=True)
                 | Q(feedback__origin=Feedback.DID_NOT_COMPILE)
                 | Q(feedback__origin=Feedback.COULD_NOT_LINK)
+                | Q(feedback__origin=Feedback.FAILED_UNIT_TESTS)
             ) & ~Q(feedback__of_tutor=tutor)
         )
 
@@ -324,14 +325,14 @@ class Feedback(models.Model):
     # how was this feedback created
     (
         WAS_EMPTY,
-        PASSED_UNIT_TESTS,
+        FAILED_UNIT_TESTS,
         DID_NOT_COMPILE,
         COULD_NOT_LINK,
         MANUAL,
     ) = range(5)
     ORIGIN = (
         (WAS_EMPTY, 'was empty'),
-        (PASSED_UNIT_TESTS, 'passed unittests'),
+        (FAILED_UNIT_TESTS, 'failed unittests'),
         (DID_NOT_COMPILE, 'did not compile'),
         (COULD_NOT_LINK, 'could not link'),
         (MANUAL, 'created by a human. yak!'),
diff --git a/core/templates/core/feedback_form.html b/core/templates/core/feedback_form.html
index a493e6644b4f355e9d38e30ca9e83f5fcc6dec1c..11a17e6c0591445ba9b1b2499892a33ffe549a3f 100644
--- a/core/templates/core/feedback_form.html
+++ b/core/templates/core/feedback_form.html
@@ -43,12 +43,12 @@ RESULT: {{test.label}}
                 </a>
                 <div id="collapse5" class="collapse show" role="tabpanel">
                     <div class="card-block m-2">
-                        <div id="solution" class="editor editor-code">{{feedback.of_submission.type.possible_solution}}</div>
+                        <div id="solution" class="editor editor-code">{{feedback.of_submission.type.solution}}</div>
                     </div>
                 </div>
             </div>
 
-            {% include "core/component/feedback_card.html" with unique="1" header="Description" content=feedback.of_submission.type.task_description expanded="hide" %}
+            {% include "core/component/feedback_card.html" with unique="1" header="Description" content=feedback.of_submission.type.description expanded="hide" %}
 
             <div class="my-2">
                 <button type="button" id="collapseAllOpen" class="btn btn-secondary">Open All</button>
diff --git a/core/templates/core/r/tutor_list_card.html b/core/templates/core/r/tutor_list_card.html
index 4fac5db444705dd4a9a48af36cef6f9d9b98bb74..455927dc5cb645f7150f0bca91d4a60e9d3b6b0e 100644
--- a/core/templates/core/r/tutor_list_card.html
+++ b/core/templates/core/r/tutor_list_card.html
@@ -10,7 +10,7 @@
         <tbody>
             <tr>
                 <td>{{tutor.username}}</td>
-                <td><code>{{tutor.corrected_submissions__count}}</code></td>
+                <td><code>{{tutor.feedback_list__count}}</code></td>
             </tr>
         </tbody>
         {% endfor %}
diff --git a/core/views/user_startpages.py b/core/views/user_startpages.py
index 748c3da10c7a41a96b3fb030469c42a678abc7e6..9d1ae9803b5204044d8e9825b28a45b4dd3d78b1 100644
--- a/core/views/user_startpages.py
+++ b/core/views/user_startpages.py
@@ -42,8 +42,8 @@ class TutorStartPage(TutorDetailView):
 
 
 class StudentStartPage(StudentDetailView):
-    model           = Student
-    template_name   = 'core/s/student_startpage.html'
+    model = Student
+    template_name = 'core/s/student_startpage.html'
 
     def get_object(self):
         return self.request.user.student
diff --git a/util/importer.py b/util/importer.py
index b1598a059e4546e786c2bb4d842c01eb8ea7cebf..1a04a22fb69e395c659f93a09af77f17b837c929 100644
--- a/util/importer.py
+++ b/util/importer.py
@@ -1,9 +1,9 @@
-import collections
 import csv
 import json
 import os
 import readline
 import secrets
+import configparser
 
 from typing import Callable
 from django.contrib.auth.models import Group, User
@@ -21,12 +21,26 @@ REVIEWERS = Group.objects.get(name='Reviewers')
 
 HISTFILE = '.importer_history'
 RECORDS = '.importer'
+PASSWORDS = '.importer_passwords'
 
 YES = 'Y/n'
 NO = 'y/N'
 
 valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False}
 
+FEEDBACK_MAPPER = {
+    util.processing.EmptyTest.__name__    : Feedback.WAS_EMPTY,
+    util.processing.CompileTest.__name__  : Feedback.DID_NOT_COMPILE,
+    util.processing.LinkTest.__name__     : Feedback.COULD_NOT_LINK,
+    util.processing.UnitTestTest.__name__ : Feedback.FAILED_UNIT_TESTS,
+}
+
+TEST_ORDER = (
+    util.processing.EmptyTest.__name__,
+    util.processing.CompileTest.__name__,
+    util.processing.LinkTest.__name__,
+)
+
 
 class chdir_context(object):
     """
@@ -69,6 +83,19 @@ def i(prompt: str, default: str='', is_path: bool=False, is_file: bool=False):
     return answer
 
 
+def store_password(username, group, password):
+    storage = configparser.ConfigParser()
+    storage.read(PASSWORDS)
+
+    if not group in storage:
+        storage[group] = {}
+
+    storage[group][username] = password
+
+    with open(PASSWORDS, 'w') as passwd_file:
+        storage.write(passwd_file)
+
+
 def add_user(username: str, group: str, **kwargs):
     """
     This is a specific wrapper for the django update_or_create method of objects.
@@ -84,8 +111,10 @@ def add_user(username: str, group: str, **kwargs):
     Returns:
         TYPE: Description
     """
+    username = username.strip()
+
     user, created = User.objects.update_or_create(
-        username=username.strip(),
+        username=username,
         defaults=kwargs
     )
 
@@ -94,11 +123,14 @@ def add_user(username: str, group: str, **kwargs):
         user.set_password(password)
         user.save()
 
+        store_password(username, group.name, password)
+
     user.groups.clear()  # remove all other groups
     group.user_set.add(user)
 
     return user
 
+
 def add_student(username, email, submissions, **kwargs):
     user = add_user(username, STUDENTS, email=email)
 
@@ -109,6 +141,7 @@ def add_student(username, email, submissions, **kwargs):
 
     return student
 
+
 def add_submission(student_obj, code, tests, type):
     submission_type = SubmissionType.objects.get(name=type)
 
@@ -119,7 +152,14 @@ def add_submission(student_obj, code, tests, type):
         defaults={'text' : code}
     )
 
-    for name, test_data in tests.items():
+    auto_correct, _ = User.objects.get_or_create(
+        username='auto_correct',
+        defaults={'is_active': False}
+    )
+
+    available_tests = util.processing.Test.available_tests()
+
+    for name, test_data in ((name, tests[name]) for name in TEST_ORDER):
         test_obj, created = Test.objects.update_or_create(
             name=test_data['name'],
             submission=submission_obj,
@@ -129,19 +169,20 @@
             }
         )
 
-        if test_obj.name == EmptyTest.__name__ and test_obj.label == EmptyTest.label_failure:
-            auto_correct, _ = User.objects.update_or_create(username='auto_correct', defaults={'is_active': False})
+        if test_obj.label == available_tests[test_obj.name].label_failure\
+                and not hasattr(test_obj.submission, 'feedback'):
             Feedback.objects.update_or_create(
                 of_submission=submission_obj,
                 defaults={
                     'of_tutor' : auto_correct,
                     'score' : 0,
                     'text' : test_obj.label,
-                    'origin' : Feedback.WAS_EMPTY,
-                    'status' : Feedback.ACCEPTED,
+                    'origin' : FEEDBACK_MAPPER[test_obj.name],
+                    'status' : Feedback.ACCEPTED if test_obj.name == EmptyTest.__name__ else Feedback.EDITABLE,
                 }
             )
 
+
 def add_user_list(lst, group, **kwargs):
     for name in lst:
         add_user(name, group, **kwargs)
@@ -169,6 +210,8 @@ def call_loader(func: Callable) -> None:
         records_f.write(func.__name__)
         records_f.write('\n')
 
+    info(f'{func.__name__} is done.')
+
 
 def do_convert_xls():
@@ -243,10 +286,10 @@ def do_load_submission_types():
 
 def do_load_module_descriptions():
     print('''
-    These are descriptions of modules in an Exam. The step is purely
-    optional -- Grady works just fine without these information. If you
-    want to distinguish students within one instance or give information
-    about the grading type you should provide this info.
+    This loader imports descriptions of modules in an exam. This step is purely
+    optional -- Grady works just fine without this information. If you want to
+    distinguish students within one instance or give information about the
+    grading type you should provide this info.
 
     CSV file format:
         module_reference, total_score, pass_score, pass_only
@@ -255,7 +298,8 @@ def do_load_module_descriptions():
        B.Mat.31415, 50, 10, no
     ''')
 
-    module_description_csv = i('Where is the file?', 'modules.csv', is_file=True)
+    module_description_csv = i(
+        'Where is the file?', 'modules.csv', is_file=True)
 
     with open(module_description_csv, encoding='utf-8') as tfile:
         csv_rows = [row for row in csv.reader(tfile)]
@@ -276,6 +320,7 @@ def do_load_module_descriptions():
 
         info(f'{"Created" if created else "Updated"} ExamType {data["module_reference"]}')
 
+
 def do_preprocess_submissions():
     print('''
@@ -306,8 +351,8 @@ def do_load_submissions():
 
     file = i('Get me the file with all the submissions', 'submissions.json')
 
-    exam = None
-    if ExamType.objects.all() and i('Do you want to add module/exam information?', YES):
+    exam = {}
+    if ExamType.objects.all() and i('Do you want to add module/exam information?', NO):
         exam_query_set = ExamType.objects.all()
         print('You have the following choices:\n')
         for j, exam_type in enumerate(exam_query_set):
@@ -315,13 +360,13 @@
         print()
         exam = i('Choose wisely')
-        exam = exam_query_set[int(exam)]
+        exam = {'exam' : exam_query_set[int(exam)]}
 
     with open(file) as submission_file:
         submissions = json.JSONDecoder().decode(submission_file.read())
 
     for username, data in submissions.items():
-        student_obj = add_student(username, exam=exam, **data)
+        student_obj = add_student(username, **exam, **data)
 
         for submission_obj in data['submissions']:
             add_submission(student_obj, **submission_obj)
 
@@ -357,8 +402,6 @@ call_order = (
 
 
 def start():
-    if User.objects.filter(is_superuser=False) :
-        warn('Warning database is not clean. Aborting.')
 
     if os.path.exists(HISTFILE):
         readline.read_history_file(HISTFILE)
@@ -373,6 +416,7 @@ def start():
             print('The following importers are available:\n')
             for fid, func in enumerate(call_order):
                 print(f'\t[{fid}] {func.__name__}')
+            print('\t[q] exit')
             print()
 
             fid = i('Choose a number or hit enter to start at the beginning')
@@ -380,12 +424,15 @@
             if not fid:
                 for func in call_order:
                     call_loader(func)
+            elif fid in ('q', 'quit', 'exit'):
+                return
             elif not 0 <= int(fid) < len(call_order):
-                w('There is no loader with this number')
+                warn('There is no loader with this number')
             else:
                 call_loader(call_order[int(fid)])
 
         except (EOFError, KeyboardInterrupt) as err:
+            print()
             return
         except Exception as err:
             import traceback
diff --git a/util/processing.py b/util/processing.py
index b9f1327b4c3d36b56901797cde04c0a5fcad1332..21acf939f32d5333ef6bf9a743bc3cb5ac3ac109 100644
--- a/util/processing.py
+++ b/util/processing.py
@@ -152,13 +152,13 @@ class UnitTestTest(Test):
             ret = run_cmd("./code %s" % args, check=True)
             assert ret.stdout == stdout
         except AssertionError:
-            return False, f"Case #{i}: [ASSERT FAILED] ./program {args} WAS '{ret.stdout}' SHOULD '{stdout}'"
+            return False, f"Case #{i:>2}: [ASSERT FAILED] ./program {args} WAS '{ret.stdout.strip()}' SHOULD '{stdout.strip()}'"
         except subprocess.CalledProcessError as err:
-            return False, f"Case #{i}: [FAILED] ./program {args} WITH ERROR '{err.stderr}'"
+            return False, f"Case #{i:>2}: [FAILED] ./program {args} WITH ERROR '{err.stderr.strip()}'"
         except subprocess.TimeoutExpired:
-            return False, f"Case #{i}: [TIMEOUT] ./program {args}"
+            return False, f"Case #{i:>2}: [TIMEOUT] ./program {args}"
         else:
-            return True, f"Case #{i}: [SUCCESS] ./program {args}"
+            return True, f"Case #{i:>2}: [SUCCESS] ./program {args}"
 
     def run_test(self, submission_obj):
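
For reviewers who want to trace the new pre-grading path without reading the whole diff, here is a minimal standalone sketch of the rule `add_submission` now applies: walk the results in `TEST_ORDER`, and for the first test whose label equals its failure label, create exactly one automatic feedback whose origin comes from `FEEDBACK_MAPPER` and whose status is accepted only for empty submissions. The plain dicts, the failure-label strings, and the `auto_feedback` helper are illustrative assumptions; the real code operates on Django model instances and `util.processing.Test.available_tests()`.

```python
# Sketch of the pre-grading rule from util/importer.py, with hypothetical
# stand-ins for the Django models and test labels (not the real API).

WAS_EMPTY, FAILED_UNIT_TESTS, DID_NOT_COMPILE, COULD_NOT_LINK, MANUAL = range(5)
ACCEPTED, EDITABLE = 'accepted', 'editable'   # placeholder status values

FEEDBACK_MAPPER = {
    'EmptyTest':    WAS_EMPTY,
    'CompileTest':  DID_NOT_COMPILE,
    'LinkTest':     COULD_NOT_LINK,
    'UnitTestTest': FAILED_UNIT_TESTS,
}

TEST_ORDER = ('EmptyTest', 'CompileTest', 'LinkTest')

# assumed failure labels, mirroring each Test class's label_failure attribute
LABEL_FAILURE = {
    'EmptyTest':   'Submission is empty',
    'CompileTest': 'Solution did not compile',
    'LinkTest':    'Solution did not link',
}


def auto_feedback(tests):
    """Return (origin, status, text) for the first failed test, or None."""
    for name in TEST_ORDER:
        result = tests[name]
        if result['label'] == LABEL_FAILURE[name]:
            # empty submissions are final; everything else stays editable
            status = ACCEPTED if name == 'EmptyTest' else EDITABLE
            return FEEDBACK_MAPPER[name], status, result['label']
    return None  # all checks passed -- a tutor grades this submission


if __name__ == '__main__':
    sample = {
        'EmptyTest':   {'label': 'Submission is not empty'},
        'CompileTest': {'label': 'Solution did not compile'},
        'LinkTest':    {'label': 'Solution linked'},
    }
    print(auto_feedback(sample))
    # -> (2, 'editable', 'Solution did not compile')
```

Iterating in `TEST_ORDER` (empty before compile before link) means the most fundamental failure is the one reported, which matches the `hasattr(test_obj.submission, 'feedback')` guard in the diff that keeps the first automatic feedback from being overwritten by later tests.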