From fd86b5a9b06d001e17fc434ac107724ad60d3ee2 Mon Sep 17 00:00:00 2001
From: janmax <mail-github@jmx.io>
Date: Wed, 12 Jul 2017 16:55:06 +0200
Subject: [PATCH] Add an exam type that carries grading details. Closes #9

---
 core/models.py                               |   8 +-
 core/static/css/custom.css                   |   1 +
 core/templates/core/r/student_list.html      |   8 +-
 core/templates/core/s/single_submission.html |  29 +---
 core/views/submission.py                     |   4 +-
 util/importer.py                             | 136 ++++++++++++++-----
 6 files changed, 116 insertions(+), 70 deletions(-)

diff --git a/core/models.py b/core/models.py
index 30543fda..8f417ff8 100644
--- a/core/models.py
+++ b/core/models.py
@@ -47,7 +47,7 @@ from collections import OrderedDict
 
 from django.contrib.auth.models import User
 from django.db import models
-from django.db.models import Q, Sum, Value as V
+from django.db.models import Q, F, Sum, Value as V, When, Case, BooleanField
 from django.db.models.functions import Coalesce
 
 SLUG_LENGTH = 16
@@ -120,6 +120,12 @@ class Student(models.Model):
     def get_overall_score_annotated_submission_list(cls):
         return cls.objects.annotate(
             overall_score=Coalesce(Sum('submissions__feedback__score'), V(0)),
+        ).annotate(
+            done=Case(
+                When(exam__pass_score__lt=F('overall_score'), then=V(1)),
+                default=V(0),
+                output_field=BooleanField()
+            )
         )
 
     def overall_score(self): # TODO purge
diff --git a/core/static/css/custom.css b/core/static/css/custom.css
index a0efe890..8e2e7f1c 100644
--- a/core/static/css/custom.css
+++ b/core/static/css/custom.css
@@ -58,3 +58,4 @@ table.dataTable {
     white-space: nowrap;
     width: 1%;
 }
+
diff --git a/core/templates/core/r/student_list.html b/core/templates/core/r/student_list.html
index ebaea5e3..4d483721 100644
--- a/core/templates/core/r/student_list.html
+++ b/core/templates/core/r/student_list.html
@@ -8,15 +8,16 @@
   <h5 class="card-header">Student Overview</h5>
   <div class="card-block">
     <table id="list-id-submission_list" class="table nomargin">
-      <thead>
-        <tr>
+      <thead class="rotate">
+        <tr class="high">
           <th>Name</th>
           <th>Username</th>
           <th>Module</th>
           {% for submission_type in submission_type_list %}
-            <th>{{submission_type.name}}</th>
+            <th><font size="1">{{submission_type.name}}</font></th>
           {% endfor %}
           <th>Total score</th>
+          <th>Done</th>
         </tr>
       </thead>
       <tbody>
@@ -33,6 +34,7 @@
             {% endif %} </td>
           {% endfor %}
           <td><code>{{student.overall_score}}</code></td>
+          <td>{% if student.done %}<span class="badge badge-success">yes</span>{% else %}<span class="badge badge-danger">no</span>{% endif %}</td>
         </tr>
       {% endfor %}
       </tbody>
diff --git a/core/templates/core/s/single_submission.html b/core/templates/core/s/single_submission.html
index 0a018c7c..cc346a89 100644
--- a/core/templates/core/s/single_submission.html
+++ b/core/templates/core/s/single_submission.html
@@ -15,22 +15,9 @@
         <ul class="list-group list-group-flush">
           <li class="list-group-item"><strong class="mr-2">Submission Type: </strong> {{ submission.type }} </li>
           <li class="list-group-item"><strong class="mr-2">Student: </strong> {{ submission.student }}</li>
-          {% if feedback and is_reviewer %}
-          <li class="list-group-item">
-            <strong class="mr-2">Status: </strong> {% include "core/feedback_badge.html" %}
-            <span class="badge badge-warning ml-2">Only visible to reviewer</span>
-          </li>
-          <li class="list-group-item">
-            <strong class="mr-2">Tutor: </strong> {{ feedback.of_tutor }}
-            <span class="badge badge-warning ml-2">Only visible to reviewer</span>
-          </li>
-          {% endif %}
           <li class="list-group-item"><strong class="mr-2">Score: </strong>
             {% if feedback and feedback.status == feedback.ACCEPTED %}
             <code> {{ feedback.score }} / {{submission.type.full_score}} </code>
-            {% elif feedback and is_reviewer %}
-            <code> {{ feedback.score }} / {{submission.type.full_score}} </code>
-            <span class="badge badge-warning ml-2">Only visible to reviewer</span>
             {% else %}
             <span class="badge badge-danger">No Feedback</span>
             {% endif %}
@@ -38,18 +25,7 @@
         </ul>
       </div>
       <div class="card-footer">
-        {% if is_reviewer %}
-        <a href="{% url 'create_feedback_for_submission' submission.slug %}" class="btn btn-success">
-          {% if feedback %}
-          Edit Feedback
-          {% else %}
-          Create Feedback
-          {% endif %}
-        </a>
-        <a href="{% url 'submission_list' %}" class="btn btn-outline-success">Back</a>
-        {% else %}
         <a href="{% url 'start' %}" class="btn btn-success">Back</a>
-        {% endif %}
       </div>
     </div>
   </div>
@@ -65,14 +41,11 @@
 
 
   {% if feedback %}
-  {% if feedback.status == feedback.ACCEPTED or is_reviewer %}
+  {% if feedback.status == feedback.ACCEPTED %}
   <div class="col-4 my-4">
     <div class="card">
       <div class="card-block">
         <div class="card-header">Our feedback
-          {% if is_reviewer %}
-          <span class="badge badge-warning ml-2">Only visible to reviewer</span>
-          {% endif %}
         </div>
         <div class="editor-code" id="textarea_feedback">{{ feedback.text }}</div>
       </div>
diff --git a/core/views/submission.py b/core/views/submission.py
index c0d3c6bb..fc6a14bb 100644
--- a/core/views/submission.py
+++ b/core/views/submission.py
@@ -61,9 +61,7 @@ class StudentListView(ListView):
         return super().dispatch(*args, **kwargs)
 
     def get_queryset(self):
-        ret = self.model.get_overall_score_annotated_submission_list()
-        print(ret)
-        return ret
+        return self.model.get_overall_score_annotated_submission_list()
 
     def get_context_data(self, **kwargs):
         context = super().get_context_data(**kwargs)
diff --git a/util/importer.py b/util/importer.py
index eb1c7be3..00d29622 100644
--- a/util/importer.py
+++ b/util/importer.py
@@ -3,7 +3,6 @@ import csv
 import os
 import readline
 import secrets
-import sys
 import json
 from typing import Callable
 
@@ -11,7 +10,7 @@ from django.contrib.auth.models import Group, User
 
 import util.convert
 import util.processing
-from core.models import Feedback, Student, Submission, SubmissionType, Test
+from core.models import Feedback, Student, Submission, SubmissionType, Test, ExamType
 from util.messages import *
 from util.processing import EmptyTest
 
@@ -99,12 +98,12 @@ def add_user(username: str, group: str, **kwargs):
 
     return user
 
-def add_student(username, name, matrikel_no, email, **kwargs):
+def add_student(username, email, submissions, **kwargs):
 
     user        = add_user(username, STUDENTS, email=email)
     student, _  = Student.objects.update_or_create(
-        name=name,
-        defaults={'matrikel_no' : matrikel_no, 'user' : user}
+        user=user,
+        defaults={'user' : user, **kwargs}
     )
 
     return student
@@ -171,7 +170,6 @@ def call_loader(func: Callable) -> None:
 
 
 def do_convert_xls():
-    info('[Executing]', sys._getframe().f_code.co_name)
 
     ans = i('''Do you want to convert the ILIAS .xls output to .json?''', YES)
     if not ans:
@@ -185,22 +183,39 @@ def do_convert_xls():
 
 
 def do_load_submission_types():
-    info('[Executing] ', sys._getframe().f_code.co_name)
 
-    print('''For the following import you need three files:
-
-        1) A .csv file where the columns are: id, name, score
-        2) A path to a directory where I can find sample solutions named
-            <id>-lsg.c
-        3) A path to a directory where I can find HTML files with an accurate
-            description of the task. File name pattern has to be: <id>.html
+    print(
+    '''For the following import you need three files:
+
+    1) A .csv file where the columns are: id, name, score
+    2) A path to a directory where I can find sample solutions named
+        <id>-lsg.c
+    3) A path to a directory where I can find HTML files with an accurate
+        description of the task. File name pattern has to be: <id>.html
+
+    Example:
+        $ cat submission_types.csv
+        a01, Alpha Team, 10
+        a02, Beta Distribution, 10
+        a03, Gamma Ray, 20
+
+        $ tree -L 2
+        .
+        ├── code-lsg
+        │   ├── a01-lsg.c
+        │   ├── a02-lsg.c
+        │   └── a03-lsg.c
+        └── html
+            ├── a01.html
+            ├── a02.html
+            └── a03.html
     ''')
 
     path = i('Where are your files located?', '.', is_path=True)
 
     with chdir_context(path):
         submission_types_csv    = i('CSV file',         'submission_types.csv')
-        lsg_dir                 = i('solution dir',     'code/code-lsg')
+        lsg_dir                 = i('solution dir',     'code-lsg')
         desc_dir                = i('descriptions dir', 'html')
 
         with open(submission_types_csv, encoding='utf-8') as tfile:
@@ -224,8 +239,43 @@ def do_load_submission_types():
             info(f'{"Created" if created else "Updated"} {name}')
 
 
+def do_load_module_descriptions():
+
+    print('''
+    These are descriptions of modules in an Exam. The step is purely
+    optional -- Grady works just fine without these information. If you
+    want to distinguish students within one instance or give information
+    about the grading type you should provide this info.
+
+    CSV file format: module_reference, total_score, pass_score, pass_only
+
+    Example:
+        B.Inf.1801,  90, 45, yes
+        B.Mat.31415, 50, 10, no
+    ''')
+
+    module_description_csv = i('Where is the file?', 'modules.csv', is_file=True)
+
+    with open(module_description_csv, encoding='utf-8') as tfile:
+        csv_rows = [row for row in csv.reader(tfile)]
+
+    for row in csv_rows:
+        data = {
+            field : kind(data) for field, kind, data in zip(
+                ('module_reference', 'total_score', 'pass_score', 'pass_only'),
+                (str, int, int, lambda x: x == 'yes'),
+                (col.strip() for col in row)
+            )
+        }
+
+        _, created = ExamType.objects.update_or_create(
+            module_reference=data['module_reference'],
+            defaults=data,
+        )
+
+        info(f'{"Created" if created else "Updated"} ExamType {data["module_reference"]}')
+
 def do_preprocess_submissions():
-    info('[Executing] ', sys._getframe().f_code.co_name)
 
     print('''
     Preprocessing might take some time depending on the amount of data
@@ -252,21 +302,32 @@ def do_preprocess_submissions():
 
 
 def do_load_submissions():
-    info('[Executing] ', sys._getframe().f_code.co_name)
 
     file = i('Get me the file with all the submissions', 'submissions.json')
+
+    exam = None
+    if ExamType.objects.all() and i('Do you want to add module/exam information?', YES):
+        exam_query_set = ExamType.objects.all()
+        print('You have the following choices:\n')
+        for j, exam_type in enumerate(exam_query_set):
+            print(f'\t[{j}] {exam_type.module_reference}')
+        print()
+
+        exam = i('Choose wisely')
+        exam = exam_query_set[int(exam)]
+
     with open(file) as submission_file:
         submissions = json.JSONDecoder().decode(submission_file.read())
 
     for username, data in submissions.items():
-        student_obj = add_student(username, **data)
+        student_obj = add_student(username, exam=exam, **data)
 
         for submission_obj in data['submissions']:
             add_submission(student_obj, **submission_obj)
 
 
 def do_load_tutors():
-    info('[Executing] ', sys._getframe().f_code.co_name)
+
     print('Please import tutor users by providing one name per line')
     tutors    = i('List of tutors', 'tutors', is_file=True)
 
@@ -275,7 +336,7 @@ def do_load_tutors():
 
 
 def do_load_reviewer():
-    info('[Executing] ', sys._getframe().f_code.co_name)
+
     print('Please import reviewer users by providing one name per line')
     reviewers = i('List of reviewers', 'reviewers', is_file=True)
 
@@ -283,14 +344,15 @@ def do_load_reviewer():
         add_user_list(reviewers_f, REVIEWERS, is_staff=True)
 
 
-call_order = collections.OrderedDict({
-    0 : do_convert_xls,
-    1 : do_load_submission_types,
-    2 : do_preprocess_submissions,
-    3 : do_load_submissions,
-    4 : do_load_tutors,
-    5 : do_load_reviewer
-})
+call_order = (
+    do_convert_xls,
+    do_load_submission_types,
+    do_load_module_descriptions,
+    do_preprocess_submissions,
+    do_load_submissions,
+    do_load_tutors,
+    do_load_reviewer
+)
 
 
 def start():
@@ -308,16 +370,20 @@ def start():
 
     try:
         print('The following importers are available:\n')
-        for fid, func in call_order.items():
+        for fid, func in enumerate(call_order):
             print(f'\t[{fid}] {func.__name__}')
-
         print()
-        fid = i('Press enter for all in given order or choose a number')
-        if fid:
-            call_loader(call_order[int(fid)])
-        else:
-            for func in call_order.values():
+
+        fid = i('Choose a number or hit enter to start at the beginning')
+
+        if not fid:
+            for func in call_order:
                 call_loader(func)
+        elif not 0 <= int(fid) < len(call_order):
+            w('There is no loader with this number')
+        else:
+            call_loader(call_order[int(fid)])
+
     except (EOFError, KeyboardInterrupt) as err:
         return
     except Exception as err:
-- 
GitLab