From bef0a059ce510f4b2dec1b24916448c607b37a1e Mon Sep 17 00:00:00 2001
From: janmax <j.michal@stud.uni-goettingen.de>
Date: Wed, 10 Jan 2018 20:00:45 +0100
Subject: [PATCH] Reconfigure the importer script

* Tests can now be executed from the importer (see the usage sketch below)
* All jobs run cleanly and produce correct results
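
Rough usage sketch of the new util.processing.process() entry point, as the
importer's do_preprocess_submissions step now drives it. The file names below
are only the defaults suggested by the importer prompts; adjust them to the
actual data layout.

    import json
    import os

    import util.processing

    # run from the directory that holds the spec file and the data folders
    cwd = os.getcwd()  # process() chdirs into a temp dir and removes it again
    processed = util.processing.process(
        'descfile.txt',        # usage/spec file for the sample solution
        'bin',                 # sample solution binaries
        'objects',             # sample solution object files
        'binf1601-anon.json',  # student submissions
        'code-testing',        # header files
        'UnitTestTest',        # name of the highest test to run
    )
    os.chdir(cwd)

    with open('binf1601-anon.processed.json', 'w') as out:
        json.dump(processed, out, sort_keys=True, indent=4)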
---
 .gitlab-ci.yml                  |   2 +-
 core/migrations/0001_initial.py |  10 +-
 util/convert.py                 |  12 ++-
 util/factories.py               |  16 +--
 util/importer.py                | 182 ++++++++++++++++++++------------
 util/processing.py              |  90 ++++++++++------
 util/testcases.py               |  26 +++--
 7 files changed, 207 insertions(+), 131 deletions(-)

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 5e528bfc..76b05a06 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -54,7 +54,7 @@ test_flake8:
   <<: *test_definition_virtualenv
   stage: test
   script:
-    - flake8 --exclude=migrations --ignore=N802 core util/factories.py
+    - flake8 --exclude=migrations core util
 
 # ----------------------------- Frontend subsection -------------------------- #
 .test_template_frontend: &test_definition_frontend
diff --git a/core/migrations/0001_initial.py b/core/migrations/0001_initial.py
index 014e8786..8eb1e6be 100644
--- a/core/migrations/0001_initial.py
+++ b/core/migrations/0001_initial.py
@@ -1,13 +1,15 @@
 # Generated by Django 2.0.1 on 2018-01-10 10:46
 
-import core.models
-from django.conf import settings
+import uuid
+
 import django.contrib.auth.models
 import django.contrib.auth.validators
-from django.db import migrations, models
 import django.db.models.deletion
 import django.utils.timezone
-import uuid
+from django.conf import settings
+from django.db import migrations, models
+
+import core.models
 
 
 class Migration(migrations.Migration):
diff --git a/util/convert.py b/util/convert.py
index 44995cbb..dcfc3e79 100755
--- a/util/convert.py
+++ b/util/convert.py
@@ -3,7 +3,7 @@
 
 The json output will look like this:
 {
-    "max.mustermann": { <<--- OR all uppercase letter of the name + username/matrikel_no
+    "max.mustermann": { <<--- OR all uppercase letter of the name + username/matrikel_no  # noqa: E501
         "matrikel_no": "12345678",
         "name": "Mustermann, Max",
         "task_list": {
@@ -58,8 +58,8 @@ parser.add_argument(
 # yes, I know it is possible to name match groups via (?P<name>) but
 # I like this solution better since it gets the job done nicely
 user_head = namedtuple('user_head', 'kohorte, name')
-user_head_re = re.compile(
-    r'^Ergebnisse von Testdurchlauf (?P<kohorte>\d+) für (?P<name>[\w\s\.,-]+)$')
+user_head_re = re.compile(r'^Ergebnisse von Testdurchlauf '
+                          r'(?P<kohorte>\d+) für (?P<name>[\w\s\.,-]+)$')
 
 # one task has a title and id and hpfly code
 task_head_re = re.compile(r'^Quellcode Frage(?P<title>.*) \d{8}$')
@@ -84,7 +84,8 @@ def converter(infile, usernames=None, number_of_tasks=0,):
             if any(map(lambda c: c.ctype, row)):
                 yield ''.join(c.value for c in row)
 
-    # meta sheet contains ilias evaluation names usernames etc - data contains code
+    # the meta sheet contains ilias evaluation names, usernames etc.;
+    # the data sheets contain the submitted code
     meta, *data = open_workbook(infile, open(os.devnull, 'w')).sheets()
 
     # nice!
@@ -122,7 +123,8 @@ def converter(infile, usernames=None, number_of_tasks=0,):
     usernames = {user.name: get_username(user) for (user, *_) in root}
 
     # form list to json_like via comprehension
-    # the format {userinitials + matrikel_no : {name:, matrikel_no:, tasklist: {id:, ..., id:}}}
+    # the format {userinitials + matrikel_no : {name:, matrikel_no:, tasklist:
+    # {id:, ..., id:}}}
     return {
         usernames[user.name]: {
             'name': user.name,
diff --git a/util/factories.py b/util/factories.py
index d8e7c77a..5582b6dd 100644
--- a/util/factories.py
+++ b/util/factories.py
@@ -3,9 +3,8 @@ import secrets
 import string
 
 from core import models
-from core.models import UserAccount as User
 from core.models import (ExamType, Feedback, StudentInfo, Submission,
-                         SubmissionType)
+                         SubmissionType, UserAccount)
 
 STUDENTS = 'students'
 TUTORS = 'tutors'
@@ -54,7 +53,7 @@ class GradyUserFactory:
         }[role]
 
     def _make_base_user(self, username, role, password=None,
-                        store_pw=False, **kwargs):
+                        store_pw=False, name='', **kwargs):
         """ This is a specific wrapper for the django update_or_create method of
         objects.
             * If now username is passed, a generic one will be generated
@@ -71,8 +70,9 @@ class GradyUserFactory:
 
         username = username.strip()
 
-        user, created = User.objects.update_or_create(
+        user, created = UserAccount.objects.update_or_create(
             username=username,
+            fullname=name,
             role=role,
             defaults=kwargs)
 
@@ -90,7 +90,7 @@ class GradyUserFactory:
         return user
 
     def make_student(self, username=None, matrikel_no=None,
-                     exam=None, **kwargs):
+                     exam=None, submissions=None, **kwargs):
         """ Creates a student. Defaults can be passed via kwargs like in
         relation managers objects.update method. """
         user = self._make_base_user(username, 'Student', **kwargs)
@@ -168,9 +168,9 @@ def make_submissions(submissions=[], **kwargs):
     for submission in submissions:
         submission_type, _ = SubmissionType.objects.get_or_create(
             name=submission.get('type', 'Auto generated type'))
-        student, _ = StudentInfo.objects.get_or_create(user=User.objects.get(
-            username=submission.get('user', 'default_user')
-        ))
+        student, _ = StudentInfo.objects.get_or_create(
+            user=UserAccount.objects.get(
+                username=submission.get('user', 'default_user')))
         submission_object, _ = Submission.objects.get_or_create(
             type=submission_type, student=student, defaults={
                 'seen_by_student': submission.get('seen_by_student', False),
diff --git a/util/importer.py b/util/importer.py
index c445545c..f2e0b0e1 100644
--- a/util/importer.py
+++ b/util/importer.py
@@ -4,14 +4,23 @@ import os
 import readline
 from typing import Callable
 
+from django.db import transaction
+
 import util.convert
 import util.processing
 from core.models import UserAccount as User
-from core.models import (ExamType, Feedback, StudentInfo, Submission,
-                         SubmissionType, Test)
-from util.factories import REVIEWERS, STUDENTS, TUTORS, GradyUserFactory
+from core.models import ExamType, Feedback, Submission, SubmissionType, Test
+from util.factories import GradyUserFactory
 from util.messages import info, warn
-from util.processing import EmptyTest
+
+WELCOME = r'''
+   ______               __         ____                           __
+  / ____/________ _____/ /_  __   /  _/___ ___  ____  ____  _____/ /____  _____
+ / / __/ ___/ __ `/ __  / / / /   / // __ `__ \/ __ \/ __ \/ ___/ __/ _ \/ ___/
+/ /_/ / /  / /_/ / /_/ / /_/ /  _/ // / / / / / /_/ / /_/ / /  / /_/  __/ /
+\____/_/   \__,_/\__,_/\__, /  /___/_/ /_/ /_/ .___/\____/_/   \__/\___/_/
+                      /____/                /_/
+'''
 
 HISTFILE = '.importer_history'
 RECORDS = '.importer'
@@ -38,6 +47,8 @@ TEST_ORDER = (
 
 FEEDBACK_MAPPER = dict(zip(TEST_ORDER, ORIGIN_ORDER))
 
+user_factory = GradyUserFactory()
+
 
 class chdir_context(object):
     """
@@ -53,8 +64,8 @@ class chdir_context(object):
         os.chdir(self.new_dir)
 
     def __exit__(self, *args):
-        info(f'Returning to {self.new_dir}')
         os.chdir(self.old_dir)
+        info(f'Returned to {self.old_dir}')
 
 
 def i(prompt: str, default: str='', is_path: bool=False, is_file: bool=False):
@@ -66,49 +77,37 @@ def i(prompt: str, default: str='', is_path: bool=False, is_file: bool=False):
     else:
         answer = input(f'[Q] {prompt}: ')
 
-    if (is_path or is_file) and not os.path.exists(answer) or is_file and not os.path.isfile(answer):
-        warn(f'The {"path" if is_path else "file"} does not exist. Please try again.')
+    if ((is_path or is_file) and not os.path.exists(answer)) \
+            or (is_file and not os.path.isfile(answer)):
+        path_or_type = "path" if is_path else "file"
+        warn(f'The {path_or_type} does not exist. Please try again.')
         return i(prompt, default, is_path, is_file)
 
     return answer
 
 
-def add_user(username, group, **kwargs):
-    user = GradyUserFactory()._make_base_user(
-        username, group, store_pw=True, **kwargs
-    )
-
-    return user
-
-
-def add_student(username, email, submissions, **kwargs):
-
-    user = add_user(username, STUDENTS, email=email)
-    student, _ = StudentInfo.objects.update_or_create(
-        user=user,
-        defaults={'user': user, **kwargs}
-    )
-
-    return student
-
-
-def add_submission(student_obj, code, tests, type):
+def add_feedback_if_test_recommends_it(test_obj):
+    available_tests = util.processing.Test.available_tests()
 
-    submission_type = SubmissionType.objects.get(name=type)
+    if test_obj.label == available_tests[test_obj.name].label_failure \
+            and not hasattr(test_obj.submission, 'feedback') \
+            and not test_obj.name == util.processing.UnitTestTest.__name__:
+        return Feedback.objects.update_or_create(
+            of_submission=test_obj.submission,
+            defaults={
+                'score': 0,
+                'origin': FEEDBACK_MAPPER[test_obj.name],
+            }
+        )
 
-    submission_obj, _ = Submission.objects.update_or_create(
-        type=submission_type,
-        student=student_obj,
-        defaults={'text': code}
-    )
 
+def add_tests(submission_obj, tests):
     auto_correct, _ = User.objects.get_or_create(
         username='auto_correct',
         defaults={'is_active': False}
     )
 
-    available_tests = util.processing.Test.available_tests()
-
     for name, test_data in ((name, tests[name]) for name in TEST_ORDER):
         test_obj, created = Test.objects.update_or_create(
             name=test_data['name'],
@@ -118,27 +117,24 @@ def add_submission(student_obj, code, tests, type):
                 'annotation': test_data['annotation'],
             }
         )
+        add_feedback_if_test_recommends_it(test_obj)
 
-        if test_obj.label == available_tests[test_obj.name].label_failure\
-                and not hasattr(test_obj.submission, 'feedback')\
-                and not test_obj.name == util.processing.UnitTestTest.__name__:
-            Feedback.objects.update_or_create(
-                of_submission=submission_obj,
-                defaults={
-                    'of_tutor': auto_correct,
-                    'score': 0,
-                    'text': test_obj.label,
-                    'origin': FEEDBACK_MAPPER[test_obj.name],
-                    'status': Feedback.ACCEPTED if test_obj.name == EmptyTest.__name__ else Feedback.EDITABLE,
-                }
-            )
 
+def add_submission(student_obj, code, tests, type):
 
-def add_user_list(lst, group, **kwargs):
-    for name in lst:
-        add_user(name, group, **kwargs)
+    submission_type = SubmissionType.objects.get(name=type)
 
+    submission_obj, _ = Submission.objects.update_or_create(
+        type=submission_type,
+        student=student_obj,
+        defaults={'text': code}
+    )
 
+    if tests:
+        add_tests(submission_obj, tests)
+
+
+@transaction.atomic
 def call_loader(func: Callable) -> None:
     """ This function handles if a function will be executed at all. Currently
     it just checks in the RECORDS file for the name of the function. If it is
@@ -151,9 +147,10 @@ def call_loader(func: Callable) -> None:
         with open(RECORDS, 'r') as records_f:
             done = [line.strip() for line in records_f]
 
-        if func.__name__ in done and not \
-                i(f'{func.__name__} has already been processed once. Proceed anyway?', NO):
-            return
+        if func.__name__ in done:
+            warn(f'{func.__name__} has already been processed once.')
+            if not i('Proceed anyway?', NO):
+                return
 
     func()  # This executes the specified loader
 
@@ -219,8 +216,10 @@ def do_load_submission_types():
         for row in csv_rows:
             tid, name, score = (col.strip() for col in row)
             with \
-                    open(os.path.join(lsg_dir, tid + '-lsg.c'), encoding='utf-8') as lsg,\
-                    open(os.path.join(desc_dir, tid + '.html'), encoding='utf-8') as desc:
+                open(os.path.join(lsg_dir, tid + '-lsg.c'),
+                     encoding='utf-8') as lsg, \
+                open(os.path.join(desc_dir, tid + '.html'),
+                     encoding='utf-8') as desc:
                 data = {
                     'name': name,
                     'description': desc.read(),
@@ -269,7 +268,8 @@ def do_load_module_descriptions():
             defaults=data,
         )
 
-        info(f'{"Created" if created else "Updated"} ExamType {data["module_reference"]}')
+        modification = "Created" if created else "Updated"
+        info(f'{modification} ExamType {data["module_reference"]}')
 
 
 def do_preprocess_submissions():
@@ -290,20 +290,56 @@ def do_preprocess_submissions():
         print(f'\t[{j}] {test}')
     print()
 
-    answer = i('Which tests do you want to run?')
+    test_index = i('Which tests do you want to run?')
 
-    if not answer or answer == 'q':
+    if not test_index or test_index == 'q':
         return
 
-    raise NotImplementedError
+    test_to_run = test_enum[int(test_index)]
+    location = i('Where do you keep the specifications for the tests?',
+                 'anon-export', is_path=True)
+
+    with chdir_context(location):
+        descfile = i(
+            'Please provide the usage file for the sample solution',
+            'descfile.txt', is_file=True)
+        binaries = i(
+            'Please provide executable binaries of solution', 'bin',
+            is_path=True)
+        objects = i(
+            'Please provide object files of solution', 'objects',
+            is_path=True)
+        submissions = i(
+            'Please provide the student submissions', 'binf1601-anon.json',
+            is_file=True)
+        headers = i(
+            'Please provide header files if any', 'code-testing',
+            is_path=True)
+
+        info('Looks good. The tests might take some time.')
+        processed_submissions = util.processing.process(descfile,
+                                                        binaries,
+                                                        objects,
+                                                        submissions,
+                                                        headers,
+                                                        test_to_run)
+    output_f = i('All done. Where should I put the results?',
+                 f'{submissions.rsplit(".", 1)[0]}.processed.json')
+
+    with open(output_f, 'w+') as outfile:
+        json.dump(processed_submissions, outfile,
+                  sort_keys=True, indent=4)
+    info('Wrote processed data to %s' % os.path.join(os.curdir, output_f))
 
 
 def do_load_submissions():
 
-    file = i('Get me the file with all the submissions', 'submissions.json')
+    file = i('Get me the file with all the submissions',
+             'submissions.json', is_file=True)
 
     exam = {}
-    if ExamType.objects.all() and i('Do you want to add module/exam information?', NO):
+    if ExamType.objects.all() and \
+            i('Do you want to add module/exam information?', NO):
         exam_query_set = ExamType.objects.all()
         print('You have the following choices:\n')
         for j, exam_type in enumerate(exam_query_set):
@@ -317,7 +353,9 @@ def do_load_submissions():
         submissions = json.JSONDecoder().decode(submission_file.read())
 
     for username, data in submissions.items():
-        student_obj = make_student(username, **exam, **data)
+        student_obj = user_factory.make_student(username,
+                                                **exam,
+                                                **data).student
 
         for submission_obj in data['submissions']:
             add_submission(student_obj, **submission_obj)
@@ -329,7 +367,8 @@ def do_load_tutors():
     tutors = i('List of tutors', 'tutors', is_file=True)
 
     with open(tutors) as tutors_f:
-        add_user_list(tutors_f, TUTORS)
+        for tutor in tutors_f:
+            user_factory.make_tutor(tutor.strip(), store_pw=True)
 
 
 def do_load_reviewer():
@@ -338,7 +377,10 @@ def do_load_reviewer():
     reviewers = i('List of reviewers', 'reviewers', is_file=True)
 
     with open(reviewers) as reviewers_f:
-        add_user_list(reviewers_f, REVIEWERS, is_staff=True)
+        for reviewer in reviewers_f:
+            user_factory.make_reviewer(reviewer.strip(),
+                                       is_staff=True,
+                                       store_pw=True)
 
 
 call_order = (
@@ -357,14 +399,16 @@ def start():
     if os.path.exists(HISTFILE):
         readline.read_history_file(HISTFILE)
 
-    print('''Welcome to the Grady importer!
+    print(WELCOME + '''
+
+    Welcome to the Grady import script!
 
     This script aims at making the setup of the database as easy as possible.
-    It at the same time serves as a documentation on how data is imported in
+    At the same time it serves as documentation on how data is imported into
     Grady. Let\'s dive right in.\n''')
 
     try:
-        print('The following importers are available:\n')
+        print('The following sub-importers are available:\n')
         for fid, func in enumerate(call_order):
             print(f'\t[{fid}] {func.__name__}')
         print('\t[q] exit')
@@ -385,6 +429,8 @@ def start():
     except (EOFError, KeyboardInterrupt) as err:
         print()
         return
+    except FileNotFoundError:
+        raise
     except Exception as err:
         import traceback
         traceback.print_exc()
diff --git a/util/processing.py b/util/processing.py
index 26671595..58b0420f 100644
--- a/util/processing.py
+++ b/util/processing.py
@@ -7,17 +7,13 @@ import shutil
 import subprocess
 import tempfile
 
+from tqdm import tqdm
+
 try:
     import testcases
 except ModuleNotFoundError:
     from util import testcases
 
-DESCFILE = '../data/descfile.txt'
-BINARIES = '../data/klausur_zweittermin/bin'
-OBJECTS = '../data/klausur_zweittermin/objects'
-SUBMISSIONS = '../data/binf1801_pre.json'
-HEADER = '../data/klausur_zweittermin/code-testing'
-
 
 def run_cmd(cmd, stdin=None, check=False, timeout=1):
     return subprocess.run(
@@ -55,7 +51,6 @@ class Test(metaclass=abc.ABCMeta):
         return super().__new__(cls)
 
     def __init__(self, submission_obj, **kwargs):
-
         if not self.dependencies_satisfied(submission_obj):
             self.result = False
             self.annotation = "TEST DEPENDENCY NOT MET"
@@ -65,8 +60,7 @@ class Test(metaclass=abc.ABCMeta):
             self.deserialize(submission_obj['tests'][str(self)])
 
         else:
-            self.result, self.annotation = self.run_test(
-                submission_obj, **kwargs)
+            self.result, self.annotation = self.run_test(submission_obj)
             self.serialize(submission_obj)
 
     def __bool__(self):
@@ -119,8 +113,9 @@ class CompileTest(Test):
 
     def run_test(self, submission_obj):
 
-        ret = run_cmd("gcc-7 -std=c11 -Wall -c -xc -Icode-testing -o code.o -",
-                      submission_obj['code'])
+        ret = run_cmd(
+            "gcc-7 -std=c11 -Wall -c -xc -Icode-testing -o code.o -",
+            submission_obj['code'])
         return not ret.returncode, ret.stderr
 
 
@@ -135,7 +130,8 @@ class LinkTest(Test):
         t = submission_obj['type']
         m = re.search(r'(a0\d)', t)
 
-        ret = run_cmd(f"gcc-7 -o code objects/{m.group(0)}-testing.o code.o")
+        ret = run_cmd(
+            f"gcc-7 -o code objects/{m.group(0)}-testing.o code.o")
         return not ret.returncode, ret.stderr
 
 
@@ -152,44 +148,76 @@ class UnitTestTest(Test):
             ret = run_cmd("./code %s" % args, check=True, timeout=0.1)
             assert ret.stdout == stdout
         except AssertionError:
-            return False, f"Case #{i:>2}: [ASSERT FAILED] ./program {args} WAS '{ret.stdout.strip()}' SHOULD '{stdout.strip()}'"
+            return False, "Case #{}: [ASSERT FAIL] ./prog {:>2} WAS '{}' SHOULD '{}'".format(  # noqa: E501
+                i, args, ret.stdout.strip(), stdout.strip())
         except subprocess.CalledProcessError as err:
-            return False, f"Case #{i:>2}: [FAILED] ./program {args} WITH ERROR '{err.stderr.strip()}'"
+            return False, "Case #{:>2}: [FAILED] ./prog {} ERROR '{}'".format(
+                i, args, err.stderr.strip())
         except subprocess.TimeoutExpired:
-            return False, f"Case #{i:>2}: [TIMEOUT] ./program {args}"
+            return False, "Case #{:>2}: [TIMEOUT] ./prog {}".format(i, args)
         else:
-            return True,  f"Case #{i:>2}: [SUCCESS] ./program {args}"
+            return True, "Case #{:>2}: [SUCCESS] ./prog {}".format(i, args)
 
     def run_test(self, submission_obj):
 
-        task = testcases_dict[submission_obj['type']]
+        task = self.testcases_dict[submission_obj['type']]
         results, messages = zip(*list(self.testcase(i, case, result)
-                                      for i, (case, result) in enumerate(zip(task['cases'], task['results']))))
+                                      for i, (case, result) in enumerate(
+                                          zip(task['cases'],
+                                              task['results']))))
 
         return all(results), '\n'.join(messages)
 
 
-def processing(highest_test):
+def process(descfile, binaries, objects, submissions, header, highest_test):
+    # accept either a Test subclass or its name (as chosen in the importer)
+    if isinstance(highest_test, str):
+        highest_test = Test.available_tests()[highest_test]
+    highest_test.testcases_dict = testcases.evaluated_testcases(
+        descfile, binaries)
 
-    with open(SUBMISSIONS) as submission_file:
-        submissions = json.JSONDecoder().decode(submission_file.read())
+    with open(submissions) as submission_file:
+        submissions_json = json.JSONDecoder().decode(
+            submission_file.read())
 
     # Get something disposable
     path = tempfile.mkdtemp()
-    run_cmd(f'cp -r {OBJECTS}  {path}')
-    run_cmd(f'cp -r {BINARIES} {path}')
-    run_cmd(f'cp -r {HEADER} {path}')
+    run_cmd(f'cp -r {objects}  {path}')
+    run_cmd(f'cp -r {binaries} {path}')
+    run_cmd(f'cp -r {header} {path}')
     os.chdir(path)
 
-    for username, data in submissions.items():
-        for submission_obj in data['submissions']:
-            highest_test(submission_obj)
-            run_cmd('rm code*')
+    def iterate_submissions():
+        yield from (obj
+                    for _, data in tqdm(submissions_json.items())
+                    for obj in data['submissions'])
+
+    for submission_obj in tqdm(iterate_submissions()):
+        highest_test(submission_obj)
+        run_cmd('rm code*')
+    print()  # line after progress bar
 
     shutil.rmtree(path)
-    return submissions
+    return submissions_json
+
+
+def parseme():
+    import argparse
+    parser = argparse.ArgumentParser()
+    parser.add_argument('descfile')
+    parser.add_argument('binaries')
+    parser.add_argument('objects')
+    parser.add_argument('submissions')
+    parser.add_argument('header')
+    return parser.parse_args()
 
 
 if __name__ == '__main__':
-    testcases_dict = testcases.evaluated_testcases(DESCFILE)
-    print(json.dumps(processing(UnitTestTest), sort_keys=True, indent=4))
+    args = parseme()
+    print(json.dumps(process(args.descfile,
+                             args.binaries,
+                             args.objects,
+                             args.submissions,
+                             args.header, UnitTestTest),
+                     sort_keys=True,
+                     indent=4))
diff --git a/util/testcases.py b/util/testcases.py
index a95e657d..99729af4 100644
--- a/util/testcases.py
+++ b/util/testcases.py
@@ -1,4 +1,3 @@
-import json
 import os
 import random
 import re
@@ -12,8 +11,8 @@ except ModuleNotFoundError:
 types = ('integer', 'unsigned_integer', 'character', 'string')
 list_sep = '...'
 
-re_task = re.compile(r'^-- (?P<title>.*)\n(USAGE: (?P<cmd>[\./\w]+) (?P<syntax>.*)|NO EXECUTABLE)', re.MULTILINE)
-re_args = re.compile(rf"<({'|'.join(types)}|{'|'.join(t + '_list' for t in types)})>")
+re_task = re.compile(r'^-- (?P<title>.*)\n(USAGE: (?P<cmd>[\./\w]+) (?P<syntax>.*)|NO EXECUTABLE)', re.MULTILINE)  # noqa: E501
+re_args = re.compile(rf"<({'|'.join(types)}|{'|'.join(t + '_list' for t in types)})>")  # noqa: E501
 
 
 def call_function(name: str, *args, **kwargs):
@@ -38,19 +37,23 @@ def string(lenght=31):
 
 def type_list(_type):
     def generic_list():
-        return ' '.join(str(call_function(_type)) for i in range(2, unsigned_integer(6) * 2))
+        return ' '.join(
+            str(call_function(_type))
+            for _ in range(2, unsigned_integer(6) * 2))
     return generic_list
 
 
 def rubbish():
-    return str(call_function(random.choice(tuple(t + '_list' for t in types) + types)))
+    return str(call_function(
+        random.choice(tuple(t + '_list' for t in types) + types)))
 
 
 def argument_generator(syntax):
-    syntax, _ = re.subn(r'<([\w\s]+)> <\1> \.\.\. <\1> <\1>', r'<\1_list>', syntax)
+    syntax, _ = re.subn(
+        r'<([\w\s]+)> <\1> \.\.\. <\1> <\1>', r'<\1_list>', syntax)
     syntax, _ = re.subn(r'<(\w+)\s(\w+)>', r'<\1_\2>', syntax)
 
-    return ' '.join(str(call_function(arg)) for arg in re.findall(re_args, syntax))
+    return ' '.join(
+        str(call_function(arg)) for arg in re.findall(re_args, syntax))
 
 
 def testcases_generator(task, n=10):
@@ -84,18 +87,13 @@ def testcases(description_path):
     }
 
 
-def evaluated_testcases(description_path):
+def evaluated_testcases(description_path, binaries):
     task_testcases = testcases(description_path)
 
     for task in filter(lambda t: t['cmd'], task_testcases.values()):
         path_to_binary = os.path.join(os.path.join(
-            processing.BINARIES, os.path.basename(task['cmd'])))
+            binaries, os.path.basename(task['cmd'])))
         task['results'] = [processing.run_cmd(
             f"{path_to_binary} {case}").stdout for case in task['cases']]
 
     return task_testcases
-
-
-if __name__ == '__main__':
-    print(json.dumps(evaluated_testcases(
-        processing.DESCFILE), sort_keys=True, indent=4))
-- 
GitLab