From 60a2008ec3a7bc8ff9b5f7330401ff22b50536e0 Mon Sep 17 00:00:00 2001
From: "robinwilliam.hundt" <robinwilliam.hundt@stud.uni-goettingen.de>
Date: Fri, 29 Mar 2019 20:13:25 +0100
Subject: [PATCH] First version of test runner

This script has been extracted from the main Grady project.
---
 .gitignore       |   1 +
 README.md        |   6 +-
 requirements.txt |   1 +
 test.py          | 157 +++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 163 insertions(+), 2 deletions(-)
 create mode 100644 .gitignore
 create mode 100644 requirements.txt
 create mode 100755 test.py

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..0cafc1c
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+.venv/
\ No newline at end of file
diff --git a/README.md b/README.md
index cbe5b18..850f3b4 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,5 @@
-# grady-test-runner
+# Grady Test Runner
 
-Run different tests on submissions contained in export transformed by Rusty Hektor.
\ No newline at end of file
+Run different tests on submissions contained in an export transformed by Rusty Hektor.
+
+Currently supports empty and C compile tests.
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..93a1e7b
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1 @@
+tqdm~=4.31
\ No newline at end of file
diff --git a/test.py b/test.py
new file mode 100755
index 0000000..6f66a9c
--- /dev/null
+++ b/test.py
@@ -0,0 +1,157 @@
+import abc
+import json
+import logging
+import os
+import shutil
+import subprocess
+import tempfile
+
+from tqdm import tqdm
+
+log = logging.getLogger(__name__)
+
+
def run_cmd(cmd, stdin=None, check=False, timeout=1):
    """Run *cmd* through the shell, capturing stdout/stderr as UTF-8 text.

    The command is guarded twice: by the external coreutils ``timeout``
    utility (kills the child, exit status 124) and by subprocess's own
    ``timeout=`` (raises ``TimeoutExpired``).

    Args:
        cmd: Shell command line.  NOTE(review): interpolated into a shell
            string (``shell=True``), so callers must not pass untrusted
            input here.
        stdin: Optional text fed to the process's standard input.
        check: If True, raise ``CalledProcessError`` on non-zero exit.
        timeout: Limit in seconds applied to both guards.

    Returns:
        subprocess.CompletedProcess with decoded ``stdout``/``stderr``.
    """
    # Bug fix: the prefix was hard-coded as 'timeout 1 ', silently
    # ignoring the ``timeout`` argument; keep both limits in sync.
    return subprocess.run(
        f'timeout {timeout} ' + cmd,
        stderr=subprocess.PIPE,
        stdout=subprocess.PIPE,
        input=stdin,
        shell=True,
        check=check,
        encoding='utf-8',
        timeout=timeout
    )
+
+
def all_subclasses(cls):
    """Return every transitive subclass of *cls*, direct children first."""
    direct = cls.__subclasses__()
    nested = []
    for child in direct:
        nested.extend(all_subclasses(child))
    return direct + nested
+
+
class Test(metaclass=abc.ABCMeta):
    """Abstract base class for one check run against one submission.

    Subclasses must define three class attributes (enforced in __new__):
      * ``depends`` -- tuple of Test subclasses that must pass first,
      * ``label_success`` / ``label_failure`` -- labels recorded in the
        submission's ``tests`` mapping.

    Instantiating a subclass runs the test (or reuses a stored result)
    and writes the outcome into ``submission_obj['tests']`` as a side
    effect.
    """

    @classmethod
    def available_tests(cls):
        # Map subclass name -> subclass, so a test can be selected by
        # its name given on the command line.
        return {sub.__name__: sub for sub in all_subclasses(cls)}

    def __new__(cls, *args, **kwargs):
        # Fail fast if a subclass forgot a required class attribute.
        assert hasattr(cls, 'depends'), "depends not defined"
        assert hasattr(cls, 'label_success'), "label_success not defined"
        assert hasattr(cls, 'label_failure'), "label_failure not defined"
        return super().__new__(cls)

    def __init__(self, submission_obj, **kwargs):
        # Branch order matters: an unmet dependency short-circuits the
        # test, a previously stored result is reused, otherwise run it.
        if not self.dependencies_satisfied(submission_obj):
            self.result = False
            self.annotation = "TEST DEPENDENCY NOT MET"
            self.serialize(submission_obj)

        elif str(self) in submission_obj['tests']:
            self.deserialize(submission_obj['tests'][str(self)])

        else:
            self.result, self.annotation = self.run_test(submission_obj, **kwargs)
            self.serialize(submission_obj)

    def __bool__(self):
        # Truthiness of a Test instance is its pass/fail result.
        return self.result

    def __str__(self):
        # The class name doubles as the key in submission_obj['tests'].
        return self.__class__.__name__

    def dependencies_satisfied(self, submission_obj):
        # NOTE: instantiating each dependency runs it (or reuses its
        # stored result) -- see __init__.
        return all(dep(submission_obj).result for dep in self.depends)

    def deserialize(self, test):
        """Restore result/annotation from a previously stored entry."""
        self.result = test['label'] == self.label_success
        self.annotation = test['annotation']

    def serialize(self, submission_obj):
        """Record this test's outcome on the submission object."""
        as_dict = {
            'name': str(self),
            'annotation': self.annotation
        }

        if self.result:
            as_dict['label'] = self.label_success
        else:
            as_dict['label'] = self.label_failure

        submission_obj['tests'][str(self)] = as_dict

    @abc.abstractmethod
    def run_test(self, submission_obj, **kwargs) -> (bool, str):
        # Subclasses return (passed, annotation_text).
        return NotImplemented
+
+
class EmptyTest(Test):
    """Passes when the submission contains any non-whitespace code."""

    depends = ()
    label_success = 'NOT_EMPTY'
    label_failure = 'EMPTY'

    def run_test(self, submission_obj, **kwargs):
        stripped_code = submission_obj['code'].strip()
        return len(stripped_code) > 0, ""
+
+
class CompileTest(Test):
    """Compiles the submission as C11 with gcc; passes on exit status 0."""

    depends = (EmptyTest, )
    label_success = 'COMPILATION_SUCCESSFUL'
    label_failure = 'COMPILATION_FAILED'

    def run_test(self, submission_obj, **kwargs):
        # Code is piped to gcc on stdin ('-x c ... -'); gcc's stderr
        # becomes the annotation shown to correctors.
        # NOTE(review): kwargs['header'] is shell-interpolated -- assumed
        # to come from a trusted CLI argument.
        compile_result = run_cmd(
            f"gcc -Wall -c -x c -std=c11 -I {kwargs['header']} -o code.o -",
            submission_obj['code'])
        succeeded = compile_result.returncode == 0
        return succeeded, compile_result.stderr
+
+
def process(submissions, header, highest_test):
    """Run *highest_test* (plus its dependencies) over every submission.

    Args:
        submissions: Path to the JSON export produced by Rusty Hektor.
        header: Path to the header file/directory handed to gcc.
        highest_test: Name of a Test subclass, e.g. 'CompileTest'.

    Returns:
        The decoded submissions JSON, augmented in place with test
        results under each submission's ``tests`` key.

    Side effects:
        For anything beyond EmptyTest, chdir()s into a scratch temp dir
        so gcc's ``code.o`` artifacts never pollute the caller's cwd.
    """
    highest_test_class = Test.available_tests()[highest_test]

    with open(submissions) as submission_file:
        # json.load replaces the roundabout JSONDecoder().decode(read()).
        submissions_json = json.load(submission_file)

    needs_scratch_dir = highest_test != EmptyTest.__name__

    path = None
    if needs_scratch_dir:
        # Get something disposable to compile in.
        path = tempfile.mkdtemp()
        run_cmd(f'cp -r {header} {path}')
        os.chdir(path)

    def iterate_submissions():
        yield from (obj
                    for student in submissions_json['students']
                    for obj in student['submissions'])

    try:
        # Single progress bar; the original nested two tqdm bars over
        # the same stream (one inside the generator, one on the loop).
        for submission_obj in tqdm(iterate_submissions()):
            highest_test_class(submission_obj, header=header)
            if needs_scratch_dir:
                run_cmd('rm code*')
        print()  # line after progress bar
    finally:
        # Clean the scratch dir even if a test raises.
        if needs_scratch_dir:
            shutil.rmtree(path)
    return submissions_json
+
+
def parseme():
    """Parse the three positional CLI arguments: submissions, header, test."""
    import argparse

    arg_parser = argparse.ArgumentParser()
    for positional in ('submissions', 'header', 'test'):
        arg_parser.add_argument(positional)
    return arg_parser.parse_args()
+
+
if __name__ == '__main__':
    cli_args = parseme()
    graded = process(cli_args.submissions, cli_args.header, cli_args.test)
    # Pretty-print the augmented export to stdout for the next pipeline step.
    print(json.dumps(graded, sort_keys=True, indent=4))
-- 
GitLab