diff --git a/.gitignore b/.gitignore
index 13baa08b23325c68166c3157f4478b1ef01415cc..76818e987e90fea2666187a33953e9a8e76d94ee 100644
--- a/.gitignore
+++ b/.gitignore
@@ -27,7 +27,10 @@ static/
 # project specific
 env-grady/
 env/
+scripts/
 *.csv
+*.json
+.importer*
 
 # operation system
 .DS_Store
diff --git a/Makefile b/Makefile
index 7eab7e0ecbef9a1465fabdac4cdebe6002b1c0e1..c53e5fae320758b94b14d8a89572b54883ca395e 100644
--- a/Makefile
+++ b/Makefile
@@ -1,4 +1,4 @@
-APP_LIST ?= core grady
+APP_LIST ?= core grady util
 
 
 .PHONY: collectstatics run install migrations-check isort isort-check build-webpack
diff --git a/core/admin.py b/core/admin.py
index eec44cbba967b3dac3c8da81fb4e54684897f6fd..300130b8b737f58e64454245a483d9f13d00d77f 100644
--- a/core/admin.py
+++ b/core/admin.py
@@ -1,10 +1,11 @@
 from django.contrib import admin
 
-from .models import Feedback, Student, Submission, SubmissionType
+from .models import Feedback, Student, Submission, SubmissionType, Test
 
 # Register your models here.
 
 admin.site.register(SubmissionType)
 admin.site.register(Feedback)
 admin.site.register(Student)
+admin.site.register(Test)
 admin.site.register(Submission)
diff --git a/core/migrations/0009_auto_20170710_1308.py b/core/migrations/0009_auto_20170710_1308.py
new file mode 100644
index 0000000000000000000000000000000000000000..0a06d82f61967fa295b5b9783e80a2f5a2cfbf62
--- /dev/null
+++ b/core/migrations/0009_auto_20170710_1308.py
@@ -0,0 +1,25 @@
+# -*- coding: utf-8 -*-
+# Generated by Django 1.10.7 on 2017-07-10 13:08
+from __future__ import unicode_literals
+
+from django.db import migrations
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('core', '0008_auto_20170522_1834'),
+    ]
+
+    operations = [
+        migrations.RenameField(
+            model_name='submissiontype',
+            old_name='task_description',
+            new_name='description',
+        ),
+        migrations.RenameField(
+            model_name='submissiontype',
+            old_name='possible_solution',
+            new_name='solution',
+        ),
+    ]
diff --git a/core/migrations/0010_auto_20170710_1604.py b/core/migrations/0010_auto_20170710_1604.py
new file mode 100644
index 0000000000000000000000000000000000000000..319da912d39b54051376e985c63dc8407adf5c0d
--- /dev/null
+++ b/core/migrations/0010_auto_20170710_1604.py
@@ -0,0 +1,34 @@
+# -*- coding: utf-8 -*-
+# Generated by Django 1.10.7 on 2017-07-10 16:04
+from __future__ import unicode_literals
+
+from django.db import migrations, models
+import django.db.models.deletion
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('core', '0009_auto_20170710_1308'),
+    ]
+
+    operations = [
+        migrations.CreateModel(
+            name='Test',
+            fields=[
+                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
+                ('name', models.CharField(max_length=30, unique=True)),
+                ('label', models.CharField(max_length=50, unique=True)),
+                ('annotation', models.TextField()),
+                ('submission', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tests', to='core.Submission')),
+            ],
+            options={
+                'verbose_name': 'Test',
+                'verbose_name_plural': 'Tests',
+            },
+        ),
+        migrations.AlterUniqueTogether(
+            name='test',
+            unique_together=set([('submission', 'name')]),
+        ),
+    ]
diff --git a/core/migrations/0011_auto_20170710_1610.py b/core/migrations/0011_auto_20170710_1610.py
new file mode 100644
index 0000000000000000000000000000000000000000..7bf4689a3ab7df1d1112e9930660aedbc85da745
--- /dev/null
+++ b/core/migrations/0011_auto_20170710_1610.py
@@ -0,0 +1,25 @@
+# -*- coding: utf-8 -*-
+# Generated by Django 1.10.7 on 2017-07-10 16:10
+from __future__ import unicode_literals
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('core', '0010_auto_20170710_1604'),
+    ]
+
+    operations = [
+        migrations.AlterField(
+            model_name='test',
+            name='label',
+            field=models.CharField(max_length=50),
+        ),
+        migrations.AlterField(
+            model_name='test',
+            name='name',
+            field=models.CharField(max_length=30),
+        ),
+    ]
diff --git a/core/models.py b/core/models.py
index c3dcbd4a6ca2c3525cf0fc32413929f7dcff2bf9..3e43ac3e768fb3c20d57cb66aa3336d301ec9e13 100644
--- a/core/models.py
+++ b/core/models.py
@@ -81,7 +81,6 @@ class Student(models.Model):
     # Fields
     has_logged_in   = models.BooleanField(default=False)
     name            = models.CharField(max_length=50, default="__no_name__")
-
     matrikel_no = models.CharField(
         unique=True, max_length=8, default=random_matrikel_no)
     user = models.OneToOneField(
@@ -115,6 +114,26 @@ class Student(models.Model):
         verbose_name_plural = "Student Set"
 
 
+class Test(models.Model):
+
+    name       = models.CharField(max_length=30)
+    label      = models.CharField(max_length=50)
+    annotation = models.TextField()
+    submission = models.ForeignKey(
+        'submission',
+        related_name='tests',
+        on_delete=models.CASCADE,
+    )
+
+    class Meta:
+        verbose_name        = "Test"
+        verbose_name_plural = "Tests"
+        unique_together     = (('submission', 'name'),)
+
+    def __str__(self):
+        return f'{self.name} {self.label}'
+
+
 class Submission(models.Model):
 
     # Fields
diff --git a/core/serializers.py b/core/serializers.py
index abf19ba7eb8363ab60ab9c3de629a2832c99695e..8ff9df315a2fe0893112916ef40eb5126d4d6c17 100644
--- a/core/serializers.py
+++ b/core/serializers.py
@@ -14,7 +14,7 @@ class SubmissionTypeSerializer(serializers.ModelSerializer):
 
     class Meta:
         model = SubmissionType
-        exclude = ('slug', 'correction_guideline',)
+        exclude = ('slug',)
 
 
 class CreateStudentSerializer(serializers.ModelSerializer):
diff --git a/requirements.txt b/requirements.txt
index 738aeb1fbf3b3846632bb180edd110e5b8b2b543..b6bb6dad38921a3b22dcd34bd9b20d0ea06c89d4 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,8 +1,8 @@
 Django~=1.10.6
 django-extensions~=1.7.7
+djangorestframework~=3.6.3
+django_compressor~=2.1.1
 gunicorn~=19.7.0
 psycopg2~=2.7.1
-xkcdpass~=1.9.5
 xlrd~=1.0.0
-django_compressor~=2.1.1
 lxml~=3.8.0
diff --git a/scripts/convert.py b/scripts/convert.py
deleted file mode 100755
index 638fc16f8504f48d6f55488d20e6973b56f06f51..0000000000000000000000000000000000000000
--- a/scripts/convert.py
+++ /dev/null
@@ -1,142 +0,0 @@
-#!/usr/local/bin/python3
-""" a simple script that converts ilias exam output to readable json
-
-The json output will look like this:
-{
-    "max.mustermann": { <<--- OR all uppercase letter of the name + username/matrikel_no
-        "matrikel_no": "12345678",
-        "name": "Mustermann, Max",
-        "task_list": {
-            "[task_id_1]": "print Hello World!",
-            ....,
-            "[task_id_n]": "#include <stdio.h> etc."
-        }
-    },
-    ... ans so on
-}
-
-usage: convert.py [-h] [-u USERNAMES] [-n NUMBER_OF_TASKS] INFILE OUTFILE
-
-positional arguments:
-  INFILE                Ilias exam data
-  OUTFILE               Where to write the final file
-
-optional arguments:
-  -h, --help            show this help message and exit
-  -u USERNAMES, --usernames USERNAMES
-                        a json dict matno -> email
-  -n NUMBER_OF_TASKS, --NUMBER_OF_TASKS NUMBER_OF_TASKS
-                        Where to write the final file
-
-
-Author: Jan Maximilian Michal
-Date: 30 March 2017
-"""
-
-import json
-import os
-import re
-import argparse
-import urllib.parse
-from collections import namedtuple, defaultdict
-
-from xlrd import open_workbook
-
-parser = argparse.ArgumentParser()
-parser.add_argument('INFILE', help='Ilias exam data')
-parser.add_argument('OUTFILE', help='Where to write the final file')
-parser.add_argument('-u', '--usernames', help='a json dict matno -> email')
-parser.add_argument(
-    '-n', '--NUMBER_OF_TASKS',
-    default=0, # don't check
-    metavar='NUMBER_OF_TASKS',
-    type=int,
-    help='Where to write the final file')
-args = parser.parse_args()
-
-# meta sheet contains ilias evaluation names usernames etc - data contains code
-meta, *data = open_workbook(args.INFILE, open(os.devnull, 'w')).sheets()
-
-# one user has one submission (code) per task
-# yes, I know it is possible to name match groups via (?P<name>) but
-# I like this solution better since it gets the job done nicely
-user_head = namedtuple('user_head', 'kohorte, name')
-user_head_re = re.compile(r'^Ergebnisse von Testdurchlauf (?P<kohorte>\d+) für (?P<name>[\w\s\.,-]+)$')
-
-# one task has a title and id and hpfly code
-task_head_re = re.compile(r'^Quellcode Frage(?P<title>.*) \d{8}$')
-
-# nor parsing the weird mat no
-matno_re = re.compile(r'^(?P<matrikel_no>\d{8})-(\d{3})-(\d{3})$')
-
-# Modify these iterators in order to change extraction behaviour
-
-
-def sheet_iter_meta(sheet):
-    """ yield first and second col entry as tuple of (name, matnr) """
-    for row in (sheet.row(i) for i in range(1, sheet.nrows)):
-        m = re.search(matno_re, row[1].value)
-        yield row[0].value, m.group('matrikel_no') if m else row[1].value
-
-
-def sheet_iter_data(sheet):
-    """ yields all rows that are not of empty type as one string """
-    for row in (sheet.row(i) for i in range(sheet.nrows)):
-        if any(map(lambda c: c.ctype, row)):
-            yield ''.join(c.value for c in row)
-
-# nice!
-name2mat = dict(sheet_iter_meta(meta))
-
-# from xls to lists and namedtuples
-# [ [user0, task0_h, code0, ..., taskn, coden ], ..., [...] ]
-root = []
-for sheet in data:
-    for row in sheet_iter_data(sheet):
-        user = re.search(user_head_re, row)
-        task = re.search(task_head_re, row)
-        if user:
-            root.append([user_head(*user.groups())])
-        elif task:
-            root[-1].append(task.group('title'))
-        else: # should be code
-            root[-1].append(urllib.parse.unquote(row).strip())
-
-if args.NUMBER_OF_TASKS:
-    for (user, *task_list) in sorted(root, key=lambda u: u[0].name):
-        assert len(task_list) == args.NUMBER_OF_TASKS * 2
-
-mat_to_email = defaultdict(str)
-if args.usernames:
-    with open(args.usernames) as data:
-        mat_to_email.update(json.JSONDecoder().decode(data.read()))
-
-def get_username(user):
-    if name2mat[user.name] in mat_to_email:
-        return mat_to_email[name2mat[user.name]].split('@')[0]
-    return ''.join(filter(str.isupper, user.name)) + name2mat[user.name]
-
-usernames = {user.name : get_username(user) for (user, *_) in root}
-
-# form list to json_like via comprehension
-# the format {userinitials + matrikel_no : {name:, matrikel_no:, tasklist: {id:, ..., id:}}}
-json_dict = {
-    usernames[user.name] : {
-        'name' : user.name,
-        'email' : mat_to_email[name2mat[user.name]],
-        'matrikel_no' : name2mat[user.name],
-        'submissions' : [
-            {
-                "type" : task,
-                "code" : code,
-                "tests" : {},
-            } for task, code in zip(task_list[::2], task_list[1::2])
-        ]
-    } for (user, *task_list) in sorted(root, key=lambda u: u[0].name)
-}
-
-# just encode python style
-with open(args.OUTFILE, "w") as out:
-    out.write(json.JSONEncoder().encode(json_dict))
-
-print(f"Wrote data to {args.OUTFILE}. Done.")
diff --git a/util/convert.py b/util/convert.py
new file mode 100755
index 0000000000000000000000000000000000000000..5798b2458bbc61f25700a90283b73efa379644c3
--- /dev/null
+++ b/util/convert.py
@@ -0,0 +1,155 @@
+#!/usr/local/bin/python3
+""" a simple script that converts ilias exam output to readable json
+
+The json output will look like this:
+{
+    "max.mustermann": { <<--- OR all uppercase letters of the name + username/matrikel_no
+        "matrikel_no": "12345678",
+        "name": "Mustermann, Max",
+        "task_list": {
+            "[task_id_1]": "print Hello World!",
+            ....,
+            "[task_id_n]": "#include <stdio.h> etc."
+        }
+    },
+    ... and so on
+}
+
+usage: convert.py [-h] [-u USERNAMES] [-n NUMBER_OF_TASKS] INFILE OUTFILE
+
+positional arguments:
+  INFILE                Ilias exam data
+  OUTFILE               Where to write the final file
+
+optional arguments:
+  -h, --help            show this help message and exit
+  -u USERNAMES, --usernames USERNAMES
+                        a json dict matno -> email
+  -n NUMBER_OF_TASKS, --NUMBER_OF_TASKS NUMBER_OF_TASKS
+                        Where to write the final file
+
+
+Author: Jan Maximilian Michal
+Date: 30 March 2017
+"""
+
+import argparse
+import json
+import os
+import re
+import urllib.parse
+from collections import defaultdict, namedtuple
+
+from xlrd import open_workbook
+
+parser = argparse.ArgumentParser()
+parser.add_argument('INFILE', help='Ilias exam data')
+parser.add_argument('OUTFILE', help='Where to write the final file')
+parser.add_argument('-u', '--usernames', help='a json dict matno -> email')
+parser.add_argument(
+    '-n', '--NUMBER_OF_TASKS',
+    default=0, # don't check
+    metavar='NUMBER_OF_TASKS',
+    type=int,
+    help='Where to write the final file')
+
+
+
+# one user has one submission (code) per task
+# yes, I know it is possible to name match groups via (?P<name>) but
+# I like this solution better since it gets the job done nicely
+user_head = namedtuple('user_head', 'kohorte, name')
+user_head_re = re.compile(r'^Ergebnisse von Testdurchlauf (?P<kohorte>\d+) für (?P<name>[\w\s\.,-]+)$')
+
+# one task has a title and id and hopefully code
+task_head_re = re.compile(r'^Quellcode Frage(?P<title>.*) \d{8}$')
+
+# for parsing the weird mat no
+matno_re = re.compile(r'^(?P<matrikel_no>\d{8})-(\d{3})-(\d{3})$')
+
+# Modify these iterators in order to change extraction behaviour
+
+def converter(infile, usernames=None, number_of_tasks=0,):
+
+    def sheet_iter_meta(sheet):
+        """ yield first and second col entry as tuple of (name, matnr) """
+        for row in (sheet.row(i) for i in range(1, sheet.nrows)):
+            m = re.search(matno_re, row[1].value)
+            yield row[0].value, m.group('matrikel_no') if m else row[1].value
+
+
+    def sheet_iter_data(sheet):
+        """ yields all rows that are not of empty type as one string """
+        for row in (sheet.row(i) for i in range(sheet.nrows)):
+            if any(map(lambda c: c.ctype, row)):
+                yield ''.join(c.value for c in row)
+
+    # meta sheet contains ilias evaluation names usernames etc - data contains code
+    meta, *data = open_workbook(infile, open(os.devnull, 'w')).sheets()
+
+    # nice!
+    name2mat = dict(sheet_iter_meta(meta))
+
+    # from xls to lists and namedtuples
+    # [ [user0, task0_h, code0, ..., taskn, coden ], ..., [...] ]
+    root = []
+    for sheet in data:
+        for row in sheet_iter_data(sheet):
+            user = re.search(user_head_re, row)
+            task = re.search(task_head_re, row)
+            if user:
+                root.append([user_head(*user.groups())])
+            elif task:
+                root[-1].append(task.group('title'))
+            else: # should be code
+                root[-1].append(urllib.parse.unquote(row).strip())
+
+    if number_of_tasks:
+        for (user, *task_list) in sorted(root, key=lambda u: u[0].name):
+            assert len(task_list) == number_of_tasks * 2
+
+    mat_to_email = defaultdict(str)
+    if usernames:
+        with open(usernames) as data:
+            mat_to_email.update(json.JSONDecoder().decode(data.read()))
+
+    def get_username(user):
+        if name2mat[user.name] in mat_to_email:
+            return mat_to_email[name2mat[user.name]].split('@')[0]
+        return ''.join(filter(str.isupper, user.name)) + name2mat[user.name]
+
+    usernames = {user.name : get_username(user) for (user, *_) in root}
+
+    # form list to json_like via comprehension
+    # the format {userinitials + matrikel_no : {name:, matrikel_no:, tasklist: {id:, ..., id:}}}
+    return {
+        usernames[user.name] : {
+            'name' : user.name,
+            'email' : mat_to_email[name2mat[user.name]],
+            'matrikel_no' : name2mat[user.name],
+            'submissions' : [
+                {
+                    "type" : task,
+                    "code" : code,
+                    "tests" : {},
+                } for task, code in zip(task_list[::2], task_list[1::2])
+            ]
+        } for (user, *task_list) in sorted(root, key=lambda u: u[0].name)
+    }
+
+def write_to_file(json_dict, outfile):
+    # just encode python style
+    with open(outfile, "w") as out:
+        out.write(json.JSONEncoder().encode(json_dict))
+
+    print(f"Wrote data to {outfile}. Done.")
+
+
+def main():
+    args = parser.parse_args()
+    json_dict = converter(args.INFILE, args.usernames, args.NUMBER_OF_TASKS)
+    write_to_file(json_dict, args.OUTFILE)
+
+if __name__ == '__main__':
+    SCRIPT = True
+    main()
diff --git a/util/importer.py b/util/importer.py
index 99e119086bff9eda6cbd213f8fd47b8a96391d37..eb1c7be317b9e3baa6dfdd4ab37a540ba6e433c2 100644
--- a/util/importer.py
+++ b/util/importer.py
@@ -1,16 +1,50 @@
+import collections
 import csv
 import os
 import readline
 import secrets
+import sys
+import json
+from typing import Callable
 
 from django.contrib.auth.models import Group, User
-from core.models import Student, Submission, SubmissionType, Feedback
 
+import util.convert
+import util.processing
+from core.models import Feedback, Student, Submission, SubmissionType, Test
+from util.messages import *
+from util.processing import EmptyTest
 
 STUDENTS  = Group.objects.get(name='Students')
 TUTORS    = Group.objects.get(name='Tutors')
 REVIEWERS = Group.objects.get(name='Reviewers')
 
+HISTFILE  = '.importer_history'
+RECORDS   = '.importer'
+
+YES = 'Y/n'
+NO  = 'y/N'
+
+valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False}
+
+
+class chdir_context(object):
+    """
+    Step into a directory temporarily.
+    """
+
+    def __init__(self, path):
+        self.old_dir = os.getcwd()
+        self.new_dir = path
+
+    def __enter__(self):
+        info(f'Changing to {self.new_dir}')
+        os.chdir(self.new_dir)
+
+    def __exit__(self, *args):
+        info(f'Returning to {self.old_dir}')
+        os.chdir(self.old_dir)
+
 
 def get_xkcd_password(k=2):
     with open('/usr/share/dict/words') as words:
@@ -20,97 +54,274 @@ def get_xkcd_password(k=2):
     return ''.join(secrets.choice(choose_from) for _ in range(k))
 
 
-def i(prompt, default=''):
-    if default:
-        return input(f'[Q] {prompt} ({default}): ') or default
-    return input(f'[Q] {prompt}: ')
+def i(prompt: str, default: str='', is_path: bool=False, is_file: bool=False):
+    if default is YES or default is NO:
+        answer = valid[input(f'[Q] {prompt} ({default}): ').lower() or ('y' if YES == default else 'n')]
+    elif default:
+        answer = input(f'[Q] {prompt} ({default}): ') or default
+    else:
+        answer = input(f'[Q] {prompt}: ')
 
+    if (is_path or is_file) and not os.path.exists(answer) or is_file and not os.path.isfile(answer):
+        warn(f'The {"path" if is_path else "file"} does not exist. Please try again.')
+        return i(prompt, default, is_path, is_file)
 
-def make_submission_type_objects(csvfilename, lsg_dir, desc_dir):
-    with open(csvfilename, encoding='utf-8') as tfile:
-        reader = csv.reader(tfile)
+    return answer
 
-        for row in reader:
-            tid, name, score = row
-            with open(os.path.join(lsg_dir, tid + '-lsg.c'), encoding='utf-8') as lsg, open(os.path.join(desc_dir, tid + '.html'), encoding='utf-8') as desc:
-                yield {
-                    'name' : name,
-                    'description' : desc.read(),
-                    'solution' : lsg.read(),
-                    'score' : int(score),
-                }
 
+def add_user(username: str, group: str, **kwargs):
+    """ This is a specific wrapper for the django update_or_create method of
+    objects.
+        * A new user is created and password and group are set accordingly
+        * If the user was there before password is NOT changed but group is. A
+          user must only have one group.
 
-def add_user(username, group):
-    user = User(username=username.strip())
+    Args:
+        username (str): the username is the login name
+        group (str): the (only) group the user should belong to
+        **kwargs: more attributes for user creation
 
-    password = get_xkcd_password()
-    user.set_password(password)
-    user.save()
+    Returns:
+        User: the created or updated django auth user
+    """
+    user, created = User.objects.update_or_create(
+        username=username.strip(),
+        defaults=kwargs
+    )
 
+    if created:
+        password = get_xkcd_password()
+        user.set_password(password)
+        user.save()
+
+    user.groups.clear() # remove all other groups
     group.user_set.add(user)
 
     return user
 
-def add_user_list(lst, group):
+def add_student(username, name, matrikel_no, email, **kwargs):
+
+    user        = add_user(username, STUDENTS, email=email)
+    student, _  = Student.objects.update_or_create(
+        name=name,
+        defaults={'matrikel_no' : matrikel_no, 'user' : user}
+    )
+
+    return student
+
+def add_submission(student_obj, code, tests, type):
+
+    submission_type = SubmissionType.objects.get(name=type)
+
+    submission_obj, _ = Submission.objects.update_or_create(
+        type=submission_type,
+        student=student_obj,
+        defaults={'text' : code}
+    )
+
+    for name, test_data in tests.items():
+        test_obj, created = Test.objects.update_or_create(
+            name=test_data['name'],
+            submission=submission_obj,
+            defaults={
+                'label': test_data['label'],
+                'annotation': test_data['annotation'],
+            }
+        )
+
+        if test_obj.name == EmptyTest.__name__ and test_obj.label == EmptyTest.label_failure:
+            auto_correct, _ = User.objects.update_or_create(username='auto_correct', defaults={'is_active': False})
+            Feedback.objects.update_or_create(
+                of_submission=submission_obj,
+                defaults={
+                    'of_tutor'  : auto_correct,
+                    'score'     : 0,
+                    'text'      : test_obj.label,
+                    'origin'    : Feedback.WAS_EMPTY,
+                    'status'    : Feedback.ACCEPTED,
+                }
+            )
+
+def add_user_list(lst, group, **kwargs):
     for name in lst:
-        add_user(name, group)
+        add_user(name, group, **kwargs)
+
+
+def call_loader(func: Callable) -> None:
+    """ This function handles if a function will be executed at all. Currently
+    it just checks in the RECORDS file for the name of the function. If it is
+    present the function will not be executed
+
+    Args:
+        func (Callable): the loader specified below
+    """
+    if os.path.exists(RECORDS):
+        with open(RECORDS, 'r') as records_f:
+            done = [line.strip() for line in records_f]
+
+        if func.__name__ in done and not \
+                i(f'{func.__name__} has already been processed once. Proceed anyway?', NO):
+            return
+
+    func() # This executes the specified loader
+
+    with open(RECORDS, 'a') as records_f:
+        records_f.write(func.__name__)
+        records_f.write('\n')
+
+
+def do_convert_xls():
+    info('[Executing]', sys._getframe().f_code.co_name)
+
+    ans = i('''Do you want to convert the ILIAS .xls output to .json?''', YES)
+    if not ans:
+        return
 
-print('''Welcome to the Grady importer!
+    infile  = i('Please provide the path to the .xls file', is_file=True)
+    outfile = i('Where should the output go?', 'submissions.json')
 
-This script aims at making to setup of the database as easy as
-possible. It at the same time serves as a documentation on how data is imported
-in Grady. Let\'s dive right in.\n''')
+    json_dict = util.convert.converter(infile)
+    util.convert.write_to_file(json_dict, outfile)
 
 
-def main_loop():
-    path = i('location of data files', '.')
-    os.chdir(path)
+def do_load_submission_types():
+    info('[Executing] ', sys._getframe().f_code.co_name)
 
-    print('''Please provide a .csv file with
+    print('''For the following import you need three files:
 
-        id, name, score
+        1) A .csv file where the columns are: id, name, score
+        2) A path to a directory where I can find sample solutions named
+            <id>-lsg.c
+        3) A path to a directory where I can find HTML files with an accurate
+            description of the task. File name pattern has to be: <id>.html
+    ''')
 
-    The id should correspond to the names of description files if you want to
-    include them now.''')
+    path = i('Where are your files located?', '.', is_path=True)
 
-    submission_types_csv    = i('CSV file', 'submission_types.csv')
-    lsg_dir                 = i('solution dir prefix', 'code-lsg')
-    desc_dir                = i('html descriptions dir prefix', 'html')
+    with chdir_context(path):
+        submission_types_csv    = i('CSV file',         'submission_types.csv')
+        lsg_dir                 = i('solution dir',     'code/code-lsg')
+        desc_dir                = i('descriptions dir', 'html')
 
-    submission_types = [d for d in make_submission_type_objects(
-        submission_types_csv, lsg_dir, desc_dir)]
+        with open(submission_types_csv, encoding='utf-8') as tfile:
+            csv_rows = [row for row in csv.reader(tfile)]
+
+        for row in csv_rows:
+            tid, name, score = (col.strip() for col in row)
+            with \
+                    open(os.path.join(lsg_dir, tid + '-lsg.c'), encoding='utf-8') as lsg,\
+                    open(os.path.join(desc_dir, tid + '.html'), encoding='utf-8') as desc:
+                data={
+                    'name'          : name,
+                    'description'   : desc.read(),
+                    'solution'      : lsg.read(),
+                    'full_score'    : int(score),
+                }
+            _, created = SubmissionType.objects.update_or_create(
+                name=name,
+                defaults=data
+            )
+            info(f'{"Created" if created else "Updated"} {name}')
+
+
+def do_preprocess_submissions():
+    info('[Executing] ', sys._getframe().f_code.co_name)
+
+    print('''
+    Preprocessing might take some time depending on the amount of data
+    and the complexity of the programs and the corresponding unit tests. You can
+    specify what test you want to run.
+
+    Tests do depend on each other. Therefore specifying a test will also
+    result in running all its dependencies\n''')
+
+    test_enum = dict(enumerate(util.processing.Test.available_tests()))
+
+    print('The following tests are available:\n')
+    print('\t[q] Do nothing')
+    for j, test in test_enum.items():
+        print(f'\t[{j}] {test}')
+    print()
+
+    answer = i('Which tests do you want to run?')
+
+    if not answer or answer == 'q':
+        return
+
+    raise NotImplementedError
 
-    print('Now please provide files where you list usernames for reviewer and tutors')
-    tutors    = i('list of tutors', 'tutors')
-    reviewers = i('list of reviewer', 'reviewers')
 
+def do_load_submissions():
+    info('[Executing] ', sys._getframe().f_code.co_name)
+
+    file = i('Get me the file with all the submissions', 'submissions.json')
+    with open(file) as submission_file:
+        submissions = json.JSONDecoder().decode(submission_file.read())
+
+    for username, data in submissions.items():
+        student_obj = add_student(username, **data)
+
+        for submission_obj in data['submissions']:
+            add_submission(student_obj, **submission_obj)
+
+
+def do_load_tutors():
+    info('[Executing] ', sys._getframe().f_code.co_name)
+    print('Please import tutor users by providing one name per line')
+    tutors    = i('List of tutors', 'tutors', is_file=True)
 
     with open(tutors) as tutors_f:
         add_user_list(tutors_f, TUTORS)
 
+
+def do_load_reviewer():
+    info('[Executing] ', sys._getframe().f_code.co_name)
+    print('Please import reviewer users by providing one name per line')
+    reviewers = i('List of reviewers', 'reviewers', is_file=True)
+
     with open(reviewers) as reviewers_f:
-        add_user_list(reviewers_f, REVIEWERS)
+        add_user_list(reviewers_f, REVIEWERS, is_staff=True)
+
+
+call_order = collections.OrderedDict({
+    0 : do_convert_xls,
+    1 : do_load_submission_types,
+    2 : do_preprocess_submissions,
+    3 : do_load_submissions,
+    4 : do_load_tutors,
+    5 : do_load_reviewer
+})
+
 
 def start():
     if User.objects.filter(is_superuser=False) :
-        print('Warning database is not clean. Aborting')
-        exit(0)
-
-    while True:
-        try:
-            main_loop()
-        except FileNotFoundError as err:
-            print(err)
-        except (EOFError, KeyboardInterrupt) as err:
-            print()
-            exit(0)
-
-
-# Note to myself: split the module in single tests that perform some action
-# on the database. save success in a .importer history along with readline
-# completition.
-#
-# on importer load it can be determined which imports have already been done
-# and which are still due. it also saves us from repeating all over again
-# if a some file not found error occured.
+        warn('Warning database is not clean. Aborting.')
+
+    if os.path.exists(HISTFILE):
+        readline.read_history_file(HISTFILE)
+
+    print('''Welcome to the Grady importer!
+
+    This script aims at making the setup of the database as easy as possible. It
+    at the same time serves as a documentation on how data is imported in Grady.
+    Let\'s dive right in.\n''')
+
+    try:
+        print('The following importers are available:\n')
+        for fid, func in call_order.items():
+            print(f'\t[{fid}] {func.__name__}')
+
+        print()
+        fid = i('Press enter for all in given order or choose a number')
+        if fid:
+            call_loader(call_order[int(fid)])
+        else:
+            for func in call_order.values():
+                call_loader(func)
+    except (EOFError, KeyboardInterrupt) as err:
+        return
+    except Exception as err:
+        import traceback
+        traceback.print_exc()
+    finally:
+        readline.write_history_file(HISTFILE)
diff --git a/util/messages.py b/util/messages.py
new file mode 100644
index 0000000000000000000000000000000000000000..8f236313b598099954497a5e3a78e93a5d61ae12
--- /dev/null
+++ b/util/messages.py
@@ -0,0 +1,21 @@
+import sys
+
+
+def warn(*message):
+    print('[W]', *message)
+
+def debug(*message):
+    print('[DEBUG]', *message)
+
+def info(*message):
+    print('[I]', *message)
+
+def error(*message):
+    print('[E]', *message)
+
+def abort(*message):
+    print('[FATAL]', *message)
+    sys.exit('exiting...')
+
+def exit(message='exiting...'):
+    sys.exit(message)
diff --git a/util/populatedb.py b/util/populatedb.py
index 3b237129cc704c8b5ec720d87260d0f7561b32e5..f5a712b0d12d68a978a2e5b7765b616311d3f8c5 100644
--- a/util/populatedb.py
+++ b/util/populatedb.py
@@ -1,12 +1,17 @@
-import os
+import argparse
 import csv
-os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'grady.settings')
+import json
+import os
+from collections import namedtuple
 
 import django
 import xkcdpass.xkcd_password as xp
-import json
-import argparse
-from collections import namedtuple
+from django.contrib.auth.models import Group, User
+
+from core.models import Feedback, Student, Submission, SubmissionType
+
+os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'grady.settings')
+
 django.setup()
 
 INFO = 1
@@ -18,9 +23,7 @@ wordfile = xp.locate_wordfile()
 wordlist = xp.generate_wordlist(wordfile=wordfile, min_length=5, max_length=8)
 
 
-from django.contrib.auth.models import Group, User
 
-from core.models import Student, Submission, SubmissionType, Feedback
 
 if INFO:
     info = print
diff --git a/util/processing.py b/util/processing.py
index d67d3c4a70fde90b6c0e63493612bb7d049a1005..0a4445f5f9ea9ec567a37f2cd6bf819d658ac1fa 100644
--- a/util/processing.py
+++ b/util/processing.py
@@ -1,19 +1,22 @@
 import abc
 import hashlib
-import tempfile
+import json
 import os
 import re
-import json
 import shutil
 import subprocess
+import tempfile
 
-import testcases
+try:
+    import testcases
+except ModuleNotFoundError:
+    from util import testcases
 
 DESCFILE    = '../data/descfile.txt'
-BINARIES    = '../data/klausur20170627/bin'
-OBJECTS     = '../data/klausur20170627/objects'
+BINARIES    = '../data/klausur_zweittermin/bin'
+OBJECTS     = '../data/klausur_zweittermin/objects'
 SUBMISSIONS = '../data/ok.json'
-HEADER      = '../data/klausur20170627/code-testing'
+HEADER      = '../data/klausur_zweittermin/code-testing'
 
 
 def run_cmd(cmd, stdin=None, check=False):
@@ -29,20 +32,6 @@ def run_cmd(cmd, stdin=None, check=False):
     )
 
 
-def testcase(i, args, stdout):
-    try:
-        ret = run_cmd("./code %s" % args, check=True)
-        assert ret.stdout == stdout
-    except AssertionError:
-        return False, f"Case #{i}: [ASSERT FAILED] ./program {args} WAS '{ret.stdout}' SHOULD '{stdout}'"
-    except subprocess.CalledProcessError as err:
-        return False, f"Case #{i}: [FAILED] ./program {args} WITH ERROR '{err.stderr}'"
-    except subprocess.TimeoutExpired:
-        return False, f"Case #{i}: [TIMEOUT] ./program {args}"
-    else:
-        return True,  f"Case #{i}: [SUCCESS] ./program {args}"
-
-
 def all_subclasses(cls):
     return cls.__subclasses__() \
         + [g for s in cls.__subclasses__() for g in all_subclasses(s)]
@@ -157,16 +146,30 @@ class UnitTestTest(Test):
     label_success   = 'UNITTEST_SUCCSESSFUL'
     label_failure   = 'UNITTEST_FAILED'
 
+    @staticmethod
+    def testcase(i, args, stdout):
+        try:
+            ret = run_cmd("./code %s" % args, check=True)
+            assert ret.stdout == stdout
+        except AssertionError:
+            return False, f"Case #{i}: [ASSERT FAILED] ./program {args} WAS '{ret.stdout}' SHOULD '{stdout}'"
+        except subprocess.CalledProcessError as err:
+            return False, f"Case #{i}: [FAILED] ./program {args} WITH ERROR '{err.stderr}'"
+        except subprocess.TimeoutExpired:
+            return False, f"Case #{i}: [TIMEOUT] ./program {args}"
+        else:
+            return True,  f"Case #{i}: [SUCCESS] ./program {args}"
+
     def run_test(self, submission_obj):
 
         task = testcases_dict[submission_obj['type']]
-        results, messages = zip(*list(testcase(i, case, result)
+        results, messages = zip(*list(self.testcase(i, case, result)
                                       for i, (case, result) in enumerate(zip(task['cases'], task['results']))))
 
         return all(results), '\n'.join(messages)
 
 
-def processing():
+def processing(highest_test):
 
     with open(SUBMISSIONS) as submission_file:
         submissions = json.JSONDecoder().decode(submission_file.read())
@@ -180,7 +183,7 @@ def processing():
 
     for username, data in submissions.items():
         for submission_obj in data['submissions']:
-            UnitTestTest(submission_obj)
+            highest_test(submission_obj)
             run_cmd('rm code*')
 
     shutil.rmtree(path)
@@ -189,4 +192,4 @@ def processing():
 
 if __name__ == '__main__':
     testcases_dict = testcases.evaluated_testcases(DESCFILE)
-    print(json.dumps(processing(), sort_keys=True, indent=4))
+    print(json.dumps(processing(LinkTest), sort_keys=True, indent=4))
diff --git a/util/testcases.py b/util/testcases.py
index dab44d1733b3488e5192f63c874793893e153530..3e9892fdf4c07a4babf315e1a9437fac7e7c5899 100644
--- a/util/testcases.py
+++ b/util/testcases.py
@@ -1,10 +1,12 @@
-import re
-import os
 import json
+import os
 import random
+import re
 from string import ascii_letters, digits
-
-import processing
+try:
+    import processing
+except ModuleNotFoundError:
+    from util import processing
 
 types = ('integer', 'unsigned_integer', 'character', 'string')
 list_sep = '...'
@@ -36,7 +38,7 @@ def string(lenght=31):
 
 def type_list(_type):
     def generic_list():
-        return ' '.join(str(call_function(_type)) for i in range(unsigned_integer(10)))
+        return ' '.join(str(call_function(_type)) for i in range(unsigned_integer(5) * 2))
     return generic_list