| author    | ankitjavalkar                                    | 2016-03-16 10:57:07 +0530 |
|-----------|--------------------------------------------------|---------------------------|
| committer | ankitjavalkar                                    | 2016-05-05 19:00:33 +0530 |
| commit    | 195aead9b0fab0d8cdb86a9fc884ac3edca5db84 (patch)  |                           |
| tree      | 107355d17a248504726aed93bf0e4cb490f190bf         |                           |
| parent    | 1e993bee18028c59d809f49d853b60e41326991c (diff)  |                           |
- Connect test case type models to backend code server
- Support for Stdout test case and Standard assertion test case
- Add MCQ Test case and support for validations
- Remove tester dir
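
Most of the change is in `yaksh/models.py`: the old free-form `func_name` / `kw_args` / `pos_args` / `expected_answer` columns on `TestCase` are replaced by concrete per-type models that inherit from `TestCase` (Django multi-table inheritance). The parent resolves its concrete child row with `getattr`, using the question's `test_case_type` string as the accessor name, which is presumably why the choice values in `forms.py` and `settings.py` are renamed to the lowercased class names `standardtestcase` and `stdoutbasedtestcase`. A condensed sketch of the hierarchy introduced in the diff below, as it would sit in `yaksh/models.py` (the `Question` foreign key refers to the existing model in the same module):

```python
from django.db import models


class TestCase(models.Model):
    # 'Question' is the existing model defined earlier in yaksh/models.py.
    question = models.ForeignKey('Question', blank=True, null=True)

    def get_child_instance(self, type):
        # Multi-table inheritance exposes the child row as an attribute named
        # after the lowercased child class, e.g. "standardtestcase", which is
        # the same string stored in Question.test_case_type.
        return getattr(self, type)


class StandardTestCase(TestCase):
    test_case = models.TextField(blank=True)      # e.g. "assert add(1, 2) == 3"

    def get_field_value(self):
        return self.test_case


class StdoutBasedTestCase(TestCase):
    output = models.TextField(blank=True)         # expected stdout of the program

    def get_field_value(self):
        return self.output


class McqTestCase(TestCase):
    options = models.TextField()
    correct = models.BooleanField(default=False)

    def validate(self, user_answer):
        # Placeholder in this commit; MCQ validation is not wired up yet.
        pass
```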
| mode       | path                                     | lines changed |
|------------|------------------------------------------|---------------|
| -rw-r--r-- | yaksh/admin.py                           | 5             |
| -rwxr-xr-x | yaksh/code_server.py                     | 2             |
| -rw-r--r-- | yaksh/forms.py                           | 4             |
| -rw-r--r-- | yaksh/models.py                          | 115           |
| -rw-r--r-- | yaksh/python_code_evaluator.py           | 17            |
| -rw-r--r-- | yaksh/python_stdout_evaluator.py         | 52            |
| -rw-r--r-- | yaksh/settings.py                        | 9             |
| -rw-r--r-- | yaksh/templates/yaksh/add_question.html  | 2             |
| -rw-r--r-- | yaksh/tester/python/verifier.py          | 121           |
| -rw-r--r-- | yaksh/views.py                           | 8             |

10 files changed, 127 insertions, 208 deletions
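
Taken together, these files change how a code answer travels from the view to an evaluator: `Question.consolidate_answer_data()` now serialises just the user answer plus a flat list of test-case values, `views.check()` no longer passes a `TestCase` queryset, and the code server selects the evaluator class from the nested `code_evaluators` mapping keyed by language and `test_case_type`. The sketch below illustrates that dispatch; `lookup_evaluator` is a hypothetical helper written for this page, while in the repository the equivalent lookup happens inside `create_evaluator_instance` in `yaksh/code_server.py`:

```python
import importlib
import json

# Mirrors the nested mapping this commit adds to yaksh/settings.py.
code_evaluators = {
    "python": {
        "standardtestcase": "python_code_evaluator.PythonCodeEvaluator",
        "stdoutbasedtestcase": "python_stdout_evaluator.PythonStdoutEvaluator",
    },
}


def lookup_evaluator(language, test_case_type):
    """Hypothetical helper: resolve a 'module.ClassName' path to a class."""
    path = code_evaluators[language][test_case_type]
    module_name, class_name = path.rsplit(".", 1)
    return getattr(importlib.import_module(module_name), class_name)


# Shape of the JSON that Question.consolidate_answer_data() now produces
# for a 'code' question (the values here are illustrative only):
json_data = json.dumps({
    "user_answer": "def add(a, b):\n    return a + b",
    "test_case_data": ["assert add(1, 2) == 3"],  # one entry per TestCase child
})
```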
```diff
diff --git a/yaksh/admin.py b/yaksh/admin.py
index 71dfb3b..d223cd4 100644
--- a/yaksh/admin.py
+++ b/yaksh/admin.py
@@ -1,6 +1,9 @@
-from yaksh.models import Question, Quiz, TestCase
+from yaksh.models import Question, Quiz, TestCase,\
+    StandardTestCase, StdoutBasedTestCase
 from django.contrib import admin
 
 admin.site.register(Question)
 admin.site.register(TestCase)
+admin.site.register(StandardTestCase)
+admin.site.register(StdoutBasedTestCase)
 admin.site.register(Quiz)
diff --git a/yaksh/code_server.py b/yaksh/code_server.py
index 7951ac8..66c4271 100755
--- a/yaksh/code_server.py
+++ b/yaksh/code_server.py
@@ -64,7 +64,7 @@ class CodeServer(object):
         """
         code_evaluator = create_evaluator_instance(language, test_case_type,
                                                    json_data, in_dir)
-        data = unpack_json(json_data) #@@@ def should be here
+        data = unpack_json(json_data)
         result = code_evaluator.evaluate(**data)
 
         # Put us back into the server pool queue since we are free now.
diff --git a/yaksh/forms.py b/yaksh/forms.py
index 5959dc4..94498a1 100644
--- a/yaksh/forms.py
+++ b/yaksh/forms.py
@@ -29,9 +29,9 @@ question_types = (
 )
 
 test_case_types = (
-    ("assert_based", "Assertion Based Testcase"),
+    ("standardtestcase", "Standard Testcase"),
     # ("argument_based", "Multiple Correct Choices"),
-    ("stdout_based", "Stdout Based Testcase"),
+    ("stdoutbasedtestcase", "Stdout Based Testcase"),
 )
 
 UNAME_CHARS = letters + "._" + digits
diff --git a/yaksh/models.py b/yaksh/models.py
index 6fa96bf..331446f 100644
--- a/yaksh/models.py
+++ b/yaksh/models.py
@@ -5,6 +5,7 @@ from itertools import islice, cycle
 from collections import Counter
 from django.db import models
 from django.contrib.auth.models import User
+from django.forms.models import model_to_dict
 from taggit.managers import TaggableManager
 
 
@@ -31,9 +32,9 @@ enrollment_methods = (
 )
 
 test_case_types = (
-    ("assert_based", "Assertion Based Testcase"),
-    # ("argument_based", "Multiple Correct Choices"),
-    ("stdout_based", "Stdout Based Testcase"),
+    ("standardtestcase", "Standard Testcase"),
+    ("stdoutbasedtestcase", "Stdout Based Testcase"),
+    # ("mcqtestcase", "MCQ Testcase"),
 )
 
 attempts = [(i, i) for i in range(1, 6)]
@@ -152,14 +153,14 @@ class Question(models.Model):
     points = models.FloatField(default=1.0)
 
     # Answer for MCQs.
-    test = models.TextField(blank=True)
+    # test = models.TextField(blank=True)
 
     # Test cases file paths (comma seperated for reference code path and test case code path)
     # Applicable for CPP, C, Java and Scilab
-    ref_code_path = models.TextField(blank=True)
+    # ref_code_path = models.TextField(blank=True)
 
-    # Any multiple choice options. Place one option per line.
-    options = models.TextField(blank=True)
+    # # Any multiple choice options. Place one option per line.
+    # options = models.TextField(blank=True)
 
     # The language for question.
     language = models.CharField(max_length=24,
@@ -176,7 +177,7 @@ class Question(models.Model):
     active = models.BooleanField(default=True)
 
     # Snippet of code provided to the user.
-    snippet = models.CharField(max_length=256, blank=True)
+    # snippet = models.CharField(max_length=256)
 
     # Tags for the Question.
     tags = TaggableManager(blank=True)
@@ -184,41 +185,57 @@ class Question(models.Model):
     # user for particular question
     user = models.ForeignKey(User, related_name="user")
 
-    def consolidate_answer_data(self, user_answer):
-        test_case_data_dict = []
-        question_info_dict = {}
+    # def consolidate_answer_data(self, test_cases, user_answer):
+    # def consolidate_answer_data(self, user_answer):
+    #     test_case_data_dict = []
+    #     question_info_dict = {}
+
+    #     for test_case in test_cases:
+    #         kw_args_dict = {}
+    #         pos_args_list = []
+
+    #         test_case_data = {}
+    #         test_case_data['test_id'] = test_case.id
+    #         test_case_data['func_name'] = test_case.func_name
+    #         test_case_data['expected_answer'] = test_case.expected_answer
+
+    #         if test_case.kw_args:
+    #             for args in test_case.kw_args.split(","):
+    #                 arg_name, arg_value = args.split("=")
+    #                 kw_args_dict[arg_name.strip()] = arg_value.strip()
+
+    #         if test_case.pos_args:
+    #             for args in test_case.pos_args.split(","):
+    #                 pos_args_list.append(args.strip())
 
-        # for test_case in test_cases:
-        #     kw_args_dict = {}
-        #     pos_args_list = []
+    #         test_case_data['kw_args'] = kw_args_dict
+    #         test_case_data['pos_args'] = pos_args_list
+    #         test_case_data_dict.append(test_case_data)
 
-        #     test_case_data = {}
-        #     test_case_data['test_id'] = test_case.id
-        #     test_case_data['func_name'] = test_case.func_name
-        #     test_case_data['expected_answer'] = test_case.expected_answer
+    #     question_info_dict['language'] = self.language
+    #     question_info_dict['id'] = self.id
+    #     question_info_dict['user_answer'] = user_answer
+    #     question_info_dict['test_parameter'] = test_case_data_dict
+    #     question_info_dict['ref_code_path'] = self.ref_code_path
+    #     question_info_dict['test'] = self.test
+    #     question_info_dict['test_case_type'] = self.test_case_type
 
-        #     if test_case.kw_args:
-        #         for args in test_case.kw_args.split(","):
-        #             arg_name, arg_value = args.split("=")
-        #             kw_args_dict[arg_name.strip()] = arg_value.strip()
+    #     return json.dumps(question_info_dict)
 
-        #     if test_case.pos_args:
-        #         for args in test_case.pos_args.split(","):
-        #             pos_args_list.append(args.strip())
+    def consolidate_answer_data(self, user_answer):
+        question_data = {}
+        test_case_data = []
 
-        #     test_case_data['kw_args'] = kw_args_dict
-        #     test_case_data['pos_args'] = pos_args_list
-        #     test_case_data_dict.append(test_case_data)
+        test_cases = self.testcase_set.all()
+        for test in test_cases:
+            test_instance = test.get_child_instance(self.test_case_type)
+            test_case_field_value = test_instance.get_field_value()
+            test_case_data.append(test_case_field_value)
 
-        # question_info_dict['language'] = self.language
-        # question_info_dict['id'] = self.id
-        question_info_dict['user_answer'] = user_answer
-        # question_info_dict['test_parameter'] = test_case_data_dict
-        question_info_dict['ref_code_path'] = self.ref_code_path
-        question_info_dict['test'] = self.test
-        # question_info_dict['test_case_type'] = self.test_case_type
+        question_data['test_case_data'] = test_case_data
+        question_data['user_answer'] = user_answer
 
-        return json.dumps(question_info_dict)
+        return json.dumps(question_data)
 
     def dump_into_json(self, question_ids, user):
         questions = Question.objects.filter(id__in = question_ids, user_id = user.id)
@@ -755,14 +772,24 @@ class AssignmentUpload(models.Model):
 class TestCase(models.Model):
     question = models.ForeignKey(Question, blank=True, null = True)
 
-    # Test case function name
-    func_name = models.CharField(blank=True, null = True, max_length=200)
+    def get_child_instance(self, type):
+        return getattr(self, type)
+
+class StandardTestCase(TestCase):
+    test_case = models.TextField(blank=True)
+
+    def get_field_value(self):
+        return self.test_case
 
-    # Test case Keyword arguments in dict form
-    kw_args = models.TextField(blank=True, null = True)
+class StdoutBasedTestCase(TestCase):
+    output = models.TextField(blank=True)
 
-    # Test case Positional arguments in list form
-    pos_args = models.TextField(blank=True, null = True)
+    def get_field_value(self):
+        return self.output
+
+class McqTestCase(TestCase):
+    options = models.TextField()
+    correct = models.BooleanField(default=False)
 
-    # Test case Expected answer in list form
-    expected_answer = models.TextField(blank=True, null = True)
+    def validate(self, user_answer):
+        pass
diff --git a/yaksh/python_code_evaluator.py b/yaksh/python_code_evaluator.py
index 5722b2d..a131a0e 100644
--- a/yaksh/python_code_evaluator.py
+++ b/yaksh/python_code_evaluator.py
@@ -12,17 +12,18 @@ from code_evaluator import CodeEvaluator, TimeoutException
 
 class PythonCodeEvaluator(CodeEvaluator):
     """Tests the Python code obtained from Code Server"""
 
-    def check_code(self, test, user_answer, ref_code_path):
+    # def check_code(self, test, user_answer, ref_code_path):
+    def check_code(self, user_answer, test_cases):
         success = False
         try:
             tb = None
-            test_code = test
             submitted = compile(user_answer, '<string>', mode='exec')
             g = {}
             exec submitted in g
-            _tests = compile(test_code, '<string>', mode='exec')
-            exec _tests in g
+            for test_code in test_cases:
+                _tests = compile(test_code, '<string>', mode='exec')
+                exec _tests in g
         except AssertionError:
             type, value, tb = sys.exc_info()
             info = traceback.extract_tb(tb)
@@ -40,6 +41,14 @@ class PythonCodeEvaluator(CodeEvaluator):
             del tb
         return success, err
 
+    # def unpack_test_case_data(self, test_case_data):
+    #     test_cases = []
+    #     for t in test_case_data:
+    #         test_case = t.get('test_case')
+    #         test_cases.append(test_case)
+
+    #     return test_cases
+
     # def check_code(self):
     #     success = False
 
diff --git a/yaksh/python_stdout_evaluator.py b/yaksh/python_stdout_evaluator.py
index 89d3424..28c3372 100644
--- a/yaksh/python_stdout_evaluator.py
+++ b/yaksh/python_stdout_evaluator.py
@@ -12,36 +12,42 @@ from code_evaluator import CodeEvaluator
 
 @contextmanager
 def redirect_stdout():
-    from StringIO import StringIO
-    new_target = StringIO()
+    from StringIO import StringIO
+    new_target = StringIO()
 
-    old_target, sys.stdout = sys.stdout, new_target # replace sys.stdout
-    try:
-        yield new_target # run some code with the replaced stdout
-    finally:
-        sys.stdout = old_target # restore to the previous value
+    old_target, sys.stdout = sys.stdout, new_target # replace sys.stdout
+    try:
+        yield new_target # run some code with the replaced stdout
+    finally:
+        sys.stdout = old_target # restore to the previous value
 
 
 class PythonStdoutEvaluator(CodeEvaluator):
     """Tests the Python code obtained from Code Server"""
 
-    def check_code(self, test, user_answer, ref_code_path):
+    def check_code(self, user_answer, test_cases):
         success = False
 
-        try:
-            tb = None
-            test_code = test
-            submitted = compile(user_answer, '<string>', mode='exec')
-            with redirect_stdout() as output_buffer:
-                g = {}
-                exec submitted in g
-            raw_output_value = output_buffer.getvalue()
-            output_value = raw_output_value.encode('string_escape').strip()
-            if output_value == str(test_code):
-                success = True
-                err = 'Correct answer'
-            else:
-                raise ValueError("Incorrect Answer")
+        tb = None
+        expected_output = test_cases[0]
+        submitted = compile(user_answer, '<string>', mode='exec')
+        with redirect_stdout() as output_buffer:
+            g = {}
+            exec submitted in g
+        raw_output_value = output_buffer.getvalue()
+        output_value = raw_output_value.encode('string_escape').strip()
+        if output_value == str(test_code):
+            success = True
+            err = 'Correct answer'
+        else:
+            success = False
+            err = "Incorrect Answer"
 
         del tb
-        return success, err
\ No newline at end of file
+        return success, err
+
+    # def unpack_test_case_data(self, test_case_data):
+    #     for t in test_case_data:
+    #         test_case = t.get('output')
+
+    #     return test_case
diff --git a/yaksh/settings.py b/yaksh/settings.py
index f8b240d..a4e78db 100644
--- a/yaksh/settings.py
+++ b/yaksh/settings.py
@@ -20,13 +20,12 @@ SERVER_TIMEOUT = 2
 URL_ROOT = ''
 
 code_evaluators = {
-    "python": {"assert_based": "python_code_evaluator.PythonCodeEvaluator",
-               "argument_based": "python_argument_based_evaluator.PythonCodeEvaluator",
-               "stdout_based": "python_stdout_evaluator.PythonStdoutEvaluator"
-              },
+    "python": {"standardtestcase": "python_code_evaluator.PythonCodeEvaluator",
+               "stdoutbasedtestcase": "python_stdout_evaluator.PythonStdoutEvaluator"
+              },
     "c": "cpp_code_evaluator.CppCodeEvaluator",
     "cpp": "cpp_code_evaluator.CppCodeEvaluator",
     "java": "java_code_evaluator.JavaCodeEvaluator",
     "bash": "bash_code_evaluator.BashCodeEvaluator",
     "scilab": "scilab_code_evaluator.ScilabCodeEvaluator",
-    }
+    }
diff --git a/yaksh/templates/yaksh/add_question.html b/yaksh/templates/yaksh/add_question.html
index 88d8f03..0d74663 100644
--- a/yaksh/templates/yaksh/add_question.html
+++ b/yaksh/templates/yaksh/add_question.html
@@ -28,7 +28,7 @@
 <tr><td>Snippet: <td>{{ form.snippet }}{{ form.snippet.errors }}</td></tD></td></tr>
 <tr><td>Tags: <td>{{ form.tags }}
 <tr><td id='label_option'>Options: <td>{{ form.options }} {{form.options.errors}}
-<tr><td id='label_solution'>Test: <td>{{ form.test }} {{form.test.errors}}
+<!-- <tr><td id='label_solution'>Test: <td>{{ form.test }} {{form.test.errors}} -->
 <tr><td id='label_ref_code_path'>Reference Code Path: <td>{{ form.ref_code_path }} {{form.ref_code_path.errors}}
 <tr><td> test_case_type: <td> {{ form.test_case_type }}{{ form.test_case_type.errors }}
diff --git a/yaksh/tester/python/verifier.py b/yaksh/tester/python/verifier.py
deleted file mode 100644
index 102dcb9..0000000
--- a/yaksh/tester/python/verifier.py
+++ /dev/null
@@ -1,121 +0,0 @@
-import sys
-from .utils import import_by_path
-from contextlib import contextmanager
-
-
-@contextmanager
-def redirect_stdout():
-    from StringIO import StringIO
-    new_target = StringIO()
-
-    old_target, sys.stdout = sys.stdout, new_target # replace sys.stdout
-    try:
-        yield new_target # run some code with the replaced stdout
-    finally:
-        sys.stdout = old_target # restore to the previous value
-
-# def redirect_stdout():
-#     # import sys
-#     from StringIO import StringIO
-#     oldout,olderr = sys.stdout, sys.stderr
-#     try:
-#         out = StringIO()
-#         err = StringIO()
-#         # sys.stdout,sys.stderr = out, err
-#         yield out, err
-#     finally:
-#         sys.stdout,sys.stderr = oldout, olderr
-#         out = out.getvalue()
-#         err = err.getvalue()
-
-TESTER_BACKEND = {
-    "python": "PythonPrintTesterBackend" #@@@rename to test-case-creator, this file should be backend.py
-}
-
-class TesterException(Exception):
-    """ Parental class for all tester exceptions """
-    pass
-
-class UnknownBackendException(TesterException):
-    """ Exception thrown if tester backend is not recognized. """
-    pass
-
-
-def detect_backend(language):
-    """
-    Detect the right backend for a test case.
- """ - backend_name = TESTER_BACKEND.get(language) - # backend = import_by_path(backend_name) - backend = PythonTesterBackend() #@@@ - return backend - -class PythonPrintTesterBackend(object): - def test_code(self, submitted, reference_output): - """ - create a test command - """ - with redirect_stdout() as output_buffer: - g = {} - exec submitted in g - - # return_buffer = out.encode('string_escape') - raw_output_value = output_buffer.getvalue() - output_value = raw_output_value.encode('string_escape').strip() - if output_value == str(reference_output): - return True - else: - raise ValueError("Incorrect Answer", output_value, reference_output) - - -class PythonTesterBackend(object): - # def __init__(self, test_case): - # self._test_case = test_case - def create(self): #@@@ test() - """ - create a test command - """ - test_code = "assert {0}({1}) == {2}".format(self.test_case_parameters['function_name'], self.test_case_parameters['args'], - self.test_case_parameters['expected_answer']) - return test_code - - def pack(self, test_case): - kw_args_dict = {} - pos_args_list = [] - test_case_data = {} - test_case_data['test_id'] = test_case.id - test_case_data['func_name'] = test_case.func_name - test_case_data['expected_answer'] = test_case.expected_answer - - if test_case.kw_args: - for args in test_case.kw_args.split(","): - arg_name, arg_value = args.split("=") - kw_args_dict[arg_name.strip()] = arg_value.strip() - - if test_case.pos_args: - for args in test_case.pos_args.split(","): - pos_args_list.append(args.strip()) - - test_case_data['kw_args'] = kw_args_dict - test_case_data['pos_args'] = pos_args_list - - return test_case_data - - def unpack(self, test_case_data): - pos_args = ", ".join(str(i) for i in test_case_data.get('pos_args')) \ - if test_case_data.get('pos_args') else "" - kw_args = ", ".join(str(k+"="+a) for k, a - in test_case_data.get('kw_args').iteritems()) \ - if test_case_data.get('kw_args') else "" - args = pos_args + ", " + kw_args if pos_args and kw_args \ - else pos_args or kw_args - function_name = test_case_data.get('func_name') - expected_answer = test_case_data.get('expected_answer') - - self.test_case_parameters = { - 'args': args, - 'function_name': function_name, - 'expected_answer': expected_answer - } - - return self.test_case_parameters
\ No newline at end of file diff --git a/yaksh/views.py b/yaksh/views.py index 520f396..a8fb66a 100644 --- a/yaksh/views.py +++ b/yaksh/views.py @@ -188,7 +188,6 @@ def add_question(request, question_id=None): d = Question.objects.get(id=question_id) if 'save_question' in request.POST: qtn = form.save(commit=False) - test_case_formset = TestCaseFormSet(request.POST, prefix='test', instance=qtn) form.save() question = Question.objects.get(id=question_id) return my_redirect("/exam/manage/questions") @@ -213,7 +212,6 @@ def add_question(request, question_id=None): else: d = Question.objects.get(id=question_id) form = QuestionForm(instance=d) - return my_render_to_response('yaksh/add_question.html', {'form': form}, # 'formset': test_case_formset}, @@ -455,8 +453,7 @@ def check(request, q_id, attempt_num=None, questionpaper_id=None): # If we were not skipped, we were asked to check. For any non-mcq # questions, we obtain the results via XML-RPC with the code executed # safely in a separate process (the code_server.py) running as nobody. - test_cases = TestCase.objects.filter(question=question) - json_data = question.consolidate_answer_data(test_cases, user_answer) \ + json_data = question.consolidate_answer_data(user_answer) \ if question.type == 'code' else None correct, result = validate_answer(user, user_answer, question, json_data) if correct: @@ -505,12 +502,10 @@ def validate_answer(user, user_answer, question, json_data=None): if question.type == 'mcq': if user_answer.strip() == question.test.strip(): correct = True - message = 'Correct answer' elif question.type == 'mcc': answers = set(question.test.splitlines()) if set(user_answer) == answers: correct = True - message = 'Correct answer' elif question.type == 'code': user_dir = get_user_dir(user) json_result = code_server.run_code(question.language, question.test_case_type, json_data, user_dir) @@ -848,6 +843,7 @@ def show_all_questions(request): return my_render_to_response('yaksh/showquestions.html', context, context_instance=ci) + @login_required def user_data(request, user_id, questionpaper_id=None): """Render user data.""" |
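
For reference, the refactored Python evaluators now receive a plain list of test-case strings (`test_case_data`) instead of a single `test` blob and a `ref_code_path`. The snippet below is a standalone sketch of the two checks, not the committed evaluator code: the assertion check compiles and executes each test-case string against the user's namespace, and the stdout check compares captured output with the expected output that a `StdoutBasedTestCase` contributes as `test_cases[0]`.

```python
import sys
from contextlib import contextmanager

try:                                  # Python 2 (as used by the repo) or Python 3
    from StringIO import StringIO
except ImportError:
    from io import StringIO


@contextmanager
def redirect_stdout():
    """Temporarily replace sys.stdout with an in-memory buffer."""
    new_target, old_target = StringIO(), sys.stdout
    sys.stdout = new_target
    try:
        yield new_target
    finally:
        sys.stdout = old_target


def check_assertions(user_answer, test_cases):
    """Standard (assertion-based) check: run every test-case string."""
    g = {}
    exec(compile(user_answer, '<string>', 'exec'), g)
    try:
        for test_code in test_cases:
            exec(compile(test_code, '<string>', 'exec'), g)
    except AssertionError as e:
        return False, "AssertionError: {0}".format(e)
    return True, 'Correct answer'


def check_stdout(user_answer, test_cases):
    """Stdout-based check: compare captured output with the expected output."""
    expected_output = test_cases[0]
    with redirect_stdout() as buf:
        exec(compile(user_answer, '<string>', 'exec'), {})
    if buf.getvalue().strip() == str(expected_output).strip():
        return True, 'Correct answer'
    return False, 'Incorrect Answer'


print(check_assertions("def add(a, b):\n    return a + b", ["assert add(1, 2) == 3"]))
# (True, 'Correct answer')
print(check_stdout("print(1 + 2)", ["3"]))
# (True, 'Correct answer')
```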