author      Mahesh Gudi     2016-12-29 18:00:28 +0530
committer   maheshgudi      2017-01-09 18:10:30 +0530
commit      65f8f0f4dd867c1d1647712560e3aef73b8c921f (patch)
tree        709bdb459847ed64cf59bb46d21018fba9f13f96
parent      7fb288ffd992c912a8e2288aa97d6c6ceeedf1a1 (diff)
Python hook evaluator
-rw-r--r--    yaksh/grader.py            3
-rw-r--r--    yaksh/hook_evaluator.py   92
-rw-r--r--    yaksh/models.py            7
-rw-r--r--    yaksh/settings.py         20
4 files changed, 114 insertions, 8 deletions
diff --git a/yaksh/grader.py b/yaksh/grader.py
index 086abb7..1d4e61e 100644
--- a/yaksh/grader.py
+++ b/yaksh/grader.py
@@ -120,7 +120,6 @@ class Grader(object):
         for test_case in test_case_data:
             test_case_instance = create_evaluator_instance(metadata, test_case)
             test_case_instances.append(test_case_instance)
-
         return test_case_instances
@@ -157,7 +156,7 @@ class Grader(object):
         except TimeoutException:
             error.append(self.timeout_msg)
         except OSError:
-            msg = traceback.format_exc(limit=0)
+            msg = traceback.format_exc()
             error.append("Error: {0}".format(msg))
         except Exception:
             exc_type, exc_value, exc_tb = sys.exc_info()
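
A note on the second grader.py hunk: dropping limit=0 means the OSError handler now records the full traceback, with every stack frame, rather than just the header and exception line. A small hypothetical illustration of the difference (not part of this commit):

import traceback

try:
    open("/nonexistent/path")
except OSError:
    # With limit=0 only the "Traceback ..." header and the exception
    # line are kept; the stack frames are omitted.
    short_msg = traceback.format_exc(limit=0)
    # Without a limit, every frame leading to the error is included.
    full_msg = traceback.format_exc()
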
diff --git a/yaksh/hook_evaluator.py b/yaksh/hook_evaluator.py
new file mode 100644
index 0000000..5480849
--- /dev/null
+++ b/yaksh/hook_evaluator.py
@@ -0,0 +1,92 @@
+#!/usr/bin/env python
+from __future__ import unicode_literals
+import sys
+import traceback
+import os
+from os.path import join
+import importlib
+
+# Local imports
+from .file_utils import copy_files, delete_files
+from .base_evaluator import BaseEvaluator
+from .grader import TimeoutException
+
+
+class HookEvaluator(BaseEvaluator):
+    def __init__(self, metadata, test_case_data):
+        self.exec_scope = None
+        self.files = []
+
+        # Set metadata values
+        self.user_answer = metadata.get('user_answer')
+        self.file_paths = metadata.get('file_paths')
+        self.partial_grading = metadata.get('partial_grading')
+
+        # Set test case data values
+        self.test_case = test_case_data.get('code')
+        self.weight = test_case_data.get('weight')
+
+    def teardown(self):
+        # Delete the created file.
+        if self.files:
+            delete_files(self.files)
+
+    def compile_code(self):
+        if self.file_paths:
+            self.files = copy_files(self.file_paths)
+        if self.exec_scope:
+            return None
+        else:
+            submitted = compile(self.user_answer, '<string>', mode='exec')
+            self.exec_scope = {}
+            exec(submitted, self.exec_scope)
+            return self.exec_scope
+
+    def check_code(self):
+        """ Validate the user answer by running the hook test case code
+        against it.
+
+        Returns
+        --------
+        Returns a tuple (success, error, mark_fraction)
+
+        success - Boolean, indicating whether the user answer was evaluated
+        successfully and is correct
+        error - String, error message if success is False
+        mark_fraction - Float, fraction of the test case weight awarded to
+        the submission
+
+        Returns (True, "Correct answer", 1.0): if the student submission
+        passes the hook test case.
+
+        Returns (False, error_msg, 0.0): if the student submission fails the
+        hook test case, or if mandatory file arguments are not provided or
+        the required permissions are not given to the file(s).
+        """
+        success = False
+        mark_fraction = 0.0
+        try:
+            tb = None
+            _tests = compile(self.test_case, '<string>', mode='exec')
+            exec(_tests, globals())
+            success, err, mark_fraction = check_answer(self.user_answer)
+        except AssertionError:
+            type, value, tb = sys.exc_info()
+            info = traceback.extract_tb(tb)
+            fname, lineno, func, text = info[-1]
+            text = str(self.test_case)
+            err = "Expected Test Case:\n{0}\n" \
+                  "Error - {1} {2} in: {3}\n".format(
+                      self.test_case,
+                      type.__name__,
+                      str(value),
+                      text
+                  )
+        except TimeoutException:
+            raise
+        except Exception:
+            msg = traceback.format_exc(limit=0)
+            err = "Error in Test case: {0}".format(msg)
+        del tb
+        return success, err, mark_fraction
+
\ No newline at end of file
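
For context on check_code above: it compiles and exec()s the instructor's hook code into the module globals and then calls check_answer(self.user_answer), so the code stored in a hook test case is expected to define a check_answer(user_answer) function returning a (success, err, mark_fraction) tuple. A minimal sketch of such a hook test case follows; the specific check (an add() function) is purely illustrative and not part of this commit:

# Hypothetical hook test case code, stored as a string in HookTestCase.code.
def check_answer(user_answer):
    success = False
    err = "Incorrect answer"
    mark_fraction = 0.0
    # Run the student's submission in an isolated namespace and inspect it.
    scope = {}
    exec(user_answer, scope)
    # Illustrative check: the submission must define add(a, b) == a + b.
    if "add" in scope and scope["add"](1, 2) == 3:
        success, err, mark_fraction = True, "Correct answer", 1.0
    return success, err, mark_fraction
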
diff --git a/yaksh/models.py b/yaksh/models.py
index d65970b..cc65b9c 100644
--- a/yaksh/models.py
+++ b/yaksh/models.py
@@ -1185,3 +1185,10 @@ class McqTestCase(TestCase):
 class HookTestCase(TestCase):
     code = models.TextField()
     weight = models.FloatField(default=1.0)
+
+    def get_field_value(self):
+        return {"test_case_type": "hooktestcase", "code": self.code}
+
+    def __str__(self):
+        return u'Hook Testcase | Correct: {0}'.format(self.code)
+
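
HookTestCase.get_field_value mirrors the keys that HookEvaluator.__init__ reads from test_case_data ('code', plus 'weight' from the model field), while the metadata dict supplies 'user_answer', 'file_paths' and 'partial_grading'. A rough sketch of driving the evaluator directly with such data; the dictionaries and the sample answer are illustrative assumptions, not part of this commit:

from yaksh.hook_evaluator import HookEvaluator

hook_code = '''
def check_answer(user_answer):
    # Trivial illustrative hook: accept any non-empty submission.
    if user_answer.strip():
        return True, "Correct answer", 1.0
    return False, "Empty submission", 0.0
'''

metadata = {
    "user_answer": "def add(a, b):\n    return a + b",
    "file_paths": None,
    "partial_grading": False,
}
test_case_data = {
    "test_case_type": "hooktestcase",
    "code": hook_code,
    "weight": 1.0,
}

evaluator = HookEvaluator(metadata, test_case_data)
success, err, mark_fraction = evaluator.check_code()
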
diff --git a/yaksh/settings.py b/yaksh/settings.py
index 0e432cf..72f9fda 100644
--- a/yaksh/settings.py
+++ b/yaksh/settings.py
@@ -21,18 +21,26 @@ URL_ROOT = ''
 code_evaluators = {
     "python": {"standardtestcase": "yaksh.python_assertion_evaluator.PythonAssertionEvaluator",
-               "stdiobasedtestcase": "yaksh.python_stdio_evaluator.PythonStdIOEvaluator"
+               "stdiobasedtestcase": "yaksh.python_stdio_evaluator.PythonStdIOEvaluator",
+               "hooktestcase": "yaksh.hook_evaluator.HookEvaluator"
                },
     "c": {"standardtestcase": "yaksh.cpp_code_evaluator.CppCodeEvaluator",
-          "stdiobasedtestcase": "yaksh.cpp_stdio_evaluator.CppStdIOEvaluator"
+          "stdiobasedtestcase": "yaksh.cpp_stdio_evaluator.CppStdIOEvaluator",
+          "hooktestcase": "yaksh.hook_evaluator.HookEvaluator"
           },
     "cpp": {"standardtestcase": "yaksh.cpp_code_evaluator.CppCodeEvaluator",
-            "stdiobasedtestcase": "yaksh.cpp_stdio_evaluator.CppStdIOEvaluator"
+            "stdiobasedtestcase": "yaksh.cpp_stdio_evaluator.CppStdIOEvaluator",
+            "hooktestcase": "yaksh.hook_evaluator.HookEvaluator"
             },
     "java": {"standardtestcase": "yaksh.java_code_evaluator.JavaCodeEvaluator",
-             "stdiobasedtestcase": "yaksh.java_stdio_evaluator.JavaStdIOEvaluator"},
+             "stdiobasedtestcase": "yaksh.java_stdio_evaluator.JavaStdIOEvaluator",
+             "hooktestcase": "yaksh.hook_evaluator.HookEvaluator"
+             },
     "bash": {"standardtestcase": "yaksh.bash_code_evaluator.BashCodeEvaluator",
-             "stdiobasedtestcase": "yaksh.bash_stdio_evaluator.BashStdIOEvaluator"
+             "stdiobasedtestcase": "yaksh.bash_stdio_evaluator.BashStdIOEvaluator",
+             "hooktestcase": "yaksh.hook_evaluator.HookEvaluator"
              },
-    "scilab": {"standardtestcase": "yaksh.scilab_code_evaluator.ScilabCodeEvaluator"},
+    "scilab": {"standardtestcase": "yaksh.scilab_code_evaluator.ScilabCodeEvaluator",
+               "hooktestcase": "yaksh.hook_evaluator.HookEvaluator"
+               },
 }
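
The dotted paths registered above are resolved at run time by the grader, presumably in create_evaluator_instance, which appears in the grader.py hunk earlier in this commit. A hedged sketch of the kind of lookup involved, using importlib; the load_evaluator helper is a stand-in for illustration, not the project's actual function:

import importlib

def load_evaluator(code_evaluators, language, test_case_type):
    # e.g. code_evaluators["python"]["hooktestcase"]
    #      -> "yaksh.hook_evaluator.HookEvaluator"
    dotted_path = code_evaluators[language][test_case_type]
    module_name, class_name = dotted_path.rsplit(".", 1)
    module = importlib.import_module(module_name)
    return getattr(module, class_name)
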