Diffstat (limited to 'yaksh')
 yaksh/base_evaluator.py                         |  66
 yaksh/code_evaluator.py                         | 118
 yaksh/code_server.py                            |   5
 yaksh/cpp_code_evaluator.py                     |  46
 yaksh/evaluator_tests/test_python_evaluation.py |  13
 yaksh/models.py                                 |  41
 yaksh/python_assertion_evaluator.py             |  12
 yaksh/python_stdio_evaluator.py                 |   4
 8 files changed, 205 insertions(+), 100 deletions(-)
diff --git a/yaksh/base_evaluator.py b/yaksh/base_evaluator.py
new file mode 100644
index 0000000..c8177b7
--- /dev/null
+++ b/yaksh/base_evaluator.py
@@ -0,0 +1,68 @@
+#!/usr/bin/env python
+from __future__ import unicode_literals
+import traceback
+import pwd
+import os
+from os.path import join, isfile, abspath, exists, dirname
+import subprocess
+
+# TimeoutException is the signal-based timeout raised via code_evaluator.
+from .code_evaluator import TimeoutException
+
+MY_DIR = abspath(dirname(__file__))
+
+
+class BaseEvaluator(object):
+    """Base evaluator class containing generic attributes and callable methods."""
+
+    def __init__(self):
+        pass
+
+    def check_code(self):
+        raise NotImplementedError("check_code method not implemented")
+
+    def compile_code(self):
+        pass
+
+    def _run_command(self, cmd_args, *args, **kw):
+        """Run a command in a subprocess while blocking; the process is
+        killed if it takes more than 2 seconds to run. Return the Popen
+        object, the stdout and the stderr.
+        """
+        try:
+            proc = subprocess.Popen(cmd_args, *args, **kw)
+            stdout, stderr = proc.communicate()
+        except TimeoutException:
+            # Runaway code, so kill it.
+            proc.kill()
+            # Re-raise exception.
+            raise
+        return proc, stdout.decode('utf-8'), stderr.decode('utf-8')
+
+    def _remove_null_substitute_char(self, string):
+        """Return the string without any null and substitute characters."""
+        # Compare with != (not `is not`): `is` checks identity, not value.
+        return ''.join(c for c in string if ord(c) != 26 and ord(c) != 0)
+
+    def create_submit_code_file(self, file_name):
+        """Set the file path for the code (`answer`) and create the file."""
+        submit_path = abspath(file_name)
+        if not exists(submit_path):
+            with open(submit_path, 'w'):
+                pass
+        return submit_path
+
+    def write_to_submit_code_file(self, file_path, user_answer):
+        """Write the code (`answer`) to a file."""
+        with open(file_path, 'w') as submit_f:
+            submit_f.write(user_answer.lstrip())
+
+    def _set_test_code_file_path(self, ref_path=None, test_case_path=None):
+        """Resolve relative reference and test-case paths against MY_DIR."""
+        if ref_path and not ref_path.startswith('/'):
+            ref_path = join(MY_DIR, ref_path)
+
+        if test_case_path and not test_case_path.startswith('/'):
+            test_case_path = join(MY_DIR, test_case_path)
+
+        return ref_path, test_case_path
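
Taken together, the new module gives every concrete evaluator a common parent.
A minimal sketch (hypothetical, not part of this commit) of how a subclass is
expected to use BaseEvaluator:

    from yaksh.base_evaluator import BaseEvaluator

    class EchoEvaluator(BaseEvaluator):
        """Hypothetical evaluator: passes if the answer is exactly 'echo'."""

        def __init__(self, metadata, test_case_data):
            self.user_answer = metadata.get('user_answer')
            self.weight = test_case_data.get('weight')

        def check_code(self):
            # Concrete evaluators override check_code() and return a
            # (success, error, weight) tuple.
            if self.user_answer.strip() == 'echo':
                return True, '', self.weight
            return False, "Expected 'echo'", 0.0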
diff --git a/yaksh/code_evaluator.py b/yaksh/code_evaluator.py
index 5ede63d..e5b8853 100644
--- a/yaksh/code_evaluator.py
+++ b/yaksh/code_evaluator.py
@@ -89,11 +89,9 @@ class CodeEvaluator(object):
A tuple: (success, error message, weight).
"""
- # self.language = language
- # self.test_case_type = test_case_type
-
+ test_case_instances = self.get_evaluator_objects(kwargs)
self.setup()
- success, error, weight = self.safe_evaluate(**kwargs)
+ success, error, weight = self.safe_evaluate(test_case_instances)
self.teardown()
result = {'success': success, 'error': error, 'weight': weight}
@@ -106,29 +104,43 @@ class CodeEvaluator(object):
os.makedirs(self.in_dir)
self._change_dir(self.in_dir)
- def safe_evaluate(self, **kwargs): #user_answer, partial_grading, test_case_data, file_paths=None
+ def get_evaluator_objects(self, kwargs):
+ metadata = kwargs.get('metadata') # metadata contains user_answer, language, partial_grading, file_paths
+ test_case_data = kwargs.get('test_case_data')
+ test_case_instances = []
+
+ for test_case in test_case_data:
+ test_case_instance = create_evaluator_instance(metadata, test_case)
+ test_case_instances.append(test_case_instance)
+
+ return test_case_instances
+
+
+ def safe_evaluate(self, test_case_instances):
"""
Handles code evaluation along with compilation, signal handling
and Exception handling
"""
- metadata = kwargs.get('metadata') # metadata contains user_answer, language, partial_grading, file_paths
- test_case_data = kwargs.get('test_case_data')
+ # metadata = kwargs.get('metadata') # metadata contains user_answer, language, partial_grading, file_paths
+ # test_case_data = kwargs.get('test_case_data')
# Add a new signal handler for the execution of this code.
prev_handler = create_signal_handler()
success = False
- test_case_success_status = [False] * len(test_case_data)
+ test_case_success_status = [False] * len(test_case_instances)
error = ""
weight = 0.0
# Do whatever testing needed.
try:
# Run evaluator selection registry here
- for idx, test_case in enumerate(test_case_data):
- test_case_instance = create_evaluator_instance(metadata, test_case) #language, test_case
+ for idx, test_case_instance in enumerate(test_case_instances):
+ # test_case_instance = create_evaluator_instance(metadata, test_case) #language, test_case
+ # self.setup()
test_case_success = False
+ test_case_instance.compile_code()
+ test_case_success, err, test_case_weight = test_case_instance.check_code()
+ # self.teardown()
# user_answer,
# file_paths,
# partial_grading,
@@ -213,64 +225,64 @@ class CodeEvaluator(object):
delete_signal_handler()
self._change_dir(dirname(MY_DIR))
- def check_code(self):
- raise NotImplementedError("check_code method not implemented")
+ # def check_code(self):
+ # raise NotImplementedError("check_code method not implemented")
- def compile_code(self, user_answer, file_paths, **kwargs):
- pass
+ # def compile_code(self, user_answer, file_paths, **kwargs):
+ # pass
- def create_submit_code_file(self, file_name):
- """ Set the file path for code (`answer`)"""
- submit_path = abspath(file_name)
- if not exists(submit_path):
- submit_f = open(submit_path, 'w')
- submit_f.close()
+ # def create_submit_code_file(self, file_name):
+ # """ Set the file path for code (`answer`)"""
+ # submit_path = abspath(file_name)
+ # if not exists(submit_path):
+ # submit_f = open(submit_path, 'w')
+ # submit_f.close()
- return submit_path
+ # return submit_path
- def write_to_submit_code_file(self, file_path, user_answer):
- """ Write the code (`answer`) to a file"""
- submit_f = open(file_path, 'w')
- submit_f.write(user_answer.lstrip())
- submit_f.close()
+ # def write_to_submit_code_file(self, file_path, user_answer):
+ # """ Write the code (`answer`) to a file"""
+ # submit_f = open(file_path, 'w')
+ # submit_f.write(user_answer.lstrip())
+ # submit_f.close()
def _set_file_as_executable(self, fname):
os.chmod(fname, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
| stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP
| stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)
- def _set_test_code_file_path(self, ref_path=None, test_case_path=None):
- if ref_path and not ref_path.startswith('/'):
- ref_path = join(MY_DIR, ref_path)
+ # def _set_test_code_file_path(self, ref_path=None, test_case_path=None):
+ # if ref_path and not ref_path.startswith('/'):
+ # ref_path = join(MY_DIR, ref_path)
- if test_case_path and not test_case_path.startswith('/'):
- test_case_path = join(MY_DIR, test_case_path)
+ # if test_case_path and not test_case_path.startswith('/'):
+ # test_case_path = join(MY_DIR, test_case_path)
- return ref_path, test_case_path
+ # return ref_path, test_case_path
- def _run_command(self, cmd_args, *args, **kw):
- """Run a command in a subprocess while blocking, the process is killed
- if it takes more than 2 seconds to run. Return the Popen object, the
- stdout and stderr.
- """
- try:
- proc = subprocess.Popen(cmd_args, *args, **kw)
- stdout, stderr = proc.communicate()
- except TimeoutException:
- # Runaway code, so kill it.
- proc.kill()
- # Re-raise exception.
- raise
- return proc, stdout.decode('utf-8'), stderr.decode('utf-8')
+ # def _run_command(self, cmd_args, *args, **kw):
+ # """Run a command in a subprocess while blocking, the process is killed
+ # if it takes more than 2 seconds to run. Return the Popen object, the
+ # stdout and stderr.
+ # """
+ # try:
+ # proc = subprocess.Popen(cmd_args, *args, **kw)
+ # stdout, stderr = proc.communicate()
+ # except TimeoutException:
+ # # Runaway code, so kill it.
+ # proc.kill()
+ # # Re-raise exception.
+ # raise
+ # return proc, stdout.decode('utf-8'), stderr.decode('utf-8')
def _change_dir(self, in_dir):
if in_dir is not None and isdir(in_dir):
os.chdir(in_dir)
- def _remove_null_substitute_char(self, string):
- """Returns a string without any null and substitute characters"""
- stripped = ""
- for c in string:
- if ord(c) is not 26 and ord(c) is not 0:
- stripped = stripped + c
- return ''.join(stripped)
+ # def _remove_null_substitute_char(self, string):
+ # """Returns a string without any null and substitute characters"""
+ # stripped = ""
+ # for c in string:
+ # if ord(c) is not 26 and ord(c) is not 0:
+ # stripped = stripped + c
+ # return ''.join(stripped)
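
The net effect of this refactor: evaluate() now receives one dict with
'metadata' and 'test_case_data' keys, builds one evaluator instance per test
case via get_evaluator_objects(), and loops over those instances in
safe_evaluate(). A sketch of the expected payload (field values illustrative):

    data = {
        'metadata': {
            'user_answer': 'def add(a, b):\n    return a + b',
            'language': 'python',
            'partial_grading': False,
            'file_paths': [],
        },
        'test_case_data': [
            {'test_case_type': 'standardtestcase',
             'test_case': 'assert add(1, 2) == 3',
             'weight': 1.0},
        ],
    }
    result = CodeEvaluator(in_dir='/tmp/eval_dir').evaluate(data)
    # result -> {'success': ..., 'error': ..., 'weight': ...}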
diff --git a/yaksh/code_server.py b/yaksh/code_server.py
index abe7cd8..3c1a3e3 100644
--- a/yaksh/code_server.py
+++ b/yaksh/code_server.py
@@ -54,6 +54,7 @@ from tornado.web import Application, RequestHandler
# Local imports
from .settings import SERVER_PORTS, SERVER_POOL_PORT
from .language_registry import create_evaluator_instance
+from .code_evaluator import CodeEvaluator
MY_DIR = abspath(dirname(__file__))
@@ -89,9 +90,9 @@ class CodeServer(object):
# json_data,
# in_dir
# )
- data = unpack_json_to_python_obj(json_data)
+ data = self.unpack_json_to_python_obj(json_data)
code_eval_instance = CodeEvaluator(in_dir)
- result = code_eval_instance.evaluate(**data) #language, test_case_type,
+ result = code_eval_instance.evaluate(data) #language, test_case_type,
# Put us back into the server pool queue since we are free now.
self.queue.put(self.port)
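
With this change the server decodes the JSON payload once and hands the
resulting dict to evaluate() directly: evaluate(data) rather than
evaluate(**data). Roughly, with hypothetical values:

    import json

    json_data = json.dumps({
        'metadata': {'user_answer': 'print(1)',
                     'language': 'python',
                     'partial_grading': False},
        'test_case_data': [{'test_case_type': 'standardtestcase',
                            'test_case': 'assert True',
                            'weight': 1.0}],
    })
    data = json.loads(json_data)  # roughly what unpack_json_to_python_obj does
    # result = CodeEvaluator(in_dir).evaluate(data)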
diff --git a/yaksh/cpp_code_evaluator.py b/yaksh/cpp_code_evaluator.py
index 716a522..806fe67 100644
--- a/yaksh/cpp_code_evaluator.py
+++ b/yaksh/cpp_code_evaluator.py
@@ -5,17 +5,15 @@ import pwd
import os
from os.path import join, isfile
import subprocess
-import importlib
# Local imports
-from .code_evaluator import CodeEvaluator
from .file_utils import copy_files, delete_files
+from .base_evaluator import BaseEvaluator
-class CppCodeEvaluator(CodeEvaluator):
+class CppCodeEvaluator(BaseEvaluator):
"""Tests the C code obtained from Code Server"""
- def setup(self):
- super(CppCodeEvaluator, self).setup()
+ def __init__(self, metadata, test_case_data):
self.files = []
self.submit_code_path = self.create_submit_code_file('submit.c')
self.compiled_user_answer = None
@@ -23,16 +21,34 @@ class CppCodeEvaluator(CodeEvaluator):
self.user_output_path = ""
self.ref_output_path = ""
- def teardown(self):
- # Delete the created file.
- os.remove(self.submit_code_path)
- if os.path.exists(self.ref_output_path):
- os.remove(self.ref_output_path)
- if os.path.exists(self.user_output_path):
- os.remove(self.user_output_path)
- if self.files:
- delete_files(self.files)
- super(CppCodeEvaluator, self).teardown()
+ # Set metadata values
+ self.user_answer = metadata.get('user_answer')
+ self.file_paths = metadata.get('file_paths')
+ self.partial_grading = metadata.get('partial_grading')
+
+ # Set test case data values
+ self.test_case = test_case_data.get('test_case')
+ self.weight = test_case_data.get('weight')
+
+ # def setup(self):
+ # super(CppCodeEvaluator, self).setup()
+ # self.files = []
+ # self.submit_code_path = self.create_submit_code_file('submit.c')
+ # self.compiled_user_answer = None
+ # self.compiled_test_code = None
+ # self.user_output_path = ""
+ # self.ref_output_path = ""
+
+ # def teardown(self):
+ # # Delete the created file.
+ # os.remove(self.submit_code_path)
+ # if os.path.exists(self.ref_output_path):
+ # os.remove(self.ref_output_path)
+ # if os.path.exists(self.user_output_path):
+ # os.remove(self.user_output_path)
+ # if self.files:
+ # delete_files(self.files)
+ # super(CppCodeEvaluator, self).teardown()
def set_file_paths(self):
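
After this change a C/C++ evaluator is constructed once per test case from its
metadata and test-case dicts; the state that setup() used to build now comes
from __init__. A hypothetical construction, assuming the compile/check calls
made from safe_evaluate():

    metadata = {
        'user_answer': 'int add(int a, int b) { return a + b; }',
        'file_paths': [],
        'partial_grading': False,
    }
    test_case = {'test_case': 'reference test code here', 'weight': 1.0}

    evaluator = CppCodeEvaluator(metadata, test_case)
    evaluator.compile_code()
    success, err, weight = evaluator.check_code()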
diff --git a/yaksh/evaluator_tests/test_python_evaluation.py b/yaksh/evaluator_tests/test_python_evaluation.py
index 688002f..4bf0032 100644
--- a/yaksh/evaluator_tests/test_python_evaluation.py
+++ b/yaksh/evaluator_tests/test_python_evaluation.py
@@ -263,7 +263,7 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
# Then
self.assertFalse(result.get("success"))
- self.assertEqual(3, len(err))
+ self.assertEqual(9, len(err))
for msg in name_error_msg:
self.assertIn(msg, result.get("error"))
@@ -337,7 +337,7 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
# Then
self.assertFalse(result.get("success"))
- self.assertEqual(3, len(err))
+ self.assertEqual(9, len(err))
for msg in type_error_msg:
self.assertIn(msg, result.get("error"))
@@ -377,7 +377,7 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
# Then
self.assertFalse(result.get("success"))
- self.assertEqual(4, len(err))
+ self.assertEqual(9, len(err))
for msg in value_error_msg:
self.assertIn(msg, result.get("error"))
@@ -455,7 +455,7 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
# Then
self.assertFalse(result.get("success"))
- self.assertEqual(5, len(err))
+ self.assertEqual(6, len(err))
for msg in syntax_error_msg:
self.assertIn(msg, result.get("error"))
@@ -476,9 +476,6 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
]
name_error_msg = ["Traceback",
"call",
- "File",
- "line",
- "<string>",
"NameError",
"name 'S' is not defined"
]
@@ -504,7 +501,7 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
# Then
self.assertFalse(result.get("success"))
- self.assertEqual(3, len(err))
+ self.assertEqual(7, len(err))
for msg in name_error_msg:
self.assertIn(msg, result.get("error"))
diff --git a/yaksh/models.py b/yaksh/models.py
index 7fae305..4951836 100644
--- a/yaksh/models.py
+++ b/yaksh/models.py
@@ -255,6 +255,7 @@ class Question(models.Model):
def consolidate_answer_data(self, user_answer):
question_data = {}
+ metadata = {}
test_case_data = []
test_cases = self.get_test_cases()
@@ -264,12 +265,15 @@ class Question(models.Model):
test_case_data.append(test_case_as_dict)
question_data['test_case_data'] = test_case_data
- question_data['user_answer'] = user_answer
- question_data['partial_grading'] = self.partial_grading
+ metadata['user_answer'] = user_answer
+ metadata['language'] = self.language
+ metadata['partial_grading'] = self.partial_grading
files = FileUpload.objects.filter(question=self)
if files:
- question_data['file_paths'] = [(file.file.path, file.extract)
+ metadata['file_paths'] = [(file.file.path, file.extract)
for file in files]
+ question_data['metadata'] = metadata
+
return json.dumps(question_data)
@@ -309,15 +313,20 @@ class Question(models.Model):
delete_files(files_list, file_path)
def get_test_cases(self, **kwargs):
- test_case_ctype = ContentType.objects.get(app_label="yaksh",
- model=self.test_case_type
- )
- test_cases = test_case_ctype.get_all_objects_for_this_type(
- question=self,
- **kwargs
- )
-
- return test_cases
+ # test_case_ctype = ContentType.objects.get(app_label="yaksh",
+ # model=self.test_case_type
+ # )
+ # test_cases = test_case_ctype.get_all_objects_for_this_type(
+ # question=self,
+ # **kwargs
+ # )
+ tc_list = []
+ for tc in self.testcase_set.all():
+ tc_type = tc.type
+ obj = getattr(tc, tc_type)
+ tc_list.append(obj)
+
+ return tc_list
def get_test_case(self, **kwargs):
test_case_ctype = ContentType.objects.get(app_label="yaksh",
@@ -1137,7 +1146,8 @@ class StandardTestCase(TestCase):
weight = models.FloatField(default=1.0)
def get_field_value(self):
- return {"test_case": self.test_case,
+ return {"test_case_type": "standardtestcase",
+ "test_case": self.test_case,
"weight": self.weight}
def __str__(self):
@@ -1152,7 +1162,8 @@ class StdioBasedTestCase(TestCase):
weight = models.IntegerField(default=1.0)
def get_field_value(self):
- return {"expected_output": self.expected_output,
+ return {"test_case_type": "stdiobasedtestcase",
+ "expected_output": self.expected_output,
"expected_input": self.expected_input,
"weight": self.weight}
@@ -1167,7 +1178,9 @@ class McqTestCase(TestCase):
correct = models.BooleanField(default=False)
def get_field_value(self):
- return {"options": self.options, "correct": self.correct}
+ return {"test_case_type": "mcqtestcase",
+ "options": self.options,
+ "correct": self.correct}
def __str__(self):
return u'Question: {0} | Correct: {1}'.format(self.question,
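
With the three get_field_value() methods now embedding their type, the JSON
produced by consolidate_answer_data() is self-describing: the code server can
pick an evaluator per test case without a separate test_case_type argument.
Illustrative shape of the payload after this change:

    question_data = {
        'test_case_data': [
            {'test_case_type': 'standardtestcase',
             'test_case': 'assert add(1, 2) == 3',
             'weight': 1.0},
        ],
        'metadata': {
            'user_answer': 'def add(a, b):\n    return a + b',
            'language': 'python',
            'partial_grading': False,
        },
    }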
diff --git a/yaksh/python_assertion_evaluator.py b/yaksh/python_assertion_evaluator.py
index 3e172ec..eb13f53 100644
--- a/yaksh/python_assertion_evaluator.py
+++ b/yaksh/python_assertion_evaluator.py
@@ -7,11 +7,12 @@ from os.path import join
import importlib
# Local imports
-from .code_evaluator import CodeEvaluator, TimeoutException
from .file_utils import copy_files, delete_files
+from .base_evaluator import BaseEvaluator
+from .code_evaluator import TimeoutException
-class PythonAssertionEvaluator(object):
+class PythonAssertionEvaluator(BaseEvaluator):
"""Tests the Python code obtained from Code Server"""
def __init__(self, metadata, test_case_data):
@@ -32,7 +33,6 @@ class PythonAssertionEvaluator(object):
# if self.files:
# delete_files(self.files)
-
# def setup(self):
# super(PythonAssertionEvaluator, self).setup()
# self.exec_scope = None
@@ -98,9 +98,11 @@ class PythonAssertionEvaluator(object):
text = str(self.test_case).splitlines()[lineno-1]
err = ("-----\nExpected Test Case:\n{0}\n"
"Error - {1} {2} in: {3}\n-----").format(self.test_case, type.__name__, str(value), text)
+ except TimeoutException:
+ raise
except Exception:
- msg = traceback.format_exc(limit=0)
- err = "Error in Test case: {0}".format(msg)
+ msg = traceback.format_exc(limit=0)
+ err = "Error in Test case: {0}".format(msg)
else:
success = True
err = '-----\nCorrect answer\nTest Case: {0}\n-----'.format(self.test_case)
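
The new TimeoutException clause matters because a later generic
except Exception would otherwise swallow the timeout and report it as an
ordinary test-case error. A minimal, self-contained sketch of the ordering
(TimeoutException defined locally as a stand-in):

    class TimeoutException(Exception):
        """Stand-in for yaksh.code_evaluator.TimeoutException."""

    def handle(exc):
        try:
            raise exc
        except TimeoutException:
            return 'timeout propagated'       # the evaluator re-raises here
        except Exception:
            return 'recorded as test-case error'

    print(handle(TimeoutException()))  # timeout propagated
    print(handle(ValueError()))        # recorded as test-case error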
diff --git a/yaksh/python_stdio_evaluator.py b/yaksh/python_stdio_evaluator.py
index b618a0b..7ef3a7c 100644
--- a/yaksh/python_stdio_evaluator.py
+++ b/yaksh/python_stdio_evaluator.py
@@ -14,8 +14,8 @@ except ImportError:
from io import StringIO
# Local imports
-from .code_evaluator import CodeEvaluator
from .file_utils import copy_files, delete_files
+from .base_evaluator import BaseEvaluator
@contextmanager
@@ -28,7 +28,7 @@ def redirect_stdout():
sys.stdout = old_target # restore to the previous value
-class PythonStdioEvaluator(CodeEvaluator):
+class PythonStdioEvaluator(BaseEvaluator):
"""Tests the Python code obtained from Code Server"""
# def setup(self):
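
For reference, the redirect_stdout pattern this module relies on captures
whatever the user's code prints so it can be compared with the expected
output. A self-contained sketch of the same pattern (using io.StringIO; the
module itself also handles the Python 2 StringIO import):

    import sys
    from contextlib import contextmanager
    from io import StringIO

    @contextmanager
    def redirect_stdout():
        new_target = StringIO()
        old_target, sys.stdout = sys.stdout, new_target
        try:
            yield new_target
        finally:
            sys.stdout = old_target   # restore the previous stdout

    with redirect_stdout() as out:
        print('hello')
    assert out.getvalue() == 'hello\n'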