-rw-r--r--  yaksh/admin.py                                     5
-rw-r--r--  yaksh/bash_code_evaluator.py                     124
-rw-r--r--  yaksh/code_evaluator.py                          101
-rwxr-xr-x  yaksh/code_server.py                              23
-rw-r--r--  yaksh/cpp_code_evaluator.py                      126
-rw-r--r--  yaksh/evaluator_tests/test_bash_evaluation.py     49
-rw-r--r--  yaksh/evaluator_tests/test_c_cpp_evaluation.py    83
-rw-r--r--  yaksh/evaluator_tests/test_code_evaluation.py     38
-rw-r--r--  yaksh/evaluator_tests/test_java_evaluation.py     56
-rw-r--r--  yaksh/evaluator_tests/test_python_evaluation.py  224
-rw-r--r--  yaksh/evaluator_tests/test_scilab_evaluation.py   65
-rw-r--r--  yaksh/forms.py                                     22
-rw-r--r--  yaksh/java_code_evaluator.py                      127
-rw-r--r--  yaksh/language_registry.py                         29
-rw-r--r--  yaksh/models.py                                   189
-rw-r--r--  yaksh/python_assertion_evaluator.py                47
-rw-r--r--  yaksh/python_code_evaluator.py                     65
-rw-r--r--  yaksh/python_stdout_evaluator.py                   58
-rw-r--r--  yaksh/scilab_code_evaluator.py                     55
-rw-r--r--  yaksh/settings.py                                  16
-rw-r--r--  yaksh/static/yaksh/js/add_question.js              67
-rw-r--r--  yaksh/static/yaksh/js/show_testcase.js             24
-rw-r--r--  yaksh/templates/yaksh/add_question.html            36
-rw-r--r--  yaksh/templates/yaksh/question.html                18
-rw-r--r--  yaksh/tests.py                                    213
-rw-r--r--  yaksh/urls.py                                       2
-rw-r--r--  yaksh/views.py                                    160
-rw-r--r--  yaksh/xmlrpc_clients.py                             4
28 files changed, 1144 insertions(+), 882 deletions(-)
diff --git a/yaksh/admin.py b/yaksh/admin.py
index 71dfb3b..c31b99b 100644
--- a/yaksh/admin.py
+++ b/yaksh/admin.py
@@ -1,6 +1,9 @@
-from yaksh.models import Question, Quiz, TestCase
+from yaksh.models import Question, Quiz
+from yaksh.models import TestCase, StandardTestCase, StdoutBasedTestCase
from django.contrib import admin
admin.site.register(Question)
admin.site.register(TestCase)
+admin.site.register(StandardTestCase)
+admin.site.register(StdoutBasedTestCase)
admin.site.register(Quiz)
diff --git a/yaksh/bash_code_evaluator.py b/yaksh/bash_code_evaluator.py
index a468fd7..a0af0e2 100644
--- a/yaksh/bash_code_evaluator.py
+++ b/yaksh/bash_code_evaluator.py
@@ -3,6 +3,7 @@ import traceback
import pwd
import os
from os.path import join, isfile
+import sys
import subprocess
import importlib
@@ -11,34 +12,18 @@ from code_evaluator import CodeEvaluator
class BashCodeEvaluator(CodeEvaluator):
- """Tests the Bash code obtained from Code Server"""
- def __init__(self, test_case_data, test, language, user_answer,
- ref_code_path=None, in_dir=None):
- super(BashCodeEvaluator, self).__init__(test_case_data, test, language, user_answer,
- ref_code_path, in_dir)
- self.test_case_args = self._setup()
-
# Private Protocol ##########
- def _setup(self):
- super(BashCodeEvaluator, self)._setup()
-
- self.submit_path = self.create_submit_code_file('submit.sh')
- self._set_file_as_executable(self.submit_path)
- get_ref_path, get_test_case_path = self.ref_code_path.strip().split(',')
- get_ref_path = get_ref_path.strip()
- get_test_case_path = get_test_case_path.strip()
- ref_path, test_case_path = self._set_test_code_file_path(get_ref_path,
- get_test_case_path)
-
- return ref_path, self.submit_path, test_case_path
+ def setup(self):
+ super(BashCodeEvaluator, self).setup()
+ self.submit_code_path = self.create_submit_code_file('submit.sh')
+ self._set_file_as_executable(self.submit_code_path)
- def _teardown(self):
+ def teardown(self):
# Delete the created file.
- super(BashCodeEvaluator, self)._teardown()
- os.remove(self.submit_path)
+ super(BashCodeEvaluator, self).teardown()
+ os.remove(self.submit_code_path)
- def _check_code(self, ref_path, submit_path,
- test_case_path=None):
+ def check_code(self, user_answer, test_case):
""" Function validates student script using instructor script as
reference. Test cases can optionally be provided. The first argument
ref_path, is the path to instructor script, it is assumed to
@@ -61,62 +46,87 @@ class BashCodeEvaluator(CodeEvaluator):
the required permissions are not given to the file(s).
"""
- if not isfile(ref_path):
- return False, "No file at %s or Incorrect path" % ref_path
- if not isfile(submit_path):
- return False, "No file at %s or Incorrect path" % submit_path
- if not os.access(ref_path, os.X_OK):
- return False, "Script %s is not executable" % ref_path
- if not os.access(submit_path, os.X_OK):
- return False, "Script %s is not executable" % submit_path
+ ref_code_path = test_case
+ get_ref_path, get_test_case_path = ref_code_path.strip().split(',')
+ get_ref_path = get_ref_path.strip()
+ get_test_case_path = get_test_case_path.strip()
+ clean_ref_code_path, clean_test_case_path = \
+ self._set_test_code_file_path(get_ref_path, get_test_case_path)
+
+ if not isfile(clean_ref_code_path):
+ msg = "No file at %s or Incorrect path" % clean_ref_code_path
+ return False, msg
+ if not isfile(self.submit_code_path):
+ msg = "No file at %s or Incorrect path" % self.submit_code_path
+ return False, msg
+ if not os.access(clean_ref_code_path, os.X_OK):
+ msg = "Script %s is not executable" % clean_ref_code_path
+ return False, msg
+ if not os.access(self.submit_code_path, os.X_OK):
+ msg = "Script %s is not executable" % self.submit_code_path
+ return False, msg
success = False
-
- if test_case_path is None or "":
- ret = self._run_command(ref_path, stdin=None,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
+ self.write_to_submit_code_file(self.submit_code_path, user_answer)
+
+ if clean_test_case_path is None or "":
+ ret = self._run_command(clean_ref_code_path,
+ stdin=None,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE
+ )
proc, inst_stdout, inst_stderr = ret
- ret = self._run_command(submit_path, stdin=None,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
+ ret = self._run_command(self.submit_code_path,
+ stdin=None,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE
+ )
proc, stdnt_stdout, stdnt_stderr = ret
if inst_stdout == stdnt_stdout:
return True, "Correct answer"
else:
err = "Error: expected %s, got %s" % (inst_stderr,
- stdnt_stderr)
+ stdnt_stderr
+ )
return False, err
else:
- if not isfile(test_case_path):
- return False, "No test case at %s" % test_case_path
- if not os.access(ref_path, os.R_OK):
- return False, "Test script %s, not readable" % test_case_path
+ if not isfile(clean_test_case_path):
+ msg = "No test case at %s" % clean_test_case_path
+ return False, msg
+ if not os.access(clean_ref_code_path, os.R_OK):
+ msg = "Test script %s, not readable" % clean_test_case_path
+ return False, msg
# valid_answer is True, so that we can stop once a test case fails
valid_answer = True
# loop_count has to be greater than or equal to one.
# Useful for caching things like empty test files,etc.
loop_count = 0
- test_cases = open(test_case_path).readlines()
+ test_cases = open(clean_test_case_path).readlines()
num_lines = len(test_cases)
for test_case in test_cases:
loop_count += 1
if valid_answer:
- args = [ref_path] + [x for x in test_case.split()]
- ret = self._run_command(args, stdin=None,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
+ args = [clean_ref_code_path] + \
+ [x for x in test_case.split()]
+ ret = self._run_command(args,
+ stdin=None,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE
+ )
proc, inst_stdout, inst_stderr = ret
- args = [submit_path]+[x for x in test_case.split()]
- ret = self._run_command(args, stdin=None,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
+ args = [self.submit_code_path] + \
+ [x for x in test_case.split()]
+ ret = self._run_command(args,
+ stdin=None,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
proc, stdnt_stdout, stdnt_stderr = ret
valid_answer = inst_stdout == stdnt_stdout
if valid_answer and (num_lines == loop_count):
return True, "Correct answer"
else:
- err = "Error:expected %s, got %s" % (inst_stdout+inst_stderr,
- stdnt_stdout+stdnt_stderr)
+ err = ("Error:expected"
+ " %s, got %s").format(inst_stdout+inst_stderr,
+ stdnt_stdout+stdnt_stderr
+ )
return False, err
-
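The refactored check_code above receives the reference script and the argument file through test_case, then compares the stdout of the instructor's script and the student's script for every line of arguments. A minimal standalone sketch of that comparison (the helper name and file paths below are illustrative, not yaksh API):

```python
# Sketch of the comparison BashCodeEvaluator.check_code performs:
# run the reference script and the submitted script with the same
# arguments and compare their stdout.
import subprocess

def compare_scripts(ref_path, submit_path, args_path):
    with open(args_path) as f:
        arg_lines = f.readlines()
    for line in arg_lines:
        args = line.split()
        ref_out = subprocess.check_output([ref_path] + args)
        stdnt_out = subprocess.check_output([submit_path] + args)
        if ref_out != stdnt_out:
            return False, "Error: expected %s, got %s" % (ref_out, stdnt_out)
    return True, "Correct answer"

# Example (paths are placeholders):
# compare_scripts("bash_files/sample.sh", "/tmp/submit.sh",
#                 "bash_files/sample.args")
```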
diff --git a/yaksh/code_evaluator.py b/yaksh/code_evaluator.py
index 381b2e8..aab99eb 100644
--- a/yaksh/code_evaluator.py
+++ b/yaksh/code_evaluator.py
@@ -3,16 +3,15 @@ from SimpleXMLRPCServer import SimpleXMLRPCServer
import pwd
import os
import stat
-from os.path import isdir, dirname, abspath, join, isfile
+from os.path import isdir, dirname, abspath, join, isfile, exists
import signal
+import traceback
from multiprocessing import Process, Queue
import subprocess
import re
-import json
# Local imports.
from settings import SERVER_TIMEOUT
-
MY_DIR = abspath(dirname(__file__))
@@ -50,33 +49,13 @@ def delete_signal_handler():
class CodeEvaluator(object):
"""Tests the code obtained from Code Server"""
- def __init__(self, test_case_data, test, language, user_answer,
- ref_code_path=None, in_dir=None):
+ def __init__(self, in_dir=None):
msg = 'Code took more than %s seconds to run. You probably '\
'have an infinite loop in your code.' % SERVER_TIMEOUT
self.timeout_msg = msg
- self.test_case_data = test_case_data
- self.language = language.lower()
- self.user_answer = user_answer
- self.ref_code_path = ref_code_path
- self.test = test
self.in_dir = in_dir
- self.test_case_args = None
-
- # Public Protocol ##########
- @classmethod
- def from_json(cls, language, json_data, in_dir):
- json_data = json.loads(json_data)
- test_case_data = json_data.get("test_case_data")
- user_answer = json_data.get("user_answer")
- ref_code_path = json_data.get("ref_code_path")
- test = json_data.get("test")
-
- instance = cls(test_case_data, test, language, user_answer, ref_code_path,
- in_dir)
- return instance
-
- def evaluate(self):
+
+ def evaluate(self, **kwargs):
"""Evaluates given code with the test cases based on
given arguments in test_case_data.
@@ -98,54 +77,72 @@ class CodeEvaluator(object):
A tuple: (success, error message).
"""
- self._setup()
- success, err = self._evaluate(self.test_case_args)
- self._teardown()
+ self.setup()
+ success, err = self.safe_evaluate(**kwargs)
+ self.teardown()
result = {'success': success, 'error': err}
return result
# Private Protocol ##########
- def _setup(self):
+ def setup(self):
self._change_dir(self.in_dir)
- def _evaluate(self, args):
+ def safe_evaluate(self, user_answer, test_case_data):
+ """
+ Handles code evaluation along with compilation, signal handling
+ and Exception handling
+ """
+
# Add a new signal handler for the execution of this code.
prev_handler = create_signal_handler()
success = False
- args = args or []
# Do whatever testing needed.
try:
- success, err = self._check_code(*args)
+ for test_case in test_case_data:
+ self.compile_code(user_answer, **test_case)
+ success, err = self.check_code(user_answer, **test_case)
+ if not success:
+ break
except TimeoutException:
err = self.timeout_msg
- except:
- _type, value = sys.exc_info()[:2]
- err = "Error: {0}".format(repr(value))
+ except Exception:
+ err = "Error: {0}".format(traceback.format_exc(limit=0))
+
finally:
# Set back any original signal handler.
set_original_signal_handler(prev_handler)
return success, err
- def _teardown(self):
+ def teardown(self):
# Cancel the signal
delete_signal_handler()
- def _check_code(self):
+ def check_code(self):
raise NotImplementedError("check_code method not implemented")
+ def compile_code(self, user_answer, **kwargs):
+ pass
+
def create_submit_code_file(self, file_name):
- """ Write the code (`answer`) to a file and set the file path"""
- submit_f = open(file_name, 'w')
- submit_f.write(self.user_answer.lstrip())
- submit_f.close()
- submit_path = abspath(submit_f.name)
+ """ Set the file path for code (`answer`)"""
+ submit_path = abspath(file_name)
+ if not exists(submit_path):
+ submit_f = open(submit_path, 'w')
+ submit_f.close()
return submit_path
+
+ def write_to_submit_code_file(self, file_path, user_answer):
+ """ Write the code (`answer`) to a file"""
+ submit_f = open(file_path, 'w')
+ submit_f.write(user_answer.lstrip())
+ submit_f.close()
+
def _set_file_as_executable(self, fname):
os.chmod(fname, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
| stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP
@@ -175,24 +172,6 @@ class CodeEvaluator(object):
raise
return proc, stdout, stderr
- def _compile_command(self, cmd, *args, **kw):
- """Compiles C/C++/java code and returns errors if any.
- Run a command in a subprocess while blocking, the process is killed
- if it takes more than 2 seconds to run. Return the Popen object, the
- stderr.
- """
- try:
- proc_compile = subprocess.Popen(cmd, shell=True, stdin=None,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- out, err = proc_compile.communicate()
- except TimeoutException:
- # Runaway code, so kill it.
- proc_compile.kill()
- # Re-raise exception.
- raise
- return proc_compile, err
-
def _change_dir(self, in_dir):
if in_dir is not None and isdir(in_dir):
os.chdir(in_dir)
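After this change the base CodeEvaluator follows a setup / safe_evaluate / teardown template: safe_evaluate loops over test_case_data, compiling and checking each case, and a SIGALRM-based guard (create_signal_handler / TimeoutException, defined elsewhere in code_evaluator.py) aborts runaway code. The sketch below shows how such a guard typically works; it is an assumption, not the exact yaksh implementation.

```python
# Minimal, self-contained sketch of a SIGALRM timeout guard.
import signal

SERVER_TIMEOUT = 2  # seconds; mirrors settings.SERVER_TIMEOUT

class TimeoutException(Exception):
    pass

def _alarm_handler(signum, frame):
    raise TimeoutException("Code took too long to run.")

def run_with_timeout(func, *args, **kwargs):
    prev = signal.signal(signal.SIGALRM, _alarm_handler)
    signal.alarm(SERVER_TIMEOUT)             # arm the alarm
    try:
        return func(*args, **kwargs)
    finally:
        signal.alarm(0)                      # cancel the alarm
        signal.signal(signal.SIGALRM, prev)  # restore the previous handler

# Usage: wrapping the per-test-case check in run_with_timeout raises
# TimeoutException for an infinite loop instead of hanging the server.
```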
diff --git a/yaksh/code_server.py b/yaksh/code_server.py
index faf9c0d..2d8567e 100755
--- a/yaksh/code_server.py
+++ b/yaksh/code_server.py
@@ -31,7 +31,7 @@ import re
import json
# Local imports.
from settings import SERVER_PORTS, SERVER_POOL_PORT
-from language_registry import set_registry, get_registry
+from language_registry import create_evaluator_instance, unpack_json
MY_DIR = abspath(dirname(__file__))
@@ -58,13 +58,17 @@ class CodeServer(object):
self.queue = queue
# Public Protocol ##########
- def check_code(self, language, json_data, in_dir=None):
+ def check_code(self, language, test_case_type, json_data, in_dir=None):
"""Calls relevant EvaluateCode class based on language to check the
answer code
"""
- code_evaluator = self._create_evaluator_instance(language, json_data,
- in_dir)
- result = code_evaluator.evaluate()
+ code_evaluator = create_evaluator_instance(language,
+ test_case_type,
+ json_data,
+ in_dir
+ )
+ data = unpack_json(json_data)
+ result = code_evaluator.evaluate(**data)
# Put us back into the server pool queue since we are free now.
self.queue.put(self.port)
@@ -79,15 +83,6 @@ class CodeServer(object):
self.queue.put(self.port)
server.serve_forever()
- # Private Protocol ##########
- def _create_evaluator_instance(self, language, json_data, in_dir):
- """Create instance of relevant EvaluateCode class based on language"""
- set_registry()
- registry = get_registry()
- cls = registry.get_class(language)
- instance = cls.from_json(language, json_data, in_dir)
- return instance
-
###############################################################################
# `ServerPool` class.
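With this change check_code selects the evaluator by both language and test_case_type, and the JSON payload only carries the answer and test-case data. A hedged sketch of what a caller would send over XML-RPC (the port and payload values are placeholders, not yaksh defaults):

```python
# Illustrative client call against CodeServer.check_code.
import json
import xmlrpclib  # Python 2, matching the rest of the code base

payload = json.dumps({
    "user_answer": "def add(a, b):\n    return a + b",
    "test_case_data": [{"test_case": "assert(add(1, 2) == 3)"}],
})

server = xmlrpclib.ServerProxy("http://localhost:8001")  # placeholder port
result = server.check_code("python", "standardtestcase", payload, "/tmp")
# result is a dict of the form {'success': True, 'error': 'Correct answer'}
```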
diff --git a/yaksh/cpp_code_evaluator.py b/yaksh/cpp_code_evaluator.py
index 7242884..b869442 100644
--- a/yaksh/cpp_code_evaluator.py
+++ b/yaksh/cpp_code_evaluator.py
@@ -12,46 +12,71 @@ from code_evaluator import CodeEvaluator
class CppCodeEvaluator(CodeEvaluator):
"""Tests the C code obtained from Code Server"""
- def __init__(self, test_case_data, test, language, user_answer,
- ref_code_path=None, in_dir=None):
- super(CppCodeEvaluator, self).__init__(test_case_data, test, language,
- user_answer, ref_code_path,
- in_dir)
- self.test_case_args = self._setup()
-
- # Private Protocol ##########
- def _setup(self):
- super(CppCodeEvaluator, self)._setup()
-
- get_ref_path = self.ref_code_path
- ref_path, test_case_path = self._set_test_code_file_path(get_ref_path)
- self.submit_path = self.create_submit_code_file('submit.c')
-
- # Set file paths
- c_user_output_path = os.getcwd() + '/output'
- c_ref_output_path = os.getcwd() + '/executable'
-
- # Set command variables
- compile_command = 'g++ {0} -c -o {1}'.format(self.submit_path,
- c_user_output_path)
- compile_main = 'g++ {0} {1} -o {2}'.format(ref_path,
- c_user_output_path,
- c_ref_output_path)
- run_command_args = [c_ref_output_path]
- remove_user_output = c_user_output_path
- remove_ref_output = c_ref_output_path
-
- return (ref_path, self.submit_path, compile_command, compile_main,
- run_command_args, remove_user_output, remove_ref_output)
-
- def _teardown(self):
+ def setup(self):
+ super(CppCodeEvaluator, self).setup()
+ self.submit_code_path = self.create_submit_code_file('submit.c')
+ self.compiled_user_answer = None
+ self.compiled_test_code = None
+
+ def teardown(self):
+ super(CppCodeEvaluator, self).teardown()
# Delete the created file.
- super(CppCodeEvaluator, self)._teardown()
- os.remove(self.submit_path)
-
- def _check_code(self, ref_code_path, submit_code_path, compile_command,
- compile_main, run_command_args, remove_user_output,
- remove_ref_output):
+ os.remove(self.submit_code_path)
+
+ def set_file_paths(self):
+ user_output_path = os.getcwd() + '/output'
+ ref_output_path = os.getcwd() + '/executable'
+
+ return user_output_path, ref_output_path
+
+ def get_commands(self, clean_ref_code_path, user_output_path,
+ ref_output_path):
+ compile_command = 'g++ {0} -c -o {1}'.format(self.submit_code_path,
+ user_output_path)
+ compile_main = 'g++ {0} {1} -o {2}'.format(clean_ref_code_path,
+ user_output_path,
+ ref_output_path)
+ return compile_command, compile_main
+
+ def compile_code(self, user_answer, test_case):
+ if self.compiled_user_answer and self.compiled_test_code:
+ return None
+ else:
+ ref_code_path = test_case
+ clean_ref_code_path, clean_test_case_path = \
+ self._set_test_code_file_path(ref_code_path)
+
+ if not isfile(clean_ref_code_path):
+ msg = "No file at %s or Incorrect path" % clean_ref_code_path
+ return False, msg
+ if not isfile(self.submit_code_path):
+ msg = "No file at %s or Incorrect path" % self.submit_code_path
+ return False, msg
+
+ self.write_to_submit_code_file(self.submit_code_path, user_answer)
+ self.user_output_path, self.ref_output_path = self.set_file_paths()
+ self.compile_command, self.compile_main = self.get_commands(
+ clean_ref_code_path,
+ self.user_output_path,
+ self.ref_output_path
+ )
+ self.compiled_user_answer = self._run_command(
+ self.compile_command,
+ shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE
+ )
+
+ self.compiled_test_code = self._run_command(
+ self.compile_main,
+ shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE
+ )
+
+ return self.compiled_user_answer, self.compiled_test_code
+
+ def check_code(self, user_answer, test_case):
""" Function validates student code using instructor code as
reference.The first argument ref_code_path, is the path to
instructor code, it is assumed to have executable permission.
@@ -69,35 +94,30 @@ class CppCodeEvaluator(CodeEvaluator):
Returns (False, error_msg): If mandatory arguments are not files or
if the required permissions are not given to the file(s).
-
"""
- if not isfile(ref_code_path):
- return False, "No file at %s or Incorrect path" % ref_code_path
- if not isfile(submit_code_path):
- return False, 'No file at %s or Incorrect path' % submit_code_path
success = False
- ret = self._compile_command(compile_command)
- proc, stdnt_stderr = ret
+ proc, stdnt_out, stdnt_stderr = self.compiled_user_answer
stdnt_stderr = self._remove_null_substitute_char(stdnt_stderr)
# Only if compilation is successful, the program is executed
# And tested with testcases
if stdnt_stderr == '':
- ret = self._compile_command(compile_main)
- proc, main_err = ret
+ proc, main_out, main_err = self.compiled_test_code
main_err = self._remove_null_substitute_char(main_err)
if main_err == '':
- ret = self._run_command(run_command_args, stdin=None,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
+ ret = self._run_command([self.ref_output_path],
+ stdin=None,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE
+ )
proc, stdout, stderr = ret
if proc.returncode == 0:
success, err = True, "Correct answer"
else:
err = stdout + "\n" + stderr
- os.remove(remove_ref_output)
+ os.remove(self.ref_output_path)
else:
err = "Error:"
try:
@@ -109,7 +129,7 @@ class CppCodeEvaluator(CodeEvaluator):
err = err + "\n" + e
except:
err = err + "\n" + main_err
- os.remove(remove_user_output)
+ os.remove(self.user_output_path)
else:
err = "Compilation Error:"
try:
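The C/C++ evaluator's compile_code now builds two g++ commands (see get_commands above): the student's answer is compiled to an object file, then linked against the instructor's test driver, and the resulting executable is run. A sketch of that two-step flow with illustrative paths:

```python
# Sketch of the compile-then-link-then-run sequence used by CppCodeEvaluator.
import subprocess

submit_code_path = "/tmp/submit.c"        # student code (hypothetical path)
ref_code_path = "c_cpp_files/main.cpp"    # instructor test driver
user_output_path = "/tmp/output"          # object file from student code
ref_output_path = "/tmp/executable"       # final test executable

compile_command = "g++ {0} -c -o {1}".format(submit_code_path, user_output_path)
compile_main = "g++ {0} {1} -o {2}".format(ref_code_path, user_output_path,
                                           ref_output_path)

for cmd in (compile_command, compile_main):
    proc = subprocess.Popen(cmd, shell=True,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()
    if err:
        print("Compilation Error:\n" + err)
        break
else:
    # Only run the test executable if both compilations succeeded.
    subprocess.call([ref_output_path])
```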
diff --git a/yaksh/evaluator_tests/test_bash_evaluation.py b/yaksh/evaluator_tests/test_bash_evaluation.py
index c6a5408..4ff3e0a 100644
--- a/yaksh/evaluator_tests/test_bash_evaluation.py
+++ b/yaksh/evaluator_tests/test_bash_evaluation.py
@@ -5,37 +5,48 @@ from yaksh.settings import SERVER_TIMEOUT
class BashEvaluationTestCases(unittest.TestCase):
def setUp(self):
- self.language = "bash"
- self.ref_code_path = "bash_files/sample.sh,bash_files/sample.args"
+ self.test_case_data = [
+ {"test_case": "bash_files/sample.sh,bash_files/sample.args"}
+ ]
self.in_dir = "/tmp"
- self.test_case_data = []
self.timeout_msg = ("Code took more than {0} seconds to run. "
- "You probably have an infinite loop in your code.").format(SERVER_TIMEOUT)
- self.test = None
+ "You probably have an infinite loop in your"
+ " code.").format(SERVER_TIMEOUT)
def test_correct_answer(self):
- user_answer = "#!/bin/bash\n[[ $# -eq 2 ]] && echo $(( $1 + $2 )) && exit $(( $1 + $2 ))"
- get_class = BashCodeEvaluator(self.test_case_data, self.test, self.language, user_answer, self.ref_code_path, self.in_dir)
- result = get_class.evaluate()
-
- self.assertTrue(result.get("success"))
- self.assertEqual(result.get("error"), "Correct answer")
+ user_answer = ("#!/bin/bash\n[[ $# -eq 2 ]]"
+ " && echo $(( $1 + $2 )) && exit $(( $1 + $2 ))"
+ )
+ get_class = BashCodeEvaluator(self.in_dir)
+ kwargs = {'user_answer': user_answer,
+ 'test_case_data': self.test_case_data
+ }
+ result = get_class.evaluate(**kwargs)
+ self.assertTrue(result.get('success'))
+ self.assertEquals(result.get('error'), "Correct answer")
def test_error(self):
- user_answer = "#!/bin/bash\n[[ $# -eq 2 ]] && echo $(( $1 - $2 )) && exit $(( $1 - $2 ))"
- get_class = BashCodeEvaluator(self.test_case_data, self.test, self.language, user_answer, self.ref_code_path, self.in_dir)
- result = get_class.evaluate()
-
+ user_answer = ("#!/bin/bash\n[[ $# -eq 2 ]] "
+ "&& echo $(( $1 - $2 )) && exit $(( $1 - $2 ))")
+ get_class = BashCodeEvaluator(self.in_dir)
+ kwargs = {'user_answer': user_answer,
+ 'test_case_data': self.test_case_data
+ }
+ result = get_class.evaluate(**kwargs)
self.assertFalse(result.get("success"))
self.assertTrue("Error" in result.get("error"))
def test_infinite_loop(self):
- user_answer = "#!/bin/bash\nwhile [ 1 ] ; do echo "" > /dev/null ; done"
- get_class = BashCodeEvaluator(self.test_case_data, self.test, self.language, user_answer, self.ref_code_path, self.in_dir)
- result = get_class.evaluate()
-
+ user_answer = ("#!/bin/bash\nwhile [ 1 ] ;"
+ " do echo "" > /dev/null ; done")
+ get_class = BashCodeEvaluator(self.in_dir)
+ kwargs = {'user_answer': user_answer,
+ 'test_case_data': self.test_case_data
+ }
+ result = get_class.evaluate(**kwargs)
self.assertFalse(result.get("success"))
self.assertEquals(result.get("error"), self.timeout_msg)
+
if __name__ == '__main__':
unittest.main()
diff --git a/yaksh/evaluator_tests/test_c_cpp_evaluation.py b/yaksh/evaluator_tests/test_c_cpp_evaluation.py
index d01cc9c..71fb843 100644
--- a/yaksh/evaluator_tests/test_c_cpp_evaluation.py
+++ b/yaksh/evaluator_tests/test_c_cpp_evaluation.py
@@ -5,71 +5,50 @@ from yaksh.settings import SERVER_TIMEOUT
class CEvaluationTestCases(unittest.TestCase):
def setUp(self):
- self.language = "C"
- self.ref_code_path = "c_cpp_files/main.cpp"
+ self.test_case_data = [{"test_case": "c_cpp_files/main.cpp"}]
self.in_dir = "/tmp"
- self.test_case_data = []
self.timeout_msg = ("Code took more than {0} seconds to run. "
- "You probably have an infinite loop in your code.").format(SERVER_TIMEOUT)
- self.test = None
+ "You probably have an infinite loop in your"
+ " code.").format(SERVER_TIMEOUT)
def test_correct_answer(self):
user_answer = "int add(int a, int b)\n{return a+b;}"
- get_class = CppCodeEvaluator(self.test_case_data, self.test, self.language, user_answer, self.ref_code_path, self.in_dir)
- result = get_class.evaluate()
-
- self.assertTrue(result.get("success"))
- self.assertEqual(result.get("error"), "Correct answer")
-
- def test_compilation_error(self):
- user_answer = "int add(int a, int b)\n{return a+b}"
- get_class = CppCodeEvaluator(self.test_case_data, self.test, self.language, user_answer, self.ref_code_path, self.in_dir)
- result = get_class.evaluate()
-
- self.assertFalse(result.get("success"))
- self.assertTrue("Compilation Error" in result.get("error"))
-
- def test_infinite_loop(self):
- user_answer = "int add(int a, int b)\n{while(1>0){}}"
- get_class = CppCodeEvaluator(self.test_case_data, self.test, self.language, user_answer, self.ref_code_path, self.in_dir)
- result = get_class.evaluate()
-
- self.assertFalse(result.get("success"))
- self.assertEquals(result.get("error"), self.timeout_msg)
-
-
-###############################################################################
-class CppEvaluationTestCases(unittest.TestCase):
- def setUp(self):
- self.language = "CPP"
- self.ref_code_path = "c_cpp_files/main.cpp"
- self.in_dir = "/tmp"
- self.test_case_data = []
- self.timeout_msg = ("Code took more than {0} seconds to run. "
- "You probably have an infinite loop in your code.").format(SERVER_TIMEOUT)
- self.test = None
-
- def test_correct_answer(self):
- user_answer = "int add(int a, int b)\n{return a+b;}"
- get_class = CppCodeEvaluator(self.test_case_data, self.test, self.language, user_answer, self.ref_code_path, self.in_dir)
- result = get_class.evaluate()
-
- self.assertTrue(result.get("success"))
- self.assertEqual(result.get("error"), "Correct answer")
+ get_class = CppCodeEvaluator(self.in_dir)
+ kwargs = {'user_answer': user_answer,
+ 'test_case_data': self.test_case_data
+ }
+ result = get_class.evaluate(**kwargs)
+ self.assertTrue(result.get('success'))
+ self.assertEquals(result.get('error'), "Correct answer")
+
+ def test_incorrect_answer(self):
+ user_answer = "int add(int a, int b)\n{return a-b;}"
+ get_class = CppCodeEvaluator(self.in_dir)
+ kwargs = {'user_answer': user_answer,
+ 'test_case_data': self.test_case_data
+ }
+ result = get_class.evaluate(**kwargs)
+ self.assertFalse(result.get('success'))
+ self.assertIn("Incorrect:", result.get('error'))
+ self.assertTrue(len(result.get('error').splitlines()) > 1)
def test_compilation_error(self):
user_answer = "int add(int a, int b)\n{return a+b}"
- get_class = CppCodeEvaluator(self.test_case_data, self.test, self.language, user_answer, self.ref_code_path, self.in_dir)
- result = get_class.evaluate()
-
+ get_class = CppCodeEvaluator(self.in_dir)
+ kwargs = {'user_answer': user_answer,
+ 'test_case_data': self.test_case_data
+ }
+ result = get_class.evaluate(**kwargs)
self.assertFalse(result.get("success"))
self.assertTrue("Compilation Error" in result.get("error"))
def test_infinite_loop(self):
user_answer = "int add(int a, int b)\n{while(1>0){}}"
- get_class = CppCodeEvaluator(self.test_case_data, self.test, self.language, user_answer, self.ref_code_path, self.in_dir)
- result = get_class.evaluate()
-
+ get_class = CppCodeEvaluator(self.in_dir)
+ kwargs = {'user_answer': user_answer,
+ 'test_case_data': self.test_case_data
+ }
+ result = get_class.evaluate(**kwargs)
self.assertFalse(result.get("success"))
self.assertEquals(result.get("error"), self.timeout_msg)
diff --git a/yaksh/evaluator_tests/test_code_evaluation.py b/yaksh/evaluator_tests/test_code_evaluation.py
index 84701fb..51c0c51 100644
--- a/yaksh/evaluator_tests/test_code_evaluation.py
+++ b/yaksh/evaluator_tests/test_code_evaluation.py
@@ -1,20 +1,44 @@
import unittest
import os
-from yaksh import python_code_evaluator
-from yaksh.language_registry import _LanguageRegistry, set_registry, get_registry
-from yaksh.settings import SERVER_TIMEOUT
+from yaksh import python_assertion_evaluator
+from yaksh.language_registry import _LanguageRegistry, get_registry
+from yaksh.settings import SERVER_TIMEOUT, code_evaluators
class RegistryTestCase(unittest.TestCase):
def setUp(self):
- set_registry()
self.registry_object = get_registry()
self.language_registry = _LanguageRegistry()
+ assertion_evaluator_path = ("yaksh.python_assertion_evaluator"
+ ".PythonAssertionEvaluator"
+ )
+ stdout_evaluator_path = ("yaksh.python_stdout_evaluator."
+ "PythonStdoutEvaluator"
+ )
+ code_evaluators['python'] = \
+ {"standardtestcase": assertion_evaluator_path,
+ "stdoutbasedtestcase": stdout_evaluator_path
+ }
def test_set_register(self):
- class_name = getattr(python_code_evaluator, 'PythonCodeEvaluator')
- self.registry_object.register("python", "yaksh.python_code_evaluator.PythonCodeEvaluator")
- self.assertEquals(self.registry_object.get_class("python"), class_name)
+ evaluator_class = self.registry_object.get_class("python",
+ "standardtestcase"
+ )
+ assertion_evaluator_path = ("yaksh.python_assertion_evaluator"
+ ".PythonAssertionEvaluator"
+ )
+ stdout_evaluator_path = ("yaksh.python_stdout_evaluator."
+ "PythonStdoutEvaluator"
+ )
+ class_name = getattr(python_assertion_evaluator,
+ 'PythonAssertionEvaluator'
+ )
+ self.registry_object.register("python",
+ {"standardtestcase": assertion_evaluator_path,
+ "stdoutbasedtestcase": stdout_evaluator_path
+ }
+ )
+ self.assertEquals(evaluator_class, class_name)
def tearDown(self):
self.registry_object = None
diff --git a/yaksh/evaluator_tests/test_java_evaluation.py b/yaksh/evaluator_tests/test_java_evaluation.py
index dfa1066..801277f 100644
--- a/yaksh/evaluator_tests/test_java_evaluation.py
+++ b/yaksh/evaluator_tests/test_java_evaluation.py
@@ -2,52 +2,64 @@ import unittest
import os
from yaksh import code_evaluator as evaluator
from yaksh.java_code_evaluator import JavaCodeEvaluator
+from yaksh.settings import SERVER_TIMEOUT
class JavaEvaluationTestCases(unittest.TestCase):
def setUp(self):
- self.language = "java"
- self.ref_code_path = "java_files/main_square.java"
+ self.test_case_data = [
+ {"test_case": "java_files/main_square.java"}
+ ]
self.in_dir = "/tmp"
- self.test_case_data = []
evaluator.SERVER_TIMEOUT = 9
self.timeout_msg = ("Code took more than {0} seconds to run. "
- "You probably have an infinite loop in "
- "your code.").format(evaluator.SERVER_TIMEOUT)
- self.test = None
+ "You probably have an infinite loop in"
+ " your code.").format(evaluator.SERVER_TIMEOUT)
def tearDown(self):
evaluator.SERVER_TIMEOUT = 2
def test_correct_answer(self):
user_answer = "class Test {\n\tint square_num(int a) {\n\treturn a*a;\n\t}\n}"
- get_class = JavaCodeEvaluator(self.test_case_data, self.test,
- self.language, user_answer,
- self.ref_code_path, self.in_dir)
- result = get_class.evaluate()
+ get_class = JavaCodeEvaluator(self.in_dir)
+ kwargs = {'user_answer': user_answer,
+ 'test_case_data': self.test_case_data
+ }
+ result = get_class.evaluate(**kwargs)
+ self.assertEquals(result.get('error'), "Correct answer")
+ self.assertTrue(result.get('success'))
- self.assertTrue(result.get("success"))
- self.assertEqual(result.get("error"), "Correct answer")
+ def test_incorrect_answer(self):
+ user_answer = "class Test {\n\tint square_num(int a) {\n\treturn a;\n\t}\n}"
+ get_class = JavaCodeEvaluator(self.in_dir)
+ kwargs = {'user_answer': user_answer,
+ 'test_case_data': self.test_case_data
+ }
+ result = get_class.evaluate(**kwargs)
+ self.assertFalse(result.get('success'))
+ self.assertIn("Incorrect:", result.get('error'))
+ self.assertTrue(len(result.get('error').splitlines()) > 1)
def test_error(self):
user_answer = "class Test {\n\tint square_num(int a) {\n\treturn a*a"
- get_class = JavaCodeEvaluator(self.test_case_data, self.test,
- self.language, user_answer,
- self.ref_code_path, self.in_dir)
- result = get_class.evaluate()
-
+ get_class = JavaCodeEvaluator(self.in_dir)
+ kwargs = {'user_answer': user_answer,
+ 'test_case_data': self.test_case_data
+ }
+ result = get_class.evaluate(**kwargs)
self.assertFalse(result.get("success"))
self.assertTrue("Error" in result.get("error"))
def test_infinite_loop(self):
user_answer = "class Test {\n\tint square_num(int a) {\n\t\twhile(0==0){\n\t\t}\n\t}\n}"
- get_class = JavaCodeEvaluator(self.test_case_data, self.test,
- self.language, user_answer,
- self.ref_code_path, self.in_dir)
- result = get_class.evaluate()
-
+ get_class = JavaCodeEvaluator(self.in_dir)
+ kwargs = {'user_answer': user_answer,
+ 'test_case_data': self.test_case_data
+ }
+ result = get_class.evaluate(**kwargs)
self.assertFalse(result.get("success"))
self.assertEquals(result.get("error"), self.timeout_msg)
+
if __name__ == '__main__':
unittest.main()
diff --git a/yaksh/evaluator_tests/test_python_evaluation.py b/yaksh/evaluator_tests/test_python_evaluation.py
index c55f04f..1e867a3 100644
--- a/yaksh/evaluator_tests/test_python_evaluation.py
+++ b/yaksh/evaluator_tests/test_python_evaluation.py
@@ -1,67 +1,71 @@
import unittest
import os
-from yaksh.python_code_evaluator import PythonCodeEvaluator
+from yaksh.python_assertion_evaluator import PythonAssertionEvaluator
+from yaksh.python_stdout_evaluator import PythonStdoutEvaluator
from yaksh.settings import SERVER_TIMEOUT
from textwrap import dedent
-class PythonEvaluationTestCases(unittest.TestCase):
+class PythonAssertionEvaluationTestCases(unittest.TestCase):
def setUp(self):
- self.language = "Python"
- self.test = None
- self.test_case_data = [{"func_name": "add",
- "expected_answer": "5",
- "test_id": u'null',
- "pos_args": ["3", "2"],
- "kw_args": {}
- }]
+ self.test_case_data = [{"test_case": 'assert(add(1,2)==3)'},
+ {"test_case": 'assert(add(-1,2)==1)'},
+ {"test_case": 'assert(add(-1,-2)==-3)'},
+ ]
+ self.timeout_msg = ("Code took more than {0} seconds to run. "
+ "You probably have an infinite loop in"
+ " your code.").format(SERVER_TIMEOUT)
def test_correct_answer(self):
- user_answer = dedent("""
- def add(a, b):
- return a + b
- """)
- get_evaluator = PythonCodeEvaluator(self.test_case_data, self.test,
- self.language, user_answer)
- result = get_evaluator.evaluate()
- self.assertTrue(result.get("success"))
- self.assertEqual(result.get("error"), "Correct answer")
+ user_answer = "def add(a,b):\n\treturn a + b"
+ get_class = PythonAssertionEvaluator()
+ kwargs = {'user_answer': user_answer,
+ 'test_case_data': self.test_case_data
+ }
+ result = get_class.evaluate(**kwargs)
+ self.assertTrue(result.get('success'))
+ self.assertEqual(result.get('error'), "Correct answer")
def test_incorrect_answer(self):
- user_answer = dedent("""
- def add(a, b):
- return a - b
- """)
- get_evaluator = PythonCodeEvaluator(self.test_case_data, self.test,
- self.language, user_answer)
- result = get_evaluator.evaluate()
- self.assertFalse(result.get("success"))
- self.assertEqual(result.get("error"), "AssertionError in: assert add(3, 2) == 5")
+ user_answer = "def add(a,b):\n\treturn a - b"
+ get_class = PythonAssertionEvaluator()
+ kwargs = {'user_answer': user_answer,
+ 'test_case_data': self.test_case_data
+ }
+ result = get_class.evaluate(**kwargs)
+ self.assertFalse(result.get('success'))
+ self.assertEqual(result.get('error'),
+ "AssertionError in: assert(add(1,2)==3)"
+ )
def test_infinite_loop(self):
- user_answer = dedent("""
- def add(a, b):
- while True:
- pass
- """)
- timeout_msg = ("Code took more than {0} seconds to run. "
- "You probably have an infinite loop in your code.").format(SERVER_TIMEOUT)
- get_evaluator = PythonCodeEvaluator(self.test_case_data, self.test,
- self.language, user_answer)
- result = get_evaluator.evaluate()
- self.assertFalse(result.get("success"))
- self.assertEquals(result.get("error"), timeout_msg)
+ user_answer = "def add(a, b):\n\twhile True:\n\t\tpass"
+ get_class = PythonAssertionEvaluator()
+ kwargs = {'user_answer': user_answer,
+ 'test_case_data': self.test_case_data
+ }
+ result = get_class.evaluate(**kwargs)
+ self.assertFalse(result.get('success'))
+ self.assertEqual(result.get('error'), self.timeout_msg)
def test_syntax_error(self):
user_answer = dedent("""
def add(a, b);
return a + b
""")
- syntax_error_msg = ["Traceback", "call", "File", "line", "<string>",
- "SyntaxError", "invalid syntax"]
- get_evaluator = PythonCodeEvaluator(self.test_case_data, self.test,
- self.language, user_answer)
- result = get_evaluator.evaluate()
+ syntax_error_msg = ["Traceback",
+ "call",
+ "File",
+ "line",
+ "<string>",
+ "SyntaxError",
+ "invalid syntax"
+ ]
+ get_class = PythonAssertionEvaluator()
+ kwargs = {'user_answer': user_answer,
+ 'test_case_data': self.test_case_data
+ }
+ result = get_class.evaluate(**kwargs)
err = result.get("error").splitlines()
self.assertFalse(result.get("success"))
self.assertEqual(5, len(err))
@@ -73,11 +77,18 @@ class PythonEvaluationTestCases(unittest.TestCase):
def add(a, b):
return a + b
""")
- indent_error_msg = ["Traceback", "call", "File", "line", "<string>",
- "IndentationError", "indented block"]
- get_evaluator = PythonCodeEvaluator(self.test_case_data, self.test,
- self.language, user_answer)
- result = get_evaluator.evaluate()
+ indent_error_msg = ["Traceback", "call",
+ "File",
+ "line",
+ "<string>",
+ "IndentationError",
+ "indented block"
+ ]
+ get_class = PythonAssertionEvaluator()
+ kwargs = {'user_answer': user_answer,
+ 'test_case_data': self.test_case_data
+ }
+ result = get_class.evaluate(**kwargs)
err = result.get("error").splitlines()
self.assertFalse(result.get("success"))
self.assertEqual(5, len(err))
@@ -86,10 +97,17 @@ class PythonEvaluationTestCases(unittest.TestCase):
def test_name_error(self):
user_answer = ""
- name_error_msg = ["Traceback", "call", "NameError", "name", "defined"]
- get_evaluator = PythonCodeEvaluator(self.test_case_data, self.test,
- self.language, user_answer)
- result = get_evaluator.evaluate()
+ name_error_msg = ["Traceback",
+ "call",
+ "NameError",
+ "name",
+ "defined"
+ ]
+ get_class = PythonAssertionEvaluator()
+ kwargs = {'user_answer': user_answer,
+ 'test_case_data': self.test_case_data
+ }
+ result = get_class.evaluate(**kwargs)
err = result.get("error").splitlines()
self.assertFalse(result.get("success"))
self.assertEqual(2, len(err))
@@ -101,11 +119,16 @@ class PythonEvaluationTestCases(unittest.TestCase):
def add(a, b):
return add(3, 3)
""")
- recursion_error_msg = ["Traceback", "call", "RuntimeError",
- "maximum recursion depth exceeded"]
- get_evaluator = PythonCodeEvaluator(self.test_case_data, self.test,
- self.language, user_answer)
- result = get_evaluator.evaluate()
+ recursion_error_msg = ["Traceback",
+ "call",
+ "RuntimeError",
+ "maximum recursion depth exceeded"
+ ]
+ get_class = PythonAssertionEvaluator()
+ kwargs = {'user_answer': user_answer,
+ 'test_case_data': self.test_case_data
+ }
+ result = get_class.evaluate(**kwargs)
err = result.get("error").splitlines()
self.assertFalse(result.get("success"))
self.assertEqual(2, len(err))
@@ -117,10 +140,17 @@ class PythonEvaluationTestCases(unittest.TestCase):
def add(a):
return a + b
""")
- type_error_msg = ["Traceback", "call", "TypeError", "exactly", "argument"]
- get_evaluator = PythonCodeEvaluator(self.test_case_data, self.test,
- self.language, user_answer)
- result = get_evaluator.evaluate()
+ type_error_msg = ["Traceback",
+ "call",
+ "TypeError",
+ "exactly",
+ "argument"
+ ]
+ get_class = PythonAssertionEvaluator()
+ kwargs = {'user_answer': user_answer,
+ 'test_case_data': self.test_case_data
+ }
+ result = get_class.evaluate(**kwargs)
err = result.get("error").splitlines()
self.assertFalse(result.get("success"))
self.assertEqual(2, len(err))
@@ -133,15 +163,73 @@ class PythonEvaluationTestCases(unittest.TestCase):
c = 'a'
return int(a) + int(b) + int(c)
""")
- value_error_msg = ["Traceback", "call", "ValueError", "invalid literal", "base"]
- get_evaluator = PythonCodeEvaluator(self.test_case_data, self.test,
- self.language, user_answer)
- result = get_evaluator.evaluate()
+ value_error_msg = ["Traceback",
+ "call",
+ "ValueError",
+ "invalid literal",
+ "base"
+ ]
+ get_class = PythonAssertionEvaluator()
+ kwargs = {'user_answer': user_answer,
+ 'test_case_data': self.test_case_data
+ }
+ result = get_class.evaluate(**kwargs)
err = result.get("error").splitlines()
self.assertFalse(result.get("success"))
self.assertEqual(2, len(err))
for msg in value_error_msg:
self.assertIn(msg, result.get("error"))
+class PythonStdoutEvaluationTestCases(unittest.TestCase):
+ def setUp(self):
+ self.test_case_data = [{"expected_output": "0 1 1 2 3"}]
+ self.timeout_msg = ("Code took more than {0} seconds to run. "
+ "You probably have an infinite loop"
+ " in your code.").format(SERVER_TIMEOUT)
+
+ def test_correct_answer(self):
+ user_answer = "a,b=0,1\nfor i in range(5):\n\tprint a,\n\ta,b=b,a+b"
+ get_class = PythonStdoutEvaluator()
+ kwargs = {'user_answer': user_answer,
+ 'test_case_data': self.test_case_data
+ }
+ result = get_class.evaluate(**kwargs)
+ self.assertEqual(result.get('error'), "Correct answer")
+ self.assertTrue(result.get('success'))
+
+ def test_incorrect_answer(self):
+ user_answer = "a,b=0,1\nfor i in range(5):\n\tprint b,\n\ta,b=b,a+b"
+ get_class = PythonStdoutEvaluator()
+ kwargs = {'user_answer': user_answer,
+ 'test_case_data': self.test_case_data
+ }
+ result = get_class.evaluate(**kwargs)
+ self.assertFalse(result.get('success'))
+ self.assertEqual(result.get('error'), "Incorrect Answer")
+
+ def test_direct_printed_answer(self):
+ user_answer = "print '0 1 1 2 3'"
+ error_msg = ("Incorrect Answer: Please avoid printing"
+ " the expected output directly"
+ )
+ get_class = PythonStdoutEvaluator()
+ kwargs = {'user_answer': user_answer,
+ 'test_case_data': self.test_case_data
+ }
+ result = get_class.evaluate(**kwargs)
+ self.assertFalse(result.get('success'))
+ self.assertEqual(result.get('error'), error_msg)
+
+ def test_infinite_loop(self):
+ user_answer = "def add(a, b):\n\twhile True:\n\t\tpass"
+ get_class = PythonStdoutEvaluator()
+ kwargs = {'user_answer': user_answer,
+ 'test_case_data': self.test_case_data
+ }
+ result = get_class.evaluate(**kwargs)
+ self.assertFalse(result.get('success'))
+ self.assertEqual(result.get('error'), 'Incorrect Answer')
+
+
if __name__ == '__main__':
- unittest.main()
\ No newline at end of file
+ unittest.main()
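The tests above exercise PythonAssertionEvaluator, whose own diff is not part of this excerpt. A minimal sketch of the behaviour those tests expect (exec the answer, then exec each assert-style test case); this is a simplified assumption, not the evaluator's actual code:

```python
# Hypothetical helper mirroring what the assertion tests above rely on.
def evaluate_assertions(user_answer, test_case_data):
    namespace = {}
    exec user_answer in namespace          # define the student's function(s)
    for case in test_case_data:
        test_code = case["test_case"]      # e.g. "assert(add(1,2)==3)"
        try:
            exec test_code in namespace
        except AssertionError:
            return False, "AssertionError in: " + test_code
    return True, "Correct answer"

# evaluate_assertions("def add(a,b):\n\treturn a + b",
#                     [{"test_case": "assert(add(1,2)==3)"}])
```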
diff --git a/yaksh/evaluator_tests/test_scilab_evaluation.py b/yaksh/evaluator_tests/test_scilab_evaluation.py
index 30af041..242f260 100644
--- a/yaksh/evaluator_tests/test_scilab_evaluation.py
+++ b/yaksh/evaluator_tests/test_scilab_evaluation.py
@@ -1,45 +1,60 @@
import unittest
import os
+from yaksh import code_evaluator as evaluator
from yaksh.scilab_code_evaluator import ScilabCodeEvaluator
from yaksh.settings import SERVER_TIMEOUT
class ScilabEvaluationTestCases(unittest.TestCase):
def setUp(self):
- self.language = "scilab"
- self.ref_code_path = "scilab_files/test_add.sce"
+ self.test_case_data = [{"test_case": "scilab_files/test_add.sce"}]
self.in_dir = "/tmp"
- self.test_case_data = []
self.timeout_msg = ("Code took more than {0} seconds to run. "
- "You probably have an infinite loop in your code.").format(SERVER_TIMEOUT)
- self.test = None
+ "You probably have an infinite loop"
+ " in your code.").format(SERVER_TIMEOUT)
def test_correct_answer(self):
- user_answer = "funcprot(0)\nfunction[c]=add(a,b)\n\tc=a+b;\nendfunction"
- get_class = ScilabCodeEvaluator(self.test_case_data, self.test,
- self.language, user_answer,
- self.ref_code_path, self.in_dir)
- result = get_class.evaluate()
-
- self.assertTrue(result.get("success"))
- self.assertEqual(result.get("error"), "Correct answer")
+ user_answer = ("funcprot(0)\nfunction[c]=add(a,b)"
+ "\n\tc=a+b;\nendfunction")
+ get_class = ScilabCodeEvaluator(self.in_dir)
+ kwargs = {'user_answer': user_answer,
+ 'test_case_data': self.test_case_data
+ }
+ result = get_class.evaluate(**kwargs)
+ self.assertEquals(result.get('error'), "Correct answer")
+ self.assertTrue(result.get('success'))
def test_error(self):
- user_answer = "funcprot(0)\nfunction[c]=add(a,b)\n\tc=a+b;\ndis(\tendfunction"
- get_class = ScilabCodeEvaluator(self.test_case_data, self.test,
- self.language, user_answer,
- self.ref_code_path, self.in_dir)
- result = get_class.evaluate()
-
+ user_answer = ("funcprot(0)\nfunction[c]=add(a,b)"
+ "\n\tc=a+b;\ndis(\tendfunction")
+ get_class = ScilabCodeEvaluator(self.in_dir)
+ kwargs = {'user_answer': user_answer,
+ 'test_case_data': self.test_case_data
+ }
+ result = get_class.evaluate(**kwargs)
self.assertFalse(result.get("success"))
self.assertTrue("error" in result.get("error"))
- def test_infinite_loop(self):
- user_answer = "funcprot(0)\nfunction[c]=add(a,b)\n\tc=a;\nwhile(1==1)\nend\nendfunction"
- get_class = ScilabCodeEvaluator(self.test_case_data, self.test,
- self.language, user_answer,
- self.ref_code_path, self.in_dir)
- result = get_class.evaluate()
+ def test_incorrect_answer(self):
+ user_answer = ("funcprot(0)\nfunction[c]=add(a,b)"
+ "\n\tc=a-b;\nendfunction")
+ get_class = ScilabCodeEvaluator(self.in_dir)
+ kwargs = {'user_answer': user_answer,
+ 'test_case_data': self.test_case_data
+ }
+ result = get_class.evaluate(**kwargs)
+ self.assertFalse(result.get('success'))
+ self.assertIn("Message", result.get('error'))
+ self.assertTrue(len(result.get('error').splitlines()) > 1)
+
+ def test_infinite_loop(self):
+ user_answer = ("funcprot(0)\nfunction[c]=add(a,b)"
+ "\n\tc=a;\nwhile(1==1)\nend\nendfunction")
+ get_class = ScilabCodeEvaluator(self.in_dir)
+ kwargs = {'user_answer': user_answer,
+ 'test_case_data': self.test_case_data
+ }
+ result = get_class.evaluate(**kwargs)
self.assertFalse(result.get("success"))
self.assertEquals(result.get("error"), self.timeout_msg)
diff --git a/yaksh/forms.py b/yaksh/forms.py
index c5bec4c..808262b 100644
--- a/yaksh/forms.py
+++ b/yaksh/forms.py
@@ -1,8 +1,10 @@
from django import forms
-from yaksh.models import Profile, Quiz, Question, TestCase, Course
+from yaksh.models import get_model_class, Profile, Quiz, Question, TestCase, Course, StandardTestCase, StdoutBasedTestCase
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
+from django.contrib.contenttypes.models import ContentType
+
from taggit.managers import TaggableManager
from taggit.forms import TagField
from django.forms.models import inlineformset_factory
@@ -28,6 +30,12 @@ question_types = (
("upload", "Assignment Upload"),
)
+test_case_types = (
+ ("standardtestcase", "Standard Testcase"),
+ ("stdoutbasedtestcase", "Stdout Based Testcase"),
+ ("mcqtestcase", "MCQ Testcase"),
+ )
+
UNAME_CHARS = letters + "._" + digits
PWD_CHARS = letters + punctuation + digits
@@ -35,6 +43,14 @@ attempts = [(i, i) for i in range(1, 6)]
attempts.append((-1, 'Infinite'))
days_between_attempts = ((j, j) for j in range(401))
+def get_object_form(model, exclude_fields=None):
+ model_class = get_model_class(model)
+ class _ObjectForm(forms.ModelForm):
+ class Meta:
+ model = model_class
+ exclude = exclude_fields
+ return _ObjectForm
+
class UserRegisterForm(forms.Form):
"""A Class to create new form for User's Registration.
@@ -180,10 +196,6 @@ class QuestionFilterForm(forms.Form):
(choices=question_types))
-TestCaseFormSet = inlineformset_factory(Question, TestCase, fields='__all__',
- can_order=False, can_delete=False, extra=1)
-
-
class CourseForm(forms.ModelForm):
class Meta:
model = Course
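The new get_object_form factory builds a ModelForm for whichever test-case model a question uses, looking the model class up by name through ContentType. A short usage sketch (must run inside the Django app; the excluded field names are illustrative):

```python
# Illustrative use of the get_object_form factory added above.
from yaksh.forms import get_object_form

# Build a form class for the StandardTestCase model and instantiate it;
# the "question" exclusion is a hypothetical example, not a fixed API.
StandardTestCaseForm = get_object_form("standardtestcase",
                                       exclude_fields=["question"])
form = StandardTestCaseForm()  # unbound form for a new test case
```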
diff --git a/yaksh/java_code_evaluator.py b/yaksh/java_code_evaluator.py
index 4367259..c64aa3b 100644
--- a/yaksh/java_code_evaluator.py
+++ b/yaksh/java_code_evaluator.py
@@ -12,48 +12,78 @@ from code_evaluator import CodeEvaluator
class JavaCodeEvaluator(CodeEvaluator):
"""Tests the Java code obtained from Code Server"""
- def __init__(self, test_case_data, test, language, user_answer,
- ref_code_path=None, in_dir=None):
- super(JavaCodeEvaluator, self).__init__(test_case_data, test,
- language, user_answer,
- ref_code_path, in_dir)
- self.test_case_args = self._setup()
-
- # Private Protocol ##########
- def _setup(self):
- super(JavaCodeEvaluator, self)._setup()
-
- ref_path, test_case_path = self._set_test_code_file_path(self.ref_code_path)
- self.submit_path = self.create_submit_code_file('Test.java')
-
- # Set file paths
- java_student_directory = os.getcwd() + '/'
- java_ref_file_name = (ref_path.split('/')[-1]).split('.')[0]
-
- # Set command variables
- compile_command = 'javac {0}'.format(self.submit_path),
- compile_main = ('javac {0} -classpath '
- '{1} -d {2}').format(ref_path,
- java_student_directory,
- java_student_directory)
- run_command_args = "java -cp {0} {1}".format(java_student_directory,
- java_ref_file_name)
- remove_user_output = "{0}{1}.class".format(java_student_directory,
- 'Test')
- remove_ref_output = "{0}{1}.class".format(java_student_directory,
- java_ref_file_name)
-
- return (ref_path, self.submit_path, compile_command, compile_main,
- run_command_args, remove_user_output, remove_ref_output)
-
- def _teardown(self):
+ def setup(self):
+ super(JavaCodeEvaluator, self).setup()
+ self.submit_code_path = self.create_submit_code_file('Test.java')
+ self.compiled_user_answer = None
+ self.compiled_test_code = None
+
+ def teardown(self):
+ super(JavaCodeEvaluator, self).teardown()
# Delete the created file.
- super(JavaCodeEvaluator, self)._teardown()
- os.remove(self.submit_path)
+ os.remove(self.submit_code_path)
- def _check_code(self, ref_code_path, submit_code_path, compile_command,
- compile_main, run_command_args, remove_user_output,
- remove_ref_output):
+ def get_commands(self, clean_ref_code_path, user_code_directory):
+ compile_command = 'javac {0}'.format(self.submit_code_path),
+ compile_main = ('javac {0} -classpath '
+ '{1} -d {2}').format(clean_ref_code_path,
+ user_code_directory,
+ user_code_directory)
+ return compile_command, compile_main
+
+ def set_file_paths(self, directory, file_name):
+ output_path = "{0}{1}.class".format(directory, file_name)
+ return output_path
+
+ def compile_code(self, user_answer, test_case):
+ if self.compiled_user_answer and self.compiled_test_code:
+ return None
+ else:
+ ref_code_path = test_case
+ clean_ref_code_path, clean_test_case_path = \
+ self._set_test_code_file_path(ref_code_path)
+
+ if not isfile(clean_ref_code_path):
+ msg = "No file at %s or Incorrect path" % clean_ref_code_path
+ return False, msg
+ if not isfile(self.submit_code_path):
+ msg = "No file at %s or Incorrect path" % self.submit_code_path
+ return False, msg
+
+ user_code_directory = os.getcwd() + '/'
+ self.write_to_submit_code_file(self.submit_code_path,
+ user_answer
+ )
+ ref_file_name = (clean_ref_code_path.split('/')[-1]).split('.')[0]
+ self.user_output_path = self.set_file_paths(user_code_directory,
+ 'Test'
+ )
+ self.ref_output_path = self.set_file_paths(user_code_directory,
+ ref_file_name
+ )
+ compile_command, self.compile_main = self.get_commands(
+ clean_ref_code_path,
+ user_code_directory
+ )
+ self.run_command_args = "java -cp {0} {1}".format(
+ user_code_directory,
+ ref_file_name
+ )
+ self.compiled_user_answer = self._run_command(compile_command,
+ shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE
+ )
+
+ self.compiled_test_code = self._run_command(self.compile_main,
+ shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE
+ )
+
+ return self.compiled_user_answer, self.compiled_test_code
+
+ def check_code(self, user_answer, test_case):
""" Function validates student code using instructor code as
reference.The first argument ref_code_path, is the path to
instructor code, it is assumed to have executable permission.
@@ -73,25 +103,18 @@ class JavaCodeEvaluator(CodeEvaluator):
if the required permissions are not given to the file(s).
"""
- if not isfile(ref_code_path):
- return False, "No file at %s or Incorrect path" % ref_code_path
- if not isfile(submit_code_path):
- return False, 'No file at %s or Incorrect path' % submit_code_path
-
success = False
- ret = self._compile_command(compile_command)
- proc, stdnt_stderr = ret
+ proc, stdnt_out, stdnt_stderr = self.compiled_user_answer
stdnt_stderr = self._remove_null_substitute_char(stdnt_stderr)
# Only if compilation is successful, the program is executed
# And tested with testcases
if stdnt_stderr == '':
- ret = self._compile_command(compile_main)
- proc, main_err = ret
+ proc, main_out, main_err = self.compiled_test_code
main_err = self._remove_null_substitute_char(main_err)
if main_err == '':
- ret = self._run_command(run_command_args, shell=True,
+ ret = self._run_command(self.run_command_args, shell=True,
stdin=None,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
@@ -100,7 +123,7 @@ class JavaCodeEvaluator(CodeEvaluator):
success, err = True, "Correct answer"
else:
err = stdout + "\n" + stderr
- os.remove(remove_ref_output)
+ os.remove(self.ref_output_path)
else:
err = "Error:"
try:
@@ -112,7 +135,7 @@ class JavaCodeEvaluator(CodeEvaluator):
err = err + "\n" + e
except:
err = err + "\n" + main_err
- os.remove(remove_user_output)
+ os.remove(self.user_output_path)
else:
err = "Compilation Error:"
try:
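For reference, the commands JavaCodeEvaluator.get_commands assembles after this change: the student's Test.java is compiled, the instructor's driver is compiled against it, and the driver class is then run. A small sketch with illustrative paths:

```python
# Sketch of the javac/java command construction used by JavaCodeEvaluator.
submit_code_path = "/tmp/Test.java"
ref_code_path = "java_files/main_square.java"
user_code_directory = "/tmp/"
ref_file_name = ref_code_path.split('/')[-1].split('.')[0]  # "main_square"

compile_command = "javac {0}".format(submit_code_path)
compile_main = "javac {0} -classpath {1} -d {2}".format(
    ref_code_path, user_code_directory, user_code_directory)
run_command = "java -cp {0} {1}".format(user_code_directory, ref_file_name)

print(compile_command)  # javac /tmp/Test.java
print(compile_main)     # javac java_files/main_square.java -classpath /tmp/ -d /tmp/
print(run_command)      # java -cp /tmp/ main_square
```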
diff --git a/yaksh/language_registry.py b/yaksh/language_registry.py
index 76a23d7..24aef7d 100644
--- a/yaksh/language_registry.py
+++ b/yaksh/language_registry.py
@@ -1,15 +1,26 @@
from settings import code_evaluators
import importlib
+import json
registry = None
-
-def set_registry():
- global registry
- registry = _LanguageRegistry()
def get_registry():
+ global registry
+ if registry is None:
+ registry = _LanguageRegistry()
return registry
+def unpack_json(json_data):
+ data = json.loads(json_data)
+ return data
+
+def create_evaluator_instance(language, test_case_type, json_data, in_dir):
+ """Create instance of relevant EvaluateCode class based on language"""
+ registry = get_registry()
+ cls = registry.get_class(language, test_case_type)
+ instance = cls(in_dir)
+ return instance
+
class _LanguageRegistry(object):
def __init__(self):
self._register = {}
@@ -17,20 +28,22 @@ class _LanguageRegistry(object):
self._register[language] = None
# Public Protocol ##########
- def get_class(self, language):
+ def get_class(self, language, test_case_type):
""" Get the code evaluator class for the given language """
if not self._register.get(language):
self._register[language] = code_evaluators.get(language)
- cls = self._register[language]
+ test_case_register = self._register[language]
+ cls = test_case_register.get(test_case_type)
module_name, class_name = cls.rsplit(".", 1)
+ import yaksh.python_assertion_evaluator
# load the module, will raise ImportError if module cannot be loaded
get_module = importlib.import_module(module_name)
# get the class, will raise AttributeError if class cannot be found
get_class = getattr(get_module, class_name)
return get_class
- def register(self, language, class_name):
+ def register(self, language, class_names):
""" Register a new code evaluator class for language"""
- self._register[language] = class_name
+ self._register[language] = class_names
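The registry now maps a (language, test_case_type) pair to a dotted evaluator path. A sketch of the nested settings.code_evaluators layout it expects; the python entries match the registry test above, the commented entry is only an illustration of the pattern for other languages:

```python
# Nested evaluator registry keyed by language, then test case type.
code_evaluators = {
    "python": {
        "standardtestcase":
            "yaksh.python_assertion_evaluator.PythonAssertionEvaluator",
        "stdoutbasedtestcase":
            "yaksh.python_stdout_evaluator.PythonStdoutEvaluator",
    },
    # "c": {"standardtestcase": "yaksh.cpp_code_evaluator.CppCodeEvaluator"},
}

# create_evaluator_instance("python", "standardtestcase", json_data, "/tmp")
# resolves the dotted path with importlib, instantiates the class with only
# the working directory, and the answer/test data travel separately through
# evaluate(**unpack_json(json_data)).
```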
diff --git a/yaksh/models.py b/yaksh/models.py
index 6e59d7a..32fb0d0 100644
--- a/yaksh/models.py
+++ b/yaksh/models.py
@@ -5,24 +5,11 @@ from itertools import islice, cycle
from collections import Counter
from django.db import models
from django.contrib.auth.models import User
+from django.forms.models import model_to_dict
+from django.contrib.contenttypes.models import ContentType
from taggit.managers import TaggableManager
-###############################################################################
-class ConcurrentUser(models.Model):
- concurrent_user = models.OneToOneField(User, null=False)
- session_key = models.CharField(null=False, max_length=40)
-
-
-###############################################################################
-class Profile(models.Model):
- """Profile for a user to store roll number and other details."""
- user = models.OneToOneField(User)
- roll_number = models.CharField(max_length=20)
- institute = models.CharField(max_length=128)
- department = models.CharField(max_length=64)
- position = models.CharField(max_length=64)
-
languages = (
("python", "Python"),
("bash", "Bash"),
@@ -45,6 +32,12 @@ enrollment_methods = (
("open", "Open Course"),
)
+test_case_types = (
+ ("standardtestcase", "Standard Testcase"),
+ ("stdoutbasedtestcase", "Stdout Based Testcase"),
+ ("mcqtestcase", "MCQ Testcase"),
+ )
+
attempts = [(i, i) for i in range(1, 6)]
attempts.append((-1, 'Infinite'))
days_between_attempts = ((j, j) for j in range(401))
@@ -54,10 +47,15 @@ test_status = (
('completed', 'Completed'),
)
-
def get_assignment_dir(instance, filename):
return '%s/%s' % (instance.user.roll_number, instance.assignmentQuestion.id)
+def get_model_class(model):
+ ctype = ContentType.objects.get(app_label="yaksh", model=model)
+ model_class = ctype.model_class()
+
+ return model_class
+
###############################################################################
class Course(models.Model):
@@ -132,6 +130,21 @@ class Course(models.Model):
def __unicode__(self):
return self.name
+###############################################################################
+class ConcurrentUser(models.Model):
+ concurrent_user = models.OneToOneField(User, null=False)
+ session_key = models.CharField(null=False, max_length=40)
+
+
+###############################################################################
+class Profile(models.Model):
+ """Profile for a user to store roll number and other details."""
+ user = models.OneToOneField(User)
+ roll_number = models.CharField(max_length=20)
+ institute = models.CharField(max_length=128)
+ department = models.CharField(max_length=64)
+ position = models.CharField(max_length=64)
+
###############################################################################
class Question(models.Model):
@@ -146,16 +159,6 @@ class Question(models.Model):
# Number of points for the question.
points = models.FloatField(default=1.0)
- # Answer for MCQs.
- test = models.TextField(blank=True)
-
- # Test cases file paths (comma seperated for reference code path and test case code path)
- # Applicable for CPP, C, Java and Scilab
- ref_code_path = models.TextField(blank=True)
-
- # Any multiple choice options. Place one option per line.
- options = models.TextField(blank=True)
-
# The language for question.
language = models.CharField(max_length=24,
choices=languages)
@@ -163,64 +166,49 @@ class Question(models.Model):
# The type of question.
type = models.CharField(max_length=24, choices=question_types)
+ # The type of evaluator
+ test_case_type = models.CharField(max_length=24, choices=test_case_types)
+
# Is this question active or not. If it is inactive it will not be used
# when creating a QuestionPaper.
active = models.BooleanField(default=True)
- # Snippet of code provided to the user.
- snippet = models.CharField(max_length=256, blank=True)
-
# Tags for the Question.
tags = TaggableManager(blank=True)
+ # Snippet of code provided to the user.
+ snippet = models.CharField(max_length=256, blank=True)
+
# user for particular question
user = models.ForeignKey(User, related_name="user")
- def consolidate_answer_data(self, test_cases, user_answer):
- test_case_data_dict = []
- question_info_dict = {}
-
- for test_case in test_cases:
- kw_args_dict = {}
- pos_args_list = []
-
- test_case_data = {}
- test_case_data['test_id'] = test_case.id
- test_case_data['func_name'] = test_case.func_name
- test_case_data['expected_answer'] = test_case.expected_answer
-
- if test_case.kw_args:
- for args in test_case.kw_args.split(","):
- arg_name, arg_value = args.split("=")
- kw_args_dict[arg_name.strip()] = arg_value.strip()
+ def consolidate_answer_data(self, user_answer):
+ question_data = {}
+ test_case_data = []
- if test_case.pos_args:
- for args in test_case.pos_args.split(","):
- pos_args_list.append(args.strip())
+ test_cases = self.get_test_cases()
- test_case_data['kw_args'] = kw_args_dict
- test_case_data['pos_args'] = pos_args_list
- test_case_data_dict.append(test_case_data)
+ for test in test_cases:
+ test_case_as_dict = test.get_field_value()
+ test_case_data.append(test_case_as_dict)
- # question_info_dict['language'] = self.language
- question_info_dict['id'] = self.id
- question_info_dict['user_answer'] = user_answer
- question_info_dict['test_parameter'] = test_case_data_dict
- question_info_dict['ref_code_path'] = self.ref_code_path
- question_info_dict['test'] = self.test
+ question_data['test_case_data'] = test_case_data
+ question_data['user_answer'] = user_answer
- return json.dumps(question_info_dict)
+ return json.dumps(question_data)
def dump_into_json(self, question_ids, user):
questions = Question.objects.filter(id__in = question_ids, user_id = user.id)
questions_dict = []
for question in questions:
- q_dict = {'summary': question.summary, 'description': question.description,
- 'points': question.points, 'test': question.test,
- 'ref_code_path': question.ref_code_path,
- 'options': question.options, 'language': question.language,
- 'type': question.type, 'active': question.active,
- 'snippet': question.snippet}
+ q_dict = {'summary': question.summary,
+ 'description': question.description,
+ 'points': question.points,
+ 'language': question.language,
+ 'type': question.type,
+ 'active': question.active,
+ 'test_case_type': question.test_case_type,
+ 'snippet': question.snippet}
questions_dict.append(q_dict)
return json.dumps(questions_dict, indent=2)
@@ -231,6 +219,28 @@ class Question(models.Model):
question['user'] = user
Question.objects.get_or_create(**question)
+ def get_test_cases(self, **kwargs):
+ test_case_ctype = ContentType.objects.get(app_label="yaksh",
+ model=self.test_case_type
+ )
+ test_cases = test_case_ctype.get_all_objects_for_this_type(
+ question=self,
+ **kwargs
+ )
+
+ return test_cases
+
+ def get_test_case(self, **kwargs):
+ test_case_ctype = ContentType.objects.get(app_label="yaksh",
+ model=self.test_case_type
+ )
+ test_case = test_case_ctype.get_object_for_this_type(
+ question=self,
+ **kwargs
+ )
+
+ return test_case
+
def __unicode__(self):
return self.summary
@@ -371,10 +381,13 @@ class QuestionPaper(models.Model):
def make_answerpaper(self, user, ip, attempt_num):
"""Creates an answer paper for the user to attempt the quiz"""
- ans_paper = AnswerPaper(user=user, user_ip=ip, attempt_number=attempt_num)
+ ans_paper = AnswerPaper(user=user,
+ user_ip=ip,
+ attempt_number=attempt_num
+ )
ans_paper.start_time = datetime.now()
- ans_paper.end_time = ans_paper.start_time \
- + timedelta(minutes=self.quiz.duration)
+ ans_paper.end_time = ans_paper.start_time + \
+ timedelta(minutes=self.quiz.duration)
ans_paper.question_paper = self
ans_paper.save()
questions = self._get_questions_for_answerpaper()
@@ -746,14 +759,38 @@ class AssignmentUpload(models.Model):
class TestCase(models.Model):
question = models.ForeignKey(Question, blank=True, null = True)
- # Test case function name
- func_name = models.CharField(blank=True, null = True, max_length=200)
+class StandardTestCase(TestCase):
+ test_case = models.TextField(blank=True)
+
+ def get_field_value(self):
+ return {"test_case": self.test_case}
+
+ def __unicode__(self):
+ return u'Question: {0} | Test Case: {1}'.format(self.question,
+ self.test_case
+ )
+
- # Test case Keyword arguments in dict form
- kw_args = models.TextField(blank=True, null = True)
+class StdoutBasedTestCase(TestCase):
+ expected_output = models.TextField(blank=True)
- # Test case Positional arguments in list form
- pos_args = models.TextField(blank=True, null = True)
+ def get_field_value(self):
+ return {"expected_output": self.expected_output}
- # Test case Expected answer in list form
- expected_answer = models.TextField(blank=True, null = True)
+ def __unicode__(self):
+ return u'Question: {0} | Exp. Output: {1}'.format(self.question,
+ self.expected_output
+ )
+
+
+class McqTestCase(TestCase):
+ options = models.TextField()
+ correct = models.BooleanField(default=False)
+
+ def get_field_value(self):
+ return {"options": self.options, "correct": self.correct}
+
+ def __unicode__(self):
+ return u'Question: {0} | Correct: {1}'.format(self.question,
+ self.correct
+ )
diff --git a/yaksh/python_assertion_evaluator.py b/yaksh/python_assertion_evaluator.py
new file mode 100644
index 0000000..bf6a4be
--- /dev/null
+++ b/yaksh/python_assertion_evaluator.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+import sys
+import traceback
+import os
+from os.path import join
+import importlib
+
+# local imports
+from code_evaluator import CodeEvaluator, TimeoutException
+
+
+class PythonAssertionEvaluator(CodeEvaluator):
+ """Tests the Python code obtained from Code Server"""
+
+ def setup(self):
+ super(PythonAssertionEvaluator, self).setup()
+ self.exec_scope = None
+
+ def compile_code(self, user_answer, test_case):
+ if self.exec_scope:
+ return None
+ else:
+ submitted = compile(user_answer, '<string>', mode='exec')
+ self.exec_scope = {}
+ exec submitted in self.exec_scope
+ return self.exec_scope
+
+ def check_code(self, user_answer, test_case):
+ success = False
+ try:
+ tb = None
+ _tests = compile(test_case, '<string>', mode='exec')
+ exec _tests in self.exec_scope
+ except AssertionError:
+ type, value, tb = sys.exc_info()
+ info = traceback.extract_tb(tb)
+ fname, lineno, func, text = info[-1]
+ text = str(test_case).splitlines()[lineno-1]
+ err = "{0} {1} in: {2}".format(type.__name__, str(value), text)
+ except TimeoutException:
+ raise
+ else:
+ success = True
+ err = 'Correct answer'
+
+ del tb
+ return success, err
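
PythonAssertionEvaluator compiles the submission once, caches the exec scope, and then execs each assert-style test case inside it. The stand-alone sketch below shows the same compile-once, assert-per-case flow; run_tests is a made-up helper, not part of the evaluator API:

    import sys
    import traceback

    def run_tests(user_answer, test_cases):
        scope = {}
        # compile_code(): the submission is executed once into a shared scope.
        exec(compile(user_answer, '<string>', 'exec'), scope)
        # check_code(): each assert-style test case runs in that same scope.
        for test_case in test_cases:
            try:
                exec(compile(test_case, '<string>', 'exec'), scope)
            except AssertionError:
                _, value, tb = sys.exc_info()
                fname, lineno, func, text = traceback.extract_tb(tb)[-1]
                line = test_case.splitlines()[lineno - 1]
                return False, "AssertionError {0} in: {1}".format(value, line)
        return True, "Correct answer"

    print(run_tests("def myfunc(a, b):\n    return a + b",
                    ["assert myfunc(12, 13) == 25"]))
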
diff --git a/yaksh/python_code_evaluator.py b/yaksh/python_code_evaluator.py
deleted file mode 100644
index c87c420..0000000
--- a/yaksh/python_code_evaluator.py
+++ /dev/null
@@ -1,65 +0,0 @@
-#!/usr/bin/env python
-import sys
-import traceback
-import os
-from os.path import join
-import importlib
-
-# local imports
-from code_evaluator import CodeEvaluator, TimeoutException
-
-
-class PythonCodeEvaluator(CodeEvaluator):
- """Tests the Python code obtained from Code Server"""
- # Private Protocol ##########
- def _check_code(self):
- success = False
-
- try:
- tb = None
- test_code = self._create_test_case()
- submitted = compile(self.user_answer, '<string>', mode='exec')
- g = {}
- exec submitted in g
- _tests = compile(test_code, '<string>', mode='exec')
- exec _tests in g
- except AssertionError:
- type, value, tb = sys.exc_info()
- info = traceback.extract_tb(tb)
- fname, lineno, func, text = info[-1]
- text = str(test_code).splitlines()[lineno-1]
- err = "{0} {1} in: {2}".format(type.__name__, str(value), text)
- except TimeoutException:
- raise
- except Exception:
- err = traceback.format_exc(limit=0)
- else:
- success = True
- err = 'Correct answer'
-
- del tb
- return success, err
-
- def _create_test_case(self):
- """
- Create assert based test cases in python
- """
- test_code = ""
- if self.test:
- return self.test
- elif self.test_case_data:
- for test_case in self.test_case_data:
- pos_args = ", ".join(str(i) for i in test_case.get('pos_args')) \
- if test_case.get('pos_args') else ""
- kw_args = ", ".join(str(k+"="+a) for k, a
- in test_case.get('kw_args').iteritems()) \
- if test_case.get('kw_args') else ""
- args = pos_args + ", " + kw_args if pos_args and kw_args \
- else pos_args or kw_args
- function_name = test_case.get('func_name')
- expected_answer = test_case.get('expected_answer')
-
- tcode = "assert {0}({1}) == {2}".format(function_name, args,
- expected_answer)
- test_code += tcode + "\n"
- return test_code
diff --git a/yaksh/python_stdout_evaluator.py b/yaksh/python_stdout_evaluator.py
new file mode 100644
index 0000000..6606581
--- /dev/null
+++ b/yaksh/python_stdout_evaluator.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+import sys
+import traceback
+import os
+from os.path import join
+import importlib
+from contextlib import contextmanager
+
+# local imports
+from code_evaluator import CodeEvaluator
+
+
+@contextmanager
+def redirect_stdout():
+ from StringIO import StringIO
+ new_target = StringIO()
+
+ old_target, sys.stdout = sys.stdout, new_target # replace sys.stdout
+ try:
+ yield new_target # run some code with the replaced stdout
+ finally:
+ sys.stdout = old_target # restore to the previous value
+
+
+class PythonStdoutEvaluator(CodeEvaluator):
+ """Tests the Python code obtained from Code Server"""
+
+ def compile_code(self, user_answer, expected_output):
+ if hasattr(self, 'output_value'):
+ return None
+ else:
+ submitted = compile(user_answer, '<string>', mode='exec')
+ with redirect_stdout() as output_buffer:
+ exec_scope = {}
+ exec submitted in exec_scope
+ self.output_value = output_buffer.getvalue()
+ return self.output_value
+
+ def check_code(self, user_answer, expected_output):
+ success = False
+
+ tb = None
+ if expected_output in user_answer:
+ success = False
+ err = ("Incorrect Answer: Please avoid "
+ "printing the expected output directly"
+ )
+ elif self.output_value == expected_output:
+ success = True
+ err = "Correct answer"
+
+ else:
+ success = False
+ err = "Incorrect Answer"
+
+ del tb
+ return success, err
+
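
PythonStdoutEvaluator captures whatever the submission prints by swapping sys.stdout for an in-memory buffer while the code runs. A minimal sketch of the same idea, written against Python 3's io.StringIO rather than the Python 2 StringIO module used above:

    import sys
    from contextlib import contextmanager
    from io import StringIO

    @contextmanager
    def capture_stdout():
        buf, old = StringIO(), sys.stdout
        sys.stdout = buf                 # divert print() output into the buffer
        try:
            yield buf
        finally:
            sys.stdout = old             # always restore the real stdout

    with capture_stdout() as out:
        exec(compile("print('Hello World')", '<string>', 'exec'), {})
    assert out.getvalue().strip() == 'Hello World'
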
diff --git a/yaksh/scilab_code_evaluator.py b/yaksh/scilab_code_evaluator.py
index 392cd45..91b4cb3 100644
--- a/yaksh/scilab_code_evaluator.py
+++ b/yaksh/scilab_code_evaluator.py
@@ -12,47 +12,42 @@ from code_evaluator import CodeEvaluator
class ScilabCodeEvaluator(CodeEvaluator):
"""Tests the Scilab code obtained from Code Server"""
- def __init__(self, test_case_data, test, language, user_answer,
- ref_code_path=None, in_dir=None):
- super(ScilabCodeEvaluator, self).__init__(test_case_data, test,
- language, user_answer,
- ref_code_path, in_dir)
+ def setup(self):
+ super(ScilabCodeEvaluator, self).setup()
+ self.submit_code_path = \
+ self.create_submit_code_file('function.sci')
- # Removes all the commands that terminates scilab
- self.user_answer, self.terminate_commands = self._remove_scilab_exit(user_answer.lstrip())
- self.test_case_args = self._setup()
-
- # Private Protocol ##########
- def _setup(self):
- super(ScilabCodeEvaluator, self)._setup()
-
- ref_path, test_case_path = self._set_test_code_file_path(self.ref_code_path)
- self.submit_path = self.create_submit_code_file('function.sci')
-
- return ref_path, # Return as a tuple
-
- def _teardown(self):
+ def teardown(self):
+ super(ScilabCodeEvaluator, self).teardown()
# Delete the created file.
- super(ScilabCodeEvaluator, self)._teardown()
- os.remove(self.submit_path)
+ os.remove(self.submit_code_path)
- def _check_code(self, ref_path):
- success = False
+ def check_code(self, user_answer, test_case):
+ ref_code_path = test_case
+ clean_ref_path, clean_test_case_path = \
+ self._set_test_code_file_path(ref_code_path)
+ user_answer, terminate_commands = \
+ self._remove_scilab_exit(user_answer.lstrip())
+ success = False
+ self.write_to_submit_code_file(self.submit_code_path, user_answer)
# Throw message if there are commmands that terminates scilab
- add_err=""
- if self.terminate_commands:
+ add_err = ""
+ if terminate_commands:
add_err = "Please do not use exit, quit and abort commands in your\
code.\n Otherwise your code will not be evaluated\
correctly.\n"
- cmd = 'printf "lines(0)\nexec(\'{0}\',2);\nquit();"'.format(ref_path)
+ cmd = 'printf "lines(0)\nexec(\'{0}\',2);\nquit();"'.format(
+ clean_ref_path
+ )
cmd += ' | timeout 8 scilab-cli -nb'
ret = self._run_command(cmd,
- shell=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- proc, stdout, stderr = ret
+ shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE
+ )
+ proc, stdout, stderr = ret
# Get only the error.
stderr = self._get_error(stdout)
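
check_code() for Scilab now assembles the scilab-cli pipeline per call instead of in __init__. Roughly, the shell invocation it builds looks like the sketch below; the path is a placeholder, and scilab-cli plus the coreutils timeout binary must be on PATH for it to actually run:

    import subprocess

    def run_scilab(ref_path):
        # Same printf | timeout | scilab-cli pipeline as check_code() above.
        cmd = 'printf "lines(0)\nexec(\'{0}\',2);\nquit();"'.format(ref_path)
        cmd += ' | timeout 8 scilab-cli -nb'
        proc = subprocess.Popen(cmd, shell=True,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        stdout, stderr = proc.communicate()
        return proc.returncode, stdout, stderr
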
diff --git a/yaksh/settings.py b/yaksh/settings.py
index 63bd875..70e5471 100644
--- a/yaksh/settings.py
+++ b/yaksh/settings.py
@@ -20,10 +20,12 @@ SERVER_TIMEOUT = 2
URL_ROOT = ''
code_evaluators = {
- "python": "python_code_evaluator.PythonCodeEvaluator",
- "c": "cpp_code_evaluator.CppCodeEvaluator",
- "cpp": "cpp_code_evaluator.CppCodeEvaluator",
- "java": "java_code_evaluator.JavaCodeEvaluator",
- "bash": "bash_code_evaluator.BashCodeEvaluator",
- "scilab": "scilab_code_evaluator.ScilabCodeEvaluator",
- }
+ "python": {"standardtestcase": "python_assertion_evaluator.PythonAssertionEvaluator",
+ "stdoutbasedtestcase": "python_stdout_evaluator.PythonStdoutEvaluator"
+ },
+ "c": {"standardtestcase": "cpp_code_evaluator.CppCodeEvaluator"},
+ "cpp": {"standardtestcase": "cpp_code_evaluator.CppCodeEvaluator"},
+ "java": {"standardtestcase": "java_code_evaluator.JavaCodeEvaluator"},
+ "bash": {"standardtestcase": "bash_code_evaluator.BashCodeEvaluator"},
+ "scilab": {"standardtestcase": "scilab_code_evaluator.ScilabCodeEvaluator"},
+}
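
With settings.py shaped this way, wiring up another evaluator is a matter of adding one more entry under the language. The example below is purely illustrative; neither the test-case type nor the module exists in yaksh:

    code_evaluators = {
        "python": {
            "standardtestcase": "python_assertion_evaluator.PythonAssertionEvaluator",
            "stdoutbasedtestcase": "python_stdout_evaluator.PythonStdoutEvaluator",
        },
    }

    # Hypothetical extension: neither the test-case type nor the module exists.
    code_evaluators["python"]["hooktestcase"] = "hook_evaluator.HookEvaluator"
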
diff --git a/yaksh/static/yaksh/js/add_question.js b/yaksh/static/yaksh/js/add_question.js
index 946c139..e435dde 100644
--- a/yaksh/static/yaksh/js/add_question.js
+++ b/yaksh/static/yaksh/js/add_question.js
@@ -109,34 +109,6 @@ function textareaformat()
document.getElementById('my').innerHTML = document.getElementById('id_description').value ;
});
- $('#id_test').bind('focus', function( event ){
- document.getElementById("id_test").rows=5;
- document.getElementById("id_test").cols=40;
- });
-
- $('#id_test').bind('blur', function( event ){
- document.getElementById("id_test").rows=1;
- document.getElementById("id_test").cols=40;
- });
-
- $('#id_options').bind('focus', function( event ){
- document.getElementById("id_options").rows=5;
- document.getElementById("id_options").cols=40;
- });
- $('#id_options').bind('blur', function( event ){
- document.getElementById("id_options").rows=1;
- document.getElementById("id_options").cols=40;
- });
-
- $('#id_snippet').bind('focus', function( event ){
- document.getElementById("id_snippet").rows=5;
- document.getElementById("id_snippet").cols=40;
- });
- $('#id_snippet').bind('blur', function( event ){
- document.getElementById("id_snippet").rows=1;
- document.getElementById("id_snippet").cols=40;
- });
-
$('#id_type').bind('focus', function(event){
var type = document.getElementById('id_type');
@@ -147,32 +119,7 @@ function textareaformat()
var language = document.getElementById('id_language');
language.style.border = '1px solid #ccc';
});
-
- $('#id_type').bind('change',function(event){
- var value = document.getElementById('id_type').value;
- if(value == 'mcq' || value == 'mcc')
- {
- document.getElementById('id_options').style.visibility='visible';
- document.getElementById('label_option').innerHTML="Options :";
- }
- else
- {
- document.getElementById('id_options').style.visibility='hidden';
- document.getElementById('label_option').innerHTML = "";
- }
- });
- document.getElementById('my').innerHTML = document.getElementById('id_description').value ;
- var value = document.getElementById('id_type').value;
- if(value == 'mcq' || value == 'mcc')
- {
- document.getElementById('id_options').style.visibility='visible';
- document.getElementById('label_option').innerHTML="Options :"
- }
- else
- {
- document.getElementById('id_options').style.visibility='hidden';
- document.getElementById('label_option').innerHTML = "";
- }
+ document.getElementById('my').innerHTML = document.getElementById('id_description').value ;
}
function autosubmit()
@@ -190,16 +137,4 @@ function autosubmit()
return false;
}
-
- if (type.value == 'mcq' || type.value == 'mcc')
- {
- var value = document.getElementById('id_options').value;
- if(value.split('\n').length < 4)
- {
- alert("Please Enter 4 options. One option per line.");
- return false;
- }
- return true;
- }
-
}
diff --git a/yaksh/static/yaksh/js/show_testcase.js b/yaksh/static/yaksh/js/show_testcase.js
new file mode 100644
index 0000000..71be9dc
--- /dev/null
+++ b/yaksh/static/yaksh/js/show_testcase.js
@@ -0,0 +1,24 @@
+function confirm_delete(frm)
+{
+ var n=0;
+ test_case = document.getElementsByName('test_case');
+ for (var i =0;i<test_case.length;i++)
+ {
+ if (test_case[i].checked == false)
+ n = n + 1 ;
+ }
+ if(n==test_case.length)
+ {
+ alert("Please Select at least one test case");
+ return false;
+ }
+ var r = confirm("Are you Sure ?");
+ if(r==false)
+ {
+ for(i=0;i<test_case.length;i++)
+ {
+ test_case[i].checked=false;
+ }
+ return false;
+ }
+}
\ No newline at end of file
diff --git a/yaksh/templates/yaksh/add_question.html b/yaksh/templates/yaksh/add_question.html
index 61b146c..d38aa1c 100644
--- a/yaksh/templates/yaksh/add_question.html
+++ b/yaksh/templates/yaksh/add_question.html
@@ -1,6 +1,5 @@
{% extends "manage.html" %}
-
{% block subtitle %}Add Question{% endblock %}
{% block css %}
@@ -25,25 +24,24 @@
<tr><td>Points:<td><button class="btn-mini" type="button" onClick="increase(frm);">+</button>{{ form.points }}<button class="btn-mini" type="button" onClick="decrease(frm);">-</button>{{ form.points.errors }}
<tr><td><strong>Rendered: </strong><td><p id='my'></p>
<tr><td>Description: <td>{{ form.description}} {{form.description.errors}}
- <tr><td>Snippet: <td>{{ form.snippet }}{{ form.snippet.errors }}</td></tD></td></tr>
<tr><td>Tags: <td>{{ form.tags }}
- <tr><td id='label_option'>Options: <td>{{ form.options }} {{form.options.errors}}
- <tr><td id='label_solution'>Test: <td>{{ form.test }} {{form.test.errors}}
- <tr><td id='label_ref_code_path'>Reference Code Path: <td>{{ form.ref_code_path }} {{form.ref_code_path.errors}}
-
- <form method="post" action="">
- {% if formset%}
- {{ formset.management_form }}
- {% for form in formset %}
- {{ form }}
- {% endfor %}
- {% endif %}
- </form>
+ <tr><td> Test Case Type: <td> {{ form.test_case_type }}{{ form.test_case_type.errors }}
+
+ <div class="form-group">
+ {{ test_case_formset.management_form }}
+
+ {% for form in test_case_formset %}
+ <div class="link-formset">
+ {{ form }}
+ </div>
+ {% endfor %}
+
+ </div>
+
</table></center>
- <center><button class="btn" type="submit" name="add_test">Add Test Case</button>
- <button class="btn" type="submit" name="delete_test">Remove Test Case</button>
- </center><br>
- <center><button class="btn" type="submit" name="save_question">Save</button>
- <button class="btn" type="button" name="button" onClick='location.replace("{{URL_ROOT}}/exam/manage/questions/");'>Cancel</button> </center>
+ <center>
+ <button class="btn" type="submit" name="save_question">Save & Add Testcase</button>
+ <button class="btn" type="button" name="button" onClick='location.replace("{{URL_ROOT}}/exam/manage/questions/");'>Back to Questions</button>
+ </center>
</form>
{% endblock %}
diff --git a/yaksh/templates/yaksh/question.html b/yaksh/templates/yaksh/question.html
index e542fe9..8a67818 100644
--- a/yaksh/templates/yaksh/question.html
+++ b/yaksh/templates/yaksh/question.html
@@ -165,8 +165,14 @@ function call_skip(url)
<input type=hidden name="question_id" id="question_id" value={{ question.id }}></input>
{% if question.type == "mcq" %}
- {% for option in question.options.strip.splitlines %}
- <input name="answer" type="radio" value="{{option}}" />{{option}} <br/>
+ {% for test_case in test_cases %}
+ <input name="answer" type="radio" value="{{ test_case.options }}" />{{ test_case.options }} <br/>
+ {% endfor %}
+ {% endif %}
+ {% if question.type == "mcc" %}
+ {% for test_case in test_cases %}
+ <input name="answer" type="checkbox" value="{{ test_case.options }}"> {{ test_case.options }}
+ <br>
{% endfor %}
{% endif %}
{% if question.type == "upload" %}
@@ -174,12 +180,6 @@ function call_skip(url)
<input type=file id="assignment" name="assignment">
<hr>
{% endif %}
- {% if question.type == "mcc" %}
- {% for option in question.options.strip.splitlines %}
- <input name="answer" type="checkbox" value="{{ option }}"> {{ option }}
- <br>
- {% endfor %}
- {% endif %}
{% if question.type == "code" %}
<h3>Program:</h3>
<textarea rows="1" class="bash" readonly="yes" name="snippet" id="snippet" wrap="off" >{% if last_attempt %}{{ question.snippet }}{% else %}{% if question.type == "bash" %} #!/bin/bash&#13;&#10;{{ question.snippet }}{% else %}{{ question.snippet }}{% endif %}{% endif %}</textarea>
@@ -204,7 +204,7 @@ function call_skip(url)
{% endif %}
</form>
</div>
-
+
<!-- Modal -->
<div class="modal fade " id="upload_alert" >
<div class="modal-dialog">
diff --git a/yaksh/tests.py b/yaksh/tests.py
index 58b8518..f2083dd 100644
--- a/yaksh/tests.py
+++ b/yaksh/tests.py
@@ -1,6 +1,7 @@
import unittest
from yaksh.models import User, Profile, Question, Quiz, QuestionPaper,\
- QuestionSet, AnswerPaper, Answer, TestCase, Course
+ QuestionSet, AnswerPaper, Answer, Course, StandardTestCase,\
+ StdoutBasedTestCase
import json
from datetime import datetime, timedelta
from django.contrib.auth.models import Group
@@ -74,40 +75,38 @@ class QuestionTestCases(unittest.TestCase):
# Single question details
self.user1 = User.objects.get(pk=1)
self.user2 = User.objects.get(pk=2)
- self.question1 = Question(summary='Demo question', language='Python',
- type='Code', active=True,
- description='Write a function', points=1.0,
- snippet='def myfunc()', user=self.user1)
+ self.question1 = Question(summary='Demo question',
+ language='Python',
+ type='Code',
+ active=True,
+ test_case_type='standardtestcase',
+ description='Write a function',
+ points=1.0,
+ snippet='def myfunc()',
+ user=self.user1
+ )
self.question1.save()
- self.question2 = Question(summary='Demo Json', language='python',
- type='code', active=True,
- description='factorial of a no', points=2.0,
- snippet='def fact()', user=self.user2)
+ self.question2 = Question(summary='Demo Json',
+ language='python',
+ type='code',
+ active=True,
+ description='factorial of a no',
+ points=2.0,
+ snippet='def fact()',
+ user=self.user2
+ )
self.question2.save()
self.question1.tags.add('python', 'function')
- self.testcase = TestCase(question=self.question1,
- func_name='def myfunc', kw_args='a=10,b=11',
- pos_args='12,13', expected_answer='15')
- answer_data = { "test": "",
- "user_answer": "demo_answer",
- "test_parameter": [{"func_name": "def myfunc",
- "expected_answer": "15",
- "test_id": self.testcase.id,
- "pos_args": ["12", "13"],
- "kw_args": {"a": "10",
- "b": "11"}
- }],
- "id": self.question1.id,
- "ref_code_path": "",
- }
- self.answer_data_json = json.dumps(answer_data)
+ self.assertion_testcase = StandardTestCase(question=self.question1,
+ test_case='assert myfunc(12, 13) == 15'
+ )
self.user_answer = "demo_answer"
questions_data = [{"snippet": "def fact()", "active": True, "points": 1.0,
- "ref_code_path": "", "description": "factorial of a no",
- "language": "Python", "test": "", "type": "Code",
- "options": "", "summary": "Json Demo"}]
+ "description": "factorial of a no",
+ "language": "Python", "type": "Code",
+ "summary": "Json Demo"}]
self.json_questions_data = json.dumps(questions_data)
def test_question(self):
@@ -115,7 +114,6 @@ class QuestionTestCases(unittest.TestCase):
self.assertEqual(self.question1.summary, 'Demo question')
self.assertEqual(self.question1.language, 'Python')
self.assertEqual(self.question1.type, 'Code')
- self.assertFalse(self.question1.options)
self.assertEqual(self.question1.description, 'Write a function')
self.assertEqual(self.question1.points, 1.0)
self.assertTrue(self.question1.active)
@@ -125,12 +123,6 @@ class QuestionTestCases(unittest.TestCase):
tag_list.append(tag.name)
self.assertEqual(tag_list, ['python', 'function'])
- def test_consolidate_answer_data(self):
- """ Test consolidate_answer_data function """
- result = self.question1.consolidate_answer_data([self.testcase],
- self.user_answer)
- self.assertEqual(result, self.answer_data_json)
-
def test_dump_questions_into_json(self):
""" Test dump questions into json """
question = Question()
@@ -149,7 +141,7 @@ class QuestionTestCases(unittest.TestCase):
""" Test load questions into database from json """
question = Question()
result = question.load_from_json(self.json_questions_data, self.user1)
- question_data = Question.objects.get(pk=27)
+ question_data = Question.objects.get(pk=25)
self.assertEqual(question_data.summary, 'Json Demo')
self.assertEqual(question_data.language, 'Python')
self.assertEqual(question_data.type, 'Code')
@@ -159,28 +151,6 @@ class QuestionTestCases(unittest.TestCase):
self.assertEqual(question_data.snippet, 'def fact()')
###############################################################################
-class TestCaseTestCases(unittest.TestCase):
- def setUp(self):
- self.user = User.objects.get(pk=1)
- self.question = Question(summary='Demo question', language='Python',
- type='Code', active=True,
- description='Write a function', points=1.0,
- snippet='def myfunc()', user=self.user)
- self.question.save()
- self.testcase = TestCase(question=self.question,
- func_name='def myfunc', kw_args='a=10,b=11',
- pos_args='12,13', expected_answer='15')
-
- def test_testcase(self):
- """ Test question """
- self.assertEqual(self.testcase.question, self.question)
- self.assertEqual(self.testcase.func_name, 'def myfunc')
- self.assertEqual(self.testcase.kw_args, 'a=10,b=11')
- self.assertEqual(self.testcase.pos_args, '12,13')
- self.assertEqual(self.testcase.expected_answer, '15')
-
-
-###############################################################################
class QuizTestCases(unittest.TestCase):
def setUp(self):
self.quiz1 = Quiz.objects.get(pk=1)
@@ -223,7 +193,9 @@ class QuestionPaperTestCases(unittest.TestCase):
# create question paper
self.question_paper = QuestionPaper.objects.create(quiz=self.quiz,
- total_marks=0.0, shuffle_questions=True)
+ total_marks=0.0,
+ shuffle_questions=True
+ )
# add fixed set of questions to the question paper
self.question_paper.fixed_questions.add(self.questions[3],
@@ -231,23 +203,29 @@ class QuestionPaperTestCases(unittest.TestCase):
# create two QuestionSet for random questions
# QuestionSet 1
self.question_set_1 = QuestionSet.objects.create(marks=2,
- num_questions=2)
+ num_questions=2
+ )
# add pool of questions for random sampling
- self.question_set_1.questions.add(self.questions[6], self.questions[7],
- self.questions[8], self.questions[9])
+ self.question_set_1.questions.add(self.questions[6],
+ self.questions[7],
+ self.questions[8],
+ self.questions[9]
+ )
# add question set 1 to random questions in Question Paper
self.question_paper.random_questions.add(self.question_set_1)
# QuestionSet 2
self.question_set_2 = QuestionSet.objects.create(marks=3,
- num_questions=3)
+ num_questions=3
+ )
# add pool of questions
self.question_set_2.questions.add(self.questions[11],
- self.questions[12],
- self.questions[13],
- self.questions[14])
+ self.questions[12],
+ self.questions[13],
+ self.questions[14]
+ )
# add question set 2
self.question_paper.random_questions.add(self.question_set_2)
@@ -256,8 +234,10 @@ class QuestionPaperTestCases(unittest.TestCase):
self.user = User.objects.get(pk=1)
- self.attempted_papers = AnswerPaper.objects.filter(question_paper=self.question_paper,
- user=self.user)
+ self.attempted_papers = AnswerPaper.objects.filter(
+ question_paper=self.question_paper,
+ user=self.user
+ )
def test_questionpaper(self):
""" Test question paper"""
@@ -276,7 +256,8 @@ class QuestionPaperTestCases(unittest.TestCase):
""" Test get_random_questions() method of Question Paper"""
random_questions_set_1 = self.question_set_1.get_random_questions()
random_questions_set_2 = self.question_set_2.get_random_questions()
- total_random_questions = len(random_questions_set_1 + random_questions_set_2)
+ total_random_questions = len(random_questions_set_1 + \
+ random_questions_set_2)
self.assertEqual(total_random_questions, 5)
# To check whether random questions are from random_question_set
@@ -327,12 +308,15 @@ class AnswerPaperTestCases(unittest.TestCase):
# create answerpaper
self.answerpaper = AnswerPaper(user=self.user,
- question_paper=self.question_paper,
- start_time=self.start_time,
- end_time=self.end_time,
- user_ip=self.ip)
- self.attempted_papers = AnswerPaper.objects.filter(question_paper=self.question_paper,
- user=self.user)
+ question_paper=self.question_paper,
+ start_time=self.start_time,
+ end_time=self.end_time,
+ user_ip=self.ip
+ )
+ self.attempted_papers = AnswerPaper.objects.filter(
+ question_paper=self.question_paper,
+ user=self.user
+ )
already_attempted = self.attempted_papers.count()
self.answerpaper.attempt_number = already_attempted + 1
self.answerpaper.save()
@@ -341,9 +325,14 @@ class AnswerPaperTestCases(unittest.TestCase):
self.answerpaper.save()
# answers for the Answer Paper
self.answer_right = Answer(question=Question.objects.get(id=1),
- answer="Demo answer", correct=True, marks=1)
+ answer="Demo answer",
+ correct=True, marks=1
+ )
self.answer_wrong = Answer(question=Question.objects.get(id=2),
- answer="My answer", correct=False, marks=0)
+ answer="My answer",
+ correct=False,
+ marks=0
+ )
self.answer_right.save()
self.answer_wrong.save()
self.answerpaper.answers.add(self.answer_right)
@@ -489,12 +478,14 @@ class CourseTestCases(unittest.TestCase):
def test_get_quizzes(self):
""" Test get_quizzes method of Courses"""
- self.assertSequenceEqual(self.course.get_quizzes(), [self.quiz1, self.quiz2])
+ self.assertSequenceEqual(self.course.get_quizzes(),
+ [self.quiz1, self.quiz2])
def test_add_teachers(self):
""" Test to add teachers to a course"""
self.course.add_teachers(self.student1, self.student2)
- self.assertSequenceEqual(self.course.get_teachers(), [self.student1, self.student2])
+ self.assertSequenceEqual(self.course.get_teachers(),
+ [self.student1, self.student2])
def test_remove_teachers(self):
""" Test to remove teachers from a course"""
@@ -507,3 +498,67 @@ class CourseTestCases(unittest.TestCase):
self.course.add_teachers(self.student2)
result = self.course.is_teacher(self.student2)
self.assertTrue(result)
+
+
+###############################################################################
+class TestCaseTestCases(unittest.TestCase):
+ def setUp(self):
+ self.user = User.objects.get(pk=1)
+ self.question1 = Question(summary='Demo question 1',
+ language='Python',
+ type='Code',
+ active=True,
+ description='Write a function',
+ points=1.0,
+ test_case_type="standardtestcase",
+ user=self.user,
+ snippet='def myfunc()'
+ )
+ self.question2 = Question(summary='Demo question 2',
+ language='Python',
+ type='Code',
+ active=True,
+ description='Write to standard output',
+ points=1.0,
+ test_case_type="stdoutbasedtestcase",
+ user=self.user,
+ snippet='def myfunc()'
+ )
+ self.question1.save()
+ self.question2.save()
+ self.assertion_testcase = StandardTestCase(
+ question=self.question1,
+ test_case='assert myfunc(12, 13) == 15'
+ )
+ self.stdout_based_testcase = StdoutBasedTestCase(
+ question=self.question2,
+ expected_output='Hello World'
+ )
+ self.assertion_testcase.save()
+ self.stdout_based_testcase.save()
+ answer_data = {"user_answer": "demo_answer",
+ "test_case_data": [
+ {"test_case": "assert myfunc(12, 13) == 15"}
+ ]
+ }
+ self.answer_data_json = json.dumps(answer_data)
+
+ def test_assertion_testcase(self):
+ """ Test question """
+ self.assertEqual(self.assertion_testcase.question, self.question1)
+ self.assertEqual(self.assertion_testcase.test_case,
+ 'assert myfunc(12, 13) == 15')
+
+ def test_stdout_based_testcase(self):
+ """ Test question """
+ self.assertEqual(self.stdout_based_testcase.question, self.question2)
+ self.assertEqual(self.stdout_based_testcase.expected_output,
+ 'Hello World'
+ )
+
+ def test_consolidate_answer_data(self):
+ """ Test consolidate answer data model method """
+ result = self.question1.consolidate_answer_data(
+ user_answer="demo_answer"
+ )
+        self.assertEqual(result, self.answer_data_json)
\ No newline at end of file
diff --git a/yaksh/urls.py b/yaksh/urls.py
index 18a64c2..feac8c1 100644
--- a/yaksh/urls.py
+++ b/yaksh/urls.py
@@ -41,7 +41,7 @@ urlpatterns += [
url(r'^self_enroll/(?P<course_id>\d+)/$', views.self_enroll),
url(r'^manage/$', views.prof_manage),
url(r'^manage/addquestion/$', views.add_question),
- url(r'^manage/addquestion/(?P<question_id>\d+)/$', views.add_question),
+ url(r'^manage/addquestion/(?P<question_id>\d+)/$', views.edit_question),
url(r'^manage/addquiz/$', views.add_quiz),
url(r'^manage/addquiz/(?P<quiz_id>\d+)/$', views.add_quiz),
url(r'^manage/gradeuser/$', views.grade_user),
diff --git a/yaksh/views.py b/yaksh/views.py
index a986d4c..2a3adbf 100644
--- a/yaksh/views.py
+++ b/yaksh/views.py
@@ -15,15 +15,17 @@ from django.db.models import Sum, Max, Q
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import Group
+from django.forms.models import inlineformset_factory
from taggit.models import Tag
from itertools import chain
import json
# Local imports.
-from yaksh.models import Quiz, Question, QuestionPaper, QuestionSet, Course
+from yaksh.models import get_model_class, Quiz, Question, QuestionPaper, QuestionSet, Course
from yaksh.models import Profile, Answer, AnswerPaper, User, TestCase
from yaksh.forms import UserRegisterForm, UserLoginForm, QuizForm,\
- QuestionForm, RandomQuestionForm, TestCaseFormSet,\
- QuestionFilterForm, CourseForm, ProfileForm, UploadFileForm
+ QuestionForm, RandomQuestionForm,\
+ QuestionFilterForm, CourseForm, ProfileForm, UploadFileForm,\
+ get_object_form
from yaksh.xmlrpc_clients import code_server
from settings import URL_ROOT
from yaksh.models import AssignmentUpload
@@ -142,90 +144,77 @@ def results_user(request):
@login_required
-def add_question(request, question_id=None):
+def add_question(request):
"""To add a new question in the database.
Create a new question and store it."""
-
- def add_or_delete_test_form(post_request, instance):
- request_copy = post_request.copy()
- if 'add_test' in post_request:
- request_copy['test-TOTAL_FORMS'] = int(request_copy['test-TOTAL_FORMS']) + 1
- elif 'delete_test' in post_request:
- request_copy['test-TOTAL_FORMS'] = int(request_copy['test-TOTAL_FORMS']) - 1
- test_case_formset = TestCaseFormSet(request_copy, prefix='test', instance=instance)
- return test_case_formset
-
user = request.user
ci = RequestContext(request)
- if not user.is_authenticated() or not is_moderator(user):
- raise Http404('You are not allowed to view this page!')
- if request.method == "POST":
- form = QuestionForm(request.POST)
- if form.is_valid():
- if question_id is None:
- test_case_formset = add_or_delete_test_form(request.POST, form.save(commit=False))
- if 'save_question' in request.POST:
- qtn = form.save(commit=False)
- qtn.user = user
- qtn.save()
- test_case_formset = TestCaseFormSet(request.POST, prefix='test', instance=qtn)
- form.save()
- question = Question.objects.order_by("-id")[0]
- if test_case_formset.is_valid():
- test_case_formset.save()
- else:
- return my_render_to_response('yaksh/add_question.html',
- {'form': form,
- 'formset': test_case_formset},
- context_instance=ci)
-
- return my_redirect("/exam/manage/questions")
-
- return my_render_to_response('yaksh/add_question.html',
- {'form': form,
- 'formset': test_case_formset},
- context_instance=ci)
- else:
- d = Question.objects.get(id=question_id)
- form = QuestionForm(request.POST, instance=d)
- test_case_formset = add_or_delete_test_form(request.POST, d)
- if 'save_question' in request.POST:
- qtn = form.save(commit=False)
- test_case_formset = TestCaseFormSet(request.POST, prefix='test', instance=qtn)
- form.save()
- question = Question.objects.get(id=question_id)
- if test_case_formset.is_valid():
- test_case_formset.save()
- return my_redirect("/exam/manage/questions")
- return my_render_to_response('yaksh/add_question.html',
- {'form': form,
- 'formset': test_case_formset},
- context_instance=ci)
+ if request.method == "POST" and 'save_question' in request.POST:
+ question_form = QuestionForm(request.POST)
+ if question_form.is_valid():
+ new_question = question_form.save(commit=False)
+ new_question.user = user
+ new_question.save()
+ return my_redirect("/exam/manage/addquestion/{0}".format(new_question.id))
else:
- test_case_formset = TestCaseFormSet(prefix='test', instance=Question())
return my_render_to_response('yaksh/add_question.html',
- {'form': form,
- 'formset': test_case_formset},
+ {'form': question_form},
context_instance=ci)
else:
- if question_id is None:
- form = QuestionForm()
- test_case_formset = TestCaseFormSet(prefix='test', instance=Question())
- return my_render_to_response('yaksh/add_question.html',
- {'form': form,
- 'formset': test_case_formset},
- context_instance=ci)
- else:
- d = Question.objects.get(id=question_id)
- form = QuestionForm(instance=d)
- test_case_formset = TestCaseFormSet(prefix='test', instance=d)
+ question_form = QuestionForm()
+ return my_render_to_response('yaksh/add_question.html',
+ {'form': question_form},
+ context_instance=ci)
+@login_required
+def edit_question(request, question_id=None):
+ """To add a new question in the database.
+ Create a new question and store it."""
+ user = request.user
+ ci = RequestContext(request)
+ if not question_id:
+ raise Http404('No Question Found')
+
+ question_instance = Question.objects.get(id=question_id)
+
+ if request.method == "POST" and 'save_question' in request.POST:
+ question_form = QuestionForm(request.POST, instance=question_instance)
+ if question_form.is_valid():
+ new_question = question_form.save(commit=False)
+ test_case_type = question_form.cleaned_data.get('test_case_type')
+ test_case_form_class = get_object_form(model=test_case_type, exclude_fields=['question'])
+ test_case_model_class = get_model_class(test_case_type)
+ TestCaseInlineFormSet = inlineformset_factory(Question, test_case_model_class, form=test_case_form_class, extra=1)
+ test_case_formset = TestCaseInlineFormSet(request.POST, request.FILES, instance=new_question)
+ if test_case_formset.is_valid():
+ new_question.save()
+ test_case_formset.save()
+ return my_redirect("/exam/manage/addquestion/{0}".format(new_question.id))
+ else:
+ test_case_type = question_form.cleaned_data.get('test_case_type')
+ test_case_form_class = get_object_form(model=test_case_type, exclude_fields=['question'])
+ test_case_model_class = get_model_class(test_case_type)
+ TestCaseInlineFormSet = inlineformset_factory(Question, test_case_model_class, form=test_case_form_class, extra=1)
+ test_case_formset = TestCaseInlineFormSet(request.POST, request.FILES, instance=question_instance)
return my_render_to_response('yaksh/add_question.html',
- {'form': form,
- 'formset': test_case_formset},
+ {'form': question_form,
+ 'test_case_formset': test_case_formset,
+ 'question_id': question_id},
context_instance=ci)
-
+ else:
+ question_form = QuestionForm(instance=question_instance)
+ test_case_type = question_instance.test_case_type
+ test_case_form_class = get_object_form(model=test_case_type, exclude_fields=['question'])
+ test_case_model_class = get_model_class(test_case_type)
+ TestCaseInlineFormSet = inlineformset_factory(Question, test_case_model_class, form=test_case_form_class, extra=1)
+ test_case_formset = TestCaseInlineFormSet(instance=question_instance)
+
+ return my_render_to_response('yaksh/add_question.html',
+ {'form': question_form,
+ 'test_case_formset': test_case_formset,
+ 'question_id': question_id},
+ context_instance=ci)
@login_required
def add_quiz(request, quiz_id=None):
@@ -393,7 +382,9 @@ def show_question(request, question, paper, error_message=None):
if paper.time_left() <= 0:
reason='Your time is up!'
return complete(request, reason, paper.attempt_number, paper.question_paper.id)
- context = {'question': question, 'paper': paper, 'error_message': error_message}
+ test_cases = question.get_test_cases()
+ context = {'question': question, 'paper': paper, 'error_message': error_message,
+ 'test_cases': test_cases}
answers = paper.get_previous_answers(question)
if answers:
context['last_attempt'] = answers[0]
@@ -462,8 +453,7 @@ def check(request, q_id, attempt_num=None, questionpaper_id=None):
# If we were not skipped, we were asked to check. For any non-mcq
# questions, we obtain the results via XML-RPC with the code executed
# safely in a separate process (the code_server.py) running as nobody.
- test_cases = TestCase.objects.filter(question=question)
- json_data = question.consolidate_answer_data(test_cases, user_answer) \
+ json_data = question.consolidate_answer_data(user_answer) \
if question.type == 'code' else None
correct, result = validate_answer(user, user_answer, question, json_data)
if correct:
@@ -510,17 +500,18 @@ def validate_answer(user, user_answer, question, json_data=None):
if user_answer is not None:
if question.type == 'mcq':
- if user_answer.strip() == question.test.strip():
+ expected_answer = question.get_test_case(correct=True).options
+ if user_answer.strip() == expected_answer.strip():
correct = True
- message = 'Correct answer'
elif question.type == 'mcc':
- answers = set(question.test.splitlines())
- if set(user_answer) == answers:
+ expected_answers = []
+ for opt in question.get_test_cases(correct=True):
+ expected_answers.append(opt.options)
+ if set(user_answer) == set(expected_answers):
correct = True
- message = 'Correct answer'
elif question.type == 'code':
user_dir = get_user_dir(user)
- json_result = code_server.run_code(question.language, json_data, user_dir)
+ json_result = code_server.run_code(question.language, question.test_case_type, json_data, user_dir)
result = json.loads(json_result)
if result.get('success'):
correct = True
@@ -855,6 +846,7 @@ def show_all_questions(request):
return my_render_to_response('yaksh/showquestions.html', context,
context_instance=ci)
+
@login_required
def user_data(request, user_id, questionpaper_id=None):
"""Render user data."""
diff --git a/yaksh/xmlrpc_clients.py b/yaksh/xmlrpc_clients.py
index 3a3c0c6..7124550 100644
--- a/yaksh/xmlrpc_clients.py
+++ b/yaksh/xmlrpc_clients.py
@@ -23,7 +23,7 @@ class CodeServerProxy(object):
pool_url = 'http://localhost:%d' % (SERVER_POOL_PORT)
self.pool_server = ServerProxy(pool_url)
- def run_code(self, language, json_data, user_dir):
+ def run_code(self, language, test_case_type, json_data, user_dir):
"""Tests given code (`answer`) with the `test_code` supplied. If the
optional `in_dir` keyword argument is supplied it changes the directory
to that directory (it does not change it back to the original when
@@ -51,7 +51,7 @@ class CodeServerProxy(object):
try:
server = self._get_server()
- result = server.check_code(language, json_data, user_dir)
+ result = server.check_code(language, test_case_type, json_data, user_dir)
except ConnectionError:
result = json.dumps({'success': False, 'error': 'Unable to connect to any code servers!'})
return result
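
run_code() now threads the question's test_case_type through to the code server alongside the JSON payload built by consolidate_answer_data(). A hedged sketch of the caller side, with submit_for_evaluation as a made-up wrapper around the pieces shown in this diff:

    import json

    def submit_for_evaluation(code_server, question, user_answer, user_dir):
        json_data = question.consolidate_answer_data(user_answer)
        json_result = code_server.run_code(question.language,
                                           question.test_case_type,
                                           json_data, user_dir)
        # The server answers with a JSON document carrying at least a
        # "success" flag, which validate_answer() inspects.
        return json.loads(json_result)
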