-rw-r--r--  yaksh/bash_code_evaluator.py                     |   4
-rwxr-xr-x  yaksh/c_cpp_files/main2.c                        |   2
-rw-r--r--  yaksh/cpp_code_evaluator.py                      |   5
-rw-r--r--  yaksh/evaluator_tests/test_bash_evaluation.py    |  16
-rw-r--r--  yaksh/evaluator_tests/test_c_cpp_evaluation.py   |  40
-rw-r--r--  yaksh/evaluator_tests/test_java_evaluation.py    |  28
-rw-r--r--  yaksh/evaluator_tests/test_python_evaluation.py  |  87
-rw-r--r--  yaksh/evaluator_tests/test_scilab_evaluation.py  |  13
-rw-r--r--  yaksh/grader.py                                  |  22
-rw-r--r--  yaksh/java_code_evaluator.py                     |   3
-rw-r--r--  yaksh/models.py                                  |  30
-rw-r--r--  yaksh/python_assertion_evaluator.py              |  13
-rw-r--r--  yaksh/python_stdio_evaluator.py                  |   2
-rw-r--r--  yaksh/scilab_code_evaluator.py                   |   2
-rw-r--r--  yaksh/stdio_evaluator.py                         |   2
-rw-r--r--  yaksh/templates/yaksh/grade_user.html            |  36
-rw-r--r--  yaksh/templates/yaksh/question.html              |  40
-rw-r--r--  yaksh/templates/yaksh/user_data.html             |   8
-rw-r--r--  yaksh/test_models.py                             |  14
-rw-r--r--  yaksh/tests/test_code_server.py                  |   7
-rw-r--r--  yaksh/views.py                                   |  70
-rw-r--r--  yaksh/xmlrpc_clients.py                          |   3
22 files changed, 231 insertions(+), 216 deletions(-)
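
This commit changes the evaluation pipeline's error contract: on success the evaluators return None instead of a "Correct answer" string, and the grader collects failures into a list, so the result's 'error' field is now a list of per-test-case messages rather than one concatenated string. A minimal sketch of the result shape implied by these changes (illustrative values, not taken from the diff):

    # 'error' is a list of per-test-case messages, empty when all pass.
    result_pass = {'success': True, 'error': [], 'weight': 2.0}
    result_fail = {'success': False,
                   'error': ['AssertionError in: assert(add(1,2)==3)'],
                   'weight': 0.0}
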
diff --git a/yaksh/bash_code_evaluator.py b/yaksh/bash_code_evaluator.py
index 1e6fc9c..975af82 100644
--- a/yaksh/bash_code_evaluator.py
+++ b/yaksh/bash_code_evaluator.py
@@ -104,7 +104,7 @@ class BashCodeEvaluator(BaseEvaluator):
proc, stdnt_stdout, stdnt_stderr = ret
if inst_stdout == stdnt_stdout:
mark_fraction = float(self.weight) if self.partial_grading else 0.0
- return True, "Correct answer", mark_fraction
+ return True, None, mark_fraction
else:
err = "Error: expected %s, got %s" % (inst_stderr,
stdnt_stderr
@@ -147,7 +147,7 @@ class BashCodeEvaluator(BaseEvaluator):
valid_answer = inst_stdout == stdnt_stdout
if valid_answer and (num_lines == loop_count):
mark_fraction = float(self.weight) if self.partial_grading else 0.0
- return True, "Correct answer", mark_fraction
+ return True, None, mark_fraction
else:
err = ("Error:expected"
" {0}, got {1}").format(inst_stdout+inst_stderr,
diff --git a/yaksh/c_cpp_files/main2.c b/yaksh/c_cpp_files/main2.c
index ccd1768..a62195f 100755
--- a/yaksh/c_cpp_files/main2.c
+++ b/yaksh/c_cpp_files/main2.c
@@ -13,7 +13,7 @@ void check(T expect,T result)
else
{
printf("\nIncorrect:\n Expected %d got %d \n",expect,result);
- exit (0);
+ exit (1);
}
}
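
The C test harness now exits nonzero on a wrong result, which matters because the evaluators decide pass/fail from the process return code. An illustrative check in the style of the Python evaluators (hypothetical binary path, not from the diff):

    import subprocess

    # returncode 0 means every check() in the C harness passed.
    proc = subprocess.Popen(['./submit_binary'])  # hypothetical compiled test
    proc.wait()
    success = proc.returncode == 0
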
diff --git a/yaksh/cpp_code_evaluator.py b/yaksh/cpp_code_evaluator.py
index f0c2029..91ba703 100644
--- a/yaksh/cpp_code_evaluator.py
+++ b/yaksh/cpp_code_evaluator.py
@@ -15,7 +15,7 @@ class CppCodeEvaluator(BaseEvaluator):
"""Tests the C code obtained from Code Server"""
def __init__(self, metadata, test_case_data):
self.files = []
- self.submit_code_path = self.create_submit_code_file('submit.c')
+ self.submit_code_path = ''
self.compiled_user_answer = None
self.compiled_test_code = None
self.user_output_path = ""
@@ -62,6 +62,7 @@ class CppCodeEvaluator(BaseEvaluator):
ref_code_path = self.test_case
clean_ref_code_path, clean_test_case_path = \
self._set_test_code_file_path(ref_code_path)
+ self.submit_code_path = self.create_submit_code_file('submit.c')
if self.file_paths:
self.files = copy_files(self.file_paths)
if not isfile(clean_ref_code_path):
@@ -133,7 +134,7 @@ class CppCodeEvaluator(BaseEvaluator):
)
proc, stdout, stderr = ret
if proc.returncode == 0:
- success, err = True, "Correct answer"
+ success, err = True, None
mark_fraction = float(self.weight) if self.partial_grading else 0.0
else:
err = "{0} \n {1}".format(stdout, stderr)
diff --git a/yaksh/evaluator_tests/test_bash_evaluation.py b/yaksh/evaluator_tests/test_bash_evaluation.py
index 06a56e4..abadf26 100644
--- a/yaksh/evaluator_tests/test_bash_evaluation.py
+++ b/yaksh/evaluator_tests/test_bash_evaluation.py
@@ -6,11 +6,12 @@ import tempfile
from yaksh.grader import Grader
from yaksh.bash_code_evaluator import BashCodeEvaluator
from yaksh.bash_stdio_evaluator import BashStdIOEvaluator
+from yaksh.evaluator_tests.test_python_evaluation import EvaluatorBaseTest
from yaksh.settings import SERVER_TIMEOUT
from textwrap import dedent
-class BashAssertionEvaluationTestCases(unittest.TestCase):
+class BashAssertionEvaluationTestCases(EvaluatorBaseTest):
def setUp(self):
with open('/tmp/test.txt', 'wb') as f:
f.write('2'.encode('ascii'))
@@ -48,7 +49,6 @@ class BashAssertionEvaluationTestCases(unittest.TestCase):
result = grader.evaluate(kwargs)
self.assertTrue(result.get('success'))
- self.assertEqual(result.get('error'), "Correct answer\n")
def test_error(self):
user_answer = ("#!/bin/bash\n[[ $# -eq 2 ]] "
@@ -67,7 +67,7 @@ class BashAssertionEvaluationTestCases(unittest.TestCase):
result = grader.evaluate(kwargs)
self.assertFalse(result.get("success"))
- self.assertTrue("Error" in result.get("error"))
+ self.assert_correct_output("Error", result.get("error"))
def test_infinite_loop(self):
user_answer = ("#!/bin/bash\nwhile [ 1 ] ;"
@@ -86,7 +86,7 @@ class BashAssertionEvaluationTestCases(unittest.TestCase):
result = grader.evaluate(kwargs)
self.assertFalse(result.get("success"))
- self.assertEqual(result.get("error"), self.timeout_msg)
+ self.assert_correct_output(self.timeout_msg, result.get("error"))
def test_file_based_assert(self):
self.file_paths = [('/tmp/test.txt', False)]
@@ -111,9 +111,8 @@ class BashAssertionEvaluationTestCases(unittest.TestCase):
result = grader.evaluate(kwargs)
self.assertTrue(result.get("success"))
- self.assertEqual(result.get("error"), "Correct answer\n")
-class BashStdIOEvaluationTestCases(unittest.TestCase):
+class BashStdIOEvaluationTestCases(EvaluatorBaseTest):
def setUp(self):
self.in_dir = tempfile.mkdtemp()
self.timeout_msg = ("Code took more than {0} seconds to run. "
@@ -147,7 +146,6 @@ class BashStdIOEvaluationTestCases(unittest.TestCase):
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_array_input(self):
@@ -177,7 +175,6 @@ class BashStdIOEvaluationTestCases(unittest.TestCase):
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_incorrect_answer(self):
@@ -204,7 +201,7 @@ class BashStdIOEvaluationTestCases(unittest.TestCase):
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- self.assertIn("Incorrect", result.get('error'))
+ self.assert_correct_output("Incorrect", result.get('error'))
self.assertFalse(result.get('success'))
def test_stdout_only(self):
@@ -231,7 +228,6 @@ class BashStdIOEvaluationTestCases(unittest.TestCase):
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
if __name__ == '__main__':
diff --git a/yaksh/evaluator_tests/test_c_cpp_evaluation.py b/yaksh/evaluator_tests/test_c_cpp_evaluation.py
index dc6fdc9..ec59a6b 100644
--- a/yaksh/evaluator_tests/test_c_cpp_evaluation.py
+++ b/yaksh/evaluator_tests/test_c_cpp_evaluation.py
@@ -9,11 +9,11 @@ from textwrap import dedent
from yaksh.grader import Grader
from yaksh.cpp_code_evaluator import CppCodeEvaluator
from yaksh.cpp_stdio_evaluator import CppStdIOEvaluator
+from yaksh.evaluator_tests.test_python_evaluation import EvaluatorBaseTest
from yaksh.settings import SERVER_TIMEOUT
-
-class CAssertionEvaluationTestCases(unittest.TestCase):
+class CAssertionEvaluationTestCases(EvaluatorBaseTest):
def setUp(self):
with open('/tmp/test.txt', 'wb') as f:
f.write('2'.encode('ascii'))
@@ -48,7 +48,6 @@ class CAssertionEvaluationTestCases(unittest.TestCase):
result = grader.evaluate(kwargs)
self.assertTrue(result.get('success'))
- self.assertEqual(result.get('error'), "Correct answer\n")
def test_incorrect_answer(self):
user_answer = "int add(int a, int b)\n{return a-b;}"
@@ -65,9 +64,9 @@ class CAssertionEvaluationTestCases(unittest.TestCase):
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- lines_of_error = len(result.get('error').splitlines())
+ lines_of_error = len(result.get('error')[0].splitlines())
self.assertFalse(result.get('success'))
- self.assertIn("Incorrect:", result.get('error'))
+ self.assert_correct_output("Incorrect:", result.get('error'))
self.assertTrue(lines_of_error > 1)
def test_compilation_error(self):
@@ -86,7 +85,7 @@ class CAssertionEvaluationTestCases(unittest.TestCase):
result = grader.evaluate(kwargs)
self.assertFalse(result.get("success"))
- self.assertTrue("Compilation Error" in result.get("error"))
+ self.assert_correct_output("Compilation Error", result.get("error"))
def test_infinite_loop(self):
user_answer = "int add(int a, int b)\n{while(1>0){}}"
@@ -104,7 +103,7 @@ class CAssertionEvaluationTestCases(unittest.TestCase):
result = grader.evaluate(kwargs)
self.assertFalse(result.get("success"))
- self.assertEqual(result.get("error"), self.timeout_msg)
+ self.assert_correct_output(self.timeout_msg, result.get("error"))
def test_file_based_assert(self):
self.file_paths = [('/tmp/test.txt', False)]
@@ -138,9 +137,8 @@ class CAssertionEvaluationTestCases(unittest.TestCase):
result = grader.evaluate(kwargs)
self.assertTrue(result.get('success'))
- self.assertEqual(result.get('error'), "Correct answer\n")
-class CppStdIOEvaluationTestCases(unittest.TestCase):
+class CppStdIOEvaluationTestCases(EvaluatorBaseTest):
def setUp(self):
self.test_case_data = [{'expected_output': '11',
'expected_input': '5\n6',
@@ -174,7 +172,6 @@ class CppStdIOEvaluationTestCases(unittest.TestCase):
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_array_input(self):
@@ -205,7 +202,6 @@ class CppStdIOEvaluationTestCases(unittest.TestCase):
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_string_input(self):
@@ -234,7 +230,6 @@ class CppStdIOEvaluationTestCases(unittest.TestCase):
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_incorrect_answer(self):
@@ -257,9 +252,9 @@ class CppStdIOEvaluationTestCases(unittest.TestCase):
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- lines_of_error = len(result.get('error').splitlines())
+ lines_of_error = len(result.get('error')[0].splitlines())
self.assertFalse(result.get('success'))
- self.assertIn("Incorrect", result.get('error'))
+ self.assert_correct_output("Incorrect", result.get('error'))
self.assertTrue(lines_of_error > 1)
def test_error(self):
@@ -283,7 +278,7 @@ class CppStdIOEvaluationTestCases(unittest.TestCase):
result = grader.evaluate(kwargs)
self.assertFalse(result.get("success"))
- self.assertTrue("Compilation Error" in result.get("error"))
+ self.assert_correct_output("Compilation Error", result.get("error"))
def test_infinite_loop(self):
user_answer = dedent("""
@@ -306,7 +301,7 @@ class CppStdIOEvaluationTestCases(unittest.TestCase):
result = grader.evaluate(kwargs)
self.assertFalse(result.get("success"))
- self.assertEqual(result.get("error"), self.timeout_msg)
+ self.assert_correct_output(self.timeout_msg, result.get("error"))
def test_only_stdout(self):
self.test_case_data = [{'expected_output': '11',
@@ -333,7 +328,6 @@ class CppStdIOEvaluationTestCases(unittest.TestCase):
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_cpp_correct_answer(self):
@@ -358,7 +352,6 @@ class CppStdIOEvaluationTestCases(unittest.TestCase):
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_cpp_array_input(self):
@@ -390,7 +383,6 @@ class CppStdIOEvaluationTestCases(unittest.TestCase):
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_cpp_string_input(self):
@@ -420,7 +412,6 @@ class CppStdIOEvaluationTestCases(unittest.TestCase):
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_cpp_incorrect_answer(self):
@@ -444,9 +435,9 @@ class CppStdIOEvaluationTestCases(unittest.TestCase):
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- lines_of_error = len(result.get('error').splitlines())
+ lines_of_error = len(result.get('error')[0].splitlines())
self.assertFalse(result.get('success'))
- self.assertIn("Incorrect", result.get('error'))
+ self.assert_correct_output("Incorrect", result.get('error'))
self.assertTrue(lines_of_error > 1)
def test_cpp_error(self):
@@ -471,7 +462,7 @@ class CppStdIOEvaluationTestCases(unittest.TestCase):
result = grader.evaluate(kwargs)
self.assertFalse(result.get("success"))
- self.assertTrue("Compilation Error" in result.get("error"))
+ self.assert_correct_output("Compilation Error", result.get("error"))
def test_cpp_infinite_loop(self):
user_answer = dedent("""
@@ -495,7 +486,7 @@ class CppStdIOEvaluationTestCases(unittest.TestCase):
result = grader.evaluate(kwargs)
self.assertFalse(result.get("success"))
- self.assertEqual(result.get("error"), self.timeout_msg)
+ self.assert_correct_output(self.timeout_msg, result.get("error"))
def test_cpp_only_stdout(self):
self.test_case_data = [{'expected_output': '11',
@@ -523,7 +514,6 @@ class CppStdIOEvaluationTestCases(unittest.TestCase):
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
if __name__ == '__main__':
diff --git a/yaksh/evaluator_tests/test_java_evaluation.py b/yaksh/evaluator_tests/test_java_evaluation.py
index 36eb6a5..bfba38f 100644
--- a/yaksh/evaluator_tests/test_java_evaluation.py
+++ b/yaksh/evaluator_tests/test_java_evaluation.py
@@ -10,9 +10,10 @@ from yaksh import grader as gd
from yaksh.grader import Grader
from yaksh.java_code_evaluator import JavaCodeEvaluator
from yaksh.java_stdio_evaluator import JavaStdIOEvaluator
+from yaksh.evaluator_tests.test_python_evaluation import EvaluatorBaseTest
-class JavaAssertionEvaluationTestCases(unittest.TestCase):
+class JavaAssertionEvaluationTestCases(EvaluatorBaseTest):
def setUp(self):
with open('/tmp/test.txt', 'wb') as f:
f.write('2'.encode('ascii'))
@@ -51,7 +52,6 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase):
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_incorrect_answer(self):
@@ -70,9 +70,9 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase):
result = grader.evaluate(kwargs)
self.assertFalse(result.get('success'))
- lines_of_error = len(result.get('error').splitlines())
+ lines_of_error = len(result.get('error')[0].splitlines())
self.assertFalse(result.get('success'))
- self.assertIn("Incorrect", result.get('error'))
+ self.assert_correct_output("Incorrect", result.get('error'))
self.assertTrue(lines_of_error > 1)
def test_error(self):
@@ -91,7 +91,7 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase):
result = grader.evaluate(kwargs)
self.assertFalse(result.get("success"))
- self.assertTrue("Error" in result.get("error"))
+ self.assert_correct_output("Error", result.get("error"))
def test_infinite_loop(self):
user_answer = "class Test {\n\tint square_num(int a) {\n\t\twhile(0==0){\n\t\t}\n\t}\n}"
@@ -109,7 +109,7 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase):
result = grader.evaluate(kwargs)
self.assertFalse(result.get("success"))
- self.assertEqual(result.get("error"), self.timeout_msg)
+ self.assert_correct_output(self.timeout_msg, result.get("error"))
def test_file_based_assert(self):
self.file_paths = [("/tmp/test.txt", False)]
@@ -151,9 +151,8 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase):
result = grader.evaluate(kwargs)
self.assertTrue(result.get("success"))
- self.assertEqual(result.get("error"), "Correct answer\n")
-class JavaStdIOEvaluationTestCases(unittest.TestCase):
+class JavaStdIOEvaluationTestCases(EvaluatorBaseTest):
def setUp(self):
with open('/tmp/test.txt', 'wb') as f:
f.write('2'.encode('ascii'))
@@ -198,7 +197,6 @@ class JavaStdIOEvaluationTestCases(unittest.TestCase):
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_array_input(self):
@@ -230,7 +228,6 @@ class JavaStdIOEvaluationTestCases(unittest.TestCase):
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_incorrect_answer(self):
@@ -256,9 +253,9 @@ class JavaStdIOEvaluationTestCases(unittest.TestCase):
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- lines_of_error = len(result.get('error').splitlines())
+ lines_of_error = len(result.get('error')[0].splitlines())
self.assertFalse(result.get('success'))
- self.assertIn("Incorrect", result.get('error'))
+ self.assert_correct_output("Incorrect", result.get('error'))
self.assertTrue(lines_of_error > 1)
def test_error(self):
@@ -281,7 +278,7 @@ class JavaStdIOEvaluationTestCases(unittest.TestCase):
result = grader.evaluate(kwargs)
self.assertFalse(result.get("success"))
- self.assertTrue("Compilation Error" in result.get("error"))
+ self.assertTrue("Compilation Error" in '\n'.join(result.get("error")))
def test_infinite_loop(self):
user_answer = dedent("""
@@ -305,7 +302,7 @@ class JavaStdIOEvaluationTestCases(unittest.TestCase):
result = grader.evaluate(kwargs)
self.assertFalse(result.get("success"))
- self.assertEqual(result.get("error"), self.timeout_msg)
+ self.assert_correct_output(self.timeout_msg, result.get("error"))
def test_only_stdout(self):
self.test_case_data = [{'expected_output': '11',
@@ -333,7 +330,6 @@ class JavaStdIOEvaluationTestCases(unittest.TestCase):
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_string_input(self):
@@ -364,7 +360,6 @@ class JavaStdIOEvaluationTestCases(unittest.TestCase):
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_file_based_stdout(self):
@@ -406,7 +401,6 @@ class JavaStdIOEvaluationTestCases(unittest.TestCase):
result = grader.evaluate(kwargs)
self.assertTrue(result.get("success"))
- self.assertEqual(result.get("error"), "Correct answer\n")
if __name__ == '__main__':
diff --git a/yaksh/evaluator_tests/test_python_evaluation.py b/yaksh/evaluator_tests/test_python_evaluation.py
index e638049..c58d7f1 100644
--- a/yaksh/evaluator_tests/test_python_evaluation.py
+++ b/yaksh/evaluator_tests/test_python_evaluation.py
@@ -11,7 +11,14 @@ from yaksh.python_assertion_evaluator import PythonAssertionEvaluator
from yaksh.python_stdio_evaluator import PythonStdIOEvaluator
from yaksh.settings import SERVER_TIMEOUT
-class PythonAssertionEvaluationTestCases(unittest.TestCase):
+
+class EvaluatorBaseTest(unittest.TestCase):
+ def assert_correct_output(self, expected_output, actual_output):
+ actual_output_as_string = ''.join(actual_output)
+ self.assertIn(expected_output, actual_output_as_string)
+
+
+class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
def setUp(self):
with open('/tmp/test.txt', 'wb') as f:
f.write('2'.encode('ascii'))
@@ -49,7 +56,6 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
# Then
self.assertTrue(result.get('success'))
- self.assertIn("Correct answer", result.get('error'))
def test_incorrect_answer(self):
# Given
@@ -70,13 +76,13 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
# Then
self.assertFalse(result.get('success'))
- self.assertIn('AssertionError in: assert(add(1,2)==3)',
+ self.assert_correct_output('AssertionError in: assert(add(1,2)==3)',
result.get('error')
)
- self.assertIn('AssertionError in: assert(add(-1,2)==1)',
+ self.assert_correct_output('AssertionError in: assert(add(-1,2)==1)',
result.get('error')
)
- self.assertIn('AssertionError in: assert(add(-1,-2)==-3)',
+ self.assert_correct_output('AssertionError in: assert(add(-1,-2)==-3)',
result.get('error')
)
@@ -104,10 +110,10 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
# Then
self.assertFalse(result.get('success'))
self.assertEqual(result.get('weight'), 2.0)
- self.assertIn('AssertionError in: assert(add(-1,2)==1)',
+ self.assert_correct_output('AssertionError in: assert(add(-1,2)==1)',
result.get('error')
)
- self.assertIn('AssertionError in: assert(add(-1,-2)==-3)',
+ self.assert_correct_output('AssertionError in: assert(add(-1,-2)==-3)',
result.get('error')
)
@@ -130,7 +136,7 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
# Then
self.assertFalse(result.get('success'))
- self.assertEqual(result.get('error'), self.timeout_msg)
+ self.assert_correct_output(self.timeout_msg, result.get('error'))
def test_syntax_error(self):
# Given
@@ -159,13 +165,14 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
# When
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- err = result.get("error").splitlines()
+ error_as_str = ''.join(result.get("error"))
+ err = error_as_str.splitlines()
# Then
self.assertFalse(result.get("success"))
self.assertEqual(5, len(err))
for msg in syntax_error_msg:
- self.assertIn(msg, result.get("error"))
+ self.assert_correct_output(msg, result.get("error"))
def test_indent_error(self):
# Given
@@ -193,13 +200,13 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
# When
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- err = result.get("error").splitlines()
+ err = result.get("error")[0].splitlines()
# Then
self.assertFalse(result.get("success"))
self.assertEqual(5, len(err))
for msg in indent_error_msg:
- self.assertIn(msg, result.get("error"))
+ self.assert_correct_output(msg, result.get("error"))
def test_name_error(self):
# Given
@@ -224,13 +231,14 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
# When
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- err = result.get("error").splitlines()
+ error_as_str = ''.join(result.get("error"))
+ err = error_as_str.splitlines()
# Then
self.assertFalse(result.get("success"))
- self.assertEqual(9, len(err))
+ self.assertEqual(6, len(err))
for msg in name_error_msg:
- self.assertIn(msg, result.get("error"))
+ self.assert_correct_output(msg, result.get("error"))
def test_recursion_error(self):
# Given
@@ -256,12 +264,13 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
# When
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- err = result.get("error").splitlines()
+ error_as_str = ''.join(result.get("error"))
+ err = error_as_str.splitlines()
# Then
self.assertFalse(result.get("success"))
for msg in recursion_error_msg:
- self.assertIn(msg, result.get("error"))
+ self.assert_correct_output(msg, result.get("error"))
def test_type_error(self):
# Given
@@ -288,13 +297,14 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
# When
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- err = result.get("error").splitlines()
+ error_as_str = ''.join(result.get("error"))
+ err = error_as_str.splitlines()
# Then
self.assertFalse(result.get("success"))
- self.assertEqual(9, len(err))
+ self.assertEqual(6, len(err))
for msg in type_error_msg:
- self.assertIn(msg, result.get("error"))
+ self.assert_correct_output(msg, result.get("error"))
def test_value_error(self):
# Given
@@ -323,13 +333,14 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
# When
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- err = result.get("error").splitlines()
+ error_as_str = ''.join(result.get("error"))
+ err = error_as_str.splitlines()
# Then
self.assertFalse(result.get("success"))
- self.assertEqual(9, len(err))
+ self.assertEqual(6, len(err))
for msg in value_error_msg:
- self.assertIn(msg, result.get("error"))
+ self.assert_correct_output(msg, result.get("error"))
def test_file_based_assert(self):
# Given
@@ -356,7 +367,6 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
result = grader.evaluate(kwargs)
# Then
- self.assertIn("Correct answer", result.get('error'))
self.assertTrue(result.get('success'))
def test_single_testcase_error(self):
@@ -378,8 +388,7 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
"invalid syntax"
]
- kwargs = {
- 'metadata': {
+ kwargs = {'metadata': {
'user_answer': user_answer,
'file_paths': self.file_paths,
'partial_grading': False,
@@ -391,13 +400,14 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
# When
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- err = result.get("error").splitlines()
-
+ error_as_str = ''.join(result.get("error"))
+ err = error_as_str.splitlines()
+
# Then
self.assertFalse(result.get("success"))
- self.assertEqual(6, len(err))
+ self.assertEqual(5, len(err))
for msg in syntax_error_msg:
- self.assertIn(msg, result.get("error"))
+ self.assert_correct_output(msg, result.get("error"))
def test_multiple_testcase_error(self):
@@ -432,15 +442,16 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
# When
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- err = result.get("error").splitlines()
+ error_as_str = ''.join(result.get("error"))
+ err = error_as_str.splitlines()
# Then
self.assertFalse(result.get("success"))
- self.assertEqual(7, len(err))
+ self.assertEqual(2, len(err))
for msg in name_error_msg:
- self.assertIn(msg, result.get("error"))
+ self.assert_correct_output(msg, result.get("error"))
-class PythonStdIOEvaluationTestCases(unittest.TestCase):
+class PythonStdIOEvaluationTestCases(EvaluatorBaseTest):
def setUp(self):
with open('/tmp/test.txt', 'wb') as f:
f.write('2'.encode('ascii'))
@@ -476,7 +487,6 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase):
# Then
self.assertTrue(result.get('success'))
- self.assertIn("Correct answer", result.get('error'))
def test_correct_answer_list(self):
# Given
@@ -510,7 +520,6 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase):
# Then
self.assertTrue(result.get('success'))
- self.assertIn("Correct answer", result.get('error'))
def test_correct_answer_string(self):
# Given
@@ -542,7 +551,6 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase):
# Then
self.assertTrue(result.get('success'))
- self.assertIn("Correct answer", result.get('error'))
def test_incorrect_answer_integer(self):
# Given
@@ -572,7 +580,7 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase):
# Then
self.assertFalse(result.get('success'))
- self.assertIn("Incorrect answer", result.get('error'))
+ self.assert_correct_output("Incorrect answer", result.get('error'))
def test_file_based_answer(self):
# Given
@@ -603,7 +611,6 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase):
result = grader.evaluate(kwargs)
# Then
- self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_infinite_loop(self):
@@ -632,7 +639,7 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase):
result = grader.evaluate(kwargs)
# Then
- self.assertEqual(result.get('error'), timeout_msg)
+ self.assert_correct_output(timeout_msg, result.get('error'))
self.assertFalse(result.get('success'))
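
The new EvaluatorBaseTest helper is what all the test modules above now import: since 'error' is a list, substring assertions join it into one string first. A self-contained usage sketch (the demo test is hypothetical):

    import unittest

    class EvaluatorBaseTest(unittest.TestCase):
        def assert_correct_output(self, expected_output, actual_output):
            self.assertIn(expected_output, ''.join(actual_output))

    class DemoTest(EvaluatorBaseTest):
        def test_substring_in_error_list(self):
            self.assert_correct_output('Error', ['Error: expected 1, got 2'])

    if __name__ == '__main__':
        unittest.main()
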
diff --git a/yaksh/evaluator_tests/test_scilab_evaluation.py b/yaksh/evaluator_tests/test_scilab_evaluation.py
index 0275ee8..938d0e5 100644
--- a/yaksh/evaluator_tests/test_scilab_evaluation.py
+++ b/yaksh/evaluator_tests/test_scilab_evaluation.py
@@ -7,8 +7,10 @@ import tempfile
from yaksh import grader as gd
from yaksh.grader import Grader
from yaksh.scilab_code_evaluator import ScilabCodeEvaluator
+from yaksh.evaluator_tests.test_python_evaluation import EvaluatorBaseTest
-class ScilabEvaluationTestCases(unittest.TestCase):
+
+class ScilabEvaluationTestCases(EvaluatorBaseTest):
def setUp(self):
tmp_in_dir_path = tempfile.mkdtemp()
self.test_case_data = [{"test_case": "scilab_files/test_add.sce",
@@ -42,7 +44,6 @@ class ScilabEvaluationTestCases(unittest.TestCase):
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_error(self):
@@ -62,7 +63,7 @@ class ScilabEvaluationTestCases(unittest.TestCase):
result = grader.evaluate(kwargs)
self.assertFalse(result.get("success"))
- self.assertTrue('error' in result.get("error"))
+ self.assert_correct_output('error', result.get("error"))
def test_incorrect_answer(self):
@@ -81,9 +82,9 @@ class ScilabEvaluationTestCases(unittest.TestCase):
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- lines_of_error = len(result.get('error').splitlines())
+ lines_of_error = len(result.get('error')[0].splitlines())
self.assertFalse(result.get('success'))
- self.assertIn("Message", result.get('error'))
+ self.assert_correct_output("Message", result.get('error'))
self.assertTrue(lines_of_error > 1)
def test_infinite_loop(self):
@@ -103,7 +104,7 @@ class ScilabEvaluationTestCases(unittest.TestCase):
result = grader.evaluate(kwargs)
self.assertFalse(result.get("success"))
- self.assertEqual(result.get("error"), self.timeout_msg)
+ self.assert_correct_output(self.timeout_msg, result.get("error"))
if __name__ == '__main__':
unittest.main()
diff --git a/yaksh/grader.py b/yaksh/grader.py
index ef349e0..0c057c2 100644
--- a/yaksh/grader.py
+++ b/yaksh/grader.py
@@ -24,7 +24,6 @@ from .language_registry import create_evaluator_instance
MY_DIR = abspath(dirname(__file__))
-
registry = None
# Raised when the code times-out.
@@ -34,7 +33,7 @@ class TimeoutException(Exception):
@contextlib.contextmanager
def change_dir(path):
- cur_dir = os.getcwd()
+ cur_dir = abspath(dirname(MY_DIR))
os.chdir(path)
try:
yield
@@ -77,7 +76,7 @@ class Grader(object):
self.in_dir = in_dir if in_dir else MY_DIR
- def evaluate(self, kwargs): #language, test_case_type,
+ def evaluate(self, kwargs):
"""Evaluates given code with the test cases based on
given arguments in test_case_data.
@@ -96,9 +95,8 @@ class Grader(object):
Returns
-------
- A tuple: (success, error message, weight).
+ A tuple: (success, error, weight).
"""
-
self.setup()
test_case_instances = self.get_evaluator_objects(kwargs)
with change_dir(self.in_dir):
@@ -135,7 +133,7 @@ class Grader(object):
prev_handler = create_signal_handler()
success = False
test_case_success_status = [False] * len(test_case_instances)
- error = ""
+ error = []
weight = 0.0
# Do whatever testing needed.
@@ -147,8 +145,8 @@ class Grader(object):
test_case_success, err, mark_fraction = test_case_instance.check_code()
if test_case_success:
weight += mark_fraction
-
- error += err + "\n"
+ else:
+ error.append(err)
test_case_success_status[idx] = test_case_success
success = all(test_case_success_status)
@@ -157,16 +155,16 @@ class Grader(object):
test_case_instance.teardown()
except TimeoutException:
- error = self.timeout_msg
+ error.append(self.timeout_msg)
except OSError:
msg = traceback.format_exc(limit=0)
- error = "Error: {0}".format(msg)
- except Exception as e:
+ error.append("Error: {0}".format(msg))
+ except Exception:
exc_type, exc_value, exc_tb = sys.exc_info()
tb_list = traceback.format_exception(exc_type, exc_value, exc_tb)
if len(tb_list) > 2:
del tb_list[1:3]
- error = "Error: {0}".format("".join(tb_list))
+ error.append("Error: {0}".format("".join(tb_list)))
finally:
# Set back any original signal handler.
set_original_signal_handler(prev_handler)
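
The grader now accumulates only failures: err is appended when a test case fails, and the timeout, OSError, and generic-exception paths append to the same list instead of overwriting a string. A condensed sketch of the new loop (simplified; the real method also handles signals and teardown):

    def run_test_cases(test_case_instances):
        error, weight, statuses = [], 0.0, []
        for instance in test_case_instances:
            ok, err, mark_fraction = instance.check_code()
            if ok:
                weight += mark_fraction
            else:
                error.append(err)   # only failures are recorded
            statuses.append(ok)
        return all(statuses), error, weight
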
diff --git a/yaksh/java_code_evaluator.py b/yaksh/java_code_evaluator.py
index 5d3fd28..91e5840 100644
--- a/yaksh/java_code_evaluator.py
+++ b/yaksh/java_code_evaluator.py
@@ -18,6 +18,7 @@ class JavaCodeEvaluator(BaseEvaluator):
self.files = []
self.compiled_user_answer = None
self.compiled_test_code = None
+ self.submit_code_path = ""
self.user_output_path = ""
self.ref_output_path = ""
@@ -142,7 +143,7 @@ class JavaCodeEvaluator(BaseEvaluator):
stderr=subprocess.PIPE)
proc, stdout, stderr = ret
if proc.returncode == 0:
- success, err = True, "Correct answer"
+ success, err = True, None
mark_fraction = float(self.weight) if self.partial_grading else 0.0
else:
err = stdout + "\n" + stderr
diff --git a/yaksh/models.py b/yaksh/models.py
index d7e5964..94a3a55 100644
--- a/yaksh/models.py
+++ b/yaksh/models.py
@@ -1024,9 +1024,15 @@ class AnswerPaper(models.Model):
for answer in self.answers.all():
question = answer.question
if question in q_a:
- q_a[question].append(answer)
+ q_a[question].append({'answer': answer,
+ 'error_list': [e for e in json.loads(answer.error)]
+ }
+ )
else:
- q_a[question] = [answer]
+ q_a[question] = [{'answer': answer,
+ 'error_list': [e for e in json.loads(answer.error)]
+ }
+ ]
return q_a
def get_questions(self):
@@ -1061,20 +1067,20 @@ class AnswerPaper(models.Model):
For code questions success is True only if the answer is correct.
"""
- result = {'success': True, 'error': 'Incorrect answer', 'weight': 0.0}
+ result = {'success': True, 'error': ['Incorrect answer'], 'weight': 0.0}
correct = False
if user_answer is not None:
if question.type == 'mcq':
expected_answer = question.get_test_case(correct=True).options
if user_answer.strip() == expected_answer.strip():
correct = True
- result['error'] = 'Correct answer'
+ result['error'] = ['Correct answer']
elif question.type == 'mcc':
expected_answers = []
for opt in question.get_test_cases(correct=True):
expected_answers.append(opt.options)
if set(user_answer) == set(expected_answers):
- result['error'] = 'Correct answer'
+ result['error'] = ['Correct answer']
correct = True
elif question.type == 'code':
user_dir = self.user.profile.get_user_dir()
@@ -1152,9 +1158,7 @@ class StandardTestCase(TestCase):
"weight": self.weight}
def __str__(self):
- return u'Question: {0} | Test Case: {1}'.format(self.question,
- self.test_case
- )
+ return u'Standard TestCase | Test Case: {0}'.format(self.test_case)
class StdIOBasedTestCase(TestCase):
@@ -1169,9 +1173,9 @@ class StdIOBasedTestCase(TestCase):
"weight": self.weight}
def __str__(self):
- return u'Question: {0} | Exp. Output: {1} | Exp. Input: {2}'\
- .format(self.question, self.expected_output,
- self.expected_input)
+ return u'StdIO Based Testcase | Exp. Output: {0} | Exp. Input: {1}'.format(
+ self.expected_output, self.expected_input
+ )
class McqTestCase(TestCase):
@@ -1182,9 +1186,7 @@ class McqTestCase(TestCase):
return {"test_case_type": "mcqtestcase", "options": self.options, "correct": self.correct}
def __str__(self):
- return u'Question: {0} | Correct: {1}'.format(self.question,
- self.correct
- )
+ return u'MCQ Testcase | Correct: {0}'.format(self.correct)
class HookTestCase(TestCase):
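
get_question_answers() now wraps each Answer in a dict carrying the deserialized error list, so templates can iterate per-test-case messages. A sketch of consuming the new shape (stand-in values, not the Django models):

    import json

    stored_error = json.dumps(['error1', 'error2'])  # as held on Answer.error
    entry = {'answer': 'Demo answer', 'error_list': json.loads(stored_error)}
    for err in entry['error_list']:
        print(err)
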
diff --git a/yaksh/python_assertion_evaluator.py b/yaksh/python_assertion_evaluator.py
index 4d44838..749a6ec 100644
--- a/yaksh/python_assertion_evaluator.py
+++ b/yaksh/python_assertion_evaluator.py
@@ -75,9 +75,14 @@ class PythonAssertionEvaluator(BaseEvaluator):
type, value, tb = sys.exc_info()
info = traceback.extract_tb(tb)
fname, lineno, func, text = info[-1]
- text = str(self.test_case).splitlines()[lineno-1]
- err = ("-----\nExpected Test Case:\n{0}\n"
- "Error - {1} {2} in: {3}\n-----").format(self.test_case, type.__name__, str(value), text)
+ text = str(self.test_case)
+ err = "Expected Test Case:\n{0}\n" \
+ "Error - {1} {2} in: {3}\n".format(
+ self.test_case,
+ type.__name__,
+ str(value),
+ text
+ )
except TimeoutException:
raise
except Exception:
@@ -85,7 +90,7 @@ class PythonAssertionEvaluator(BaseEvaluator):
err = "Error in Test case: {0}".format(msg)
else:
success = True
- err = '-----\nCorrect answer\nTest Case: {0}\n-----'.format(self.test_case)
+ err = None
mark_fraction = float(self.weight) if self.partial_grading else 0.0
del tb
return success, err, mark_fraction
diff --git a/yaksh/python_stdio_evaluator.py b/yaksh/python_stdio_evaluator.py
index da0c954..67f57a9 100644
--- a/yaksh/python_stdio_evaluator.py
+++ b/yaksh/python_stdio_evaluator.py
@@ -70,7 +70,7 @@ class PythonStdIOEvaluator(BaseEvaluator):
tb = None
if self.output_value == self.expected_output:
success = True
- err = "Correct answer"
+ err = None
mark_fraction = self.weight
else:
success = False
diff --git a/yaksh/scilab_code_evaluator.py b/yaksh/scilab_code_evaluator.py
index cc3c401..bf16c84 100644
--- a/yaksh/scilab_code_evaluator.py
+++ b/yaksh/scilab_code_evaluator.py
@@ -69,7 +69,7 @@ class ScilabCodeEvaluator(BaseEvaluator):
# Clean output
stdout = self._strip_output(stdout)
if proc.returncode == 5:
- success, err = True, "Correct answer"
+ success, err = True, None
test_case_weight = float(self.weight) if self.partial_grading else 0.0
else:
err = add_err + stdout
diff --git a/yaksh/stdio_evaluator.py b/yaksh/stdio_evaluator.py
index 106facd..fb9dfb3 100644
--- a/yaksh/stdio_evaluator.py
+++ b/yaksh/stdio_evaluator.py
@@ -21,7 +21,7 @@ class StdIOEvaluator(BaseEvaluator):
format(expected_input, repr(expected_output))
if output_err == '':
if user_output == expected_output:
- success, err = True, "Correct answer"
+ success, err = True, None
else:
err = " Incorrect answer\n" + error_msg +\
"\n Your output is {0}".format(repr(user_output))
diff --git a/yaksh/templates/yaksh/grade_user.html b/yaksh/templates/yaksh/grade_user.html
index 38f31ca..6fb8187 100644
--- a/yaksh/templates/yaksh/grade_user.html
+++ b/yaksh/templates/yaksh/grade_user.html
@@ -118,6 +118,7 @@ Status : <b style="color: green;"> Passed </b><br/>
method="post">
{% csrf_token %}
{% for question, answers in paper.get_question_answers.items %}
+
<div class="panel panel-info">
<div class="panel-heading">
<strong> Details: {{forloop.counter}}. {{ question.summary }}
@@ -134,44 +135,31 @@ Status : <b style="color: green;"> Passed </b><br/>
{% else %}
<h5> <u>Test cases: </u></h5>
{% for testcase in question.get_test_cases %}
- <br/><strong>{{ forloop.counter }}. {{ testcase.test_case }}</strong>
+ <br/><strong>{{ forloop.counter }}. {{ testcase }}</strong>
{% endfor %}
{%endif%}
</div>
</div>
- {% if question.type == "mcq" or question.type == "mcc" %}
- {% if "Correct answer" in answers.0.error %}
- <div class="panel panel-success">
- {% else %}
- <div class="panel panel-danger">
- {% endif %}
- <div class="panel-heading">
- Autocheck: {{ answers.0.error }}
- </div>
- <div class="panel-body">
- <h5><u>Student answer:</u></h5>
- <pre><code>{{forloop.counter}}. {{ answers.0 }}</code></pre>
- </div>
- </div>
- {% else %}
<h5>Student answer: </h5>
- {% for answer in answers %}
- {% if not answer.skipped %}
- {% if answer.correct %}
+ {% for ans in answers %}
+ {% if ans.answer.correct %}
<div class="panel panel-success">
+ <div class="panel-heading">Correct:
{% else %}
<div class="panel panel-danger">
+ <div class="panel-heading">Error:
{% endif %}
- <div class="panel-heading">Autocheck: {{ answer.error }}</div>
- <div class="panel-body"><pre><code>{{ answer.answer.strip }}</code></pre></div>
+ {% for err in ans.error_list %}
+ <div><pre>{{ err }}</pre></div>
+ {% endfor %}
+ </div>
+ <div class="panel-body"><pre><code>{{ ans.answer.answer.strip }}</code></pre></div>
</div>
- {% endif %}
{% endfor %}
- {% endif %}
{% with answers|last as answer %}
Marks: <input id="q{{ question.id }}" type="text"
name="q{{ question.id }}_marks" size="4"
- value="{{ answer.marks }}"><br><br>
+ value="{{ answer.0.marks }}"><br><br>
{% endwith %}
<hr/>
{% endfor %} {# for question, answers ... #}
diff --git a/yaksh/templates/yaksh/question.html b/yaksh/templates/yaksh/question.html
index 74ac786..0279f0d 100644
--- a/yaksh/templates/yaksh/question.html
+++ b/yaksh/templates/yaksh/question.html
@@ -162,16 +162,32 @@ function call_skip(url)
<div class="panel-body">
{% if question.type == "mcq" %}
{% if error_message %}
- <p>{{ error_message }}</p>
- {% endif %}
+ <p>
+ <div class="panel panel-danger">
+ <div class="panel-heading">
+ {% for err in error_message %}
+ {{ err }}
+ {% endfor %}
+ </div>
+ </div>
+ </p>
+ {% endif %}
{% for test_case in test_cases %}
<input name="answer" type="radio" value="{{ test_case.options }}" />{{ test_case.options }} <br/>
{% endfor %}
{% endif %}
{% if question.type == "mcc" %}
{% if error_message %}
- <p>{{ error_message }}</p>
- {% endif %}
+ <p>
+ <div class="panel panel-danger">
+ <div class="panel-heading">
+ {% for err in error_message %}
+ {{ err }}
+ {% endfor %}
+ </div>
+ </div>
+ </p>
+ {% endif %}
{% for test_case in test_cases %}
<input name="answer" type="checkbox" value="{{ test_case.options }}"> {{ test_case.options }}
<br>
@@ -209,11 +225,19 @@ function call_skip(url)
</div>
{% if question.type == "code" %}
<div class="panel-footer">
+
{% if error_message %}
- <p> Output Message</p>
- <div class="alert alert-danger" role="alert">
- <textarea style="width:100%" class="error" readonly="yes">{{ error_message }}</textarea>
- </div>
+ {% for error in error_message %}
+ {% if error == "Correct answer" %}
+ <div class="panel panel-success">
+ {% else %}
+ <div class="panel panel-danger">
+ {% endif %}
+ <div class="panel-heading">Testcase No. {{ forloop.counter }}</div>
+ <div class="panel-body"><pre><code>{{ error }}</code></pre></div>
+ </div>
+ {% endfor %}
+
{% endif %}
</div>
{% endif %}
diff --git a/yaksh/templates/yaksh/user_data.html b/yaksh/templates/yaksh/user_data.html
index 378e7fd..856433d 100644
--- a/yaksh/templates/yaksh/user_data.html
+++ b/yaksh/templates/yaksh/user_data.html
@@ -99,7 +99,13 @@ User IP address: {{ paper.user_ip }}
{% else %}
<div class="panel panel-danger">
{% endif %}
- <div class="panel-heading">Autocheck: {{ answer.error }}</div>
+ <div class="panel-heading">
+ {% for error in error %}
+ <div class="panel-body">
+ <pre><code>{{ error }}</code></pre>
+ </div>
+ {% endfor %}
+ </div>
<div class="panel-body"><pre><code>{{ answer.answer.strip }}</code></pre></div>
</div>
{% endif %}
diff --git a/yaksh/test_models.py b/yaksh/test_models.py
index 317c832..6764dd0 100644
--- a/yaksh/test_models.py
+++ b/yaksh/test_models.py
@@ -487,12 +487,14 @@ class AnswerPaperTestCases(unittest.TestCase):
# answers for the Answer Paper
self.answer_right = Answer(question=Question.objects.get(id=1),
answer="Demo answer",
- correct=True, marks=1
+ correct=True, marks=1,
+ error=json.dumps([])
)
self.answer_wrong = Answer(question=Question.objects.get(id=2),
answer="My answer",
correct=False,
- marks=0
+ marks=0,
+ error=json.dumps(['error1', 'error2'])
)
self.answer_right.save()
self.answer_wrong.save()
@@ -550,7 +552,7 @@ class AnswerPaperTestCases(unittest.TestCase):
# Then
self.assertTrue(correct)
self.assertTrue(result['success'])
- self.assertEqual(result['error'], 'Correct answer')
+ self.assertEqual(result['error'], ['Correct answer'])
self.answer.correct = True
self.answer.marks = 1
@@ -587,7 +589,6 @@ class AnswerPaperTestCases(unittest.TestCase):
# Then
self.assertTrue(correct)
self.assertTrue(result['success'])
- self.assertEqual(result['error'], 'Correct answer')
self.answer.correct = True
self.answer.marks = 1
@@ -729,8 +730,9 @@ class AnswerPaperTestCases(unittest.TestCase):
""" Test get_question_answer() method of Answer Paper"""
answered = self.answerpaper.get_question_answers()
first_answer = list(answered.values())[0][0]
- self.assertEqual(first_answer.answer, 'Demo answer')
- self.assertTrue(first_answer.correct)
+ first_answer_obj = first_answer['answer']
+ self.assertEqual(first_answer_obj.answer, 'Demo answer')
+ self.assertTrue(first_answer_obj.correct)
self.assertEqual(len(answered), 2)
def test_is_answer_correct(self):
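
Because Answer.error now holds a JSON-encoded list, fixtures must serialize it explicitly, as the updated setUp does. The round trip is symmetric (illustrative):

    import json

    stored = json.dumps(['error1', 'error2'])
    assert json.loads(stored) == ['error1', 'error2']
    assert json.loads(json.dumps([])) == []
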
diff --git a/yaksh/tests/test_code_server.py b/yaksh/tests/test_code_server.py
index d46c9dd..19698a5 100644
--- a/yaksh/tests/test_code_server.py
+++ b/yaksh/tests/test_code_server.py
@@ -55,7 +55,7 @@ class TestCodeServer(unittest.TestCase):
# Then
data = json.loads(result)
self.assertFalse(data['success'])
- self.assertTrue('infinite loop' in data['error'])
+ self.assertTrue('infinite loop' in data['error'][0])
def test_correct_answer(self):
# Given
@@ -77,7 +77,6 @@ class TestCodeServer(unittest.TestCase):
# Then
data = json.loads(result)
self.assertTrue(data['success'])
- self.assertIn('Correct answer', data['error'])
def test_wrong_answer(self):
# Given
@@ -99,7 +98,7 @@ class TestCodeServer(unittest.TestCase):
# Then
data = json.loads(result)
self.assertFalse(data['success'])
- self.assertTrue('AssertionError' in data['error'])
+ self.assertTrue('AssertionError' in data['error'][0])
def test_multiple_simultaneous_hits(self):
# Given
@@ -139,7 +138,7 @@ class TestCodeServer(unittest.TestCase):
for i in range(N):
data = results.get()
self.assertFalse(data['success'])
- self.assertTrue('infinite loop' in data['error'])
+ self.assertTrue('infinite loop' in data['error'][0])
def test_server_pool_status(self):
# Given
diff --git a/yaksh/views.py b/yaksh/views.py
index 89274df..7ecf6aa 100644
--- a/yaksh/views.py
+++ b/yaksh/views.py
@@ -430,7 +430,8 @@ def skip(request, q_id, next_q=None, attempt_num=None, questionpaper_id=None):
if request.method == 'POST' and question.type == 'code':
user_code = request.POST.get('answer')
new_answer = Answer(question=question, answer=user_code,
- correct=False, skipped=True)
+ correct=False, skipped=True,
+ error=json.dumps([]))
new_answer.save()
paper.answers.add(new_answer)
if next_q is not None:
@@ -448,69 +449,68 @@ def check(request, q_id, attempt_num=None, questionpaper_id=None):
user = request.user
paper = get_object_or_404(AnswerPaper, user=request.user, attempt_number=attempt_num,
question_paper=questionpaper_id)
- question = get_object_or_404(Question, pk=q_id)
- if question in paper.questions_answered.all():
+ current_question = get_object_or_404(Question, pk=q_id)
+ if current_question in paper.questions_answered.all():
next_q = paper.next_question(q_id)
return show_question(request, next_q, paper)
if request.method == 'POST':
snippet_code = request.POST.get('snippet')
# Add the answer submitted, regardless of it being correct or not.
- if question.type == 'mcq':
+ if current_question.type == 'mcq':
user_answer = request.POST.get('answer')
- elif question.type == 'mcc':
+ elif current_question.type == 'mcc':
user_answer = request.POST.getlist('answer')
- elif question.type == 'upload':
+ elif current_question.type == 'upload':
assign = AssignmentUpload()
assign.user = user.profile
- assign.assignmentQuestion = question
+ assign.assignmentQuestion = current_question
# if time-up at upload question then the form is submitted without
# validation
if 'assignment' in request.FILES:
assign.assignmentFile = request.FILES['assignment']
assign.save()
user_answer = 'ASSIGNMENT UPLOADED'
- next_q = paper.completed_question(question.id)
+ next_q = paper.completed_question(current_question.id)
return show_question(request, next_q, paper)
else:
user_code = request.POST.get('answer')
user_answer = snippet_code + "\n" + user_code if snippet_code else user_code
- new_answer = Answer(question=question, answer=user_answer,
- correct=False)
+ if not user_answer:
+ msg = ["Please submit a valid option or code"]
+ return show_question(request, current_question, paper, msg)
+ new_answer = Answer(question=current_question, answer=user_answer,
+ correct=False, error=json.dumps([]))
new_answer.save()
paper.answers.add(new_answer)
- if not user_answer:
- msg = "Please submit a valid option or code"
- return show_question(request, question, paper, msg)
# If we were not skipped, we were asked to check. For any non-mcq
# questions, we obtain the results via XML-RPC with the code executed
# safely in a separate process (the code_server.py) running as nobody.
- json_data = question.consolidate_answer_data(user_answer) \
- if question.type == 'code' else None
- correct, result = paper.validate_answer(user_answer, question, json_data)
- if correct:
- new_answer.marks = (question.points * result['weight'] /
- question.get_maximum_test_case_weight()) \
- if question.partial_grading and question.type == 'code' else question.points
+ json_data = current_question.consolidate_answer_data(user_answer) \
+ if current_question.type == 'code' else None
+ correct, result = paper.validate_answer(user_answer, current_question, json_data)
+ if correct or result.get('success'):
+ new_answer.marks = (current_question.points * result['weight'] /
+ current_question.get_maximum_test_case_weight()) \
+ if current_question.partial_grading and current_question.type == 'code' else current_question.points
new_answer.correct = correct
- new_answer.error = result.get('error')
+ error_message = None
+ new_answer.error = json.dumps(result.get('error'))
+ next_question = paper.completed_question(current_question.id)
else:
- new_answer.error = result.get('error')
- new_answer.marks = (question.points * result['weight'] /
- question.get_maximum_test_case_weight()) \
- if question.partial_grading and question.type == 'code' else 0
+ new_answer.marks = (current_question.points * result['weight'] /
+ current_question.get_maximum_test_case_weight()) \
+ if current_question.partial_grading and current_question.type == 'code' else 0
+ error_message = result.get('error')
+ new_answer.error = json.dumps(result.get('error'))
+ next_question = current_question if current_question.type == 'code' \
+ else paper.completed_question(current_question.id)
new_answer.save()
paper.update_marks('inprogress')
paper.set_end_time(timezone.now())
- if not result.get('success'): # Should only happen for non-mcq questions.
- new_answer.answer = user_code
- new_answer.save()
- return show_question(request, question, paper, result.get('error'))
- else:
- next_q = paper.completed_question(question.id)
- return show_question(request, next_q, paper)
+ return show_question(request, next_question, paper, error_message)
else:
- return show_question(request, question, paper)
+ return show_question(request, current_question, paper)
@@ -1036,7 +1036,7 @@ def grade_user(request, quiz_id=None, user_id=None, attempt_number=None):
user = User.objects.get(id=user_id)
data = AnswerPaper.objects.get_user_data(user, questionpaper_id,
attempt_number
- )
+ )
context = {'data': data, "quiz_id": quiz_id, "users": user_details,
"attempts": attempts, "user_id": user_id
@@ -1044,7 +1044,7 @@ def grade_user(request, quiz_id=None, user_id=None, attempt_number=None):
if request.method == "POST":
papers = data['papers']
for paper in papers:
- for question, answers in six.iteritems(paper.get_question_answers()):
+ for question, answers, errors in six.iteritems(paper.get_question_answers()):
marks = float(request.POST.get('q%d_marks' % question.id, 0))
answers = answers[-1]
answers.set_marks(marks)
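
check() now saves the answer before validation, JSON-encodes the error list, and routes failed code answers back to the same question while other question types advance. A simplified sketch of the routing decision (hypothetical helper; the real view also updates marks and timers):

    def next_step(current_question, paper, correct, result):
        if correct or result.get('success'):
            return paper.completed_question(current_question.id), None
        if current_question.type == 'code':
            # Failed code answers return to the same question with errors.
            return current_question, result.get('error')
        return paper.completed_question(current_question.id), result.get('error')
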
diff --git a/yaksh/xmlrpc_clients.py b/yaksh/xmlrpc_clients.py
index bb8260d..fbcd6a5 100644
--- a/yaksh/xmlrpc_clients.py
+++ b/yaksh/xmlrpc_clients.py
@@ -69,7 +69,8 @@ class CodeServerProxy(object):
except ConnectionError:
result = json.dumps({'success': False,
'weight': 0.0,
- 'error': 'Unable to connect to any code servers!'})
+ 'error': ['Unable to connect to any code servers!']
+ })
return result
def _get_server(self):
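
The connection-error fallback now matches the list-typed error contract, so consumers can iterate 'error' uniformly whether the result came from a code server or from this fallback (illustrative round trip):

    import json

    fallback = json.dumps({'success': False, 'weight': 0.0,
                           'error': ['Unable to connect to any code servers!']})
    for err in json.loads(fallback)['error']:
        print(err)
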