summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorPrabhu Ramachandran2017-11-10 17:50:48 +0530
committerGitHub2017-11-10 17:50:48 +0530
commit96f8e0af5b39338741c758de918e32e02b95f0c8 (patch)
tree80fb17501e7995ed3abb34ac3b0d62dd8decc560
parentcfcb2ed39c724639fe17338e29e327d08ae641b2 (diff)
parent95f862caee8ca6077ee8f9a8fc88d9ca44db1cdf (diff)
downloadonline_test-96f8e0af5b39338741c758de918e32e02b95f0c8.tar.gz
online_test-96f8e0af5b39338741c758de918e32e02b95f0c8.tar.bz2
online_test-96f8e0af5b39338741c758de918e32e02b95f0c8.zip
Merge pull request #380 from maheshgudi/beautify_assertions
Prettify assertion error output
-rw-r--r--yaksh/error_messages.py (renamed from yaksh/compare_stdio.py)29
-rw-r--r--yaksh/evaluator_tests/test_bash_evaluation.py8
-rw-r--r--yaksh/evaluator_tests/test_c_cpp_evaluation.py16
-rw-r--r--yaksh/evaluator_tests/test_java_evaluation.py12
-rw-r--r--yaksh/evaluator_tests/test_python_evaluation.py154
-rw-r--r--yaksh/evaluator_tests/test_python_stdio_evaluator.py2
-rw-r--r--yaksh/evaluator_tests/test_scilab_evaluation.py4
-rw-r--r--yaksh/grader.py19
-rw-r--r--yaksh/hook_evaluator.py25
-rw-r--r--yaksh/python_assertion_evaluator.py36
-rw-r--r--yaksh/python_stdio_evaluator.py2
-rw-r--r--yaksh/static/yaksh/css/exam.css2
-rw-r--r--yaksh/stdio_evaluator.py2
-rw-r--r--yaksh/templates/exam.html45
-rw-r--r--yaksh/templates/yaksh/grade_user.html31
-rw-r--r--yaksh/templates/yaksh/user_data.html32
-rw-r--r--yaksh/templates/yaksh/view_answerpaper.html31
-rw-r--r--yaksh/tests/test_code_server.py6
18 files changed, 297 insertions, 159 deletions
diff --git a/yaksh/compare_stdio.py b/yaksh/error_messages.py
index c4076de..7ea8618 100644
--- a/yaksh/compare_stdio.py
+++ b/yaksh/error_messages.py
@@ -3,7 +3,24 @@ try:
except ImportError:
from itertools import izip_longest as zip_longest
-
+def prettify_exceptions(exception, message, traceback=None, testcase=None):
+ err = {"type": "assertion",
+ "exception": exception,
+ "traceback": traceback,
+ "message": message
+ }
+ if exception == 'RuntimeError' or exception == 'RecursionError':
+ err["traceback"] = None
+
+ if exception == 'AssertionError':
+ value = ("Expected answer from the"
+ + " test case did not match the output")
+ err["message"] = value
+ err["traceback"] = None
+ if testcase:
+ err["test_case"] = testcase
+ return err
+
def _get_incorrect_user_lines(exp_lines, user_lines):
err_line_numbers = []
for line_no, (expected_line, user_line) in \
@@ -16,17 +33,19 @@ def _get_incorrect_user_lines(exp_lines, user_lines):
def compare_outputs(expected_output, user_output, given_input=None):
given_lines = user_output.splitlines()
exp_lines = expected_output.splitlines()
- msg = {"given_input":given_input,
+ msg = {"type": "stdio",
+ "given_input": given_input,
"expected_output": exp_lines,
- "user_output":given_lines
- }
+ "user_output": given_lines
+ }
ng = len(given_lines)
ne = len(exp_lines)
err_line_numbers = _get_incorrect_user_lines(exp_lines, given_lines)
msg["error_line_numbers"] = err_line_numbers
if ng != ne:
msg["error_msg"] = ("Incorrect Answer: "
- + "We had expected {} number of lines. ".format(ne)
+ + "We had expected {} number of lines. "\
+ .format(ne)
+ "We got {} number of lines.".format(ng)
)
return False, msg
diff --git a/yaksh/evaluator_tests/test_bash_evaluation.py b/yaksh/evaluator_tests/test_bash_evaluation.py
index 2faa7bf..5542710 100644
--- a/yaksh/evaluator_tests/test_bash_evaluation.py
+++ b/yaksh/evaluator_tests/test_bash_evaluation.py
@@ -104,7 +104,9 @@ class BashAssertionEvaluationTestCases(EvaluatorBaseTest):
# Then
self.assertFalse(result.get("success"))
- self.assert_correct_output(self.timeout_msg, result.get("error"))
+ self.assert_correct_output(self.timeout_msg,
+ result.get("error")[0]["message"]
+ )
parent_proc = Process(os.getpid()).children()
if parent_proc:
children_procs = Process(parent_proc[0].pid)
@@ -533,7 +535,9 @@ class BashHookEvaluationTestCases(EvaluatorBaseTest):
# Then
self.assertFalse(result.get('success'))
- self.assert_correct_output(self.timeout_msg, result.get('error'))
+ self.assert_correct_output(self.timeout_msg,
+ result.get("error")[0]["message"]
+ )
parent_proc = Process(os.getpid()).children()
if parent_proc:
children_procs = Process(parent_proc[0].pid)
diff --git a/yaksh/evaluator_tests/test_c_cpp_evaluation.py b/yaksh/evaluator_tests/test_c_cpp_evaluation.py
index 0898b3f..162d90c 100644
--- a/yaksh/evaluator_tests/test_c_cpp_evaluation.py
+++ b/yaksh/evaluator_tests/test_c_cpp_evaluation.py
@@ -151,7 +151,9 @@ class CAssertionEvaluationTestCases(EvaluatorBaseTest):
# Then
self.assertFalse(result.get("success"))
- self.assert_correct_output(self.timeout_msg, result.get("error"))
+ self.assert_correct_output(self.timeout_msg,
+ result.get("error")[0]["message"]
+ )
parent_proc = Process(os.getpid()).children()
if parent_proc:
children_procs = Process(parent_proc[0].pid)
@@ -406,7 +408,9 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest):
# Then
self.assertFalse(result.get("success"))
- self.assert_correct_output(self.timeout_msg, result.get("error"))
+ self.assert_correct_output(self.timeout_msg,
+ result.get("error")[0]["message"]
+ )
parent_proc = Process(os.getpid()).children()
if parent_proc:
children_procs = Process(parent_proc[0].pid)
@@ -616,7 +620,9 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest):
# Then
self.assertFalse(result.get("success"))
- self.assert_correct_output(self.timeout_msg, result.get("error"))
+ self.assert_correct_output(self.timeout_msg,
+ result.get("error")[0]["message"]
+ )
def test_cpp_only_stdout(self):
# Given
@@ -976,7 +982,9 @@ class CppHookEvaluationTestCases(EvaluatorBaseTest):
# Then
self.assertFalse(result.get('success'))
- self.assert_correct_output(self.timeout_msg, result.get('error'))
+ self.assert_correct_output(self.timeout_msg,
+ result.get("error")[0]["message"]
+ )
parent_proc = Process(os.getpid()).children()
if parent_proc:
children_procs = Process(parent_proc[0].pid)
diff --git a/yaksh/evaluator_tests/test_java_evaluation.py b/yaksh/evaluator_tests/test_java_evaluation.py
index 5ddf8cd..35b64d0 100644
--- a/yaksh/evaluator_tests/test_java_evaluation.py
+++ b/yaksh/evaluator_tests/test_java_evaluation.py
@@ -160,7 +160,9 @@ class JavaAssertionEvaluationTestCases(EvaluatorBaseTest):
# Then
self.assertFalse(result.get("success"))
- self.assert_correct_output(self.timeout_msg, result.get("error"))
+ self.assert_correct_output(self.timeout_msg,
+ result.get("error")[0]["message"]
+ )
parent_proc = Process(os.getpid()).children()
if parent_proc:
children_procs = Process(parent_proc[0].pid)
@@ -405,7 +407,9 @@ class JavaStdIOEvaluationTestCases(EvaluatorBaseTest):
# Then
self.assertFalse(result.get("success"))
- self.assert_correct_output(self.timeout_msg, result.get("error"))
+ self.assert_correct_output(self.timeout_msg,
+ result.get("error")[0]["message"]
+ )
parent_proc = Process(os.getpid()).children()
if parent_proc:
children_procs = Process(parent_proc[0].pid)
@@ -845,7 +849,9 @@ class JavaHookEvaluationTestCases(EvaluatorBaseTest):
# Then
self.assertFalse(result.get('success'))
- self.assert_correct_output(self.timeout_msg, result.get('error'))
+ self.assert_correct_output(self.timeout_msg,
+ result.get("error")[0]["message"]
+ )
parent_proc = Process(os.getpid()).children()
if parent_proc:
children_procs = Process(parent_proc[0].pid)
diff --git a/yaksh/evaluator_tests/test_python_evaluation.py b/yaksh/evaluator_tests/test_python_evaluation.py
index a2faf77..71d7732 100644
--- a/yaksh/evaluator_tests/test_python_evaluation.py
+++ b/yaksh/evaluator_tests/test_python_evaluation.py
@@ -24,9 +24,15 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
f.write('2'.encode('ascii'))
tmp_in_dir_path = tempfile.mkdtemp()
self.in_dir = tmp_in_dir_path
- self.test_case_data = [{"test_case_type": "standardtestcase", "test_case": 'assert(add(1,2)==3)', 'weight': 0.0},
- {"test_case_type": "standardtestcase", "test_case": 'assert(add(-1,2)==1)', 'weight': 0.0},
- {"test_case_type": "standardtestcase", "test_case": 'assert(add(-1,-2)==-3)', 'weight': 0.0},
+ self.test_case_data = [{"test_case_type": "standardtestcase",
+ "test_case": 'assert(add(1,2)==3)',
+ 'weight': 0.0},
+ {"test_case_type": "standardtestcase",
+ "test_case": 'assert(add(-1,2)==1)',
+ 'weight': 0.0},
+ {"test_case_type": "standardtestcase",
+ "test_case": 'assert(add(-1,-2)==-3)',
+ 'weight': 0.0},
]
self.timeout_msg = ("Code took more than {0} seconds to run. "
"You probably have an infinite loop in"
@@ -76,23 +82,29 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
# Then
self.assertFalse(result.get('success'))
- self.assert_correct_output('AssertionError in:\n assert(add(1,2)==3)',
- result.get('error')
- )
- self.assert_correct_output('AssertionError in:\n assert(add(-1,2)==1)',
- result.get('error')
- )
- self.assert_correct_output('AssertionError in:\n assert(add(-1,-2)==-3)',
- result.get('error')
- )
+ given_test_case_list = [tc["test_case"] for tc in self.test_case_data]
+ for error in result.get("error"):
+ self.assertEqual(error['exception'], 'AssertionError')
+ self.assertEqual(error['message'],
+ "Expected answer from the test case did not match the output"
+ )
+ error_testcase_list = [tc['test_case'] for tc in result.get('error')]
+ self.assertEqual(error_testcase_list, given_test_case_list)
+
def test_partial_incorrect_answer(self):
# Given
user_answer = "def add(a,b):\n\treturn abs(a) + abs(b)"
- test_case_data = [{"test_case_type": "standardtestcase", "test_case": 'assert(add(-1,2)==1)', 'weight': 1.0},
- {"test_case_type": "standardtestcase", "test_case": 'assert(add(-1,-2)==-3)', 'weight': 1.0},
- {"test_case_type": "standardtestcase", "test_case": 'assert(add(1,2)==3)', 'weight': 2.0}
- ]
+ test_case_data = [{"test_case_type": "standardtestcase",
+ "test_case": 'assert(add(-1,2)==1)',
+ 'weight': 1.0},
+ {"test_case_type": "standardtestcase",
+ "test_case": 'assert(add(-1,-2)==-3)',
+ 'weight': 1.0},
+ {"test_case_type": "standardtestcase",
+ "test_case": 'assert(add(1,2)==3)',
+ 'weight': 2.0}
+ ]
kwargs = {
'metadata': {
'user_answer': user_answer,
@@ -110,13 +122,15 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
# Then
self.assertFalse(result.get('success'))
self.assertEqual(result.get('weight'), 2.0)
- self.assert_correct_output('AssertionError in:\n assert(add(-1,2)==1)',
- result.get('error')
- )
- self.assert_correct_output('AssertionError in:\n assert(add(-1,-2)==-3)',
- result.get('error')
- )
-
+ given_test_case_list = [tc["test_case"] for tc in self.test_case_data]
+ given_test_case_list.remove('assert(add(1,2)==3)')
+ for error in result.get("error"):
+ self.assertEqual(error['exception'], 'AssertionError')
+ self.assertEqual(error['message'],
+ "Expected answer from the test case did not match the output"
+ )
+ error_testcase_list = [tc['test_case'] for tc in result.get('error')]
+ self.assertEqual(error_testcase_list, given_test_case_list)
def test_infinite_loop(self):
# Given
user_answer = "def add(a, b):\n\twhile True:\n\t\tpass"
@@ -136,7 +150,9 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
# Then
self.assertFalse(result.get('success'))
- self.assert_correct_output(self.timeout_msg, result.get('error'))
+ self.assert_correct_output(self.timeout_msg,
+ result.get("error")[0]["message"]
+ )
def test_syntax_error(self):
# Given
@@ -165,14 +181,12 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
# When
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- error_as_str = ''.join(result.get("error"))
- err = error_as_str.splitlines()
+ err = result.get("error")[0]['traceback']
# Then
self.assertFalse(result.get("success"))
- self.assertEqual(5, len(err))
for msg in syntax_error_msg:
- self.assert_correct_output(msg, result.get("error"))
+ self.assert_correct_output(msg, err)
def test_indent_error(self):
# Given
@@ -200,13 +214,15 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
# When
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- err = result.get("error")[0].splitlines()
+ err = result.get("error")[0]["traceback"].splitlines()
# Then
self.assertFalse(result.get("success"))
self.assertEqual(5, len(err))
for msg in indent_error_msg:
- self.assert_correct_output(msg, result.get("error"))
+ self.assert_correct_output(msg,
+ result.get("error")[0]['traceback']
+ )
def test_name_error(self):
# Given
@@ -231,14 +247,9 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
# When
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- error_as_str = ''.join(result.get("error"))
- err = error_as_str.splitlines()
-
- # Then
- self.assertFalse(result.get("success"))
- self.assertEqual(25, len(err))
+ err = result.get("error")[0]["traceback"]
for msg in name_error_msg:
- self.assert_correct_output(msg, result.get("error"))
+ self.assertIn(msg, err)
def test_recursion_error(self):
# Given
@@ -246,10 +257,7 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
def add(a, b):
return add(3, 3)
""")
- recursion_error_msg = ["Traceback",
- "maximum recursion depth exceeded"
- ]
-
+ recursion_error_msg = "maximum recursion depth exceeded"
kwargs = {
'metadata': {
'user_answer': user_answer,
@@ -263,13 +271,11 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
# When
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- error_as_str = ''.join(result.get("error"))
- err = error_as_str.splitlines()
+ err = result.get("error")[0]['message']
# Then
self.assertFalse(result.get("success"))
- for msg in recursion_error_msg:
- self.assert_correct_output(msg, result.get("error"))
+ self.assert_correct_output(recursion_error_msg, err)
def test_type_error(self):
# Given
@@ -296,14 +302,12 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
# When
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- error_as_str = ''.join(result.get("error"))
- err = error_as_str.splitlines()
+ err = result.get("error")[0]['traceback']
# Then
self.assertFalse(result.get("success"))
- self.assertEqual(25, len(err))
for msg in type_error_msg:
- self.assert_correct_output(msg, result.get("error"))
+ self.assert_correct_output(msg, err)
def test_value_error(self):
# Given
@@ -332,18 +336,19 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
# When
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- error_as_str = ''.join(result.get("error"))
- err = error_as_str.splitlines()
+ err = result.get("error")[0]['traceback']
# Then
self.assertFalse(result.get("success"))
- self.assertEqual(28, len(err))
for msg in value_error_msg:
- self.assert_correct_output(msg, result.get("error"))
+ self.assert_correct_output(msg, err)
def test_file_based_assert(self):
# Given
- self.test_case_data = [{"test_case_type": "standardtestcase", "test_case": "assert(ans()=='2')", "weight": 0.0}]
+ self.test_case_data = [{"test_case_type": "standardtestcase",
+ "test_case": "assert(ans()=='2')",
+ "weight": 0.0}
+ ]
self.file_paths = [(self.tmp_file, False)]
user_answer = dedent("""
def ans():
@@ -369,20 +374,17 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
self.assertTrue(result.get('success'))
def test_single_testcase_error(self):
- # Given
""" Tests the user answer with just an incorrect test case """
+ # Given
user_answer = "def palindrome(a):\n\treturn a == a[::-1]"
test_case_data = [{"test_case_type": "standardtestcase",
- "test_case": 's="abbb"\nasert palindrome(s)==False',
- "weight": 0.0
- }
+ "test_case": 's="abbb"\nasert palindrome(s)==False',
+ "weight": 0.0
+ }
]
syntax_error_msg = ["Traceback",
"call",
- "File",
- "line",
- "<string>",
"SyntaxError",
"invalid syntax"
]
@@ -399,14 +401,12 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
# When
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- error_as_str = ''.join(result.get("error"))
- err = error_as_str.splitlines()
+ err = result.get("error")[0]['traceback']
# Then
self.assertFalse(result.get("success"))
- self.assertEqual(13, len(err))
for msg in syntax_error_msg:
- self.assert_correct_output(msg, result.get("error"))
+ self.assert_correct_output(msg, err)
def test_multiple_testcase_error(self):
@@ -415,13 +415,11 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
# Given
user_answer = "def palindrome(a):\n\treturn a == a[::-1]"
test_case_data = [{"test_case_type": "standardtestcase",
- "test_case": 'assert(palindrome("abba")==True)',
- "weight": 0.0
- },
+ "test_case": 'assert(palindrome("abba")==True)',
+ "weight": 0.0},
{"test_case_type": "standardtestcase",
- "test_case": 's="abbb"\nassert palindrome(S)==False',
- "weight": 0.0
- }
+ "test_case": 's="abbb"\nassert palindrome(S)==False',
+ "weight": 0.0}
]
name_error_msg = ["Traceback",
"call",
@@ -441,14 +439,12 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
# When
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- error_as_str = ''.join(result.get("error"))
- err = error_as_str.splitlines()
+ err = result.get("error")[0]['traceback']
# Then
self.assertFalse(result.get("success"))
- self.assertEqual(11, len(err))
for msg in name_error_msg:
- self.assert_correct_output(msg, result.get("error"))
+ self.assertIn(msg, err)
def test_unicode_literal_bug(self):
# Given
@@ -674,7 +670,9 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest):
result = grader.evaluate(kwargs)
# Then
- self.assert_correct_output(timeout_msg, result.get('error'))
+ self.assert_correct_output(timeout_msg,
+ result.get("error")[0]["message"]
+ )
self.assertFalse(result.get('success'))
@@ -915,7 +913,9 @@ class PythonHookEvaluationTestCases(EvaluatorBaseTest):
# Then
self.assertFalse(result.get('success'))
- self.assert_correct_output(self.timeout_msg, result.get('error'))
+ self.assert_correct_output(self.timeout_msg,
+ result.get("error")[0]["message"]
+ )
def test_assignment_upload(self):
# Given
diff --git a/yaksh/evaluator_tests/test_python_stdio_evaluator.py b/yaksh/evaluator_tests/test_python_stdio_evaluator.py
index 8877544..9b8d702 100644
--- a/yaksh/evaluator_tests/test_python_stdio_evaluator.py
+++ b/yaksh/evaluator_tests/test_python_stdio_evaluator.py
@@ -1,4 +1,4 @@
-from yaksh.compare_stdio import compare_outputs
+from yaksh.error_messages import compare_outputs
def test_compare_outputs():
exp = "5\n5\n"
diff --git a/yaksh/evaluator_tests/test_scilab_evaluation.py b/yaksh/evaluator_tests/test_scilab_evaluation.py
index c3a1c83..f7a9925 100644
--- a/yaksh/evaluator_tests/test_scilab_evaluation.py
+++ b/yaksh/evaluator_tests/test_scilab_evaluation.py
@@ -137,7 +137,9 @@ class ScilabEvaluationTestCases(EvaluatorBaseTest):
result = grader.evaluate(kwargs)
self.assertFalse(result.get("success"))
- self.assert_correct_output(self.timeout_msg, result.get("error"))
+ self.assert_correct_output(self.timeout_msg,
+ result.get("error")[0]["message"]
+ )
parent_proc = Process(os.getpid()).children()
if parent_proc:
children_procs = Process(parent_proc[0].pid)
diff --git a/yaksh/grader.py b/yaksh/grader.py
index a9a3738..38cce8d 100644
--- a/yaksh/grader.py
+++ b/yaksh/grader.py
@@ -21,7 +21,7 @@ except ImportError:
# Local imports
from .settings import SERVER_TIMEOUT
from .language_registry import create_evaluator_instance
-
+from .error_messages import prettify_exceptions
MY_DIR = abspath(dirname(__file__))
registry = None
@@ -141,7 +141,8 @@ class Grader(object):
for idx, test_case_instance in enumerate(test_case_instances):
test_case_success = False
test_case_instance.compile_code()
- test_case_success, err, mark_fraction = test_case_instance.check_code()
+ eval_result = test_case_instance.check_code()
+ test_case_success, err, mark_fraction = eval_result
if test_case_success:
weight += mark_fraction * test_case_instance.weight
else:
@@ -154,16 +155,20 @@ class Grader(object):
test_case_instance.teardown()
except TimeoutException:
- error.append(self.timeout_msg)
- except OSError:
- msg = traceback.format_exc(limit=0)
- error.append("Error: {0}".format(msg))
+ error.append(prettify_exceptions("TimeoutException",
+ self.timeout_msg
+ )
+ )
except Exception:
exc_type, exc_value, exc_tb = sys.exc_info()
tb_list = traceback.format_exception(exc_type, exc_value, exc_tb)
if len(tb_list) > 2:
del tb_list[1:3]
- error.append("Error: {0}".format("".join(tb_list)))
+ error.append(prettify_exceptions(exc_type.__name__,
+ str(exc_value),
+ "".join(tb_list),
+ )
+ )
finally:
# Set back any original signal handler.
set_original_signal_handler(prev_handler)
diff --git a/yaksh/hook_evaluator.py b/yaksh/hook_evaluator.py
index f5364d6..41ef6e4 100644
--- a/yaksh/hook_evaluator.py
+++ b/yaksh/hook_evaluator.py
@@ -2,13 +2,13 @@
import sys
import traceback
import os
-import signal
import psutil
# Local imports
from .file_utils import copy_files, delete_files
from .base_evaluator import BaseEvaluator
from .grader import TimeoutException
+from .error_messages import prettify_exceptions
class HookEvaluator(BaseEvaluator):
@@ -60,19 +60,32 @@ class HookEvaluator(BaseEvaluator):
success = False
mark_fraction = 0.0
try:
- tb = None
_tests = compile(self.hook_code, '<string>', mode='exec')
hook_scope = {}
exec(_tests, hook_scope)
check = hook_scope["check_answer"]
- success, err, mark_fraction = check(self.user_answer)
+ try:
+ success, err, mark_fraction = check(self.user_answer)
+ except Exception:
+ raise
+
except TimeoutException:
processes = psutil.Process(os.getpid()).children(recursive=True)
for process in processes:
process.kill()
raise
except Exception:
- msg = traceback.format_exc(limit=0)
- err = "Error in Hook code: {0}".format(msg)
- del tb
+ exc_type, exc_value, exc_tb = sys.exc_info()
+ tb_list = traceback.format_exception(exc_type,
+ exc_value,
+ exc_tb
+ )
+ if len(tb_list) > 2:
+ del tb_list[1:3]
+ err = prettify_exceptions(exc_type.__name__,
+ str(exc_value),
+ "Error in Hook Code:\n"
+ + "".join(tb_list)
+ )
+
return success, err, mark_fraction
diff --git a/yaksh/python_assertion_evaluator.py b/yaksh/python_assertion_evaluator.py
index c8f2dd0..440f422 100644
--- a/yaksh/python_assertion_evaluator.py
+++ b/yaksh/python_assertion_evaluator.py
@@ -10,6 +10,7 @@ import importlib
from .file_utils import copy_files, delete_files
from .base_evaluator import BaseEvaluator
from .grader import TimeoutException
+from .error_messages import prettify_exceptions
class PythonAssertionEvaluator(BaseEvaluator):
@@ -68,39 +69,22 @@ class PythonAssertionEvaluator(BaseEvaluator):
success = False
mark_fraction = 0.0
try:
- tb = None
_tests = compile(self.test_case, '<string>', mode='exec')
exec(_tests, self.exec_scope)
except TimeoutException:
raise
except Exception:
- type, value, tb = sys.exc_info()
- info = traceback.extract_tb(tb)
- fname, lineno, func, text = info[-1]
- text = str(self.test_case)
-
- # Get truncated traceback
- err_tb_lines = traceback.format_exc().splitlines()
- stripped_tb_lines = []
- for line in err_tb_lines:
- line = re.sub(r'File\s+".*?",\s+line',
- 'File <file>, line',
- line
- )
- stripped_tb_lines.append(line)
- stripped_tb = '\n'.join(stripped_tb_lines[-10::])
-
- err = "Expected Test Case:\n{0}\n" \
- "Error Traceback - {1} {2} in:\n {3}\n{4}".format(
- self.test_case,
- type.__name__,
- str(value),
- text,
- stripped_tb
- )
+ exc_type, exc_value, exc_tb = sys.exc_info()
+ tb_list = traceback.format_exception(exc_type, exc_value, exc_tb)
+ if len(tb_list) > 2:
+ del tb_list[1:3]
+ err = prettify_exceptions(exc_type.__name__,
+ str(exc_value),
+ "".join(tb_list),
+ self.test_case
+ )
else:
success = True
err = None
mark_fraction = 1.0 if self.partial_grading else 0.0
- del tb
return success, err, mark_fraction
diff --git a/yaksh/python_stdio_evaluator.py b/yaksh/python_stdio_evaluator.py
index 2b443a7..b08103a 100644
--- a/yaksh/python_stdio_evaluator.py
+++ b/yaksh/python_stdio_evaluator.py
@@ -9,7 +9,7 @@ except ImportError:
# Local imports
from .file_utils import copy_files, delete_files
from .base_evaluator import BaseEvaluator
-from .compare_stdio import compare_outputs
+from .error_messages import compare_outputs
@contextmanager
diff --git a/yaksh/static/yaksh/css/exam.css b/yaksh/static/yaksh/css/exam.css
index fff904e..ec48a14 100644
--- a/yaksh/static/yaksh/css/exam.css
+++ b/yaksh/static/yaksh/css/exam.css
@@ -2,6 +2,6 @@ table td, table th { border: black solid 1px !important;
word-wrap: break-word !important;
white-space: pre-wrap !important;
}
-output{
+#stdio, #assertion {
table-layout: fixed
} \ No newline at end of file
diff --git a/yaksh/stdio_evaluator.py b/yaksh/stdio_evaluator.py
index 5e4ce18..55adb5c 100644
--- a/yaksh/stdio_evaluator.py
+++ b/yaksh/stdio_evaluator.py
@@ -5,7 +5,7 @@ import signal
# Local imports
from .base_evaluator import BaseEvaluator
from .grader import TimeoutException
-from .compare_stdio import compare_outputs
+from .error_messages import compare_outputs
class StdIOEvaluator(BaseEvaluator):
diff --git a/yaksh/templates/exam.html b/yaksh/templates/exam.html
index 9596c1c..a1f0df4 100644
--- a/yaksh/templates/exam.html
+++ b/yaksh/templates/exam.html
@@ -80,27 +80,50 @@
{% block main %}
{% endblock %}
</div>
+ <br/>
{% if question.type == 'code' or question.type == 'upload' %}
{% if error_message %}
<div class="row" id="error_panel">
{% for error in error_message %}
<div class="panel panel-danger">
- <div class="panel-heading">Testcase No. {{ forloop.counter }}</div>
+ <div class="panel-heading">Error No. {{ forloop.counter }}</div>
<div class="panel-body">
<div class="well well-sm">
- {% if not error.expected_output %}
+ {% if not error.type %}
<pre><code> {{error|safe}} </code></pre>
- {% else %}
- {% if error.given_input %}
- <table class="table table-bordered">
- <col width="30%">
- <tr class = "active">
- <td> For given Input value(s):</td>
- <td>{{error.given_input}}</td>
+ {% elif error.type == 'assertion' %}
+ {% if error.test_case %}
+ <strong> We tried your code with the following test case:</strong><br/></br>
+ <pre><code><strong style="color:#d9534f">{{error.test_case}}</strong></code></pre>
+ {% endif %}
+ <p> <b>The following error took place: </b></p>
+ <table class="table table-bordered" width="100%" id='assertion'>
+ <col width="30%">
+ <tr class = "active">
+ <td><b>Exception Name: </b></td>
+ <td><span style="color: #d9534f">{{error.exception}}</span></td>
+ </tr>
+ <tr>
+ <td><b>Exception Message: </b></td><td>{{error.message}}</td>
</tr>
- </table>
+ <tr>
+ {% if error.traceback %}
+ <td><b>Full Traceback: </b></td>
+ <td><pre>{{error.traceback}}</pre></td>
+ {% endif %}
+ </tr>
+ </table>
+ {% elif error.type == 'stdio' %}
+ {% if error.given_input %}
+ <table class="table table-bordered">
+ <col width="30%">
+ <tr class = "active">
+ <td> For given Input value(s):</td>
+ <td>{{error.given_input}}</td>
+ </tr>
+ </table>
{% endif %}
- <table class="table table-bordered" width="100%" id="output">
+ <table class="table table-bordered" width="100%" id="stdio">
<col width="10%">
<col width="40%">
<col width="40%">
diff --git a/yaksh/templates/yaksh/grade_user.html b/yaksh/templates/yaksh/grade_user.html
index 37bc788..3339177 100644
--- a/yaksh/templates/yaksh/grade_user.html
+++ b/yaksh/templates/yaksh/grade_user.html
@@ -218,9 +218,8 @@ Status : <b style="color: green;"> Passed </b><br/>
{% endif %}
{% with ans.error_list as err %}
{% for error in err %}
- {% if not error.expected_output %}
- <pre><code> {{error|safe}} </code></pre>
- {% else %}
+
+ {% if error.type == 'stdio' %}
<div class = "well well-sm">
{% if error.given_input %}
<table class="table table-bordered">
@@ -262,6 +261,32 @@ Status : <b style="color: green;"> Passed </b><br/>
</tr>
</table>
</div>
+ {% elif error.type == 'assertion' %}
+ {% if error.test_case %}
+                            <strong> We tried your code with the following test case:</strong><br/><br/>
+ <pre><code><strong style="color:#d9534f">{{error.test_case}}</strong></code></pre>
+ {% endif %}
+ <p> <b>The following error took place: </b></p>
+ <div class="well well-sm">
+ <table class="table table-bordered" width="100%">
+ <col width="30%">
+ <tr class = "active">
+ <td><b>Exception Name: </b></td>
+ <td><span style="color: #d9534f">{{error.exception}}</span></td>
+ </tr>
+ <tr>
+ <td><b>Exception Message: </b></td><td>{{error.message}}</td>
+ </tr>
+ <tr>
+ {% if error.traceback %}
+ <td><b>Full Traceback: </b></td>
+ <td><pre>{{error.traceback}}</pre></td>
+ {% endif %}
+ </tr>
+ </table>
+ </div> <!-- Closes well -->
+ {% else %}
+ <pre><code> {{error|safe}} </code></pre>
{% endif %}
{% endfor %}
{% endwith %}
diff --git a/yaksh/templates/yaksh/user_data.html b/yaksh/templates/yaksh/user_data.html
index 6dfaac3..a0219dd 100644
--- a/yaksh/templates/yaksh/user_data.html
+++ b/yaksh/templates/yaksh/user_data.html
@@ -136,12 +136,10 @@ User IP address: {{ paper.user_ip }}
<div class="panel-heading">Correct answer
{% else %}
<div class="panel panel-danger">
- <div class="panel-heading">Error
+ <div class="panel-heading">Error<br/>
{% with answer.error_list as err %}
{% for error in err %}
- {% if not error.expected_output %}
- <pre><code> {{error|safe}} </code></pre>
- {% else %}
+ {% if error.type == 'stdio' %}
<div class = "well well-sm">
{% if error.given_input %}
<table class="table table-bordered">
@@ -183,6 +181,32 @@ User IP address: {{ paper.user_ip }}
</tr>
</table>
</div>
+ {% elif error.type == 'assertion' %}
+ {% if error.test_case %}
+                              <strong> We tried your code with the following test case:</strong><br/><br/>
+ <pre><code><strong style="color:#d9534f">{{error.test_case}}</strong></code></pre>
+ {% endif %}
+ <p> <b>The following error took place: </b></p>
+ <div class="well well-sm">
+ <table class="table table-bordered" width="100%">
+ <col width="30%">
+ <tr class = "active">
+ <td><b>Exception Name: </b></td>
+ <td><span style="color: #d9534f">{{error.exception}}</span></td>
+ </tr>
+ <tr>
+ <td><b>Exception Message: </b></td><td>{{error.message}}</td>
+ </tr>
+ <tr>
+ {% if error.traceback %}
+ <td><b>Full Traceback: </b></td>
+ <td><pre>{{error.traceback}}</pre></td>
+ {% endif %}
+ </tr>
+ </table>
+ </div> <!-- Closes well -->
+ {% else %}
+ <pre><code> {{error|safe}} </code></pre>
{% endif %}
{% endfor %}
{% endwith %}
diff --git a/yaksh/templates/yaksh/view_answerpaper.html b/yaksh/templates/yaksh/view_answerpaper.html
index 79987b1..fa16a08 100644
--- a/yaksh/templates/yaksh/view_answerpaper.html
+++ b/yaksh/templates/yaksh/view_answerpaper.html
@@ -131,9 +131,8 @@
{% with answer.error_list as err %}
{% for error in err %}
- {% if not error.expected_output %}
- <pre><code> {{error|safe}} </code></pre>
- {% else %}
+
+ {% if error.type == 'stdio' %}
<div class = "well well-sm">
{% if error.given_input %}
<table class="table table-bordered">
@@ -175,6 +174,32 @@
</tr>
</table>
</div>
+ {% elif error.type == 'assertion' %}
+ {% if error.test_case %}
+                            <strong> We tried your code with the following test case:</strong><br/><br/>
+ <pre><code><strong style="color:#d9534f">{{error.test_case}}</strong></code></pre>
+ {% endif %}
+ <p> <b>The following error took place: </b></p>
+ <div class="well well-sm">
+ <table class="table table-bordered" width="100%">
+ <col width="30%">
+ <tr class = "active">
+ <td><b>Exception Name: </b></td>
+ <td><span style="color: #d9534f">{{error.exception}}</span></td>
+ </tr>
+ <tr>
+ <td><b>Exception Message: </b></td><td>{{error.message}}</td>
+ </tr>
+ <tr>
+ {% if error.traceback %}
+ <td><b>Full Traceback: </b></td>
+ <td><pre>{{error.traceback}}</pre></td>
+ {% endif %}
+ </tr>
+ </table>
+ </div> <!-- Closes well -->
+ {% else %}
+ <pre><code> {{error|safe}} </code></pre>
{% endif %}
{% endfor %}
{% endwith %}
diff --git a/yaksh/tests/test_code_server.py b/yaksh/tests/test_code_server.py
index 5f80f2d..1309624 100644
--- a/yaksh/tests/test_code_server.py
+++ b/yaksh/tests/test_code_server.py
@@ -61,7 +61,7 @@ class TestCodeServer(unittest.TestCase):
# Then
data = json.loads(result.get('result'))
self.assertFalse(data['success'])
- self.assertTrue('infinite loop' in data['error'][0])
+ self.assertTrue('infinite loop' in data['error'][0]['message'])
def test_correct_answer(self):
# Given
@@ -104,7 +104,7 @@ class TestCodeServer(unittest.TestCase):
# Then
data = json.loads(result.get('result'))
self.assertFalse(data['success'])
- self.assertTrue('AssertionError' in data['error'][0])
+ self.assertTrue('AssertionError' in data['error'][0]['exception'])
def test_multiple_simultaneous_hits(self):
# Given
@@ -143,7 +143,7 @@ class TestCodeServer(unittest.TestCase):
for i in range(N):
data = results.get()
self.assertFalse(data['success'])
- self.assertTrue('infinite loop' in data['error'][0])
+ self.assertTrue('infinite loop' in data['error'][0]['message'])
def test_server_pool_status(self):
# Given