summaryrefslogtreecommitdiff
path: root/yaksh/evaluator_tests/test_python_evaluation.py
diff options
context:
space:
mode:
author: adityacp 2018-04-13 12:17:29 +0530
committer: adityacp 2018-04-13 12:17:29 +0530
commit: 575da538337da6afb23cc159870853b6457797d3 (patch)
tree: fbf75cd5a5dfe701005dd0571e92e74f2a87163e /yaksh/evaluator_tests/test_python_evaluation.py
parent: a3e540aa209be57318de6c3e2548c56e68fdeded (diff)
download: online_test-575da538337da6afb23cc159870853b6457797d3.tar.gz
          online_test-575da538337da6afb23cc159870853b6457797d3.tar.bz2
          online_test-575da538337da6afb23cc159870853b6457797d3.zip
Change in python test, grader and request handler
- Add try/except in grader to capture exceptions for Python stdio and Python assertion evaluation
- Add additional tests
- Add condition in request handler to avoid an error if CodeMirror is not instantiated
Diffstat (limited to 'yaksh/evaluator_tests/test_python_evaluation.py')
-rw-r--r-- yaksh/evaluator_tests/test_python_evaluation.py | 32
1 file changed, 30 insertions(+), 2 deletions(-)
diff --git a/yaksh/evaluator_tests/test_python_evaluation.py b/yaksh/evaluator_tests/test_python_evaluation.py
index 2748d8a..1933d17 100644
--- a/yaksh/evaluator_tests/test_python_evaluation.py
+++ b/yaksh/evaluator_tests/test_python_evaluation.py
@@ -317,12 +317,15 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
# When
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- err = result.get("error")[0]['traceback']
+ errors = result.get("error")
# Then
self.assertFalse(result.get("success"))
for msg in value_error_msg:
- self.assert_correct_output(msg, err)
+ self.assert_correct_output(msg, errors[0]['traceback'])
+ for index, error in enumerate(errors):
+ self.assertEqual(error['test_case'],
+ self.test_case_data[index]['test_case'])
def test_file_based_assert(self):
# Given
@@ -694,6 +697,31 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest):
# Then
self.assertTrue(result.get("success"))
+ def test_get_error_lineno(self):
+ user_answer = dedent("""\
+ print(1/0)
+ """)
+ test_case_data = [{"test_case_type": "stdiobasedtestcase",
+ "expected_input": "",
+ "expected_output": "1",
+ "weight": 0.0
+ }]
+ kwargs = {'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'python'},
+ 'test_case_data': test_case_data,
+ }
+ # When
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+ # Then
+ self.assertFalse(result.get("success"))
+ error = result.get("error")[0]
+ self.assertEqual(error['line_no'], 1)
+ self.assertEqual(error['exception'], "ZeroDivisionError")
+
class PythonHookEvaluationTestCases(EvaluatorBaseTest):