author      prathamesh   2020-07-01 19:31:15 +0530
committer   prathamesh   2020-07-01 19:31:15 +0530
commit      46ef4a4a563b16bb96d09cb17496afb731e616ad (patch)
tree        ae8bc21534092c75080cce1d0d4f4befb6be2897 /yaksh
parent      60b1cc983c65654552c4b4a0d52114248ae0228f (diff)
download    online_test-46ef4a4a563b16bb96d09cb17496afb731e616ad.tar.gz
            online_test-46ef4a4a563b16bb96d09cb17496afb731e616ad.tar.bz2
            online_test-46ef4a4a563b16bb96d09cb17496afb731e616ad.zip
Modify tests to handle hidden tests
Diffstat (limited to 'yaksh')
-rw-r--r--   yaksh/evaluator_tests/test_bash_evaluation.py     25
-rw-r--r--   yaksh/evaluator_tests/test_c_cpp_evaluation.py    49
-rw-r--r--   yaksh/evaluator_tests/test_java_evaluation.py     16
-rw-r--r--   yaksh/evaluator_tests/test_python_evaluation.py   41
-rw-r--r--   yaksh/evaluator_tests/test_r_evaluation.py         5
-rw-r--r--   yaksh/evaluator_tests/test_scilab_evaluation.py    4
6 files changed, 86 insertions, 54 deletions
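
The change is mechanical and repeats the same two-part pattern in every evaluator test in the diff below: each test-case dict gains a "hidden" flag, and the failing-answer tests then assert that the grader reports that flag back on each error entry. The snippet that follows is a minimal, self-contained sketch of that pattern; fake_evaluate is a stand-in written here for illustration only, not yaksh's actual Grader.evaluate().

import unittest


def fake_evaluate(test_case_data):
    # Stand-in for Grader.evaluate(): it only mimics the shape of the
    # result dict that the assertions in this commit rely on.
    return {
        "success": False,
        "error": [
            {"message": "Incorrect Answer",
             "hidden": tc.get("hidden", False)}
            for tc in test_case_data
        ],
    }


class HiddenFlagPatternTest(unittest.TestCase):
    def test_hidden_flag_is_reported_back(self):
        # Given: a test case marked as hidden, as in the diff below.
        test_case_data = [{"test_case_type": "standardtestcase",
                           "test_case": "assert(add(1,2)==3)",
                           "weight": 0.0,
                           "hidden": True}]
        # When
        result = fake_evaluate(test_case_data)
        # Then: the error entry carries the flag, so the caller can decide
        # whether to show the failing test case to the student.
        self.assertFalse(result.get("success"))
        self.assertTrue(result.get("error")[0]["hidden"])


if __name__ == "__main__":
    unittest.main()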
diff --git a/yaksh/evaluator_tests/test_bash_evaluation.py b/yaksh/evaluator_tests/test_bash_evaluation.py
index 689f65b..031a9de 100644
--- a/yaksh/evaluator_tests/test_bash_evaluation.py
+++ b/yaksh/evaluator_tests/test_bash_evaluation.py
@@ -25,7 +25,7 @@ class BashAssertionEvaluationTestCases(EvaluatorBaseTest):
            {"test_case": self.tc_data,
             "test_case_args": self.tc_data_args,
             "test_case_type": "standardtestcase",
-            "weight": 0.0
+            "weight": 0.0, "hidden": False
             }
        ]
        self.in_dir = tempfile.mkdtemp()
@@ -66,7 +66,8 @@ class BashAssertionEvaluationTestCases(EvaluatorBaseTest):
            {"test_case": tc_data,
             "test_case_args": "",
             "test_case_type": "standardtestcase",
-            "weight": 0.0
+            "weight": 0.0,
+            "hidden": True
             }
        ]
        kwargs = {'metadata': {
@@ -129,6 +130,7 @@ class BashAssertionEvaluationTestCases(EvaluatorBaseTest):
        # Then
        self.assertFalse(result.get("success"))
+       self.assertFalse(result.get("error")[0]["hidden"])
        self.assert_correct_output("Error", result.get("error")[0]["message"])
@@ -171,7 +173,8 @@ class BashAssertionEvaluationTestCases(EvaluatorBaseTest):
            "test_case": self.tc_data,
            "test_case_args": self.tc_data_args,
            "test_case_type": "standardtestcase",
-           "weight": 0.0
+           "weight": 0.0,
+           "hidden": True
        }]
        user_answer = ("#!/bin/bash\ncat $1")
        kwargs = {'metadata': {
@@ -241,7 +244,7 @@ class BashStdIOEvaluationTestCases(EvaluatorBaseTest):
        test_case_data = [{'expected_output': '1 2 3\n4 5 6\n7 8 9\n',
                           'expected_input': '1,2,3\n4,5,6\n7,8,9',
                           'test_case_type': 'stdiobasedtestcase',
-                          'weight': 0.0
+                          'weight': 0.0,
                           }]
        kwargs = {
            'metadata': {
@@ -271,7 +274,8 @@ class BashStdIOEvaluationTestCases(EvaluatorBaseTest):
                           'expected_output': '11',
                           'expected_input': '5\n6',
                           'test_case_type': 'stdiobasedtestcase',
-                          'weight': 0.0
+                          'weight': 0.0,
+                          'hidden': True
                           }]
        kwargs = {
            'metadata': {
@@ -289,6 +293,7 @@ class BashStdIOEvaluationTestCases(EvaluatorBaseTest):
        # Then
        result_error = result.get('error')[0].get('error_msg')
        self.assert_correct_output("Incorrect", result_error)
+       self.assertTrue(result.get('error')[0]['hidden'])
        self.assertFalse(result.get('success'))
    def test_stdout_only(self):
@@ -400,7 +405,8 @@ class BashHookEvaluationTestCases(EvaluatorBaseTest):
            return success, err, mark_fraction
            """)
        test_case_data = [{"test_case_type": "hooktestcase",
-                          "hook_code": hook_code, "weight": 1.0}]
+                          "hook_code": hook_code, "weight": 1.0,
+                          "hidden": True}]
        kwargs = {
            'metadata': {
@@ -417,6 +423,7 @@ class BashHookEvaluationTestCases(EvaluatorBaseTest):
        # Then
        self.assertFalse(result.get('success'))
+       self.assertTrue(result.get('error')[0]['hidden'])
        self.assert_correct_output('Incorrect Answer', result.get('error')[0]['message'])
@@ -448,7 +455,8 @@ class BashHookEvaluationTestCases(EvaluatorBaseTest):
             'weight': 1.0
             },
            {"test_case_type": "hooktestcase",
-            "hook_code": hook_code, 'weight': 1.0},
+            "hook_code": hook_code, 'weight': 1.0,
+            'hidden': True},
        ]
        kwargs = {
            'metadata': {
@@ -546,7 +554,8 @@ class BashHookEvaluationTestCases(EvaluatorBaseTest):
            """)
        test_case_data = [{"test_case_type": "hooktestcase",
-                          "hook_code": hook_code, "weight": 1.0}]
+                          "hook_code": hook_code, "weight": 1.0,
+                          "hidden": False}]
        kwargs = {
            'metadata': {
diff --git a/yaksh/evaluator_tests/test_c_cpp_evaluation.py b/yaksh/evaluator_tests/test_c_cpp_evaluation.py
index 242d0b0..b7c6018 100644
--- a/yaksh/evaluator_tests/test_c_cpp_evaluation.py
+++ b/yaksh/evaluator_tests/test_c_cpp_evaluation.py
@@ -54,7 +54,7 @@ class CAssertionEvaluationTestCases(EvaluatorBaseTest):
            """)
        self.test_case_data = [{"test_case": self.tc_data,
                                "test_case_type": "standardtestcase",
-                               "weight": 0.0
+                               "weight": 0.0, "hidden": False
                                }]
        self.in_dir = tmp_in_dir_path
        self.timeout_msg = ("Code took more than {0} seconds to run. "
@@ -184,9 +184,9 @@ class CAssertionEvaluationTestCases(EvaluatorBaseTest):
            check(50, result);
        }
        """)
-       self.test_case_data = [{"test_case": self.tc_data,
+       test_case_data = [{"test_case": self.tc_data,
                           "test_case_type": "standardtestcase",
-                          "weight": 0.0
+                          "weight": 0.0,
                           }]
        user_answer = dedent("""
        #include<stdio.h>
@@ -206,7 +206,7 @@ class CAssertionEvaluationTestCases(EvaluatorBaseTest):
                'file_paths': self.file_paths,
                'partial_grading': False,
                'language': 'cpp'
-           }, 'test_case_data': self.test_case_data,
+           }, 'test_case_data': test_case_data,
        }
        # When
@@ -257,9 +257,9 @@ class CAssertionEvaluationTestCases(EvaluatorBaseTest):
        {
            return a+b;
        }""")
-       self.test_case_data = [{"test_case": self.tc_data,
+       test_case_data = [{"test_case": self.tc_data,
                           "test_case_type": "standardtestcase",
-                          "weight": 0.0
+                          "weight": 0.0,
                           }]
        kwargs = {
            'metadata': {
@@ -267,7 +267,7 @@ class CAssertionEvaluationTestCases(EvaluatorBaseTest):
                'file_paths': self.file_paths,
                'partial_grading': False,
                'language': 'cpp'
-           }, 'test_case_data': self.test_case_data,
+           }, 'test_case_data': test_case_data,
        }
        # When
@@ -287,6 +287,7 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest):
                               'expected_input': '5\n6',
                               'weight': 0.0,
                               'test_case_type': 'stdiobasedtestcase',
+                              'hidden': True
                               }]
        self.in_dir = tempfile.mkdtemp()
        self.timeout_msg = ("Code took more than {0} seconds to run. "
@@ -324,7 +325,7 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest):
    def test_array_input(self):
        # Given
-       self.test_case_data = [{'expected_output': '561',
+       test_case_data = [{'expected_output': '561',
                           'expected_input': '5\n6\n1',
                           'weight': 0.0,
                           'test_case_type': 'stdiobasedtestcase',
@@ -344,7 +345,7 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest):
                'file_paths': self.file_paths,
                'partial_grading': False,
                'language': 'cpp'
-           }, 'test_case_data': self.test_case_data,
+           }, 'test_case_data': test_case_data,
        }
        # When
@@ -356,7 +357,7 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest):
    def test_string_input(self):
        # Given
-       self.test_case_data = [{'expected_output': 'abc',
+       test_case_data = [{'expected_output': 'abc',
                           'expected_input': 'abc',
                           'weight': 0.0,
                           'test_case_type': 'stdiobasedtestcase',
@@ -374,7 +375,7 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest):
                'file_paths': self.file_paths,
                'partial_grading': False,
                'language': 'cpp'
-           }, 'test_case_data': self.test_case_data,
+           }, 'test_case_data': test_case_data,
        }
        # When
@@ -408,6 +409,7 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest):
        # Then
        lines_of_error = len(result.get('error')[0].get('error_line_numbers'))
        result_error = result.get('error')[0].get('error_msg')
+       self.assertTrue(result.get('error')[0].get('hidden'))
        self.assertFalse(result.get('success'))
        self.assert_correct_output("Incorrect", result_error)
        self.assertTrue(lines_of_error > 0)
@@ -472,7 +474,7 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest):
    def test_only_stdout(self):
        # Given
-       self.test_case_data = [{'expected_output': '11',
+       test_case_data = [{'expected_output': '11',
                           'weight': 0.0,
                           'test_case_type': 'stdiobasedtestcase',
                           }]
@@ -488,7 +490,7 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest):
                'file_paths': self.file_paths,
                'partial_grading': False,
                'language': 'cpp'
-           }, 'test_case_data': self.test_case_data,
+           }, 'test_case_data': test_case_data,
        }
        # When
@@ -522,11 +524,12 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest):
        result = grader.evaluate(kwargs)
        # Then
+       self.assertFalse(result.get('hidden'))
        self.assertTrue(result.get('success'))
    def test_cpp_array_input(self):
        # Given
-       self.test_case_data = [{'expected_output': '561',
+       test_case_data = [{'expected_output': '561',
                           'expected_input': '5\n6\n1',
                           'weight': 0.0,
                           'test_case_type': 'stdiobasedtestcase',
@@ -547,7 +550,7 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest):
                'file_paths': self.file_paths,
                'partial_grading': False,
                'language': 'cpp'
-           }, 'test_case_data': self.test_case_data,
+           }, 'test_case_data': test_case_data,
        }
        # When
@@ -559,7 +562,7 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest):
    def test_cpp_string_input(self):
        # Given
-       self.test_case_data = [{'expected_output': 'abc',
+       test_case_data = [{'expected_output': 'abc',
                           'expected_input': 'abc',
                           'weight': 0.0,
                           'test_case_type': 'stdiobasedtestcase',
@@ -578,7 +581,7 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest):
                'file_paths': self.file_paths,
                'partial_grading': False,
                'language': 'cpp'
-           }, 'test_case_data': self.test_case_data,
+           }, 'test_case_data': test_case_data,
        }
        # When
@@ -613,6 +616,7 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest):
        # Then
        lines_of_error = len(result.get('error')[0].get('error_line_numbers'))
        result_error = result.get('error')[0].get('error_msg')
+       self.assertTrue(result.get('error')[0].get('hidden'))
        self.assertFalse(result.get('success'))
        self.assert_correct_output("Incorrect", result_error)
        self.assertTrue(lines_of_error > 0)
@@ -675,7 +679,7 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest):
    def test_cpp_only_stdout(self):
        # Given
-       self.test_case_data = [{'expected_output': '11',
+       test_case_data = [{'expected_output': '11',
                           'expected_input': '',
                           'weight': 0.0,
                           'test_case_type': 'stdiobasedtestcase',
@@ -693,7 +697,7 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest):
                'file_paths': self.file_paths,
                'partial_grading': False,
                'language': 'cpp'
-           }, 'test_case_data': self.test_case_data,
+           }, 'test_case_data': test_case_data,
        }
        # When
@@ -806,7 +810,8 @@ class CppHookEvaluationTestCases(EvaluatorBaseTest):
            """)
        test_case_data = [{"test_case_type": "hooktestcase",
-                          "hook_code": hook_code, "weight": 1.0}]
+                          "hook_code": hook_code, "weight": 1.0,
+                          "hidden": True}]
        kwargs = {
            'metadata': {
                'user_answer': user_answer,
@@ -821,6 +826,7 @@ class CppHookEvaluationTestCases(EvaluatorBaseTest):
        result = grader.evaluate(kwargs)
        # Then
+       self.assertTrue(result.get('error')[0]['hidden'])
        self.assertFalse(result.get('success'))
        self.assert_correct_output('Incorrect Answer', result.get('error')[0]['message'])
@@ -1000,7 +1006,8 @@ class CppHookEvaluationTestCases(EvaluatorBaseTest):
            """)
        test_case_data = [{"test_case_type": "hooktestcase",
-                          "hook_code": hook_code, "weight": 1.0}]
+                          "hook_code": hook_code, "weight": 1.0,
+                          "hidden": False}]
        kwargs = {
            'metadata': {
diff --git a/yaksh/evaluator_tests/test_java_evaluation.py b/yaksh/evaluator_tests/test_java_evaluation.py
index 3e30ba3..e762852 100644
--- a/yaksh/evaluator_tests/test_java_evaluation.py
+++ b/yaksh/evaluator_tests/test_java_evaluation.py
@@ -62,7 +62,7 @@ class JavaAssertionEvaluationTestCases(EvaluatorBaseTest):
        self.test_case_data = [
            {"test_case": self.tc_data,
             "test_case_type": "standardtestcase",
-            "weight": 0.0
+            "weight": 0.0, "hidden": False
             }
        ]
        self.in_dir = tmp_in_dir_path
@@ -119,6 +119,7 @@ class JavaAssertionEvaluationTestCases(EvaluatorBaseTest):
        self.assertFalse(result.get('success'))
        for error in errors:
            self.assertEqual(error.get('exception'), 'AssertionError')
+           self.assertFalse(error.get('hidden'))
    def test_error(self):
        # Given
@@ -140,6 +141,7 @@ class JavaAssertionEvaluationTestCases(EvaluatorBaseTest):
        self.assertFalse(result.get("success"))
        for error in errors:
            self.assertEqual(error.get('exception'), 'CompilationError')
+           self.assertFalse(result.get('hidden'))
    def test_infinite_loop(self):
        # Given
@@ -276,7 +278,7 @@ class JavaAssertionEvaluationTestCases(EvaluatorBaseTest):
                        "{\n\treturn a;\n\t}\n}")
        self.test_case_data = [{"test_case": self.tc_data,
                                "test_case_type": "standardtestcase",
-                               "weight": 0.0
+                               "weight": 0.0, "hidden": True
                                }]
        kwargs = {
            'metadata': {
@@ -408,6 +410,7 @@ class JavaStdIOEvaluationTestCases(EvaluatorBaseTest):
        # Then
        lines_of_error = len(result.get('error')[0].get('error_line_numbers'))
        result_error = result.get('error')[0].get('error_msg')
+       self.assertFalse(result.get('error')[0].get('hidden'))
        self.assertFalse(result.get('success'))
        self.assert_correct_output("Incorrect", result_error)
        self.assertTrue(lines_of_error > 0)
@@ -437,6 +440,7 @@ class JavaStdIOEvaluationTestCases(EvaluatorBaseTest):
        self.assertFalse(result.get("success"))
        for error in errors:
            self.assertEqual(error.get('exception'), 'CompilationError')
+           self.assertFalse(error.get('hidden'))
    def test_infinite_loop(self):
        # Given
@@ -474,7 +478,7 @@ class JavaStdIOEvaluationTestCases(EvaluatorBaseTest):
        # Given
        self.test_case_data = [{'expected_output': '11',
                                'test_case_type': 'stdiobasedtestcase',
-                               'weight': 0.0
+                               'weight': 0.0, 'hidden': False
                                }]
        user_answer = dedent("""
        class Test
@@ -677,7 +681,8 @@ class JavaHookEvaluationTestCases(EvaluatorBaseTest):
            """)
        test_case_data = [{"test_case_type": "hooktestcase",
-                          "hook_code": hook_code, "weight": 1.0
+                          "hook_code": hook_code, "weight": 1.0,
+                          "hidden": True
                           }]
        kwargs = {
            'metadata': {
@@ -693,6 +698,7 @@ class JavaHookEvaluationTestCases(EvaluatorBaseTest):
        result = grader.evaluate(kwargs)
        # Then
+       self.assertTrue(result.get('error')[0]['hidden'])
        self.assertFalse(result.get('success'))
        self.assert_correct_output('Incorrect Answer', result.get('error')[0]['message'])
@@ -876,7 +882,7 @@ class JavaHookEvaluationTestCases(EvaluatorBaseTest):
            """)
        test_case_data = [{"test_case_type": "hooktestcase",
-                          "hook_code": hook_code, "weight": 1.0
+                          "hook_code": hook_code, "weight": 1.0,
                           }]
        kwargs = {
diff --git a/yaksh/evaluator_tests/test_python_evaluation.py b/yaksh/evaluator_tests/test_python_evaluation.py
index 35c2322..343c8fb 100644
--- a/yaksh/evaluator_tests/test_python_evaluation.py
+++ b/yaksh/evaluator_tests/test_python_evaluation.py
@@ -25,13 +25,13 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
        self.in_dir = tmp_in_dir_path
        self.test_case_data = [{"test_case_type": "standardtestcase",
                                "test_case": 'assert(add(1,2)==3)',
-                               'weight': 0.0},
+                               'weight': 0.0, 'hidden': True},
                               {"test_case_type": "standardtestcase",
                                "test_case": 'assert(add(-1,2)==1)',
-                               'weight': 0.0},
+                               'weight': 0.0, 'hidden': True},
                               {"test_case_type": "standardtestcase",
                                "test_case": 'assert(add(-1,-2)==-3)',
-                               'weight': 0.0},
+                               'weight': 0.0, 'hidden': True},
                               ]
        self.timeout_msg = ("Code took more than {0} seconds to run. "
                            "You probably have an infinite loop in"
@@ -80,6 +80,7 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
        given_test_case_list = [tc["test_case"] for tc in self.test_case_data]
        for error in result.get("error"):
            self.assertEqual(error['exception'], 'AssertionError')
+           self.assertTrue(error['hidden'])
            self.assertEqual(
                error['message'],
                "Expected answer from the test case did not match the output"
@@ -92,13 +93,13 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
        user_answer = "def add(a,b):\n\treturn abs(a) + abs(b)"
        test_case_data = [{"test_case_type": "standardtestcase",
                           "test_case": 'assert(add(-1,2)==1)',
-                          'weight': 1.0},
+                          'weight': 1.0, 'hidden': False},
                          {"test_case_type": "standardtestcase",
                           "test_case": 'assert(add(-1,-2)==-3)',
-                          'weight': 1.0},
+                          'weight': 1.0, 'hidden': False},
                          {"test_case_type": "standardtestcase",
                           "test_case": 'assert(add(1,2)==3)',
-                          'weight': 2.0}
+                          'weight': 2.0, 'hidden': False}
                          ]
        kwargs = {'metadata': {
            'user_answer': user_answer,
@@ -119,6 +120,7 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
        given_test_case_list.remove('assert(add(1,2)==3)')
        for error in result.get("error"):
            self.assertEqual(error['exception'], 'AssertionError')
+           self.assertFalse(error['hidden'])
            self.assertEqual(
                error['message'],
                "Expected answer from the test case did not match the output"
@@ -489,7 +491,7 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest):
    def test_correct_answer_integer(self):
        # Given
-       self.test_case_data = [{"test_case_type": "stdiobasedtestcase",
+       test_case_data = [{"test_case_type": "stdiobasedtestcase",
                          "expected_input": "1\n2",
                          "expected_output": "3",
                          "weight": 0.0
@@ -505,7 +507,7 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest):
            'file_paths': self.file_paths,
            'partial_grading': False,
            'language': 'python'},
-           'test_case_data': self.test_case_data
+           'test_case_data': test_case_data
        }
        # When
@@ -517,7 +519,7 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest):
    def test_correct_answer_list(self):
        # Given
-       self.test_case_data = [{"test_case_type": "stdiobasedtestcase",
+       test_case_data = [{"test_case_type": "stdiobasedtestcase",
                          "expected_input": "1,2,3\n5,6,7",
                          "expected_output": "[1, 2, 3, 5, 6, 7]",
                          "weight": 0.0
@@ -536,7 +538,7 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest):
            'file_paths': self.file_paths,
            'partial_grading': False,
            'language': 'python'},
-           'test_case_data': self.test_case_data
+           'test_case_data': test_case_data
        }
        # When
@@ -548,7 +550,7 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest):
    def test_correct_answer_string(self):
        # Given
-       self.test_case_data = [{
+       test_case_data = [{
            "test_case_type": "stdiobasedtestcase",
            "expected_input": ("the quick brown fox jumps over "
                               "the lazy dog\nthe"),
@@ -567,7 +569,7 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest):
            'file_paths': self.file_paths,
            'partial_grading': False,
            'language': 'python'},
-           'test_case_data': self.test_case_data
+           'test_case_data': test_case_data
        }
        # When
@@ -579,10 +581,10 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest):
    def test_incorrect_answer_integer(self):
        # Given
-       self.test_case_data = [{"test_case_type": "stdiobasedtestcase",
+       test_case_data = [{"test_case_type": "stdiobasedtestcase",
                          "expected_input": "1\n2",
                          "expected_output": "3",
-                         "weight": 0.0
+                         "weight": 0.0, 'hidden': True
                          }]
        user_answer = dedent("""
                             a = int(input())
@@ -595,7 +597,7 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest):
            'file_paths': self.file_paths,
            'partial_grading': False,
            'language': 'python'},
-           'test_case_data': self.test_case_data
+           'test_case_data': test_case_data
        }
        # When
@@ -604,6 +606,7 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest):
        # Then
        self.assertFalse(result.get('success'))
+       self.assertTrue(result.get('error')[0].get('hidden'))
        self.assert_correct_output(
            "Incorrect Answer: Line number(s) 1 did not match.",
            result.get('error')[0].get('error_msg')
        )
@@ -611,7 +614,7 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest):
    def test_file_based_answer(self):
        # Given
-       self.test_case_data = [{"test_case_type": "stdiobasedtestcase",
+       test_case_data = [{"test_case_type": "stdiobasedtestcase",
                          "expected_input": "",
                          "expected_output": "2",
                          "weight": 0.0
@@ -629,7 +632,7 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest):
            'file_paths': self.file_paths,
            'partial_grading': False,
            'language': 'python'},
-           'test_case_data': self.test_case_data
+           'test_case_data': test_case_data
        }
        # When
@@ -786,7 +789,8 @@ class PythonHookEvaluationTestCases(EvaluatorBaseTest):
            )
        test_case_data = [{"test_case_type": "hooktestcase",
-                          "hook_code": hook_code, "weight": 1.0
+                          "hook_code": hook_code, "weight": 1.0,
+                          "hidden": True
                           }]
        kwargs = {'metadata': {
@@ -803,6 +807,7 @@ class PythonHookEvaluationTestCases(EvaluatorBaseTest):
        # Then
        self.assertFalse(result.get('success'))
+       self.assertTrue(result.get('error')[0]['hidden'])
        self.assert_correct_output('Incorrect Answer', result.get('error')[0]['message'])
diff --git a/yaksh/evaluator_tests/test_r_evaluation.py b/yaksh/evaluator_tests/test_r_evaluation.py
index b4b81ae..a196d91 100644
--- a/yaksh/evaluator_tests/test_r_evaluation.py
+++ b/yaksh/evaluator_tests/test_r_evaluation.py
@@ -44,7 +44,7 @@ class RAssertionEvaluationTestCase(EvaluatorBaseTest):
            )
        self.test_case_data = [{"test_case": self.test_case,
                                "test_case_type": "standardtestcase",
-                               "weight": 0.0
+                               "weight": 0.0, "hidden": True
                                }]
        self.timeout_msg = ("Code took more than {0} seconds to run. "
                            "You probably have an infinite loop in"
@@ -108,6 +108,7 @@ class RAssertionEvaluationTestCase(EvaluatorBaseTest):
        result = grader.evaluate(kwargs)
        errors = result.get('error')
        # Then
+       self.assertTrue(result.get("error")[0]['hidden'])
        self.assertFalse(result.get('success'))
        self.assertEqual(errors[0]['message'], err)
@@ -134,6 +135,7 @@ class RAssertionEvaluationTestCase(EvaluatorBaseTest):
        errors = result.get('error')
        # Then
+       self.assertTrue(result.get("error")[0]['hidden'])
        self.assertFalse(result.get("success"))
        self.assertIn("object 'a' not found", errors[0]['message'])
@@ -159,6 +161,7 @@ class RAssertionEvaluationTestCase(EvaluatorBaseTest):
        errors = result.get('error')
        # Then
+       self.assertTrue(result.get("error")[0]['hidden'])
        self.assertFalse(result.get("success"))
        err = errors[0]['message']
        self.assertIn("is.null(obj) == FALSE is not TRUE", err)
diff --git a/yaksh/evaluator_tests/test_scilab_evaluation.py b/yaksh/evaluator_tests/test_scilab_evaluation.py
index 41abf94..b08d348 100644
--- a/yaksh/evaluator_tests/test_scilab_evaluation.py
+++ b/yaksh/evaluator_tests/test_scilab_evaluation.py
@@ -48,7 +48,7 @@ class ScilabEvaluationTestCases(EvaluatorBaseTest):
            """)
        self.test_case_data = [{"test_case": self.tc_data,
                                "test_case_type": "standardtestcase",
-                               "weight": 0.0
+                               "weight": 0.0, 'hidden': True
                                }]
        self.in_dir = tmp_in_dir_path
        self.file_paths = None
@@ -93,6 +93,7 @@ class ScilabEvaluationTestCases(EvaluatorBaseTest):
        grader = Grader(self.in_dir)
        result = grader.evaluate(kwargs)
        self.assertFalse(result.get("success"))
+       self.assertTrue(result.get("error")[0]['hidden'])
        self.assert_correct_output('error', result.get("error")[0]['message'])
    def test_incorrect_answer(self):
@@ -111,6 +112,7 @@ class ScilabEvaluationTestCases(EvaluatorBaseTest):
        result = grader.evaluate(kwargs)
        lines_of_error = len(result.get('error')[0]['message'].splitlines())
        self.assertFalse(result.get('success'))
+       self.assertTrue(result.get("error")[0]['hidden'])
        self.assert_correct_output("Message", result.get('error')[0]["message"])
        self.assertTrue(lines_of_error > 1)