Diffstat (limited to 'yaksh/evaluator_tests')
-rw-r--r--  yaksh/evaluator_tests/test_python_evaluation.py       11
-rw-r--r--  yaksh/evaluator_tests/test_python_stdio_evaluator.py  62
2 files changed, 69 insertions, 4 deletions
diff --git a/yaksh/evaluator_tests/test_python_evaluation.py b/yaksh/evaluator_tests/test_python_evaluation.py
index 1fba73e..82cf4c3 100644
--- a/yaksh/evaluator_tests/test_python_evaluation.py
+++ b/yaksh/evaluator_tests/test_python_evaluation.py
@@ -400,7 +400,7 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
result = grader.evaluate(kwargs)
error_as_str = ''.join(result.get("error"))
err = error_as_str.splitlines()
-
+
# Then
self.assertFalse(result.get("success"))
self.assertEqual(5, len(err))
@@ -578,7 +578,10 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest):
# Then
self.assertFalse(result.get('success'))
- self.assert_correct_output("Incorrect answer", result.get('error'))
+ self.assert_correct_output(
+ "ERROR:\nExpected:\n3\nGiven:\n-1\n\nError in line 1 of output.",
+ result.get('error')
+ )
def test_file_based_answer(self):
# Given
@@ -728,7 +731,7 @@ class PythonHookEvaluationTestCases(EvaluatorBaseTest):
# Then
self.assertFalse(result.get('success'))
self.assert_correct_output('Incorrect Answer', result.get('error'))
-
+
def test_assert_with_hook(self):
# Given
user_answer = "def add(a,b):\n\treturn a + b"
@@ -815,7 +818,7 @@ class PythonHookEvaluationTestCases(EvaluatorBaseTest):
# Then
self.assertTrue(result.get('success'))
self.assertEqual(result.get("weight"), 1.5)
-
+
def test_infinite_loop(self):
# Given
user_answer = "def add(a, b):\n\twhile True:\n\t\tpass"
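
Note on the test_python_evaluation.py changes: apart from trailing-whitespace cleanup, the stdio test case no longer expects the generic "Incorrect answer" text but the detailed comparison report, presumably produced by the compare_outputs helper exercised by the new test file below. As a plain-Python illustration (the values 3 and -1 come from that test case's expected and submitted output), the error text the test now expects is:

    # Illustration only: the error string asserted in the -578,7 hunk above.
    expected_error = (
        "ERROR:\n"
        "Expected:\n"
        "3\n"
        "Given:\n"
        "-1\n"
        "\n"
        "Error in line 1 of output."
    )
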
diff --git a/yaksh/evaluator_tests/test_python_stdio_evaluator.py b/yaksh/evaluator_tests/test_python_stdio_evaluator.py
new file mode 100644
index 0000000..db5028a
--- /dev/null
+++ b/yaksh/evaluator_tests/test_python_stdio_evaluator.py
@@ -0,0 +1,62 @@
+from textwrap import dedent
+
+from yaksh.python_stdio_evaluator import compare_outputs
+
+
+def test_compare_outputs():
+ exp = "5\n5\n"
+ given = "5\n5\n"
+ success, msg = compare_outputs(given, exp)
+ assert success
+
+ exp = "5\n5\n"
+ given = "5\n5"
+ success, msg = compare_outputs(given, exp)
+ assert success
+
+ exp = "5\r5"
+ given = "5\n5"
+ success, msg = compare_outputs(given, exp)
+ assert success
+
+ exp = " 5 \r 5 "
+ given = " 5 \n 5 "
+ success, msg = compare_outputs(given, exp)
+ assert success
+
+ exp = "5\n5\n"
+ given = "5 5"
+ success, msg = compare_outputs(given, exp)
+ assert not success
+ m = dedent("""\
+ ERROR: Got 1 lines in output, we expected 2.
+ Expected:
+ 5
+ 5
+
+ Given:
+ 5 5
+ """)
+ assert m == msg
+
+ exp = "5\n5\n"
+ given = "5\n6"
+ success, msg = compare_outputs(given, exp)
+ assert not success
+ m = dedent("""\
+ ERROR:
+ Expected:
+ 5
+ 5
+
+ Given:
+ 5
+ 6
+
+ Error in line 2 of output.
+ Expected line 2:
+ 5
+ Given line 2:
+ 6
+ """)
+ assert m == msg
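
Note: the tests above pin down the behaviour of compare_outputs(given, expected) from yaksh/python_stdio_evaluator.py. The sketch below is a minimal comparator that would satisfy these assertions, assuming line endings are normalised, surrounding whitespace is ignored, and the first mismatching line is reported; it is an illustration, not the actual yaksh implementation.

    from textwrap import dedent


    def compare_outputs(given, expected):
        # Illustrative sketch only, not the real yaksh code: normalise '\r'
        # to '\n', drop surrounding whitespace, then compare line by line.
        exp_lines = expected.replace('\r', '\n').strip().splitlines()
        given_lines = given.replace('\r', '\n').strip().splitlines()
        exp_block = '\n'.join(exp_lines)
        given_block = '\n'.join(given_lines)

        # A mismatched line count gets its own message, as in the fifth test.
        if len(exp_lines) != len(given_lines):
            msg = dedent("""\
                ERROR: Got {0} lines in output, we expected {1}.
                Expected:
                {2}

                Given:
                {3}
                """).format(len(given_lines), len(exp_lines),
                            exp_block, given_block)
            return False, msg

        # Same line count: report the first line that differs,
        # ignoring leading/trailing spaces on each line.
        for line_no, (exp_line, given_line) in enumerate(
                zip(exp_lines, given_lines), 1):
            if exp_line.strip() != given_line.strip():
                msg = dedent("""\
                    ERROR:
                    Expected:
                    {0}

                    Given:
                    {1}

                    Error in line {2} of output.
                    Expected line {2}:
                    {3}
                    Given line {2}:
                    {4}
                    """).format(exp_block, given_block, line_no,
                                exp_line, given_line)
                return False, msg

        # Success message is not checked by the tests; any text would do here.
        return True, "Correct answer."
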