Diffstat (limited to 'yaksh/evaluator_tests')

 yaksh/evaluator_tests/test_bash_evaluation.py   |  69
 yaksh/evaluator_tests/test_c_cpp_evaluation.py  | 145
 yaksh/evaluator_tests/test_java_evaluation.py   | 103
 yaksh/evaluator_tests/test_python_evaluation.py | 140
 yaksh/evaluator_tests/test_scilab_evaluation.py |  18
 5 files changed, 315 insertions(+), 160 deletions(-)
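Every hunk below makes the same three adjustments to the evaluator test suites: each test-case dictionary gains a "weight" key, every call to evaluate() now passes a "partial_grading" flag, and the expected success message carries a trailing newline ("Correct answer\n"; the Python assertion tests switch to assertIn("Correct answer", ...) instead). A minimal sketch of the resulting payload, using only the keyword names that appear in the hunks (the evaluator internals are not part of this diff):

    # Shape of the kwargs the updated tests build; illustrative only.
    test_case_data = [
        {"test_case": "assert(add(1,2)==3)", "weight": 0.0},  # "weight" is the new key
    ]
    kwargs = {
        "user_answer": "def add(a, b):\n\treturn a + b",
        "partial_grading": False,           # new flag passed to evaluate()
        "test_case_data": test_case_data,
        "file_paths": [],
    }
    # result = PythonAssertionEvaluator().evaluate(**kwargs)
    # result.get("error") is now expected to contain "Correct answer\n"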
diff --git a/yaksh/evaluator_tests/test_bash_evaluation.py b/yaksh/evaluator_tests/test_bash_evaluation.py index 66ade19..99e5122 100644 --- a/yaksh/evaluator_tests/test_bash_evaluation.py +++ b/yaksh/evaluator_tests/test_bash_evaluation.py @@ -13,12 +13,12 @@ class BashAssertionEvaluationTestCases(unittest.TestCase): def setUp(self): with open('/tmp/test.txt', 'wb') as f: f.write('2'.encode('ascii')) - tmp_in_dir_path = tempfile.mkdtemp() self.test_case_data = [ - {"test_case": "bash_files/sample.sh,bash_files/sample.args"} + {"test_case": "bash_files/sample.sh,bash_files/sample.args", + "weight": 0.0 + } ] - tmp_in_dir_path = tempfile.mkdtemp() - self.in_dir = tmp_in_dir_path + self.in_dir = tempfile.mkdtemp() self.timeout_msg = ("Code took more than {0} seconds to run. " "You probably have an infinite loop in your" " code.").format(SERVER_TIMEOUT) @@ -33,19 +33,21 @@ class BashAssertionEvaluationTestCases(unittest.TestCase): " && echo $(( $1 + $2 )) && exit $(( $1 + $2 ))" ) get_class = BashCodeEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, + kwargs = {'user_answer': user_answer, + 'partial_grading': True, 'test_case_data': self.test_case_data, 'file_paths': self.file_paths } result = get_class.evaluate(**kwargs) self.assertTrue(result.get('success')) - self.assertEqual(result.get('error'), "Correct answer") + self.assertEqual(result.get('error'), "Correct answer\n") def test_error(self): user_answer = ("#!/bin/bash\n[[ $# -eq 2 ]] " "&& echo $(( $1 - $2 )) && exit $(( $1 - $2 ))") get_class = BashCodeEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, + kwargs = {'user_answer': user_answer, + 'partial_grading': True, 'test_case_data': self.test_case_data, 'file_paths': self.file_paths } @@ -57,7 +59,8 @@ class BashAssertionEvaluationTestCases(unittest.TestCase): user_answer = ("#!/bin/bash\nwhile [ 1 ] ;" " do echo "" > /dev/null ; done") get_class = BashCodeEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, + kwargs = {'user_answer': user_answer, + 'partial_grading': True, 'test_case_data': self.test_case_data, 'file_paths': self.file_paths } @@ -68,20 +71,24 @@ class BashAssertionEvaluationTestCases(unittest.TestCase): def test_file_based_assert(self): self.file_paths = [('/tmp/test.txt', False)] self.test_case_data = [ - {"test_case": "bash_files/sample1.sh,bash_files/sample1.args"} + {"test_case": "bash_files/sample1.sh,bash_files/sample1.args", + "weight": 0.0 + } ] user_answer = ("#!/bin/bash\ncat $1") get_class = BashCodeEvaluator() kwargs = {'user_answer': user_answer, + 'partial_grading': True, 'test_case_data': self.test_case_data, 'file_paths': self.file_paths } result = get_class.evaluate(**kwargs) self.assertTrue(result.get("success")) - self.assertEqual(result.get("error"), "Correct answer") + self.assertEqual(result.get("error"), "Correct answer\n") class BashStdioEvaluationTestCases(unittest.TestCase): def setUp(self): + self.in_dir = tempfile.mkdtemp() self.timeout_msg = ("Code took more than {0} seconds to run. 
" "You probably have an infinite loop in your" " code.").format(SERVER_TIMEOUT) @@ -93,13 +100,17 @@ class BashStdioEvaluationTestCases(unittest.TestCase): echo -n `expr $A + $B` """ ) - test_case_data = [{'expected_output': '11', 'expected_input': '5\n6'}] + test_case_data = [{'expected_output': '11', + 'expected_input': '5\n6', + 'weight': 0.0 + }] get_class = BashStdioEvaluator() kwargs = {"user_answer": user_answer, - "test_case_data": test_case_data - } + "partial_grading": True, + "test_case_data": test_case_data + } result = get_class.evaluate(**kwargs) - self.assertEqual(result.get('error'), "Correct answer") + self.assertEqual(result.get('error'), "Correct answer\n") self.assertTrue(result.get('success')) def test_array_input(self): @@ -112,14 +123,16 @@ class BashStdioEvaluationTestCases(unittest.TestCase): """ ) test_case_data = [{'expected_output': '1 2 3\n4 5 6\n7 8 9\n', - 'expected_input': '1,2,3\n4,5,6\n7,8,9' + 'expected_input': '1,2,3\n4,5,6\n7,8,9', + 'weight': 0.0 }] get_class = BashStdioEvaluator() kwargs = {"user_answer": user_answer, - "test_case_data": test_case_data - } + "partial_grading": True, + "test_case_data": test_case_data + } result = get_class.evaluate(**kwargs) - self.assertEqual(result.get('error'), "Correct answer") + self.assertEqual(result.get('error'), "Correct answer\n") self.assertTrue(result.get('success')) def test_incorrect_answer(self): @@ -129,11 +142,15 @@ class BashStdioEvaluationTestCases(unittest.TestCase): echo -n `expr $A - $B` """ ) - test_case_data = [{'expected_output': '11', 'expected_input': '5\n6'}] + test_case_data = [{'expected_output': '11', + 'expected_input': '5\n6', + 'weight': 0.0 + }] get_class = BashStdioEvaluator() kwargs = {"user_answer": user_answer, - "test_case_data": test_case_data - } + "partial_grading": True, + "test_case_data": test_case_data + } result = get_class.evaluate(**kwargs) self.assertIn("Incorrect", result.get('error')) self.assertFalse(result.get('success')) @@ -146,14 +163,16 @@ class BashStdioEvaluationTestCases(unittest.TestCase): """ ) test_case_data = [{'expected_output': '10', - 'expected_input': '' + 'expected_input': '', + 'weight': 0.0 }] get_class = BashStdioEvaluator() kwargs = {"user_answer": user_answer, - "test_case_data": test_case_data - } + "partial_grading": True, + "test_case_data": test_case_data + } result = get_class.evaluate(**kwargs) - self.assertEqual(result.get('error'), "Correct answer") + self.assertEqual(result.get('error'), "Correct answer\n") self.assertTrue(result.get('success')) if __name__ == '__main__': diff --git a/yaksh/evaluator_tests/test_c_cpp_evaluation.py b/yaksh/evaluator_tests/test_c_cpp_evaluation.py index c990436..d5193d3 100644 --- a/yaksh/evaluator_tests/test_c_cpp_evaluation.py +++ b/yaksh/evaluator_tests/test_c_cpp_evaluation.py @@ -14,7 +14,9 @@ class CAssertionEvaluationTestCases(unittest.TestCase): with open('/tmp/test.txt', 'wb') as f: f.write('2'.encode('ascii')) tmp_in_dir_path = tempfile.mkdtemp() - self.test_case_data = [{"test_case": "c_cpp_files/main.cpp"}] + self.test_case_data = [{"test_case": "c_cpp_files/main.cpp", + "weight": 0.0 + }] self.in_dir = tmp_in_dir_path self.timeout_msg = ("Code took more than {0} seconds to run. 
" "You probably have an infinite loop in your" @@ -29,17 +31,19 @@ class CAssertionEvaluationTestCases(unittest.TestCase): user_answer = "int add(int a, int b)\n{return a+b;}" get_class = CppCodeEvaluator(self.in_dir) kwargs = {'user_answer': user_answer, + 'partial_grading': False, 'test_case_data': self.test_case_data, 'file_paths': self.file_paths } result = get_class.evaluate(**kwargs) self.assertTrue(result.get('success')) - self.assertEqual(result.get('error'), "Correct answer") + self.assertEqual(result.get('error'), "Correct answer\n") def test_incorrect_answer(self): user_answer = "int add(int a, int b)\n{return a-b;}" get_class = CppCodeEvaluator(self.in_dir) kwargs = {'user_answer': user_answer, + 'partial_grading': False, 'test_case_data': self.test_case_data, 'file_paths': self.file_paths } @@ -52,7 +56,8 @@ class CAssertionEvaluationTestCases(unittest.TestCase): def test_compilation_error(self): user_answer = "int add(int a, int b)\n{return a+b}" get_class = CppCodeEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, + kwargs = {'user_answer': user_answer, + 'partial_grading': False, 'test_case_data': self.test_case_data, 'file_paths': self.file_paths } @@ -63,7 +68,8 @@ class CAssertionEvaluationTestCases(unittest.TestCase): def test_infinite_loop(self): user_answer = "int add(int a, int b)\n{while(1>0){}}" get_class = CppCodeEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, + kwargs = {'user_answer': user_answer, + 'partial_grading': False, 'test_case_data': self.test_case_data, 'file_paths': self.file_paths } @@ -73,7 +79,9 @@ class CAssertionEvaluationTestCases(unittest.TestCase): def test_file_based_assert(self): self.file_paths = [('/tmp/test.txt', False)] - self.test_case_data = [{"test_case": "c_cpp_files/file_data.c"}] + self.test_case_data = [{"test_case": "c_cpp_files/file_data.c", + "weight": 0.0 + }] user_answer = dedent(""" #include<stdio.h> char ans() @@ -88,18 +96,21 @@ class CAssertionEvaluationTestCases(unittest.TestCase): """) get_class = CppCodeEvaluator(self.in_dir) kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths - } + 'partial_grading': False, + 'test_case_data': self.test_case_data, + 'file_paths': self.file_paths + } result = get_class.evaluate(**kwargs) self.assertTrue(result.get('success')) - self.assertEqual(result.get('error'), "Correct answer") + self.assertEqual(result.get('error'), "Correct answer\n") class CppStdioEvaluationTestCases(unittest.TestCase): - def setUp(self): - self.test_case_data = [{'expected_output': '11', 'expected_input': '5\n6'}] - self.in_dir = os.getcwd() + self.test_case_data = [{'expected_output': '11', + 'expected_input': '5\n6', + 'weight': 0.0 + }] + self.in_dir = tempfile.mkdtemp() self.timeout_msg = ("Code took more than {0} seconds to run. 
" "You probably have an infinite loop in" " your code.").format(SERVER_TIMEOUT) @@ -114,15 +125,18 @@ class CppStdioEvaluationTestCases(unittest.TestCase): }""") get_class = CppStdioEvaluator() kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data - } + 'partial_grading': False, + 'test_case_data': self.test_case_data + } result = get_class.evaluate(**kwargs) - self.assertEqual(result.get('error'), "Correct answer") + self.assertEqual(result.get('error'), "Correct answer\n") self.assertTrue(result.get('success')) def test_array_input(self): self.test_case_data = [{'expected_output': '561', - 'expected_input': '5\n6\n1'}] + 'expected_input': '5\n6\n1', + 'weight': 0.0 + }] user_answer = dedent(""" #include<stdio.h> int main(void){ @@ -134,15 +148,18 @@ class CppStdioEvaluationTestCases(unittest.TestCase): }""") get_class = CppStdioEvaluator() kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data - } + 'partial_grading': False, + 'test_case_data': self.test_case_data + } result = get_class.evaluate(**kwargs) - self.assertEqual(result.get('error'), "Correct answer") + self.assertEqual(result.get('error'), "Correct answer\n") self.assertTrue(result.get('success')) def test_string_input(self): self.test_case_data = [{'expected_output': 'abc', - 'expected_input': 'abc'}] + 'expected_input': 'abc', + 'weight': 0.0 + }] user_answer = dedent(""" #include<stdio.h> int main(void){ @@ -152,10 +169,11 @@ class CppStdioEvaluationTestCases(unittest.TestCase): }""") get_class = CppStdioEvaluator() kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data - } + 'partial_grading': False, + 'test_case_data': self.test_case_data + } result = get_class.evaluate(**kwargs) - self.assertEqual(result.get('error'), "Correct answer") + self.assertEqual(result.get('error'), "Correct answer\n") self.assertTrue(result.get('success')) def test_incorrect_answer(self): @@ -167,8 +185,9 @@ class CppStdioEvaluationTestCases(unittest.TestCase): }""") get_class = CppStdioEvaluator() kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data - } + 'partial_grading': False, + 'test_case_data': self.test_case_data + } result = get_class.evaluate(**kwargs) lines_of_error = len(result.get('error').splitlines()) self.assertFalse(result.get('success')) @@ -184,8 +203,9 @@ class CppStdioEvaluationTestCases(unittest.TestCase): }""") get_class = CppStdioEvaluator() kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data - } + 'partial_grading': False, + 'test_case_data': self.test_case_data + } result = get_class.evaluate(**kwargs) self.assertFalse(result.get("success")) self.assertTrue("Compilation Error" in result.get("error")) @@ -199,15 +219,18 @@ class CppStdioEvaluationTestCases(unittest.TestCase): }""") get_class = CppStdioEvaluator() kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data - } + 'partial_grading': False, + 'test_case_data': self.test_case_data + } result = get_class.evaluate(**kwargs) self.assertFalse(result.get("success")) self.assertEqual(result.get("error"), self.timeout_msg) def test_only_stdout(self): self.test_case_data = [{'expected_output': '11', - 'expected_input': ''}] + 'expected_input': '', + 'weight': 0.0 + }] user_answer = dedent(""" #include<stdio.h> int main(void){ @@ -216,10 +239,11 @@ class CppStdioEvaluationTestCases(unittest.TestCase): }""") get_class = CppStdioEvaluator() kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data - } + 
'partial_grading': False, + 'test_case_data': self.test_case_data + } result = get_class.evaluate(**kwargs) - self.assertEqual(result.get('error'), "Correct answer") + self.assertEqual(result.get('error'), "Correct answer\n") self.assertTrue(result.get('success')) def test_cpp_correct_answer(self): @@ -233,15 +257,18 @@ class CppStdioEvaluationTestCases(unittest.TestCase): }""") get_class = CppStdioEvaluator() kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data - } + 'partial_grading': False, + 'test_case_data': self.test_case_data + } result = get_class.evaluate(**kwargs) - self.assertEqual(result.get('error'), "Correct answer") + self.assertEqual(result.get('error'), "Correct answer\n") self.assertTrue(result.get('success')) def test_cpp_array_input(self): self.test_case_data = [{'expected_output': '561', - 'expected_input': '5\n6\n1'}] + 'expected_input': '5\n6\n1', + 'weight': 0.0 + }] user_answer = dedent(""" #include<iostream> using namespace std; @@ -254,15 +281,18 @@ class CppStdioEvaluationTestCases(unittest.TestCase): }""") get_class = CppStdioEvaluator() kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data - } + 'partial_grading': False, + 'test_case_data': self.test_case_data + } result = get_class.evaluate(**kwargs) - self.assertEqual(result.get('error'), "Correct answer") + self.assertEqual(result.get('error'), "Correct answer\n") self.assertTrue(result.get('success')) def test_cpp_string_input(self): self.test_case_data = [{'expected_output': 'abc', - 'expected_input': 'abc'}] + 'expected_input': 'abc', + 'weight': 0.0 + }] user_answer = dedent(""" #include<iostream> using namespace std; @@ -273,10 +303,11 @@ class CppStdioEvaluationTestCases(unittest.TestCase): }""") get_class = CppStdioEvaluator() kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data - } + 'partial_grading': False, + 'test_case_data': self.test_case_data + } result = get_class.evaluate(**kwargs) - self.assertEqual(result.get('error'), "Correct answer") + self.assertEqual(result.get('error'), "Correct answer\n") self.assertTrue(result.get('success')) def test_cpp_incorrect_answer(self): @@ -289,8 +320,9 @@ class CppStdioEvaluationTestCases(unittest.TestCase): }""") get_class = CppStdioEvaluator() kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data - } + 'partial_grading': False, + 'test_case_data': self.test_case_data + } result = get_class.evaluate(**kwargs) lines_of_error = len(result.get('error').splitlines()) self.assertFalse(result.get('success')) @@ -307,8 +339,9 @@ class CppStdioEvaluationTestCases(unittest.TestCase): }""") get_class = CppStdioEvaluator() kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data - } + 'partial_grading': False, + 'test_case_data': self.test_case_data + } result = get_class.evaluate(**kwargs) self.assertFalse(result.get("success")) self.assertTrue("Compilation Error" in result.get("error")) @@ -323,15 +356,18 @@ class CppStdioEvaluationTestCases(unittest.TestCase): }""") get_class = CppStdioEvaluator() kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data - } + 'partial_grading': False, + 'test_case_data': self.test_case_data + } result = get_class.evaluate(**kwargs) self.assertFalse(result.get("success")) self.assertEqual(result.get("error"), self.timeout_msg) def test_cpp_only_stdout(self): self.test_case_data = [{'expected_output': '11', - 'expected_input': ''}] + 'expected_input': '', + 'weight': 0.0 + }] user_answer = 
dedent(""" #include<iostream> using namespace std; @@ -341,10 +377,11 @@ class CppStdioEvaluationTestCases(unittest.TestCase): }""") get_class = CppStdioEvaluator() kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data - } + 'partial_grading': False, + 'test_case_data': self.test_case_data + } result = get_class.evaluate(**kwargs) - self.assertEqual(result.get('error'), "Correct answer") + self.assertEqual(result.get('error'), "Correct answer\n") self.assertTrue(result.get('success')) if __name__ == '__main__': diff --git a/yaksh/evaluator_tests/test_java_evaluation.py b/yaksh/evaluator_tests/test_java_evaluation.py index e375bdb..f7ecd97 100644 --- a/yaksh/evaluator_tests/test_java_evaluation.py +++ b/yaksh/evaluator_tests/test_java_evaluation.py @@ -16,7 +16,9 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase): f.write('2'.encode('ascii')) tmp_in_dir_path = tempfile.mkdtemp() self.test_case_data = [ - {"test_case": "java_files/main_square.java"} + {"test_case": "java_files/main_square.java", + "weight": 0.0 + } ] self.in_dir = tmp_in_dir_path evaluator.SERVER_TIMEOUT = 9 @@ -32,18 +34,20 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase): def test_correct_answer(self): user_answer = "class Test {\n\tint square_num(int a) {\n\treturn a*a;\n\t}\n}" get_class = JavaCodeEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, + kwargs = {'user_answer': user_answer, + 'partial_grading': True, 'test_case_data': self.test_case_data, 'file_paths': self.file_paths } result = get_class.evaluate(**kwargs) - self.assertEqual(result.get('error'), "Correct answer") + self.assertEqual(result.get('error'), "Correct answer\n") self.assertTrue(result.get('success')) def test_incorrect_answer(self): user_answer = "class Test {\n\tint square_num(int a) {\n\treturn a;\n\t}\n}" get_class = JavaCodeEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, + kwargs = {'user_answer': user_answer, + 'partial_grading': True, 'test_case_data': self.test_case_data, 'file_paths': self.file_paths } @@ -57,7 +61,8 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase): def test_error(self): user_answer = "class Test {\n\tint square_num(int a) {\n\treturn a*a" get_class = JavaCodeEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, + kwargs = {'user_answer': user_answer, + 'partial_grading': True, 'test_case_data': self.test_case_data, 'file_paths': self.file_paths } @@ -68,7 +73,8 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase): def test_infinite_loop(self): user_answer = "class Test {\n\tint square_num(int a) {\n\t\twhile(0==0){\n\t\t}\n\t}\n}" get_class = JavaCodeEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, + kwargs = {'user_answer': user_answer, + 'partial_grading': True, 'test_case_data': self.test_case_data, 'file_paths': self.file_paths } @@ -79,7 +85,9 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase): def test_file_based_assert(self): self.file_paths = [("/tmp/test.txt", False)] self.test_case_data = [ - {"test_case": "java_files/read_file.java"} + {"test_case": "java_files/read_file.java", + "weight": 0.0 + } ] user_answer = dedent(""" import java.io.BufferedReader; @@ -101,12 +109,13 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase): """) get_class = JavaCodeEvaluator(self.in_dir) kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths - } + 'partial_grading': True, + 'test_case_data': self.test_case_data, + 'file_paths': 
self.file_paths + } result = get_class.evaluate(**kwargs) self.assertTrue(result.get("success")) - self.assertEqual(result.get("error"), "Correct answer") + self.assertEqual(result.get("error"), "Correct answer\n") class JavaStdioEvaluationTestCases(unittest.TestCase): @@ -116,7 +125,9 @@ class JavaStdioEvaluationTestCases(unittest.TestCase): tmp_in_dir_path = tempfile.mkdtemp() self.in_dir = tmp_in_dir_path self.test_case_data = [{'expected_output': '11', - 'expected_input': '5\n6'}] + 'expected_input': '5\n6', + 'weight': 0.0 + }] evaluator.SERVER_TIMEOUT = 4 self.timeout_msg = ("Code took more than {0} seconds to run. " "You probably have an infinite loop in" @@ -139,16 +150,19 @@ class JavaStdioEvaluationTestCases(unittest.TestCase): }}""") get_class = JavaStdioEvaluator(self.in_dir) kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data - } + 'partial_grading': True, + 'test_case_data': self.test_case_data + } result = get_class.evaluate(**kwargs) - self.assertEqual(result.get('error'), "Correct answer") + self.assertEqual(result.get('error'), "Correct answer\n") self.assertTrue(result.get('success')) def test_array_input(self): self.test_case_data = [{'expected_output': '561', - 'expected_input': '5\n6\n1'}] + 'expected_input': '5\n6\n1', + 'weight': 0.0 + }] user_answer = dedent(""" import java.util.Scanner; class Test @@ -161,10 +175,11 @@ class JavaStdioEvaluationTestCases(unittest.TestCase): }}""") get_class = JavaStdioEvaluator(self.in_dir) kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data - } + 'partial_grading': True, + 'test_case_data': self.test_case_data + } result = get_class.evaluate(**kwargs) - self.assertEqual(result.get('error'), "Correct answer") + self.assertEqual(result.get('error'), "Correct answer\n") self.assertTrue(result.get('success')) def test_incorrect_answer(self): @@ -180,8 +195,9 @@ class JavaStdioEvaluationTestCases(unittest.TestCase): }}""") get_class = JavaStdioEvaluator(self.in_dir) kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data - } + 'partial_grading': True, + 'test_case_data': self.test_case_data + } result = get_class.evaluate(**kwargs) lines_of_error = len(result.get('error').splitlines()) self.assertFalse(result.get('success')) @@ -197,8 +213,9 @@ class JavaStdioEvaluationTestCases(unittest.TestCase): }""") get_class = JavaStdioEvaluator(self.in_dir) kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data - } + 'partial_grading': True, + 'test_case_data': self.test_case_data + } result = get_class.evaluate(**kwargs) self.assertFalse(result.get("success")) self.assertTrue("Compilation Error" in result.get("error")) @@ -214,15 +231,18 @@ class JavaStdioEvaluationTestCases(unittest.TestCase): }}""") get_class = JavaStdioEvaluator(self.in_dir) kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data - } + 'partial_grading': True, + 'test_case_data': self.test_case_data + } result = get_class.evaluate(**kwargs) self.assertFalse(result.get("success")) self.assertEqual(result.get("error"), self.timeout_msg) def test_only_stdout(self): self.test_case_data = [{'expected_output': '11', - 'expected_input': ''}] + 'expected_input': '', + 'weight': 0.0 + }] user_answer = dedent(""" class Test {public static void main(String[] args){ @@ -232,15 +252,18 @@ class JavaStdioEvaluationTestCases(unittest.TestCase): }}""") get_class = JavaStdioEvaluator(self.in_dir) kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data - 
} + 'partial_grading': True, + 'test_case_data': self.test_case_data + } result = get_class.evaluate(**kwargs) - self.assertEqual(result.get('error'), "Correct answer") + self.assertEqual(result.get('error'), "Correct answer\n") self.assertTrue(result.get('success')) def test_string_input(self): self.test_case_data = [{'expected_output': 'HelloWorld', - 'expected_input': 'Hello\nWorld'}] + 'expected_input': 'Hello\nWorld', + 'weight': 0.0 + }] user_answer = dedent(""" import java.util.Scanner; class Test @@ -252,16 +275,19 @@ class JavaStdioEvaluationTestCases(unittest.TestCase): }}""") get_class = JavaStdioEvaluator(self.in_dir) kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data - } + 'partial_grading': True, + 'test_case_data': self.test_case_data + } result = get_class.evaluate(**kwargs) - self.assertEqual(result.get('error'), "Correct answer") + self.assertEqual(result.get('error'), "Correct answer\n") self.assertTrue(result.get('success')) def test_file_based_stdout(self): self.file_paths = [("/tmp/test.txt", False)] self.test_case_data = [{'expected_output': '2', - 'expected_input': ''}] + 'expected_input': '', + 'weight': 0.0 + }] user_answer = dedent(""" import java.io.BufferedReader; import java.io.FileReader; @@ -282,12 +308,13 @@ class JavaStdioEvaluationTestCases(unittest.TestCase): """) get_class = JavaStdioEvaluator(self.in_dir) kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths - } + 'partial_grading': True, + 'test_case_data': self.test_case_data, + 'file_paths': self.file_paths + } result = get_class.evaluate(**kwargs) self.assertTrue(result.get("success")) - self.assertEqual(result.get("error"), "Correct answer") + self.assertEqual(result.get("error"), "Correct answer\n") if __name__ == '__main__': diff --git a/yaksh/evaluator_tests/test_python_evaluation.py b/yaksh/evaluator_tests/test_python_evaluation.py index 45cc40d..9796fa2 100644 --- a/yaksh/evaluator_tests/test_python_evaluation.py +++ b/yaksh/evaluator_tests/test_python_evaluation.py @@ -17,9 +17,9 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): f.write('2'.encode('ascii')) tmp_in_dir_path = tempfile.mkdtemp() self.in_dir = tmp_in_dir_path - self.test_case_data = [{"test_case": 'assert(add(1,2)==3)'}, - {"test_case": 'assert(add(-1,2)==1)'}, - {"test_case": 'assert(add(-1,-2)==-3)'}, + self.test_case_data = [{"test_case": 'assert(add(1,2)==3)', 'weight': 0.0}, + {"test_case": 'assert(add(-1,2)==1)', 'weight': 0.0}, + {"test_case": 'assert(add(-1,-2)==-3)', 'weight': 0.0}, ] self.timeout_msg = ("Code took more than {0} seconds to run. 
" "You probably have an infinite loop in" @@ -35,7 +35,8 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): user_answer = "def add(a,b):\n\treturn a + b" kwargs = {'user_answer': user_answer, 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths + 'file_paths': self.file_paths, + 'partial_grading': False } # When @@ -44,14 +45,15 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): # Then self.assertTrue(result.get('success')) - self.assertEqual(result.get('error'), "Correct answer") + self.assertIn("Correct answer", result.get('error')) def test_incorrect_answer(self): # Given user_answer = "def add(a,b):\n\treturn a - b" kwargs = {'user_answer': user_answer, 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths + 'file_paths': self.file_paths, + 'partial_grading': False } # When @@ -60,16 +62,50 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): # Then self.assertFalse(result.get('success')) - self.assertEqual(result.get('error'), - "AssertionError in: assert(add(1,2)==3)" - ) + self.assertIn('AssertionError in: assert(add(1,2)==3)', + result.get('error') + ) + self.assertIn('AssertionError in: assert(add(-1,2)==1)', + result.get('error') + ) + self.assertIn('AssertionError in: assert(add(-1,-2)==-3)', + result.get('error') + ) + + def test_partial_incorrect_answer(self): + # Given + user_answer = "def add(a,b):\n\treturn abs(a) + abs(b)" + test_case_data = [{"test_case": 'assert(add(-1,2)==1)', 'weight': 1.0}, + {"test_case": 'assert(add(-1,-2)==-3)', 'weight': 1.0}, + {"test_case": 'assert(add(1,2)==3)', 'weight': 2.0} + ] + kwargs = {'user_answer': user_answer, + 'test_case_data': test_case_data, + 'file_paths': self.file_paths, + 'partial_grading': True + } + + # When + evaluator = PythonAssertionEvaluator() + result = evaluator.evaluate(**kwargs) + + # Then + self.assertFalse(result.get('success')) + self.assertEqual(result.get('weight'), 2.0) + self.assertIn('AssertionError in: assert(add(-1,2)==1)', + result.get('error') + ) + self.assertIn('AssertionError in: assert(add(-1,-2)==-3)', + result.get('error') + ) def test_infinite_loop(self): # Given user_answer = "def add(a, b):\n\twhile True:\n\t\tpass" kwargs = {'user_answer': user_answer, 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths + 'file_paths': self.file_paths, + 'partial_grading': False } # When @@ -96,7 +132,8 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): ] kwargs = {'user_answer': user_answer, 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths + 'file_paths': self.file_paths, + 'partial_grading': False } # When @@ -125,7 +162,9 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): ] kwargs = {'user_answer': user_answer, 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths + 'file_paths': self.file_paths, + 'partial_grading': False + } # When @@ -150,7 +189,8 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): ] kwargs = {'user_answer': user_answer, 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths + 'file_paths': self.file_paths, + 'partial_grading': False } # When @@ -176,7 +216,8 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): ] kwargs = {'user_answer': user_answer, 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths + 'file_paths': self.file_paths, + 'partial_grading': False } # When @@ -202,7 +243,8 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): ] kwargs = {'user_answer': 
user_answer, 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths + 'file_paths': self.file_paths, + 'partial_grading': False } # When @@ -231,7 +273,8 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): ] kwargs = {'user_answer': user_answer, 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths + 'file_paths': self.file_paths, + 'partial_grading': False } # When @@ -247,7 +290,7 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): def test_file_based_assert(self): # Given - self.test_case_data = [{"test_case": "assert(ans()=='2')"}] + self.test_case_data = [{"test_case": "assert(ans()=='2')", "weight": 0.0}] self.file_paths = [('/tmp/test.txt', False)] user_answer = dedent(""" def ans(): @@ -256,7 +299,8 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): """) kwargs = {'user_answer': user_answer, 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths + 'file_paths': self.file_paths, + 'partial_grading': False } # When @@ -264,7 +308,7 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): result = evaluator.evaluate(**kwargs) # Then - self.assertEqual(result.get('error'), "Correct answer") + self.assertIn("Correct answer", result.get('error')) self.assertTrue(result.get('success')) def test_single_testcase_error(self): @@ -272,7 +316,9 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): """ Tests the user answer with just an incorrect test case """ user_answer = "def palindrome(a):\n\treturn a == a[::-1]" - test_case_data = [{"test_case": 's="abbb"\nasert palindrome(s)==False'} + test_case_data = [{"test_case": 's="abbb"\nasert palindrome(s)==False', + "weight": 0.0 + } ] syntax_error_msg = ["Traceback", "call", @@ -284,7 +330,8 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): ] kwargs = {'user_answer': user_answer, 'test_case_data': test_case_data, - 'file_paths': self.file_paths + 'file_paths': self.file_paths, + 'partial_grading': False } # When @@ -304,8 +351,12 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): first and then with an incorrect test case """ # Given user_answer = "def palindrome(a):\n\treturn a == a[::-1]" - test_case_data = [{"test_case": 'assert(palindrome("abba")==True)'}, - {"test_case": 's="abbb"\nassert palindrome(S)==False'} + test_case_data = [{"test_case": 'assert(palindrome("abba")==True)', + "weight": 0.0 + }, + {"test_case": 's="abbb"\nassert palindrome(S)==False', + "weight": 0.0 + } ] name_error_msg = ["Traceback", "call", @@ -317,7 +368,8 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): ] kwargs = {'user_answer': user_answer, 'test_case_data': test_case_data, - 'file_paths': self.file_paths + 'file_paths': self.file_paths, + 'partial_grading': False } # When @@ -341,7 +393,8 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase): def test_correct_answer_integer(self): # Given self.test_case_data = [{"expected_input": "1\n2", - "expected_output": "3" + "expected_output": "3", + "weight": 0.0 }] user_answer = dedent(""" a = int(input()) @@ -350,7 +403,8 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase): """ ) kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data + 'test_case_data': self.test_case_data, + 'partial_grading': False } # When @@ -364,7 +418,8 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase): def test_correct_answer_list(self): # Given self.test_case_data = [{"expected_input": "1,2,3\n5,6,7", - "expected_output": "[1, 2, 3, 5, 6, 7]" + 
"expected_output": "[1, 2, 3, 5, 6, 7]", + "weight": 0.0 }] user_answer = dedent(""" from six.moves import input @@ -376,7 +431,8 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase): """ ) kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data + 'test_case_data': self.test_case_data, + 'partial_grading': False } # When @@ -390,7 +446,8 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase): def test_correct_answer_string(self): # Given self.test_case_data = [{"expected_input": ("the quick brown fox jumps over the lazy dog\nthe"), - "expected_output": "2" + "expected_output": "2", + "weight": 0.0 }] user_answer = dedent(""" from six.moves import input @@ -400,7 +457,8 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase): """ ) kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data + 'test_case_data': self.test_case_data, + 'partial_grading': False } # When @@ -414,7 +472,8 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase): def test_incorrect_answer_integer(self): # Given self.test_case_data = [{"expected_input": "1\n2", - "expected_output": "3" + "expected_output": "3", + "weight": 0.0 }] user_answer = dedent(""" a = int(input()) @@ -424,6 +483,7 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase): ) kwargs = {'user_answer': user_answer, 'test_case_data': self.test_case_data, + 'partial_grading': False } # When @@ -436,7 +496,10 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase): def test_file_based_answer(self): # Given - self.test_case_data = [{"expected_input": "", "expected_output": "2"}] + self.test_case_data = [{"expected_input": "", + "expected_output": "2", + "weight": 0.0 + }] self.file_paths = [('/tmp/test.txt', False)] user_answer = dedent(""" @@ -447,7 +510,8 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase): ) kwargs = {'user_answer': user_answer, 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths + 'file_paths': self.file_paths, + 'partial_grading': False } # When @@ -455,20 +519,22 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase): result = evaluator.evaluate(**kwargs) # Then - self.assertEqual(result.get('error'), "Correct answer") + self.assertEqual(result.get('error'), "Correct answer\n") self.assertTrue(result.get('success')) def test_infinite_loop(self): # Given test_case_data = [{"expected_input": "1\n2", - "expected_output": "3" - }] + "expected_output": "3", + "weight": 0.0 + }] timeout_msg = ("Code took more than {0} seconds to run. " "You probably have an infinite loop in" " your code.").format(SERVER_TIMEOUT) user_answer = "while True:\n\tpass" kwargs = {'user_answer': user_answer, - 'test_case_data': test_case_data + 'test_case_data': test_case_data, + 'partial_grading': False } # When diff --git a/yaksh/evaluator_tests/test_scilab_evaluation.py b/yaksh/evaluator_tests/test_scilab_evaluation.py index b366480..c30f652 100644 --- a/yaksh/evaluator_tests/test_scilab_evaluation.py +++ b/yaksh/evaluator_tests/test_scilab_evaluation.py @@ -11,7 +11,9 @@ from yaksh.settings import SERVER_TIMEOUT class ScilabEvaluationTestCases(unittest.TestCase): def setUp(self): tmp_in_dir_path = tempfile.mkdtemp() - self.test_case_data = [{"test_case": "scilab_files/test_add.sce"}] + self.test_case_data = [{"test_case": "scilab_files/test_add.sce", + "weight": 0.0 + }] self.in_dir = tmp_in_dir_path self.timeout_msg = ("Code took more than {0} seconds to run. 
" "You probably have an infinite loop" @@ -25,19 +27,21 @@ class ScilabEvaluationTestCases(unittest.TestCase): user_answer = ("funcprot(0)\nfunction[c]=add(a,b)" "\n\tc=a+b;\nendfunction") get_class = ScilabCodeEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, + kwargs = {'user_answer': user_answer, + 'partial_grading': True, 'test_case_data': self.test_case_data, 'file_paths': self.file_paths } result = get_class.evaluate(**kwargs) - self.assertEqual(result.get('error'), "Correct answer") + self.assertEqual(result.get('error'), "Correct answer\n") self.assertTrue(result.get('success')) def test_error(self): user_answer = ("funcprot(0)\nfunction[c]=add(a,b)" "\n\tc=a+b;\ndis(\tendfunction") get_class = ScilabCodeEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, + kwargs = {'user_answer': user_answer, + 'partial_grading': True, 'test_case_data': self.test_case_data, 'file_paths': self.file_paths } @@ -50,7 +54,8 @@ class ScilabEvaluationTestCases(unittest.TestCase): user_answer = ("funcprot(0)\nfunction[c]=add(a,b)" "\n\tc=a-b;\nendfunction") get_class = ScilabCodeEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, + kwargs = {'user_answer': user_answer, + 'partial_grading': True, 'test_case_data': self.test_case_data, 'file_paths': self.file_paths } @@ -64,7 +69,8 @@ class ScilabEvaluationTestCases(unittest.TestCase): user_answer = ("funcprot(0)\nfunction[c]=add(a,b)" "\n\tc=a;\nwhile(1==1)\nend\nendfunction") get_class = ScilabCodeEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, + kwargs = {'user_answer': user_answer, + 'partial_grading': True, 'test_case_data': self.test_case_data, 'file_paths': self.file_paths } |