author     ankitjavalkar  2016-12-19 11:44:55 +0530
committer  ankitjavalkar  2016-12-20 12:46:03 +0530
commit     f1da39aded67efa3da145851f0e9f687a3e434e5 (patch)
tree       c8713233fcaaf50e7e4a62b337258e7932fcf99c /yaksh/evaluator_tests
parent     80a4feef3c209e044e8cbe31e44c81d69136e100 (diff)
Change the evaluator structure and make sure all evaluator test cases pass
Diffstat (limited to 'yaksh/evaluator_tests')
-rw-r--r--  yaksh/evaluator_tests/old_test_python_evaluation.py | 549
-rw-r--r--  yaksh/evaluator_tests/test_bash_evaluation.py       | 211
-rw-r--r--  yaksh/evaluator_tests/test_c_cpp_evaluation.py      | 468
-rw-r--r--  yaksh/evaluator_tests/test_code_evaluation.py       |  12
-rw-r--r--  yaksh/evaluator_tests/test_java_evaluation.py       | 371
-rw-r--r--  yaksh/evaluator_tests/test_python_evaluation.py     |   2
-rw-r--r--  yaksh/evaluator_tests/test_scilab_evaluation.py     | 107

7 files changed, 886 insertions(+), 834 deletions(-)
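
The common thread through these test changes is the calling convention: instead of instantiating a language-specific evaluator (BashCodeEvaluator, CppStdioEvaluator, JavaCodeEvaluator, ScilabCodeEvaluator, ...) and calling evaluate(**kwargs), the tests now build a single payload dict and hand it to yaksh.code_evaluator.CodeEvaluator. Submission-level details move into a 'metadata' sub-dict and every test case gains an explicit 'test_case_type'. The sketch below is assembled from the updated bash tests in this diff; it is illustrative only and assumes a yaksh checkout where the bash_files/sample.sh and bash_files/sample.args fixtures are present.

    # Minimal sketch of the new CodeEvaluator entry point, mirroring the
    # updated bash test cases in this commit.
    import tempfile

    from yaksh.code_evaluator import CodeEvaluator

    user_answer = ("#!/bin/bash\n[[ $# -eq 2 ]]"
                   " && echo $(( $1 + $2 )) && exit $(( $1 + $2 ))")

    kwargs = {
        # Per-submission details now travel in a 'metadata' sub-dict.
        'metadata': {
            'user_answer': user_answer,
            'file_paths': None,
            'partial_grading': False,
            'language': 'bash',
        },
        # Each test case now carries an explicit test_case_type.
        'test_case_data': [
            {'test_case': 'bash_files/sample.sh,bash_files/sample.args',
             'test_case_type': 'standardtestcase',
             'weight': 0.0},
        ],
    }

    evaluator = CodeEvaluator(tempfile.mkdtemp())  # scratch dir, as in setUp()
    result = evaluator.evaluate(kwargs)
    print(result.get('success'), result.get('error'))

The 'test_case_type' value ('standardtestcase' or 'stdiobasedtestcase') is what the evaluator registry, exercised in test_code_evaluation.py, uses to look up the concrete evaluator class for a given language, which is also why the stdio mapping there switches from PythonStdoutEvaluator to PythonStdioEvaluator.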
diff --git a/yaksh/evaluator_tests/old_test_python_evaluation.py b/yaksh/evaluator_tests/old_test_python_evaluation.py deleted file mode 100644 index 9796fa2..0000000 --- a/yaksh/evaluator_tests/old_test_python_evaluation.py +++ /dev/null @@ -1,549 +0,0 @@ -from __future__ import unicode_literals -import unittest -import os -import tempfile -import shutil -from textwrap import dedent - -# Local import -from yaksh.python_assertion_evaluator import PythonAssertionEvaluator -from yaksh.python_stdio_evaluator import PythonStdioEvaluator -from yaksh.settings import SERVER_TIMEOUT - - -class PythonAssertionEvaluationTestCases(unittest.TestCase): - def setUp(self): - with open('/tmp/test.txt', 'wb') as f: - f.write('2'.encode('ascii')) - tmp_in_dir_path = tempfile.mkdtemp() - self.in_dir = tmp_in_dir_path - self.test_case_data = [{"test_case": 'assert(add(1,2)==3)', 'weight': 0.0}, - {"test_case": 'assert(add(-1,2)==1)', 'weight': 0.0}, - {"test_case": 'assert(add(-1,-2)==-3)', 'weight': 0.0}, - ] - self.timeout_msg = ("Code took more than {0} seconds to run. " - "You probably have an infinite loop in" - " your code.").format(SERVER_TIMEOUT) - self.file_paths = None - - def tearDown(self): - os.remove('/tmp/test.txt') - shutil.rmtree(self.in_dir) - - def test_correct_answer(self): - # Given - user_answer = "def add(a,b):\n\treturn a + b" - kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths, - 'partial_grading': False - } - - # When - evaluator = PythonAssertionEvaluator() - result = evaluator.evaluate(**kwargs) - - # Then - self.assertTrue(result.get('success')) - self.assertIn("Correct answer", result.get('error')) - - def test_incorrect_answer(self): - # Given - user_answer = "def add(a,b):\n\treturn a - b" - kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths, - 'partial_grading': False - } - - # When - evaluator = PythonAssertionEvaluator() - result = evaluator.evaluate(**kwargs) - - # Then - self.assertFalse(result.get('success')) - self.assertIn('AssertionError in: assert(add(1,2)==3)', - result.get('error') - ) - self.assertIn('AssertionError in: assert(add(-1,2)==1)', - result.get('error') - ) - self.assertIn('AssertionError in: assert(add(-1,-2)==-3)', - result.get('error') - ) - - def test_partial_incorrect_answer(self): - # Given - user_answer = "def add(a,b):\n\treturn abs(a) + abs(b)" - test_case_data = [{"test_case": 'assert(add(-1,2)==1)', 'weight': 1.0}, - {"test_case": 'assert(add(-1,-2)==-3)', 'weight': 1.0}, - {"test_case": 'assert(add(1,2)==3)', 'weight': 2.0} - ] - kwargs = {'user_answer': user_answer, - 'test_case_data': test_case_data, - 'file_paths': self.file_paths, - 'partial_grading': True - } - - # When - evaluator = PythonAssertionEvaluator() - result = evaluator.evaluate(**kwargs) - - # Then - self.assertFalse(result.get('success')) - self.assertEqual(result.get('weight'), 2.0) - self.assertIn('AssertionError in: assert(add(-1,2)==1)', - result.get('error') - ) - self.assertIn('AssertionError in: assert(add(-1,-2)==-3)', - result.get('error') - ) - - def test_infinite_loop(self): - # Given - user_answer = "def add(a, b):\n\twhile True:\n\t\tpass" - kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths, - 'partial_grading': False - } - - # When - evaluator = PythonAssertionEvaluator() - result = evaluator.evaluate(**kwargs) - - # Then - self.assertFalse(result.get('success')) - 
self.assertEqual(result.get('error'), self.timeout_msg) - - def test_syntax_error(self): - # Given - user_answer = dedent(""" - def add(a, b); - return a + b - """) - syntax_error_msg = ["Traceback", - "call", - "File", - "line", - "<string>", - "SyntaxError", - "invalid syntax" - ] - kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths, - 'partial_grading': False - } - - # When - evaluator = PythonAssertionEvaluator() - result = evaluator.evaluate(**kwargs) - err = result.get("error").splitlines() - - # Then - self.assertFalse(result.get("success")) - self.assertEqual(5, len(err)) - for msg in syntax_error_msg: - self.assertIn(msg, result.get("error")) - - def test_indent_error(self): - # Given - user_answer = dedent(""" - def add(a, b): - return a + b - """) - indent_error_msg = ["Traceback", "call", - "File", - "line", - "<string>", - "IndentationError", - "indented block" - ] - kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths, - 'partial_grading': False - - } - - # When - evaluator = PythonAssertionEvaluator() - result = evaluator.evaluate(**kwargs) - err = result.get("error").splitlines() - - # Then - self.assertFalse(result.get("success")) - self.assertEqual(5, len(err)) - for msg in indent_error_msg: - self.assertIn(msg, result.get("error")) - - def test_name_error(self): - # Given - user_answer = "" - name_error_msg = ["Traceback", - "call", - "NameError", - "name", - "defined" - ] - kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths, - 'partial_grading': False - } - - # When - evaluator = PythonAssertionEvaluator() - result = evaluator.evaluate(**kwargs) - err = result.get("error").splitlines() - - # Then - self.assertFalse(result.get("success")) - self.assertEqual(3, len(err)) - for msg in name_error_msg: - self.assertIn(msg, result.get("error")) - - def test_recursion_error(self): - # Given - user_answer = dedent(""" - def add(a, b): - return add(3, 3) - """) - recursion_error_msg = ["Traceback", - "call", - "maximum recursion depth exceeded" - ] - kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths, - 'partial_grading': False - } - - # When - evaluator = PythonAssertionEvaluator() - result = evaluator.evaluate(**kwargs) - err = result.get("error").splitlines() - - # Then - self.assertFalse(result.get("success")) - for msg in recursion_error_msg: - self.assertIn(msg, result.get("error")) - - def test_type_error(self): - # Given - user_answer = dedent(""" - def add(a): - return a + b - """) - type_error_msg = ["Traceback", - "call", - "TypeError", - "argument" - ] - kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths, - 'partial_grading': False - } - - # When - evaluator = PythonAssertionEvaluator() - result = evaluator.evaluate(**kwargs) - err = result.get("error").splitlines() - - # Then - self.assertFalse(result.get("success")) - self.assertEqual(3, len(err)) - for msg in type_error_msg: - self.assertIn(msg, result.get("error")) - - def test_value_error(self): - # Given - user_answer = dedent(""" - def add(a, b): - c = 'a' - return int(a) + int(b) + int(c) - """) - value_error_msg = ["Traceback", - "call", - "ValueError", - "invalid literal", - "base" - ] - kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths, - 
'partial_grading': False - } - - # When - evaluator = PythonAssertionEvaluator() - result = evaluator.evaluate(**kwargs) - err = result.get("error").splitlines() - - # Then - self.assertFalse(result.get("success")) - self.assertEqual(4, len(err)) - for msg in value_error_msg: - self.assertIn(msg, result.get("error")) - - def test_file_based_assert(self): - # Given - self.test_case_data = [{"test_case": "assert(ans()=='2')", "weight": 0.0}] - self.file_paths = [('/tmp/test.txt', False)] - user_answer = dedent(""" - def ans(): - with open("test.txt") as f: - return f.read()[0] - """) - kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths, - 'partial_grading': False - } - - # When - evaluator = PythonAssertionEvaluator() - result = evaluator.evaluate(**kwargs) - - # Then - self.assertIn("Correct answer", result.get('error')) - self.assertTrue(result.get('success')) - - def test_single_testcase_error(self): - # Given - """ Tests the user answer with just an incorrect test case """ - - user_answer = "def palindrome(a):\n\treturn a == a[::-1]" - test_case_data = [{"test_case": 's="abbb"\nasert palindrome(s)==False', - "weight": 0.0 - } - ] - syntax_error_msg = ["Traceback", - "call", - "File", - "line", - "<string>", - "SyntaxError", - "invalid syntax" - ] - kwargs = {'user_answer': user_answer, - 'test_case_data': test_case_data, - 'file_paths': self.file_paths, - 'partial_grading': False - } - - # When - evaluator = PythonAssertionEvaluator() - result = evaluator.evaluate(**kwargs) - err = result.get("error").splitlines() - - # Then - self.assertFalse(result.get("success")) - self.assertEqual(5, len(err)) - for msg in syntax_error_msg: - self.assertIn(msg, result.get("error")) - - - def test_multiple_testcase_error(self): - """ Tests the user answer with an correct test case - first and then with an incorrect test case """ - # Given - user_answer = "def palindrome(a):\n\treturn a == a[::-1]" - test_case_data = [{"test_case": 'assert(palindrome("abba")==True)', - "weight": 0.0 - }, - {"test_case": 's="abbb"\nassert palindrome(S)==False', - "weight": 0.0 - } - ] - name_error_msg = ["Traceback", - "call", - "File", - "line", - "<string>", - "NameError", - "name 'S' is not defined" - ] - kwargs = {'user_answer': user_answer, - 'test_case_data': test_case_data, - 'file_paths': self.file_paths, - 'partial_grading': False - } - - # When - evaluator = PythonAssertionEvaluator() - result = evaluator.evaluate(**kwargs) - err = result.get("error").splitlines() - - # Then - self.assertFalse(result.get("success")) - self.assertEqual(3, len(err)) - for msg in name_error_msg: - self.assertIn(msg, result.get("error")) - - -class PythonStdIOEvaluationTestCases(unittest.TestCase): - def setUp(self): - with open('/tmp/test.txt', 'wb') as f: - f.write('2'.encode('ascii')) - self.file_paths = None - - def test_correct_answer_integer(self): - # Given - self.test_case_data = [{"expected_input": "1\n2", - "expected_output": "3", - "weight": 0.0 - }] - user_answer = dedent(""" - a = int(input()) - b = int(input()) - print(a+b) - """ - ) - kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data, - 'partial_grading': False - } - - # When - evaluator = PythonStdioEvaluator() - result = evaluator.evaluate(**kwargs) - - # Then - self.assertTrue(result.get('success')) - self.assertIn("Correct answer", result.get('error')) - - def test_correct_answer_list(self): - # Given - self.test_case_data = [{"expected_input": "1,2,3\n5,6,7", - 
"expected_output": "[1, 2, 3, 5, 6, 7]", - "weight": 0.0 - }] - user_answer = dedent(""" - from six.moves import input - input_a = input() - input_b = input() - a = [int(i) for i in input_a.split(',')] - b = [int(i) for i in input_b.split(',')] - print(a+b) - """ - ) - kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data, - 'partial_grading': False - } - - # When - evaluator = PythonStdioEvaluator() - result = evaluator.evaluate(**kwargs) - - # Then - self.assertTrue(result.get('success')) - self.assertIn("Correct answer", result.get('error')) - - def test_correct_answer_string(self): - # Given - self.test_case_data = [{"expected_input": ("the quick brown fox jumps over the lazy dog\nthe"), - "expected_output": "2", - "weight": 0.0 - }] - user_answer = dedent(""" - from six.moves import input - a = str(input()) - b = str(input()) - print(a.count(b)) - """ - ) - kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data, - 'partial_grading': False - } - - # When - evaluator = PythonStdioEvaluator() - result = evaluator.evaluate(**kwargs) - - # Then - self.assertTrue(result.get('success')) - self.assertIn("Correct answer", result.get('error')) - - def test_incorrect_answer_integer(self): - # Given - self.test_case_data = [{"expected_input": "1\n2", - "expected_output": "3", - "weight": 0.0 - }] - user_answer = dedent(""" - a = int(input()) - b = int(input()) - print(a-b) - """ - ) - kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data, - 'partial_grading': False - } - - # When - evaluator = PythonStdioEvaluator() - result = evaluator.evaluate(**kwargs) - - # Then - self.assertFalse(result.get('success')) - self.assertIn("Incorrect answer", result.get('error')) - - def test_file_based_answer(self): - # Given - self.test_case_data = [{"expected_input": "", - "expected_output": "2", - "weight": 0.0 - }] - self.file_paths = [('/tmp/test.txt', False)] - - user_answer = dedent(""" - with open("test.txt") as f: - a = f.read() - print(a[0]) - """ - ) - kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths, - 'partial_grading': False - } - - # When - evaluator = PythonStdioEvaluator() - result = evaluator.evaluate(**kwargs) - - # Then - self.assertEqual(result.get('error'), "Correct answer\n") - self.assertTrue(result.get('success')) - - def test_infinite_loop(self): - # Given - test_case_data = [{"expected_input": "1\n2", - "expected_output": "3", - "weight": 0.0 - }] - timeout_msg = ("Code took more than {0} seconds to run. 
" - "You probably have an infinite loop in" - " your code.").format(SERVER_TIMEOUT) - user_answer = "while True:\n\tpass" - kwargs = {'user_answer': user_answer, - 'test_case_data': test_case_data, - 'partial_grading': False - } - - # When - evaluator = PythonStdioEvaluator() - result = evaluator.evaluate(**kwargs) - - # Then - self.assertEqual(result.get('error'), timeout_msg) - self.assertFalse(result.get('success')) - -if __name__ == '__main__': - unittest.main() diff --git a/yaksh/evaluator_tests/test_bash_evaluation.py b/yaksh/evaluator_tests/test_bash_evaluation.py index 99e5122..8888ee6 100644 --- a/yaksh/evaluator_tests/test_bash_evaluation.py +++ b/yaksh/evaluator_tests/test_bash_evaluation.py @@ -3,6 +3,7 @@ import unittest import os import shutil import tempfile +from yaksh.code_evaluator import CodeEvaluator from yaksh.bash_code_evaluator import BashCodeEvaluator from yaksh.bash_stdio_evaluator import BashStdioEvaluator from yaksh.settings import SERVER_TIMEOUT @@ -15,6 +16,7 @@ class BashAssertionEvaluationTestCases(unittest.TestCase): f.write('2'.encode('ascii')) self.test_case_data = [ {"test_case": "bash_files/sample.sh,bash_files/sample.args", + "test_case_type": "standardtestcase", "weight": 0.0 } ] @@ -32,39 +34,78 @@ class BashAssertionEvaluationTestCases(unittest.TestCase): user_answer = ("#!/bin/bash\n[[ $# -eq 2 ]]" " && echo $(( $1 + $2 )) && exit $(( $1 + $2 ))" ) - get_class = BashCodeEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, - 'partial_grading': True, + # get_class = BashCodeEvaluator(self.in_dir) + # kwargs = {'user_answer': user_answer, + # 'partial_grading': True, + # 'test_case_data': self.test_case_data, + # 'file_paths': self.file_paths + # } + # result = get_class.evaluate(**kwargs) + kwargs = { + 'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'bash' + }, 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths - } - result = get_class.evaluate(**kwargs) + } + + evaluator = CodeEvaluator(self.in_dir) + result = evaluator.evaluate(kwargs) + self.assertTrue(result.get('success')) self.assertEqual(result.get('error'), "Correct answer\n") def test_error(self): user_answer = ("#!/bin/bash\n[[ $# -eq 2 ]] " "&& echo $(( $1 - $2 )) && exit $(( $1 - $2 ))") - get_class = BashCodeEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, - 'partial_grading': True, + # get_class = BashCodeEvaluator(self.in_dir) + # kwargs = {'user_answer': user_answer, + # 'partial_grading': True, + # 'test_case_data': self.test_case_data, + # 'file_paths': self.file_paths + # } + # result = get_class.evaluate(**kwargs) + kwargs = { + 'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'bash' + }, 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths - } - result = get_class.evaluate(**kwargs) + } + + evaluator = CodeEvaluator(self.in_dir) + result = evaluator.evaluate(kwargs) + self.assertFalse(result.get("success")) self.assertTrue("Error" in result.get("error")) def test_infinite_loop(self): user_answer = ("#!/bin/bash\nwhile [ 1 ] ;" " do echo "" > /dev/null ; done") - get_class = BashCodeEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, - 'partial_grading': True, + # get_class = BashCodeEvaluator(self.in_dir) + # kwargs = {'user_answer': user_answer, + # 'partial_grading': True, + # 'test_case_data': self.test_case_data, + # 'file_paths': self.file_paths + # } + # result = 
get_class.evaluate(**kwargs) + kwargs = { + 'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'bash' + }, 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths - } - result = get_class.evaluate(**kwargs) + } + + evaluator = CodeEvaluator(self.in_dir) + result = evaluator.evaluate(kwargs) + self.assertFalse(result.get("success")) self.assertEqual(result.get("error"), self.timeout_msg) @@ -72,17 +113,31 @@ class BashAssertionEvaluationTestCases(unittest.TestCase): self.file_paths = [('/tmp/test.txt', False)] self.test_case_data = [ {"test_case": "bash_files/sample1.sh,bash_files/sample1.args", + "test_case_type": "standardtestcase", "weight": 0.0 } ] user_answer = ("#!/bin/bash\ncat $1") - get_class = BashCodeEvaluator() - kwargs = {'user_answer': user_answer, - 'partial_grading': True, + # get_class = BashCodeEvaluator() + # kwargs = {'user_answer': user_answer, + # 'partial_grading': True, + # 'test_case_data': self.test_case_data, + # 'file_paths': self.file_paths + # } + # result = get_class.evaluate(**kwargs) + kwargs = { + 'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'bash' + }, 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths - } - result = get_class.evaluate(**kwargs) + } + + evaluator = CodeEvaluator(self.in_dir) + result = evaluator.evaluate(kwargs) + self.assertTrue(result.get("success")) self.assertEqual(result.get("error"), "Correct answer\n") @@ -92,6 +147,8 @@ class BashStdioEvaluationTestCases(unittest.TestCase): self.timeout_msg = ("Code took more than {0} seconds to run. " "You probably have an infinite loop in your" " code.").format(SERVER_TIMEOUT) + self.file_paths = None + def test_correct_answer(self): user_answer = dedent(""" #!/bin/bash @@ -102,14 +159,28 @@ class BashStdioEvaluationTestCases(unittest.TestCase): ) test_case_data = [{'expected_output': '11', 'expected_input': '5\n6', + 'test_case_type': 'stdiobasedtestcase', 'weight': 0.0 }] - get_class = BashStdioEvaluator() - kwargs = {"user_answer": user_answer, - "partial_grading": True, - "test_case_data": test_case_data - } - result = get_class.evaluate(**kwargs) + # get_class = BashStdioEvaluator() + # kwargs = {"user_answer": user_answer, + # "partial_grading": True, + # "test_case_data": test_case_data + # } + # result = get_class.evaluate(**kwargs) + kwargs = { + 'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'bash' + }, + 'test_case_data': test_case_data, + } + + evaluator = CodeEvaluator(self.in_dir) + result = evaluator.evaluate(kwargs) + self.assertEqual(result.get('error'), "Correct answer\n") self.assertTrue(result.get('success')) @@ -123,15 +194,29 @@ class BashStdioEvaluationTestCases(unittest.TestCase): """ ) test_case_data = [{'expected_output': '1 2 3\n4 5 6\n7 8 9\n', - 'expected_input': '1,2,3\n4,5,6\n7,8,9', - 'weight': 0.0 + 'expected_input': '1,2,3\n4,5,6\n7,8,9', + 'test_case_type': 'stdiobasedtestcase', + 'weight': 0.0 }] - get_class = BashStdioEvaluator() - kwargs = {"user_answer": user_answer, - "partial_grading": True, - "test_case_data": test_case_data - } - result = get_class.evaluate(**kwargs) + # get_class = BashStdioEvaluator() + # kwargs = {"user_answer": user_answer, + # "partial_grading": True, + # "test_case_data": test_case_data + # } + # result = get_class.evaluate(**kwargs) + kwargs = { + 'metadata': { + 'user_answer': 
user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'bash' + }, + 'test_case_data': test_case_data, + } + + evaluator = CodeEvaluator(self.in_dir) + result = evaluator.evaluate(kwargs) + self.assertEqual(result.get('error'), "Correct answer\n") self.assertTrue(result.get('success')) @@ -144,14 +229,27 @@ class BashStdioEvaluationTestCases(unittest.TestCase): ) test_case_data = [{'expected_output': '11', 'expected_input': '5\n6', + 'test_case_type': 'stdiobasedtestcase', 'weight': 0.0 }] - get_class = BashStdioEvaluator() - kwargs = {"user_answer": user_answer, - "partial_grading": True, - "test_case_data": test_case_data - } - result = get_class.evaluate(**kwargs) + # get_class = BashStdioEvaluator() + # kwargs = {"user_answer": user_answer, + # "partial_grading": True, + # "test_case_data": test_case_data + # } + # result = get_class.evaluate(**kwargs) + kwargs = { + 'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'bash' + }, + 'test_case_data': test_case_data, + } + + evaluator = CodeEvaluator(self.in_dir) + result = evaluator.evaluate(kwargs) self.assertIn("Incorrect", result.get('error')) self.assertFalse(result.get('success')) @@ -164,14 +262,27 @@ class BashStdioEvaluationTestCases(unittest.TestCase): ) test_case_data = [{'expected_output': '10', 'expected_input': '', + 'test_case_type': 'stdiobasedtestcase', 'weight': 0.0 }] - get_class = BashStdioEvaluator() - kwargs = {"user_answer": user_answer, - "partial_grading": True, - "test_case_data": test_case_data - } - result = get_class.evaluate(**kwargs) + # get_class = BashStdioEvaluator() + # kwargs = {"user_answer": user_answer, + # "partial_grading": True, + # "test_case_data": test_case_data + # } + # result = get_class.evaluate(**kwargs) + kwargs = { + 'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'bash' + }, + 'test_case_data': test_case_data, + } + + evaluator = CodeEvaluator(self.in_dir) + result = evaluator.evaluate(kwargs) self.assertEqual(result.get('error'), "Correct answer\n") self.assertTrue(result.get('success')) diff --git a/yaksh/evaluator_tests/test_c_cpp_evaluation.py b/yaksh/evaluator_tests/test_c_cpp_evaluation.py index d5193d3..9080e88 100644 --- a/yaksh/evaluator_tests/test_c_cpp_evaluation.py +++ b/yaksh/evaluator_tests/test_c_cpp_evaluation.py @@ -3,10 +3,14 @@ import unittest import os import shutil import tempfile +from textwrap import dedent + +# Local import +from yaksh.code_evaluator import CodeEvaluator from yaksh.cpp_code_evaluator import CppCodeEvaluator from yaksh.cpp_stdio_evaluator import CppStdioEvaluator from yaksh.settings import SERVER_TIMEOUT -from textwrap import dedent + class CAssertionEvaluationTestCases(unittest.TestCase): @@ -15,6 +19,7 @@ class CAssertionEvaluationTestCases(unittest.TestCase): f.write('2'.encode('ascii')) tmp_in_dir_path = tempfile.mkdtemp() self.test_case_data = [{"test_case": "c_cpp_files/main.cpp", + "test_case_type": "standardtestcase", "weight": 0.0 }] self.in_dir = tmp_in_dir_path @@ -29,25 +34,49 @@ class CAssertionEvaluationTestCases(unittest.TestCase): def test_correct_answer(self): user_answer = "int add(int a, int b)\n{return a+b;}" - get_class = CppCodeEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, - 'partial_grading': False, - 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths - } - result = get_class.evaluate(**kwargs) + # kwargs = 
{'user_answer': user_answer, + # 'partial_grading': False, + # 'test_case_data': self.test_case_data, + # 'file_paths': self.file_paths + # } + # result = get_class.evaluate(**kwargs) + kwargs = { + 'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'cpp' + }, + 'test_case_data': self.test_case_data, + } + + evaluator = CodeEvaluator(self.in_dir) + result = evaluator.evaluate(kwargs) + self.assertTrue(result.get('success')) self.assertEqual(result.get('error'), "Correct answer\n") def test_incorrect_answer(self): user_answer = "int add(int a, int b)\n{return a-b;}" - get_class = CppCodeEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, + # kwargs = {'user_answer': user_answer, + # 'partial_grading': False, + # 'test_case_data': self.test_case_data, + # 'file_paths': self.file_paths + # } + # result = get_class.evaluate(**kwargs) + kwargs = { + 'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, 'partial_grading': False, + 'language': 'cpp' + }, 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths - } - result = get_class.evaluate(**kwargs) + } + + evaluator = CodeEvaluator(self.in_dir) + result = evaluator.evaluate(kwargs) + lines_of_error = len(result.get('error').splitlines()) self.assertFalse(result.get('success')) self.assertIn("Incorrect:", result.get('error')) @@ -55,31 +84,57 @@ class CAssertionEvaluationTestCases(unittest.TestCase): def test_compilation_error(self): user_answer = "int add(int a, int b)\n{return a+b}" - get_class = CppCodeEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, + # kwargs = {'user_answer': user_answer, + # 'partial_grading': False, + # 'test_case_data': self.test_case_data, + # 'file_paths': self.file_paths + # } + # result = get_class.evaluate(**kwargs) + kwargs = { + 'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, 'partial_grading': False, + 'language': 'cpp' + }, 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths - } - result = get_class.evaluate(**kwargs) + } + + evaluator = CodeEvaluator(self.in_dir) + result = evaluator.evaluate(kwargs) + self.assertFalse(result.get("success")) self.assertTrue("Compilation Error" in result.get("error")) def test_infinite_loop(self): user_answer = "int add(int a, int b)\n{while(1>0){}}" - get_class = CppCodeEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, + # get_class = CppCodeEvaluator(self.in_dir) + # kwargs = {'user_answer': user_answer, + # 'partial_grading': False, + # 'test_case_data': self.test_case_data, + # 'file_paths': self.file_paths + # } + # result = get_class.evaluate(**kwargs) + kwargs = { + 'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, 'partial_grading': False, + 'language': 'cpp' + }, 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths - } - result = get_class.evaluate(**kwargs) + } + + evaluator = CodeEvaluator(self.in_dir) + result = evaluator.evaluate(kwargs) + self.assertFalse(result.get("success")) self.assertEqual(result.get("error"), self.timeout_msg) def test_file_based_assert(self): self.file_paths = [('/tmp/test.txt', False)] self.test_case_data = [{"test_case": "c_cpp_files/file_data.c", + "test_case_type": "standardtestcase", "weight": 0.0 }] user_answer = dedent(""" @@ -94,13 +149,26 @@ class CAssertionEvaluationTestCases(unittest.TestCase): return buff[0]; } """) - get_class = CppCodeEvaluator(self.in_dir) - kwargs = {'user_answer': 
user_answer, + # get_class = CppCodeEvaluator(self.in_dir) + # kwargs = {'user_answer': user_answer, + # 'partial_grading': False, + # 'test_case_data': self.test_case_data, + # 'file_paths': self.file_paths + # } + # result = get_class.evaluate(**kwargs) + kwargs = { + 'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, 'partial_grading': False, + 'language': 'cpp' + }, 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths - } - result = get_class.evaluate(**kwargs) + } + + evaluator = CodeEvaluator(self.in_dir) + result = evaluator.evaluate(kwargs) + self.assertTrue(result.get('success')) self.assertEqual(result.get('error'), "Correct answer\n") @@ -108,12 +176,14 @@ class CppStdioEvaluationTestCases(unittest.TestCase): def setUp(self): self.test_case_data = [{'expected_output': '11', 'expected_input': '5\n6', - 'weight': 0.0 + 'weight': 0.0, + 'test_case_type': 'stdiobasedtestcase', }] self.in_dir = tempfile.mkdtemp() self.timeout_msg = ("Code took more than {0} seconds to run. " "You probably have an infinite loop in" " your code.").format(SERVER_TIMEOUT) + self.file_paths = None def test_correct_answer(self): user_answer = dedent(""" @@ -123,19 +193,33 @@ class CppStdioEvaluationTestCases(unittest.TestCase): scanf("%d%d",&a,&b); printf("%d",a+b); }""") - get_class = CppStdioEvaluator() - kwargs = {'user_answer': user_answer, + # get_class = CppStdioEvaluator() + # kwargs = {'user_answer': user_answer, + # 'partial_grading': False, + # 'test_case_data': self.test_case_data + # } + # result = get_class.evaluate(**kwargs) + kwargs = { + 'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, 'partial_grading': False, - 'test_case_data': self.test_case_data - } - result = get_class.evaluate(**kwargs) + 'language': 'cpp' + }, + 'test_case_data': self.test_case_data, + } + + evaluator = CodeEvaluator(self.in_dir) + result = evaluator.evaluate(kwargs) + self.assertEqual(result.get('error'), "Correct answer\n") self.assertTrue(result.get('success')) def test_array_input(self): self.test_case_data = [{'expected_output': '561', 'expected_input': '5\n6\n1', - 'weight': 0.0 + 'weight': 0.0, + 'test_case_type': 'stdiobasedtestcase', }] user_answer = dedent(""" #include<stdio.h> @@ -146,19 +230,33 @@ class CppStdioEvaluationTestCases(unittest.TestCase): for(i=0;i<3;i++){ printf("%d",a[i]);} }""") - get_class = CppStdioEvaluator() - kwargs = {'user_answer': user_answer, + # get_class = CppStdioEvaluator() + # kwargs = {'user_answer': user_answer, + # 'partial_grading': False, + # 'test_case_data': self.test_case_data + # } + # result = get_class.evaluate(**kwargs) + kwargs = { + 'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, 'partial_grading': False, - 'test_case_data': self.test_case_data - } - result = get_class.evaluate(**kwargs) + 'language': 'cpp' + }, + 'test_case_data': self.test_case_data, + } + + evaluator = CodeEvaluator(self.in_dir) + result = evaluator.evaluate(kwargs) + self.assertEqual(result.get('error'), "Correct answer\n") self.assertTrue(result.get('success')) def test_string_input(self): self.test_case_data = [{'expected_output': 'abc', 'expected_input': 'abc', - 'weight': 0.0 + 'weight': 0.0, + 'test_case_type': 'stdiobasedtestcase', }] user_answer = dedent(""" #include<stdio.h> @@ -167,12 +265,25 @@ class CppStdioEvaluationTestCases(unittest.TestCase): scanf("%s",a); printf("%s",a); }""") - get_class = CppStdioEvaluator() - kwargs = {'user_answer': user_answer, + # get_class = 
CppStdioEvaluator() + # kwargs = {'user_answer': user_answer, + # 'partial_grading': False, + # 'test_case_data': self.test_case_data + # } + # result = get_class.evaluate(**kwargs) + kwargs = { + 'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, 'partial_grading': False, - 'test_case_data': self.test_case_data - } - result = get_class.evaluate(**kwargs) + 'language': 'cpp' + }, + 'test_case_data': self.test_case_data, + } + + evaluator = CodeEvaluator(self.in_dir) + result = evaluator.evaluate(kwargs) + self.assertEqual(result.get('error'), "Correct answer\n") self.assertTrue(result.get('success')) @@ -183,12 +294,25 @@ class CppStdioEvaluationTestCases(unittest.TestCase): int a=10; printf("%d",a); }""") - get_class = CppStdioEvaluator() - kwargs = {'user_answer': user_answer, + # get_class = CppStdioEvaluator() + # kwargs = {'user_answer': user_answer, + # 'partial_grading': False, + # 'test_case_data': self.test_case_data + # } + # result = get_class.evaluate(**kwargs) + kwargs = { + 'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, 'partial_grading': False, - 'test_case_data': self.test_case_data - } - result = get_class.evaluate(**kwargs) + 'language': 'cpp' + }, + 'test_case_data': self.test_case_data, + } + + evaluator = CodeEvaluator(self.in_dir) + result = evaluator.evaluate(kwargs) + lines_of_error = len(result.get('error').splitlines()) self.assertFalse(result.get('success')) self.assertIn("Incorrect", result.get('error')) @@ -201,12 +325,25 @@ class CppStdioEvaluationTestCases(unittest.TestCase): int a=10; printf("%d",a) }""") - get_class = CppStdioEvaluator() - kwargs = {'user_answer': user_answer, + # get_class = CppStdioEvaluator() + # kwargs = {'user_answer': user_answer, + # 'partial_grading': False, + # 'test_case_data': self.test_case_data + # } + # result = get_class.evaluate(**kwargs) + kwargs = { + 'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, 'partial_grading': False, - 'test_case_data': self.test_case_data - } - result = get_class.evaluate(**kwargs) + 'language': 'cpp' + }, + 'test_case_data': self.test_case_data, + } + + evaluator = CodeEvaluator(self.in_dir) + result = evaluator.evaluate(kwargs) + self.assertFalse(result.get("success")) self.assertTrue("Compilation Error" in result.get("error")) @@ -217,19 +354,33 @@ class CppStdioEvaluationTestCases(unittest.TestCase): while(0==0){ printf("abc");} }""") - get_class = CppStdioEvaluator() - kwargs = {'user_answer': user_answer, + # get_class = CppStdioEvaluator() + # kwargs = {'user_answer': user_answer, + # 'partial_grading': False, + # 'test_case_data': self.test_case_data + # } + # result = get_class.evaluate(**kwargs) + kwargs = { + 'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, 'partial_grading': False, - 'test_case_data': self.test_case_data - } - result = get_class.evaluate(**kwargs) + 'language': 'cpp' + }, + 'test_case_data': self.test_case_data, + } + + evaluator = CodeEvaluator(self.in_dir) + result = evaluator.evaluate(kwargs) + self.assertFalse(result.get("success")) self.assertEqual(result.get("error"), self.timeout_msg) def test_only_stdout(self): self.test_case_data = [{'expected_output': '11', 'expected_input': '', - 'weight': 0.0 + 'weight': 0.0, + 'test_case_type': 'stdiobasedtestcase', }] user_answer = dedent(""" #include<stdio.h> @@ -237,12 +388,25 @@ class CppStdioEvaluationTestCases(unittest.TestCase): int a=5,b=6; printf("%d",a+b); }""") - get_class = CppStdioEvaluator() - 
kwargs = {'user_answer': user_answer, + # get_class = CppStdioEvaluator() + # kwargs = {'user_answer': user_answer, + # 'partial_grading': False, + # 'test_case_data': self.test_case_data + # } + # result = get_class.evaluate(**kwargs) + kwargs = { + 'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, 'partial_grading': False, - 'test_case_data': self.test_case_data - } - result = get_class.evaluate(**kwargs) + 'language': 'cpp' + }, + 'test_case_data': self.test_case_data, + } + + evaluator = CodeEvaluator(self.in_dir) + result = evaluator.evaluate(kwargs) + self.assertEqual(result.get('error'), "Correct answer\n") self.assertTrue(result.get('success')) @@ -255,19 +419,33 @@ class CppStdioEvaluationTestCases(unittest.TestCase): cin>>a>>b; cout<<a+b; }""") - get_class = CppStdioEvaluator() - kwargs = {'user_answer': user_answer, + # get_class = CppStdioEvaluator() + # kwargs = {'user_answer': user_answer, + # 'partial_grading': False, + # 'test_case_data': self.test_case_data + # } + # result = get_class.evaluate(**kwargs) + kwargs = { + 'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, 'partial_grading': False, - 'test_case_data': self.test_case_data - } - result = get_class.evaluate(**kwargs) + 'language': 'cpp' + }, + 'test_case_data': self.test_case_data, + } + + evaluator = CodeEvaluator(self.in_dir) + result = evaluator.evaluate(kwargs) + self.assertEqual(result.get('error'), "Correct answer\n") self.assertTrue(result.get('success')) def test_cpp_array_input(self): self.test_case_data = [{'expected_output': '561', 'expected_input': '5\n6\n1', - 'weight': 0.0 + 'weight': 0.0, + 'test_case_type': 'stdiobasedtestcase', }] user_answer = dedent(""" #include<iostream> @@ -279,19 +457,33 @@ class CppStdioEvaluationTestCases(unittest.TestCase): for(i=0;i<3;i++){ cout<<a[i];} }""") - get_class = CppStdioEvaluator() - kwargs = {'user_answer': user_answer, + # get_class = CppStdioEvaluator() + # kwargs = {'user_answer': user_answer, + # 'partial_grading': False, + # 'test_case_data': self.test_case_data + # } + # result = get_class.evaluate(**kwargs) + kwargs = { + 'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, 'partial_grading': False, - 'test_case_data': self.test_case_data - } - result = get_class.evaluate(**kwargs) + 'language': 'cpp' + }, + 'test_case_data': self.test_case_data, + } + + evaluator = CodeEvaluator(self.in_dir) + result = evaluator.evaluate(kwargs) + self.assertEqual(result.get('error'), "Correct answer\n") self.assertTrue(result.get('success')) def test_cpp_string_input(self): self.test_case_data = [{'expected_output': 'abc', 'expected_input': 'abc', - 'weight': 0.0 + 'weight': 0.0, + 'test_case_type': 'stdiobasedtestcase', }] user_answer = dedent(""" #include<iostream> @@ -301,12 +493,25 @@ class CppStdioEvaluationTestCases(unittest.TestCase): cin>>a; cout<<a; }""") - get_class = CppStdioEvaluator() - kwargs = {'user_answer': user_answer, + # get_class = CppStdioEvaluator() + # kwargs = {'user_answer': user_answer, + # 'partial_grading': False, + # 'test_case_data': self.test_case_data + # } + # result = get_class.evaluate(**kwargs) + kwargs = { + 'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, 'partial_grading': False, - 'test_case_data': self.test_case_data - } - result = get_class.evaluate(**kwargs) + 'language': 'cpp' + }, + 'test_case_data': self.test_case_data, + } + + evaluator = CodeEvaluator(self.in_dir) + result = evaluator.evaluate(kwargs) + 
self.assertEqual(result.get('error'), "Correct answer\n") self.assertTrue(result.get('success')) @@ -318,12 +523,25 @@ class CppStdioEvaluationTestCases(unittest.TestCase): int a=10; cout<<a; }""") - get_class = CppStdioEvaluator() - kwargs = {'user_answer': user_answer, + # get_class = CppStdioEvaluator() + # kwargs = {'user_answer': user_answer, + # 'partial_grading': False, + # 'test_case_data': self.test_case_data + # } + # result = get_class.evaluate(**kwargs) + kwargs = { + 'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, 'partial_grading': False, - 'test_case_data': self.test_case_data - } - result = get_class.evaluate(**kwargs) + 'language': 'cpp' + }, + 'test_case_data': self.test_case_data, + } + + evaluator = CodeEvaluator(self.in_dir) + result = evaluator.evaluate(kwargs) + lines_of_error = len(result.get('error').splitlines()) self.assertFalse(result.get('success')) self.assertIn("Incorrect", result.get('error')) @@ -337,12 +555,25 @@ class CppStdioEvaluationTestCases(unittest.TestCase): int a=10; cout<<a }""") - get_class = CppStdioEvaluator() - kwargs = {'user_answer': user_answer, + # get_class = CppStdioEvaluator() + # kwargs = {'user_answer': user_answer, + # 'partial_grading': False, + # 'test_case_data': self.test_case_data + # } + # result = get_class.evaluate(**kwargs) + kwargs = { + 'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, 'partial_grading': False, - 'test_case_data': self.test_case_data - } - result = get_class.evaluate(**kwargs) + 'language': 'cpp' + }, + 'test_case_data': self.test_case_data, + } + + evaluator = CodeEvaluator(self.in_dir) + result = evaluator.evaluate(kwargs) + self.assertFalse(result.get("success")) self.assertTrue("Compilation Error" in result.get("error")) @@ -354,19 +585,33 @@ class CppStdioEvaluationTestCases(unittest.TestCase): while(0==0){ cout<<"abc";} }""") - get_class = CppStdioEvaluator() - kwargs = {'user_answer': user_answer, + # get_class = CppStdioEvaluator() + # kwargs = {'user_answer': user_answer, + # 'partial_grading': False, + # 'test_case_data': self.test_case_data + # } + # result = get_class.evaluate(**kwargs) + kwargs = { + 'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, 'partial_grading': False, - 'test_case_data': self.test_case_data - } - result = get_class.evaluate(**kwargs) + 'language': 'cpp' + }, + 'test_case_data': self.test_case_data, + } + + evaluator = CodeEvaluator(self.in_dir) + result = evaluator.evaluate(kwargs) + self.assertFalse(result.get("success")) self.assertEqual(result.get("error"), self.timeout_msg) def test_cpp_only_stdout(self): self.test_case_data = [{'expected_output': '11', 'expected_input': '', - 'weight': 0.0 + 'weight': 0.0, + 'test_case_type': 'stdiobasedtestcase', }] user_answer = dedent(""" #include<iostream> @@ -375,12 +620,25 @@ class CppStdioEvaluationTestCases(unittest.TestCase): int a=5,b=6; cout<<a+b; }""") - get_class = CppStdioEvaluator() - kwargs = {'user_answer': user_answer, + # get_class = CppStdioEvaluator() + # kwargs = {'user_answer': user_answer, + # 'partial_grading': False, + # 'test_case_data': self.test_case_data + # } + # result = get_class.evaluate(**kwargs) + kwargs = { + 'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, 'partial_grading': False, - 'test_case_data': self.test_case_data - } - result = get_class.evaluate(**kwargs) + 'language': 'cpp' + }, + 'test_case_data': self.test_case_data, + } + + evaluator = CodeEvaluator(self.in_dir) + result 
= evaluator.evaluate(kwargs) + self.assertEqual(result.get('error'), "Correct answer\n") self.assertTrue(result.get('success')) diff --git a/yaksh/evaluator_tests/test_code_evaluation.py b/yaksh/evaluator_tests/test_code_evaluation.py index 88e0253..f664200 100644 --- a/yaksh/evaluator_tests/test_code_evaluation.py +++ b/yaksh/evaluator_tests/test_code_evaluation.py @@ -13,12 +13,12 @@ class RegistryTestCase(unittest.TestCase): assertion_evaluator_path = ("yaksh.python_assertion_evaluator" ".PythonAssertionEvaluator" ) - stdout_evaluator_path = ("yaksh.python_stdout_evaluator." - "PythonStdoutEvaluator" + stdio_evaluator_path = ("yaksh.python_stdio_evaluator." + "PythonStdioEvaluator" ) code_evaluators['python'] = \ {"standardtestcase": assertion_evaluator_path, - "stdiobasedtestcase": stdout_evaluator_path + "stdiobasedtestcase": stdio_evaluator_path } def test_set_register(self): @@ -28,15 +28,15 @@ class RegistryTestCase(unittest.TestCase): assertion_evaluator_path = ("yaksh.python_assertion_evaluator" ".PythonAssertionEvaluator" ) - stdout_evaluator_path = ("yaksh.python_stdout_evaluator." - "PythonStdoutEvaluator" + stdio_evaluator_path = ("yaksh.python_stdio_evaluator." + "PythonStdioEvaluator" ) class_name = getattr(python_assertion_evaluator, 'PythonAssertionEvaluator' ) self.registry_object.register("python", {"standardtestcase": assertion_evaluator_path, - "stdiobasedtestcase": stdout_evaluator_path + "stdiobasedtestcase": stdio_evaluator_path } ) self.assertEqual(evaluator_class, class_name) diff --git a/yaksh/evaluator_tests/test_java_evaluation.py b/yaksh/evaluator_tests/test_java_evaluation.py index f7ecd97..a66b6d6 100644 --- a/yaksh/evaluator_tests/test_java_evaluation.py +++ b/yaksh/evaluator_tests/test_java_evaluation.py @@ -3,7 +3,8 @@ import unittest import os import shutil import tempfile -from yaksh import code_evaluator as evaluator +from yaksh import code_evaluator +from yaksh.code_evaluator import CodeEvaluator from yaksh.java_code_evaluator import JavaCodeEvaluator from yaksh.java_stdio_evaluator import JavaStdioEvaluator from yaksh.settings import SERVER_TIMEOUT @@ -17,41 +18,70 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase): tmp_in_dir_path = tempfile.mkdtemp() self.test_case_data = [ {"test_case": "java_files/main_square.java", + "test_case_type": "standardtestcase", "weight": 0.0 } ] self.in_dir = tmp_in_dir_path - evaluator.SERVER_TIMEOUT = 9 + code_evaluator.SERVER_TIMEOUT = 9 self.timeout_msg = ("Code took more than {0} seconds to run. 
" "You probably have an infinite loop in" - " your code.").format(evaluator.SERVER_TIMEOUT) + " your code.").format(code_evaluator.SERVER_TIMEOUT) self.file_paths = None def tearDown(self): + code_evaluator.SERVER_TIMEOUT = 4 os.remove('/tmp/test.txt') shutil.rmtree(self.in_dir) def test_correct_answer(self): user_answer = "class Test {\n\tint square_num(int a) {\n\treturn a*a;\n\t}\n}" - get_class = JavaCodeEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, - 'partial_grading': True, + + # get_class = JavaCodeEvaluator(self.in_dir) + # kwargs = {'user_answer': user_answer, + # 'partial_grading': True, + # 'test_case_data': self.test_case_data, + # 'file_paths': self.file_paths + # } + # result = get_class.evaluate(**kwargs) + kwargs = { + 'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'java' + }, 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths - } - result = get_class.evaluate(**kwargs) + } + + evaluator = CodeEvaluator(self.in_dir) + result = evaluator.evaluate(kwargs) + self.assertEqual(result.get('error'), "Correct answer\n") self.assertTrue(result.get('success')) def test_incorrect_answer(self): user_answer = "class Test {\n\tint square_num(int a) {\n\treturn a;\n\t}\n}" - get_class = JavaCodeEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, - 'partial_grading': True, + # get_class = JavaCodeEvaluator(self.in_dir) + # kwargs = {'user_answer': user_answer, + # 'partial_grading': True, + # 'test_case_data': self.test_case_data, + # 'file_paths': self.file_paths + # } + # result = get_class.evaluate(**kwargs) + kwargs = { + 'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'java' + }, 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths - } - result = get_class.evaluate(**kwargs) + } + + evaluator = CodeEvaluator(self.in_dir) + result = evaluator.evaluate(kwargs) + self.assertFalse(result.get('success')) lines_of_error = len(result.get('error').splitlines()) self.assertFalse(result.get('success')) @@ -60,25 +90,52 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase): def test_error(self): user_answer = "class Test {\n\tint square_num(int a) {\n\treturn a*a" - get_class = JavaCodeEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, - 'partial_grading': True, + # get_class = JavaCodeEvaluator(self.in_dir) + # kwargs = {'user_answer': user_answer, + # 'partial_grading': True, + # 'test_case_data': self.test_case_data, + # 'file_paths': self.file_paths + # } + # result = get_class.evaluate(**kwargs) + + kwargs = { + 'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'java' + }, 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths - } - result = get_class.evaluate(**kwargs) + } + + evaluator = CodeEvaluator(self.in_dir) + result = evaluator.evaluate(kwargs) + self.assertFalse(result.get("success")) self.assertTrue("Error" in result.get("error")) def test_infinite_loop(self): user_answer = "class Test {\n\tint square_num(int a) {\n\t\twhile(0==0){\n\t\t}\n\t}\n}" - get_class = JavaCodeEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, - 'partial_grading': True, + # get_class = JavaCodeEvaluator(self.in_dir) + # kwargs = {'user_answer': user_answer, + # 'partial_grading': True, + # 'test_case_data': self.test_case_data, + # 'file_paths': self.file_paths + # } + # result = 
get_class.evaluate(**kwargs) + kwargs = { + 'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'java' + }, 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths - } - result = get_class.evaluate(**kwargs) + } + + evaluator = CodeEvaluator(self.in_dir) + result = evaluator.evaluate(kwargs) + self.assertFalse(result.get("success")) self.assertEqual(result.get("error"), self.timeout_msg) @@ -86,6 +143,7 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase): self.file_paths = [("/tmp/test.txt", False)] self.test_case_data = [ {"test_case": "java_files/read_file.java", + "test_case_type": "standardtestcase", "weight": 0.0 } ] @@ -107,34 +165,48 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase): br.close(); }}} """) - get_class = JavaCodeEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, - 'partial_grading': True, + # get_class = JavaCodeEvaluator(self.in_dir) + # kwargs = {'user_answer': user_answer, + # 'partial_grading': True, + # 'test_case_data': self.test_case_data, + # 'file_paths': self.file_paths + # } + # result = get_class.evaluate(**kwargs) + kwargs = { + 'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'java' + }, 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths - } - result = get_class.evaluate(**kwargs) + } + + evaluator = CodeEvaluator(self.in_dir) + result = evaluator.evaluate(kwargs) + self.assertTrue(result.get("success")) self.assertEqual(result.get("error"), "Correct answer\n") class JavaStdioEvaluationTestCases(unittest.TestCase): - def setUp(self): with open('/tmp/test.txt', 'wb') as f: f.write('2'.encode('ascii')) tmp_in_dir_path = tempfile.mkdtemp() self.in_dir = tmp_in_dir_path self.test_case_data = [{'expected_output': '11', - 'expected_input': '5\n6', - 'weight': 0.0 - }] - evaluator.SERVER_TIMEOUT = 4 + 'expected_input': '5\n6', + 'test_case_type': 'stdiobasedtestcase', + 'weight': 0.0 + }] + code_evaluator.SERVER_TIMEOUT = 4 self.timeout_msg = ("Code took more than {0} seconds to run. 
" "You probably have an infinite loop in" - " your code.").format(evaluator.SERVER_TIMEOUT) + " your code.").format(code_evaluator.SERVER_TIMEOUT) + self.file_paths = None def tearDown(self): - evaluator.SERVER_TIMEOUT = 4 + code_evaluator.SERVER_TIMEOUT = 4 os.remove('/tmp/test.txt') shutil.rmtree(self.in_dir) @@ -148,19 +220,32 @@ class JavaStdioEvaluationTestCases(unittest.TestCase): int b = s.nextInt(); System.out.print(a+b); }}""") - get_class = JavaStdioEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, - 'partial_grading': True, - 'test_case_data': self.test_case_data - } - result = get_class.evaluate(**kwargs) + # get_class = JavaStdioEvaluator(self.in_dir) + # kwargs = {'user_answer': user_answer, + # 'partial_grading': True, + # 'test_case_data': self.test_case_data + # } + # result = get_class.evaluate(**kwargs) + kwargs = { + 'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'java' + }, + 'test_case_data': self.test_case_data, + } + + evaluator = CodeEvaluator(self.in_dir) + result = evaluator.evaluate(kwargs) + self.assertEqual(result.get('error'), "Correct answer\n") self.assertTrue(result.get('success')) def test_array_input(self): - self.test_case_data = [{'expected_output': '561', 'expected_input': '5\n6\n1', + 'test_case_type': 'stdiobasedtestcase', 'weight': 0.0 }] user_answer = dedent(""" @@ -173,17 +258,29 @@ class JavaStdioEvaluationTestCases(unittest.TestCase): a[i] = s.nextInt(); System.out.print(a[i]);} }}""") - get_class = JavaStdioEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, - 'partial_grading': True, - 'test_case_data': self.test_case_data - } - result = get_class.evaluate(**kwargs) + # get_class = JavaStdioEvaluator(self.in_dir) + # kwargs = {'user_answer': user_answer, + # 'partial_grading': True, + # 'test_case_data': self.test_case_data + # } + # result = get_class.evaluate(**kwargs) + kwargs = { + 'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'java' + }, + 'test_case_data': self.test_case_data, + } + + evaluator = CodeEvaluator(self.in_dir) + result = evaluator.evaluate(kwargs) + self.assertEqual(result.get('error'), "Correct answer\n") self.assertTrue(result.get('success')) def test_incorrect_answer(self): - user_answer = dedent(""" import java.util.Scanner; class Test @@ -193,30 +290,55 @@ class JavaStdioEvaluationTestCases(unittest.TestCase): int b = s.nextInt(); System.out.print(a); }}""") - get_class = JavaStdioEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, - 'partial_grading': True, - 'test_case_data': self.test_case_data - } - result = get_class.evaluate(**kwargs) + # get_class = JavaStdioEvaluator(self.in_dir) + # kwargs = {'user_answer': user_answer, + # 'partial_grading': True, + # 'test_case_data': self.test_case_data + # } + # result = get_class.evaluate(**kwargs) + kwargs = { + 'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'java' + }, + 'test_case_data': self.test_case_data, + } + + evaluator = CodeEvaluator(self.in_dir) + result = evaluator.evaluate(kwargs) + lines_of_error = len(result.get('error').splitlines()) self.assertFalse(result.get('success')) self.assertIn("Incorrect", result.get('error')) self.assertTrue(lines_of_error > 1) def test_error(self): - user_answer = dedent(""" class Test { System.out.print("a"); }""") - get_class = JavaStdioEvaluator(self.in_dir) - kwargs = 
{'user_answer': user_answer, - 'partial_grading': True, - 'test_case_data': self.test_case_data - } - result = get_class.evaluate(**kwargs) + # get_class = JavaStdioEvaluator(self.in_dir) + # kwargs = {'user_answer': user_answer, + # 'partial_grading': True, + # 'test_case_data': self.test_case_data + # } + # result = get_class.evaluate(**kwargs) + kwargs = { + 'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'java' + }, + 'test_case_data': self.test_case_data, + } + + evaluator = CodeEvaluator(self.in_dir) + result = evaluator.evaluate(kwargs) + self.assertFalse(result.get("success")) self.assertTrue("Compilation Error" in result.get("error")) @@ -229,19 +351,33 @@ class JavaStdioEvaluationTestCases(unittest.TestCase): { System.out.print("a");} }}""") - get_class = JavaStdioEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, - 'partial_grading': True, - 'test_case_data': self.test_case_data - } - result = get_class.evaluate(**kwargs) + # get_class = JavaStdioEvaluator(self.in_dir) + # kwargs = {'user_answer': user_answer, + # 'partial_grading': True, + # 'test_case_data': self.test_case_data + # } + # result = get_class.evaluate(**kwargs) + kwargs = { + 'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'java' + }, + 'test_case_data': self.test_case_data, + } + + evaluator = CodeEvaluator(self.in_dir) + result = evaluator.evaluate(kwargs) + self.assertFalse(result.get("success")) self.assertEqual(result.get("error"), self.timeout_msg) def test_only_stdout(self): self.test_case_data = [{'expected_output': '11', - 'expected_input': '', - 'weight': 0.0 + 'expected_input': '', + 'test_case_type': 'stdiobasedtestcase', + 'weight': 0.0 }] user_answer = dedent(""" class Test @@ -250,19 +386,33 @@ class JavaStdioEvaluationTestCases(unittest.TestCase): int b = 6; System.out.print(a+b); }}""") - get_class = JavaStdioEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, - 'partial_grading': True, - 'test_case_data': self.test_case_data - } - result = get_class.evaluate(**kwargs) + # get_class = JavaStdioEvaluator(self.in_dir) + # kwargs = {'user_answer': user_answer, + # 'partial_grading': True, + # 'test_case_data': self.test_case_data + # } + # result = get_class.evaluate(**kwargs) + kwargs = { + 'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'java' + }, + 'test_case_data': self.test_case_data, + } + + evaluator = CodeEvaluator(self.in_dir) + result = evaluator.evaluate(kwargs) + self.assertEqual(result.get('error'), "Correct answer\n") self.assertTrue(result.get('success')) def test_string_input(self): self.test_case_data = [{'expected_output': 'HelloWorld', - 'expected_input': 'Hello\nWorld', - 'weight': 0.0 + 'expected_input': 'Hello\nWorld', + 'test_case_type': 'stdiobasedtestcase', + 'weight': 0.0 }] user_answer = dedent(""" import java.util.Scanner; @@ -273,20 +423,34 @@ class JavaStdioEvaluationTestCases(unittest.TestCase): String b = s.nextLine(); System.out.print(a+b); }}""") - get_class = JavaStdioEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, - 'partial_grading': True, - 'test_case_data': self.test_case_data - } - result = get_class.evaluate(**kwargs) + # get_class = JavaStdioEvaluator(self.in_dir) + # kwargs = {'user_answer': user_answer, + # 'partial_grading': True, + # 'test_case_data': self.test_case_data + # } + # result = 
get_class.evaluate(**kwargs) + kwargs = { + 'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'java' + }, + 'test_case_data': self.test_case_data, + } + + evaluator = CodeEvaluator(self.in_dir) + result = evaluator.evaluate(kwargs) + self.assertEqual(result.get('error'), "Correct answer\n") self.assertTrue(result.get('success')) def test_file_based_stdout(self): self.file_paths = [("/tmp/test.txt", False)] self.test_case_data = [{'expected_output': '2', - 'expected_input': '', - 'weight': 0.0 + 'expected_input': '', + 'test_case_type': 'stdiobasedtestcase', + 'weight': 0.0 }] user_answer = dedent(""" import java.io.BufferedReader; @@ -306,13 +470,26 @@ class JavaStdioEvaluationTestCases(unittest.TestCase): br.close(); }}} """) - get_class = JavaStdioEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, - 'partial_grading': True, + # get_class = JavaStdioEvaluator(self.in_dir) + # kwargs = {'user_answer': user_answer, + # 'partial_grading': True, + # 'test_case_data': self.test_case_data, + # 'file_paths': self.file_paths + # } + # result = get_class.evaluate(**kwargs) + kwargs = { + 'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'java' + }, 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths - } - result = get_class.evaluate(**kwargs) + } + + evaluator = CodeEvaluator(self.in_dir) + result = evaluator.evaluate(kwargs) + self.assertTrue(result.get("success")) self.assertEqual(result.get("error"), "Correct answer\n") diff --git a/yaksh/evaluator_tests/test_python_evaluation.py b/yaksh/evaluator_tests/test_python_evaluation.py index 4bf0032..fb762f9 100644 --- a/yaksh/evaluator_tests/test_python_evaluation.py +++ b/yaksh/evaluator_tests/test_python_evaluation.py @@ -25,8 +25,6 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): "You probably have an infinite loop in" " your code.").format(SERVER_TIMEOUT) self.file_paths = None - self.language = 'python' - self.test_case_type = 'standardtestcase' def tearDown(self): os.remove('/tmp/test.txt') diff --git a/yaksh/evaluator_tests/test_scilab_evaluation.py b/yaksh/evaluator_tests/test_scilab_evaluation.py index c30f652..de7368f 100644 --- a/yaksh/evaluator_tests/test_scilab_evaluation.py +++ b/yaksh/evaluator_tests/test_scilab_evaluation.py @@ -4,7 +4,8 @@ import os import shutil import tempfile -from yaksh import code_evaluator as evaluator +from yaksh import code_evaluator +from yaksh.code_evaluator import CodeEvaluator from yaksh.scilab_code_evaluator import ScilabCodeEvaluator from yaksh.settings import SERVER_TIMEOUT @@ -12,40 +13,69 @@ class ScilabEvaluationTestCases(unittest.TestCase): def setUp(self): tmp_in_dir_path = tempfile.mkdtemp() self.test_case_data = [{"test_case": "scilab_files/test_add.sce", + "test_case_type": "standardtestcase", "weight": 0.0 }] self.in_dir = tmp_in_dir_path self.timeout_msg = ("Code took more than {0} seconds to run. 
" "You probably have an infinite loop" " in your code.").format(SERVER_TIMEOUT) + code_evaluator.SERVER_TIMEOUT = 9 self.file_paths = None def tearDown(self): + code_evaluator.SERVER_TIMEOUT = 4 shutil.rmtree(self.in_dir) def test_correct_answer(self): user_answer = ("funcprot(0)\nfunction[c]=add(a,b)" "\n\tc=a+b;\nendfunction") - get_class = ScilabCodeEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, - 'partial_grading': True, + # get_class = ScilabCodeEvaluator(self.in_dir) + # kwargs = {'user_answer': user_answer, + # 'partial_grading': True, + # 'test_case_data': self.test_case_data, + # 'file_paths': self.file_paths + # } + # result = get_class.evaluate(**kwargs) + kwargs = { + 'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'scilab' + }, 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths - } - result = get_class.evaluate(**kwargs) + } + + evaluator = CodeEvaluator(self.in_dir) + result = evaluator.evaluate(kwargs) + self.assertEqual(result.get('error'), "Correct answer\n") self.assertTrue(result.get('success')) def test_error(self): user_answer = ("funcprot(0)\nfunction[c]=add(a,b)" "\n\tc=a+b;\ndis(\tendfunction") - get_class = ScilabCodeEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, - 'partial_grading': True, + # get_class = ScilabCodeEvaluator(self.in_dir) + # kwargs = {'user_answer': user_answer, + # 'partial_grading': True, + # 'test_case_data': self.test_case_data, + # 'file_paths': self.file_paths + # } + # result = get_class.evaluate(**kwargs) + kwargs = { + 'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'scilab' + }, 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths - } - result = get_class.evaluate(**kwargs) + } + + evaluator = CodeEvaluator(self.in_dir) + result = evaluator.evaluate(kwargs) + self.assertFalse(result.get("success")) self.assertTrue('error' in result.get("error")) @@ -53,28 +83,55 @@ class ScilabEvaluationTestCases(unittest.TestCase): def test_incorrect_answer(self): user_answer = ("funcprot(0)\nfunction[c]=add(a,b)" "\n\tc=a-b;\nendfunction") - get_class = ScilabCodeEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, - 'partial_grading': True, + # get_class = ScilabCodeEvaluator(self.in_dir) + # kwargs = {'user_answer': user_answer, + # 'partial_grading': True, + # 'test_case_data': self.test_case_data, + # 'file_paths': self.file_paths + # } + # result = get_class.evaluate(**kwargs) + kwargs = { + 'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'scilab' + }, 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths - } - result = get_class.evaluate(**kwargs) + } + + evaluator = CodeEvaluator(self.in_dir) + result = evaluator.evaluate(kwargs) + lines_of_error = len(result.get('error').splitlines()) self.assertFalse(result.get('success')) self.assertIn("Message", result.get('error')) self.assertTrue(lines_of_error > 1) def test_infinite_loop(self): + code_evaluator.SERVER_TIMEOUT = 4 user_answer = ("funcprot(0)\nfunction[c]=add(a,b)" "\n\tc=a;\nwhile(1==1)\nend\nendfunction") - get_class = ScilabCodeEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, - 'partial_grading': True, + # get_class = ScilabCodeEvaluator(self.in_dir) + # kwargs = {'user_answer': user_answer, + # 'partial_grading': True, + # 'test_case_data': self.test_case_data, + # 
'file_paths': self.file_paths + # } + # result = get_class.evaluate(**kwargs) + kwargs = { + 'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'scilab' + }, 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths - } - result = get_class.evaluate(**kwargs) + } + + evaluator = CodeEvaluator(self.in_dir) + result = evaluator.evaluate(kwargs) + self.assertFalse(result.get("success")) self.assertEqual(result.get("error"), self.timeout_msg) |
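
For reference, every test touched above is converted to the same entry point instead of constructing a language-specific evaluator directly. The following is a minimal sketch of that invocation pattern, based only on the calls visible in this diff (the 'metadata'/'test_case_data' layout and CodeEvaluator(in_dir).evaluate(kwargs)); the sample answer, test case, and directory path are hypothetical, and how CodeEvaluator dispatches on 'language' and 'test_case_type' internally is not shown here.

    from yaksh.code_evaluator import CodeEvaluator

    # Solution-specific details travel under 'metadata'; each test case now
    # carries its own 'test_case_type' so the evaluator can select a back end.
    kwargs = {
        'metadata': {
            'user_answer': "def add(a, b):\n    return a + b",  # hypothetical submission
            'file_paths': None,
            'partial_grading': False,
            'language': 'python'
        },
        'test_case_data': [
            {'test_case': 'assert(add(1, 2) == 3)',
             'test_case_type': 'standardtestcase',
             'weight': 0.0}
        ],
    }

    evaluator = CodeEvaluator('/tmp/eval_dir')  # working directory, like self.in_dir in the tests
    result = evaluator.evaluate(kwargs)         # dict whose 'success' and 'error' keys the tests assert on
    print(result.get('success'), result.get('error'))

The language-specific classes (JavaStdioEvaluator, ScilabCodeEvaluator, etc.) are no longer instantiated by the tests themselves; selection is presumably driven by the 'language' and 'test_case_type' fields shown above.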