author     ankitjavalkar    2016-12-19 11:44:55 +0530
committer  ankitjavalkar    2016-12-20 12:46:03 +0530
commit     f1da39aded67efa3da145851f0e9f687a3e434e5 (patch)
tree       c8713233fcaaf50e7e4a62b337258e7932fcf99c /yaksh/evaluator_tests/test_bash_evaluation.py
parent     80a4feef3c209e044e8cbe31e44c81d69136e100 (diff)
Change all evaluator structure and make sure eval test cases pass
Diffstat (limited to 'yaksh/evaluator_tests/test_bash_evaluation.py')
-rw-r--r--	yaksh/evaluator_tests/test_bash_evaluation.py	211
1 file changed, 161 insertions(+), 50 deletions(-)
diff --git a/yaksh/evaluator_tests/test_bash_evaluation.py b/yaksh/evaluator_tests/test_bash_evaluation.py
index 99e5122..8888ee6 100644
--- a/yaksh/evaluator_tests/test_bash_evaluation.py
+++ b/yaksh/evaluator_tests/test_bash_evaluation.py
@@ -3,6 +3,7 @@ import unittest
 import os
 import shutil
 import tempfile
+from yaksh.code_evaluator import CodeEvaluator
 from yaksh.bash_code_evaluator import BashCodeEvaluator
 from yaksh.bash_stdio_evaluator import BashStdioEvaluator
 from yaksh.settings import SERVER_TIMEOUT
@@ -15,6 +16,7 @@ class BashAssertionEvaluationTestCases(unittest.TestCase):
             f.write('2'.encode('ascii'))
         self.test_case_data = [
             {"test_case": "bash_files/sample.sh,bash_files/sample.args",
+             "test_case_type": "standardtestcase",
              "weight": 0.0
             }
         ]
@@ -32,39 +34,78 @@ class BashAssertionEvaluationTestCases(unittest.TestCase):
         user_answer = ("#!/bin/bash\n[[ $# -eq 2 ]]"
                        " && echo $(( $1 + $2 )) && exit $(( $1 + $2 ))"
                        )
-        get_class = BashCodeEvaluator(self.in_dir)
-        kwargs = {'user_answer': user_answer,
-                  'partial_grading': True,
+        # get_class = BashCodeEvaluator(self.in_dir)
+        # kwargs = {'user_answer': user_answer,
+        #           'partial_grading': True,
+        #           'test_case_data': self.test_case_data,
+        #           'file_paths': self.file_paths
+        #           }
+        # result = get_class.evaluate(**kwargs)
+        kwargs = {
+                  'metadata': {
+                    'user_answer': user_answer,
+                    'file_paths': self.file_paths,
+                    'partial_grading': False,
+                    'language': 'bash'
+                    },
                   'test_case_data': self.test_case_data,
-                  'file_paths': self.file_paths
-                  }
-        result = get_class.evaluate(**kwargs)
+                  }
+
+        evaluator = CodeEvaluator(self.in_dir)
+        result = evaluator.evaluate(kwargs)
+
         self.assertTrue(result.get('success'))
         self.assertEqual(result.get('error'), "Correct answer\n")
 
     def test_error(self):
         user_answer = ("#!/bin/bash\n[[ $# -eq 2 ]] "
                        "&& echo $(( $1 - $2 )) && exit $(( $1 - $2 ))")
-        get_class = BashCodeEvaluator(self.in_dir)
-        kwargs = {'user_answer': user_answer,
-                  'partial_grading': True,
+        # get_class = BashCodeEvaluator(self.in_dir)
+        # kwargs = {'user_answer': user_answer,
+        #           'partial_grading': True,
+        #           'test_case_data': self.test_case_data,
+        #           'file_paths': self.file_paths
+        #           }
+        # result = get_class.evaluate(**kwargs)
+        kwargs = {
+                  'metadata': {
+                    'user_answer': user_answer,
+                    'file_paths': self.file_paths,
+                    'partial_grading': False,
+                    'language': 'bash'
+                    },
                   'test_case_data': self.test_case_data,
-                  'file_paths': self.file_paths
-                  }
-        result = get_class.evaluate(**kwargs)
+                  }
+
+        evaluator = CodeEvaluator(self.in_dir)
+        result = evaluator.evaluate(kwargs)
+
         self.assertFalse(result.get("success"))
         self.assertTrue("Error" in result.get("error"))
 
     def test_infinite_loop(self):
         user_answer = ("#!/bin/bash\nwhile [ 1 ] ;"
                        " do echo "" > /dev/null ; done")
-        get_class = BashCodeEvaluator(self.in_dir)
-        kwargs = {'user_answer': user_answer,
-                  'partial_grading': True,
+        # get_class = BashCodeEvaluator(self.in_dir)
+        # kwargs = {'user_answer': user_answer,
+        #           'partial_grading': True,
+        #           'test_case_data': self.test_case_data,
+        #           'file_paths': self.file_paths
+        #           }
+        # result = get_class.evaluate(**kwargs)
+        kwargs = {
+                  'metadata': {
+                    'user_answer': user_answer,
+                    'file_paths': self.file_paths,
+                    'partial_grading': False,
+                    'language': 'bash'
+                    },
                   'test_case_data': self.test_case_data,
-                  'file_paths': self.file_paths
-                  }
-        result = get_class.evaluate(**kwargs)
+                  }
+
+        evaluator = CodeEvaluator(self.in_dir)
+        result = evaluator.evaluate(kwargs)
+
         self.assertFalse(result.get("success"))
         self.assertEqual(result.get("error"), self.timeout_msg)
 
@@ -72,17 +113,31 @@ class BashAssertionEvaluationTestCases(unittest.TestCase):
         self.file_paths = [('/tmp/test.txt', False)]
         self.test_case_data = [
             {"test_case": "bash_files/sample1.sh,bash_files/sample1.args",
+             "test_case_type": "standardtestcase",
              "weight": 0.0
             }
         ]
         user_answer = ("#!/bin/bash\ncat $1")
-        get_class = BashCodeEvaluator()
-        kwargs = {'user_answer': user_answer,
-                  'partial_grading': True,
+        # get_class = BashCodeEvaluator()
+        # kwargs = {'user_answer': user_answer,
+        #           'partial_grading': True,
+        #           'test_case_data': self.test_case_data,
+        #           'file_paths': self.file_paths
+        #           }
+        # result = get_class.evaluate(**kwargs)
+        kwargs = {
+                  'metadata': {
+                    'user_answer': user_answer,
+                    'file_paths': self.file_paths,
+                    'partial_grading': False,
+                    'language': 'bash'
+                    },
                   'test_case_data': self.test_case_data,
-                  'file_paths': self.file_paths
-                  }
-        result = get_class.evaluate(**kwargs)
+                  }
+
+        evaluator = CodeEvaluator(self.in_dir)
+        result = evaluator.evaluate(kwargs)
+
         self.assertTrue(result.get("success"))
         self.assertEqual(result.get("error"), "Correct answer\n")
 
@@ -92,6 +147,8 @@ class BashStdioEvaluationTestCases(unittest.TestCase):
         self.timeout_msg = ("Code took more than {0} seconds to run. "
                             "You probably have an infinite loop in your"
                             " code.").format(SERVER_TIMEOUT)
+        self.file_paths = None
+
     def test_correct_answer(self):
         user_answer = dedent("""
                              #!/bin/bash
@@ -102,14 +159,28 @@ class BashStdioEvaluationTestCases(unittest.TestCase):
                              )
         test_case_data = [{'expected_output': '11',
                            'expected_input': '5\n6',
+                           'test_case_type': 'stdiobasedtestcase',
                            'weight': 0.0
                            }]
-        get_class = BashStdioEvaluator()
-        kwargs = {"user_answer": user_answer,
-                  "partial_grading": True,
-                  "test_case_data": test_case_data
-                  }
-        result = get_class.evaluate(**kwargs)
+        # get_class = BashStdioEvaluator()
+        # kwargs = {"user_answer": user_answer,
+        #           "partial_grading": True,
+        #           "test_case_data": test_case_data
+        #           }
+        # result = get_class.evaluate(**kwargs)
+        kwargs = {
+                  'metadata': {
+                    'user_answer': user_answer,
+                    'file_paths': self.file_paths,
+                    'partial_grading': False,
+                    'language': 'bash'
+                    },
+                  'test_case_data': test_case_data,
+                  }
+
+        evaluator = CodeEvaluator(self.in_dir)
+        result = evaluator.evaluate(kwargs)
+
         self.assertEqual(result.get('error'), "Correct answer\n")
         self.assertTrue(result.get('success'))
 
@@ -123,15 +194,29 @@ class BashStdioEvaluationTestCases(unittest.TestCase):
                              """
                              )
         test_case_data = [{'expected_output': '1 2 3\n4 5 6\n7 8 9\n',
-                           'expected_input': '1,2,3\n4,5,6\n7,8,9',
-                           'weight': 0.0
+                           'expected_input': '1,2,3\n4,5,6\n7,8,9',
+                           'test_case_type': 'stdiobasedtestcase',
+                           'weight': 0.0
                            }]
-        get_class = BashStdioEvaluator()
-        kwargs = {"user_answer": user_answer,
-                  "partial_grading": True,
-                  "test_case_data": test_case_data
-                  }
-        result = get_class.evaluate(**kwargs)
+        # get_class = BashStdioEvaluator()
+        # kwargs = {"user_answer": user_answer,
+        #           "partial_grading": True,
+        #           "test_case_data": test_case_data
+        #           }
+        # result = get_class.evaluate(**kwargs)
+        kwargs = {
+                  'metadata': {
+                    'user_answer': user_answer,
+                    'file_paths': self.file_paths,
+                    'partial_grading': False,
+                    'language': 'bash'
+                    },
+                  'test_case_data': test_case_data,
+                  }
+
+        evaluator = CodeEvaluator(self.in_dir)
+        result = evaluator.evaluate(kwargs)
+
         self.assertEqual(result.get('error'), "Correct answer\n")
         self.assertTrue(result.get('success'))
 
@@ -144,14 +229,27 @@ class BashStdioEvaluationTestCases(unittest.TestCase):
                              )
         test_case_data = [{'expected_output': '11',
                            'expected_input': '5\n6',
+                           'test_case_type': 'stdiobasedtestcase',
                            'weight': 0.0
                            }]
-        get_class = BashStdioEvaluator()
-        kwargs = {"user_answer": user_answer,
-                  "partial_grading": True,
-                  "test_case_data": test_case_data
-                  }
-        result = get_class.evaluate(**kwargs)
+        # get_class = BashStdioEvaluator()
+        # kwargs = {"user_answer": user_answer,
+        #           "partial_grading": True,
+        #           "test_case_data": test_case_data
+        #           }
+        # result = get_class.evaluate(**kwargs)
+        kwargs = {
+                  'metadata': {
+                    'user_answer': user_answer,
+                    'file_paths': self.file_paths,
+                    'partial_grading': False,
+                    'language': 'bash'
+                    },
+                  'test_case_data': test_case_data,
+                  }
+
+        evaluator = CodeEvaluator(self.in_dir)
+        result = evaluator.evaluate(kwargs)
         self.assertIn("Incorrect", result.get('error'))
         self.assertFalse(result.get('success'))
 
@@ -164,14 +262,27 @@ class BashStdioEvaluationTestCases(unittest.TestCase):
                              )
         test_case_data = [{'expected_output': '10',
                            'expected_input': '',
+                           'test_case_type': 'stdiobasedtestcase',
                            'weight': 0.0
                            }]
-        get_class = BashStdioEvaluator()
-        kwargs = {"user_answer": user_answer,
-                  "partial_grading": True,
-                  "test_case_data": test_case_data
-                  }
-        result = get_class.evaluate(**kwargs)
+        # get_class = BashStdioEvaluator()
+        # kwargs = {"user_answer": user_answer,
+        #           "partial_grading": True,
+        #           "test_case_data": test_case_data
+        #           }
+        # result = get_class.evaluate(**kwargs)
+        kwargs = {
+                  'metadata': {
+                    'user_answer': user_answer,
+                    'file_paths': self.file_paths,
+                    'partial_grading': False,
+                    'language': 'bash'
+                    },
+                  'test_case_data': test_case_data,
+                  }
+
+        evaluator = CodeEvaluator(self.in_dir)
+        result = evaluator.evaluate(kwargs)
         self.assertEqual(result.get('error'), "Correct answer\n")
         self.assertTrue(result.get('success'))
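The net effect of the diff above is that the tests no longer instantiate BashCodeEvaluator or BashStdioEvaluator directly: each test now builds one kwargs dictionary with a 'metadata' block plus a 'test_case_data' list and hands it to CodeEvaluator.evaluate(). A minimal sketch of that call shape follows. It assumes the yaksh package is importable and that any writable scratch directory is acceptable as the evaluator's working directory (the tests use self.in_dir, whose setup is not shown in this diff).

    import tempfile

    from yaksh.code_evaluator import CodeEvaluator

    # Assertion-style bash question: the test case names a reference script
    # and its argument file, as in the tests above.
    test_case_data = [
        {"test_case": "bash_files/sample.sh,bash_files/sample.args",
         "test_case_type": "standardtestcase",
         "weight": 0.0}
    ]

    user_answer = ("#!/bin/bash\n[[ $# -eq 2 ]]"
                   " && echo $(( $1 + $2 )) && exit $(( $1 + $2 ))")

    kwargs = {
        'metadata': {
            'user_answer': user_answer,
            'file_paths': None,          # or a list of (path, flag) tuples, as in the file-based test
            'partial_grading': False,
            'language': 'bash'
        },
        'test_case_data': test_case_data,
    }

    in_dir = tempfile.mkdtemp()          # assumption: any scratch directory works here
    evaluator = CodeEvaluator(in_dir)
    result = evaluator.evaluate(kwargs)  # returns a dict with at least 'success' and 'error'
    print(result.get('success'), result.get('error'))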