| author | adityacp | 2016-12-23 11:41:31 +0530 |
|---|---|---|
| committer | adityacp | 2016-12-23 11:41:31 +0530 |
| commit | 30f35a22e14e6e372a760676fddceb0807d22141 (patch) | |
| tree | 5d06521225f7d4a1288fd88ac8e2498200d83435 /yaksh/evaluator_tests/test_bash_evaluation.py | |
| parent | 9d4e16cc5024b756d811e353714074d7d6066c2f (diff) | |
Add test cases based on moderator input code
Diffstat (limited to 'yaksh/evaluator_tests/test_bash_evaluation.py')
-rw-r--r-- | yaksh/evaluator_tests/test_bash_evaluation.py | 49 |
1 file changed, 44 insertions, 5 deletions
diff --git a/yaksh/evaluator_tests/test_bash_evaluation.py b/yaksh/evaluator_tests/test_bash_evaluation.py
index 06a56e4..5022c1d 100644
--- a/yaksh/evaluator_tests/test_bash_evaluation.py
+++ b/yaksh/evaluator_tests/test_bash_evaluation.py
@@ -12,10 +12,17 @@ from textwrap import dedent
 
 class BashAssertionEvaluationTestCases(unittest.TestCase):
     def setUp(self):
-        with open('/tmp/test.txt', 'wb') as f:
+        self.f_path = os.path.join(tempfile.gettempdir(), "test.txt")
+        with open(self.f_path, 'wb') as f:
             f.write('2'.encode('ascii'))
+        self.tc_data = dedent("""
+            #!/bin/bash
+            [[ $# -eq 2 ]] && echo $(( $1 + $2 )) && exit $(( $1 + $2 ))
+            """)
+        self.tc_data_args = "1 2\n2 1"
         self.test_case_data = [
-            {"test_case": "bash_files/sample.sh,bash_files/sample.args",
+            {"test_case": self.tc_data,
+             "test_case_args": self.tc_data_args,
              "test_case_type": "standardtestcase",
              "weight": 0.0
             }
@@ -27,10 +34,11 @@ class BashAssertionEvaluationTestCases(unittest.TestCase):
         self.file_paths = None
 
     def tearDown(self):
-        os.remove('/tmp/test.txt')
+        os.remove(self.f_path)
         shutil.rmtree(self.in_dir)
 
     def test_correct_answer(self):
+        # Given
         user_answer = ("#!/bin/bash\n[[ $# -eq 2 ]]"
                        " && echo $(( $1 + $2 )) && exit $(( $1 + $2 ))"
                        )
@@ -44,13 +52,16 @@ class BashAssertionEvaluationTestCases(unittest.TestCase):
             'test_case_data': self.test_case_data,
         }
 
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
 
+        # Then
         self.assertTrue(result.get('success'))
         self.assertEqual(result.get('error'), "Correct answer\n")
 
     def test_error(self):
+        # Given
         user_answer = ("#!/bin/bash\n[[ $# -eq 2 ]] "
                        "&& echo $(( $1 - $2 )) && exit $(( $1 - $2 ))")
         kwargs = {
@@ -63,13 +74,16 @@ class BashAssertionEvaluationTestCases(unittest.TestCase):
             'test_case_data': self.test_case_data,
         }
 
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
 
+        # Then
         self.assertFalse(result.get("success"))
         self.assertTrue("Error" in result.get("error"))
 
     def test_infinite_loop(self):
+        # Given
         user_answer = ("#!/bin/bash\nwhile [ 1 ] ;"
                        " do echo "" > /dev/null ; done")
         kwargs = {
@@ -82,16 +96,25 @@ class BashAssertionEvaluationTestCases(unittest.TestCase):
             'test_case_data': self.test_case_data,
         }
 
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
 
+        # Then
         self.assertFalse(result.get("success"))
         self.assertEqual(result.get("error"), self.timeout_msg)
 
     def test_file_based_assert(self):
-        self.file_paths = [('/tmp/test.txt', False)]
+        # Given
+        self.file_paths = [(self.f_path, False)]
+        self.tc_data = dedent("""
+            #!/bin/bash
+            cat $1
+            """)
+        self.tc_data_args = "test.txt"
         self.test_case_data = [
-            {"test_case": "bash_files/sample1.sh,bash_files/sample1.args",
+            {"test_case": self.tc_data,
+             "test_case_args": self.tc_data_args,
              "test_case_type": "standardtestcase",
              "weight": 0.0
             }
@@ -107,9 +130,11 @@ class BashAssertionEvaluationTestCases(unittest.TestCase):
             'test_case_data': self.test_case_data,
         }
 
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
 
+        # Then
         self.assertTrue(result.get("success"))
         self.assertEqual(result.get("error"), "Correct answer\n")
 
@@ -123,6 +148,7 @@ class BashStdIOEvaluationTestCases(unittest.TestCase):
 
     def test_correct_answer(self):
+        # Given
         user_answer = dedent("""
                              #!/bin/bash
                              read A
                              read B
@@ -144,13 +170,16 @@ class BashStdIOEvaluationTestCases(unittest.TestCase):
             'test_case_data': test_case_data,
         }
 
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
 
+        # Then
         self.assertEqual(result.get('error'), "Correct answer\n")
         self.assertTrue(result.get('success'))
 
     def test_array_input(self):
+        # Given
         user_answer = dedent("""
                              readarray arr;
                              COUNTER=0
                              while [ $COUNTER -lt 3 ]; do
@@ -174,13 +203,16 @@ class BashStdIOEvaluationTestCases(unittest.TestCase):
             'test_case_data': test_case_data,
         }
 
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
 
+        # Then
         self.assertEqual(result.get('error'), "Correct answer\n")
         self.assertTrue(result.get('success'))
 
     def test_incorrect_answer(self):
+        # Given
         user_answer = dedent("""
                              #!/bin/bash
                              read A
                              read B
@@ -202,12 +234,16 @@ class BashStdIOEvaluationTestCases(unittest.TestCase):
             'test_case_data': test_case_data,
         }
 
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
+
+        # Then
         self.assertIn("Incorrect", result.get('error'))
         self.assertFalse(result.get('success'))
 
     def test_stdout_only(self):
+        # Given
         user_answer = dedent("""
                              #!/bin/bash
                              A=6
                              B=4
@@ -229,8 +265,11 @@ class BashStdIOEvaluationTestCases(unittest.TestCase):
            'test_case_data': test_case_data,
        }
 
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
+
+        # Then
         self.assertEqual(result.get('error'), "Correct answer\n")
         self.assertTrue(result.get('success'))
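The diff drops the checked-in fixtures (`bash_files/sample.sh`, `bash_files/sample.args`) in favour of test-case code and argument lines supplied inline by the moderator. As a rough, self-contained sketch only, the snippet below assembles such an inline `standardtestcase` payload and hands it to the grader the same way the tests above do; the `Grader` import and the exact `metadata` keys are assumptions read off this test module, not a documented API.

```python
# Sketch only (not part of the commit): build an inline bash test case
# and evaluate a submitted answer against it.
import tempfile
from textwrap import dedent

from yaksh.grader import Grader   # assumed import, as used by these tests

checker_script = dedent("""
    #!/bin/bash
    [[ $# -eq 2 ]] && echo $(( $1 + $2 )) && exit $(( $1 + $2 ))
    """)

test_case_data = [{
    "test_case": checker_script,        # reference script stored inline
    "test_case_args": "1 2\n2 1",       # one line of arguments per run
    "test_case_type": "standardtestcase",
    "weight": 0.0,
}]

kwargs = {
    'metadata': {                       # assumed keys, mirroring the tests
        'user_answer': ("#!/bin/bash\n[[ $# -eq 2 ]]"
                        " && echo $(( $1 + $2 )) && exit $(( $1 + $2 ))"),
        'file_paths': None,
        'partial_grading': False,
        'language': 'bash',
    },
    'test_case_data': test_case_data,
}

grader = Grader(tempfile.mkdtemp())     # scratch directory for evaluation
result = grader.evaluate(kwargs)
print(result.get('success'), result.get('error'))
```

Keeping the checker script and its argument lines inside the test-case record itself, rather than in sample files, is what lets moderators define bash questions entirely from their own input code, which appears to be the point of this change.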