author    | adityacp | 2016-12-23 11:41:31 +0530
committer | adityacp | 2016-12-23 11:41:31 +0530
commit    | 30f35a22e14e6e372a760676fddceb0807d22141 (patch)
tree      | 5d06521225f7d4a1288fd88ac8e2498200d83435 /yaksh
parent    | 9d4e16cc5024b756d811e353714074d7d6066c2f (diff)
download  | online_test-30f35a22e14e6e372a760676fddceb0807d22141.tar.gz
          | online_test-30f35a22e14e6e372a760676fddceb0807d22141.tar.bz2
          | online_test-30f35a22e14e6e372a760676fddceb0807d22141.zip
Add test cases based on moderator input code
Diffstat (limited to 'yaksh')
-rw-r--r-- | yaksh/evaluator_tests/test_bash_evaluation.py   |  49
-rw-r--r-- | yaksh/evaluator_tests/test_c_cpp_evaluation.py  | 136
-rw-r--r-- | yaksh/evaluator_tests/test_java_evaluation.py   | 121
-rw-r--r-- | yaksh/evaluator_tests/test_scilab_evaluation.py |  35
4 files changed, 321 insertions, 20 deletions
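
Note on the change: each standard test case now embeds the moderator's test code directly in test_case_data instead of pointing at a fixture file (e.g. "bash_files/sample.sh" or "c_cpp_files/main.cpp"). A minimal sketch of the new dict shape, lifted from the bash test below; treat it as illustrative of the layout used by these tests rather than as the evaluator's API reference:

    from textwrap import dedent

    # Inline moderator test code replaces the old fixture-file path.
    tc_data = dedent("""
        #!/bin/bash
        [[ $# -eq 2 ]] && echo $(( $1 + $2 )) && exit $(( $1 + $2 ))
        """)

    test_case_data = [{
        "test_case": tc_data,            # moderator-supplied code, inline
        "test_case_args": "1 2\n2 1",    # argument sets, as used by the bash test below
        "test_case_type": "standardtestcase",
        "weight": 0.0,
    }]
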
diff --git a/yaksh/evaluator_tests/test_bash_evaluation.py b/yaksh/evaluator_tests/test_bash_evaluation.py
index 06a56e4..5022c1d 100644
--- a/yaksh/evaluator_tests/test_bash_evaluation.py
+++ b/yaksh/evaluator_tests/test_bash_evaluation.py
@@ -12,10 +12,17 @@ from textwrap import dedent
 class BashAssertionEvaluationTestCases(unittest.TestCase):
     def setUp(self):
-        with open('/tmp/test.txt', 'wb') as f:
+        self.f_path = os.path.join(tempfile.gettempdir(), "test.txt")
+        with open(self.f_path, 'wb') as f:
             f.write('2'.encode('ascii'))
+        self.tc_data = dedent("""
+            #!/bin/bash
+            [[ $# -eq 2 ]] && echo $(( $1 + $2 )) && exit $(( $1 + $2 ))
+            """)
+        self.tc_data_args = "1 2\n2 1"
         self.test_case_data = [
-            {"test_case": "bash_files/sample.sh,bash_files/sample.args",
+            {"test_case": self.tc_data,
+             "test_case_args": self.tc_data_args,
              "test_case_type": "standardtestcase",
              "weight": 0.0
             }
@@ -27,10 +34,11 @@ class BashAssertionEvaluationTestCases(unittest.TestCase):
         self.file_paths = None
     def tearDown(self):
-        os.remove('/tmp/test.txt')
+        os.remove(self.f_path)
         shutil.rmtree(self.in_dir)
     def test_correct_answer(self):
+        # Given
         user_answer = ("#!/bin/bash\n[[ $# -eq 2 ]]"
                        " && echo $(( $1 + $2 )) && exit $(( $1 + $2 ))"
                        )
@@ -44,13 +52,16 @@ class BashAssertionEvaluationTestCases(unittest.TestCase):
             'test_case_data': self.test_case_data,
         }
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
+        # Then
         self.assertTrue(result.get('success'))
         self.assertEqual(result.get('error'), "Correct answer\n")
     def test_error(self):
+        # Given
         user_answer = ("#!/bin/bash\n[[ $# -eq 2 ]] "
                        "&& echo $(( $1 - $2 )) && exit $(( $1 - $2 ))")
         kwargs = {
@@ -63,13 +74,16 @@ class BashAssertionEvaluationTestCases(unittest.TestCase):
             'test_case_data': self.test_case_data,
         }
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
+        # Then
         self.assertFalse(result.get("success"))
         self.assertTrue("Error" in result.get("error"))
     def test_infinite_loop(self):
+        # Given
         user_answer = ("#!/bin/bash\nwhile [ 1 ] ;"
                        " do echo "" > /dev/null ; done")
         kwargs = {
@@ -82,16 +96,25 @@ class BashAssertionEvaluationTestCases(unittest.TestCase):
             'test_case_data': self.test_case_data,
         }
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
+        # Then
         self.assertFalse(result.get("success"))
         self.assertEqual(result.get("error"), self.timeout_msg)
     def test_file_based_assert(self):
-        self.file_paths = [('/tmp/test.txt', False)]
+        # Given
+        self.file_paths = [(self.f_path, False)]
+        self.tc_data = dedent("""
+            #!/bin/bash
+            cat $1
+            """)
+        self.tc_data_args = "test.txt"
         self.test_case_data = [
-            {"test_case": "bash_files/sample1.sh,bash_files/sample1.args",
+            {"test_case": self.tc_data,
+             "test_case_args": self.tc_data_args,
              "test_case_type": "standardtestcase",
              "weight": 0.0
             }
@@ -107,9 +130,11 @@ class BashAssertionEvaluationTestCases(unittest.TestCase):
             'test_case_data': self.test_case_data,
         }
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
+        # Then
         self.assertTrue(result.get("success"))
         self.assertEqual(result.get("error"), "Correct answer\n")
@@ -123,6 +148,7 @@ class BashStdIOEvaluationTestCases(unittest.TestCase):
     def test_correct_answer(self):
+        # Given
         user_answer = dedent(""" #!/bin/bash
                              read A
                              read B
@@ -144,13 +170,16 @@ class BashStdIOEvaluationTestCases(unittest.TestCase):
             'test_case_data': test_case_data,
         }
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
+        # Then
         self.assertEqual(result.get('error'), "Correct answer\n")
         self.assertTrue(result.get('success'))
     def test_array_input(self):
+        # Given
         user_answer = dedent(""" readarray arr;
                              COUNTER=0
                              while [ $COUNTER -lt 3 ]; do
@@ -174,13 +203,16 @@ class BashStdIOEvaluationTestCases(unittest.TestCase):
             'test_case_data': test_case_data,
         }
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
+        # Then
         self.assertEqual(result.get('error'), "Correct answer\n")
         self.assertTrue(result.get('success'))
     def test_incorrect_answer(self):
+        # Given
         user_answer = dedent(""" #!/bin/bash
                              read A
                              read B
@@ -202,12 +234,16 @@ class BashStdIOEvaluationTestCases(unittest.TestCase):
             'test_case_data': test_case_data,
         }
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
+
+        # Then
         self.assertIn("Incorrect", result.get('error'))
         self.assertFalse(result.get('success'))
     def test_stdout_only(self):
+        # Given
         user_answer = dedent(""" #!/bin/bash
                              A=6
                              B=4
@@ -229,8 +265,11 @@ class BashStdIOEvaluationTestCases(unittest.TestCase):
             'test_case_data': test_case_data,
         }
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
+
+        # Then
         self.assertEqual(result.get('error'), "Correct answer\n")
         self.assertTrue(result.get('success'))
diff --git a/yaksh/evaluator_tests/test_c_cpp_evaluation.py b/yaksh/evaluator_tests/test_c_cpp_evaluation.py
index dc6fdc9..ac56142 100644
--- a/yaksh/evaluator_tests/test_c_cpp_evaluation.py
+++ b/yaksh/evaluator_tests/test_c_cpp_evaluation.py
@@ -15,10 +15,45 @@ from yaksh.settings import SERVER_TIMEOUT
 class CAssertionEvaluationTestCases(unittest.TestCase):
     def setUp(self):
-        with open('/tmp/test.txt', 'wb') as f:
+        self.f_path = os.path.join(tempfile.gettempdir(), "test.txt")
+        with open(self.f_path, 'wb') as f:
             f.write('2'.encode('ascii'))
         tmp_in_dir_path = tempfile.mkdtemp()
-        self.test_case_data = [{"test_case": "c_cpp_files/main.cpp",
+        self.tc_data = dedent("""
+            #include <stdio.h>
+            #include <stdlib.h>
+
+            extern int add(int, int);
+
+            template <class T>
+
+            void check(T expect, T result)
+            {
+                if (expect == result)
+                {
+                    printf("Correct: Expected %d got %d ",expect,result);
+                }
+                else
+                {
+                    printf("Incorrect: Expected %d got %d ",expect,result);
+                    exit (1);
+                }
+            }
+
+            int main(void)
+            {
+                int result;
+                result = add(0,0);
+                printf("Input submitted to the function: 0, 0");
+                check(0, result);
+                result = add(2,3);
+                printf("Input submitted to the function: 2 3");
+                check(5,result);
+                printf("All Correct");
+                return 0;
+            }
+            """)
+        self.test_case_data = [{"test_case": self.tc_data,
                                 "test_case_type": "standardtestcase",
                                 "weight": 0.0
                                }]
@@ -29,10 +64,11 @@ class CAssertionEvaluationTestCases(unittest.TestCase):
         self.file_paths = None
     def tearDown(self):
-        os.remove('/tmp/test.txt')
+        os.remove(self.f_path)
         shutil.rmtree(self.in_dir)
     def test_correct_answer(self):
+        # Given
         user_answer = "int add(int a, int b)\n{return a+b;}"
         kwargs = {
             'metadata': {
@@ -44,13 +80,16 @@ class CAssertionEvaluationTestCases(unittest.TestCase):
             'test_case_data': self.test_case_data,
         }
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
+        # Then
         self.assertTrue(result.get('success'))
         self.assertEqual(result.get('error'), "Correct answer\n")
     def test_incorrect_answer(self):
+        # Given
         user_answer = "int add(int a, int b)\n{return a-b;}"
         kwargs = {
             'metadata': {
@@ -62,15 +101,19 @@ class CAssertionEvaluationTestCases(unittest.TestCase):
             'test_case_data': self.test_case_data,
         }
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
         lines_of_error = len(result.get('error').splitlines())
+
+        # Then
         self.assertFalse(result.get('success'))
         self.assertIn("Incorrect:", result.get('error'))
         self.assertTrue(lines_of_error > 1)
     def test_compilation_error(self):
+        # Given
         user_answer = "int add(int a, int b)\n{return a+b}"
         kwargs = {
             'metadata': {
@@ -82,13 +125,16 @@ class CAssertionEvaluationTestCases(unittest.TestCase):
             'test_case_data': self.test_case_data,
         }
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
+        # Then
         self.assertFalse(result.get("success"))
         self.assertTrue("Compilation Error" in result.get("error"))
     def test_infinite_loop(self):
+        # Given
         user_answer = "int add(int a, int b)\n{while(1>0){}}"
         kwargs = {
             'metadata': {
@@ -100,15 +146,45 @@ class CAssertionEvaluationTestCases(unittest.TestCase):
             'test_case_data': self.test_case_data,
         }
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
+        # Then
         self.assertFalse(result.get("success"))
         self.assertEqual(result.get("error"), self.timeout_msg)
     def test_file_based_assert(self):
-        self.file_paths = [('/tmp/test.txt', False)]
-        self.test_case_data = [{"test_case": "c_cpp_files/file_data.c",
+        # Given
+        self.file_paths = [(self.f_path, False)]
+        self.tc_data = dedent("""
+            #include <stdio.h>
+            #include <stdlib.h>
+
+            extern int ans();
+
+            template <class T>
+            void check(T expect,T result)
+            {
+                if (expect == result)
+                {
+                    printf("Correct: Expected %d got %d ",expect,result);
+                }
+                else
+                {
+                    printf("Incorrect: Expected %d got %d ",expect,result);
+                    exit (0);
+                }
+            }
+
+            int main(void)
+            {
+                int result;
+                result = ans();
+                check(50, result);
+            }
+            """)
+        self.test_case_data = [{"test_case": self.tc_data,
                                 "test_case_type": "standardtestcase",
                                 "weight": 0.0
                                }]
@@ -134,12 +210,15 @@ class CAssertionEvaluationTestCases(unittest.TestCase):
             'test_case_data': self.test_case_data,
         }
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
+        # Then
         self.assertTrue(result.get('success'))
         self.assertEqual(result.get('error'), "Correct answer\n")
+
 class CppStdIOEvaluationTestCases(unittest.TestCase):
     def setUp(self):
         self.test_case_data = [{'expected_output': '11',
@@ -153,7 +232,11 @@ class CppStdIOEvaluationTestCases(unittest.TestCase):
                             " your code.").format(SERVER_TIMEOUT)
         self.file_paths = None
+    def tearDown(self):
+        shutil.rmtree(self.in_dir)
+
     def test_correct_answer(self):
+        # Given
         user_answer = dedent("""
                              #include<stdio.h>
                              int main(void){
@@ -171,13 +254,16 @@ class CppStdIOEvaluationTestCases(unittest.TestCase):
             'test_case_data': self.test_case_data,
         }
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
+        # Then
         self.assertEqual(result.get('error'), "Correct answer\n")
         self.assertTrue(result.get('success'))
     def test_array_input(self):
+        # Given
         self.test_case_data = [{'expected_output': '561',
                                 'expected_input': '5\n6\n1',
                                 'weight': 0.0,
@@ -202,13 +288,16 @@ class CppStdIOEvaluationTestCases(unittest.TestCase):
             'test_case_data': self.test_case_data,
         }
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
+        # Then
         self.assertEqual(result.get('error'), "Correct answer\n")
         self.assertTrue(result.get('success'))
     def test_string_input(self):
+        # Given
         self.test_case_data = [{'expected_output': 'abc',
                                 'expected_input': 'abc',
                                 'weight': 0.0,
@@ -231,13 +320,16 @@ class CppStdIOEvaluationTestCases(unittest.TestCase):
             'test_case_data': self.test_case_data,
         }
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
+        # Then
         self.assertEqual(result.get('error'), "Correct answer\n")
         self.assertTrue(result.get('success'))
     def test_incorrect_answer(self):
+        # Given
         user_answer = dedent("""
                              #include<stdio.h>
                              int main(void){
@@ -254,15 +346,19 @@ class CppStdIOEvaluationTestCases(unittest.TestCase):
             'test_case_data': self.test_case_data,
         }
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
         lines_of_error = len(result.get('error').splitlines())
+
+        # Then
         self.assertFalse(result.get('success'))
         self.assertIn("Incorrect", result.get('error'))
         self.assertTrue(lines_of_error > 1)
     def test_error(self):
+        # Given
         user_answer = dedent("""
                              #include<stdio.h>
                              int main(void){
@@ -279,13 +375,16 @@ class CppStdIOEvaluationTestCases(unittest.TestCase):
             'test_case_data': self.test_case_data,
         }
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
+        # Then
         self.assertFalse(result.get("success"))
         self.assertTrue("Compilation Error" in result.get("error"))
     def test_infinite_loop(self):
+        # Given
         user_answer = dedent("""
                              #include<stdio.h>
                              int main(void){
@@ -302,13 +401,16 @@ class CppStdIOEvaluationTestCases(unittest.TestCase):
             'test_case_data': self.test_case_data,
         }
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
+        # Then
         self.assertFalse(result.get("success"))
         self.assertEqual(result.get("error"), self.timeout_msg)
     def test_only_stdout(self):
+        # Given
         self.test_case_data = [{'expected_output': '11',
                                 'expected_input': '',
                                 'weight': 0.0,
@@ -330,13 +432,16 @@ class CppStdIOEvaluationTestCases(unittest.TestCase):
             'test_case_data': self.test_case_data,
         }
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
+        # Then
         self.assertEqual(result.get('error'), "Correct answer\n")
         self.assertTrue(result.get('success'))
     def test_cpp_correct_answer(self):
+        # Given
         user_answer = dedent("""
                              #include<iostream>
                              using namespace std;
@@ -355,13 +460,16 @@ class CppStdIOEvaluationTestCases(unittest.TestCase):
             'test_case_data': self.test_case_data,
         }
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
+        # Then
         self.assertEqual(result.get('error'), "Correct answer\n")
         self.assertTrue(result.get('success'))
     def test_cpp_array_input(self):
+        # Given
         self.test_case_data = [{'expected_output': '561',
                                 'expected_input': '5\n6\n1',
                                 'weight': 0.0,
@@ -387,13 +495,16 @@ class CppStdIOEvaluationTestCases(unittest.TestCase):
             'test_case_data': self.test_case_data,
         }
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
+        # Then
         self.assertEqual(result.get('error'), "Correct answer\n")
         self.assertTrue(result.get('success'))
     def test_cpp_string_input(self):
+        # Given
         self.test_case_data = [{'expected_output': 'abc',
                                 'expected_input': 'abc',
                                 'weight': 0.0,
@@ -417,13 +528,16 @@ class CppStdIOEvaluationTestCases(unittest.TestCase):
             'test_case_data': self.test_case_data,
         }
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
+        # Then
         self.assertEqual(result.get('error'), "Correct answer\n")
         self.assertTrue(result.get('success'))
     def test_cpp_incorrect_answer(self):
+        # Given
         user_answer = dedent("""
                              #include<iostream>
                              using namespace std;
@@ -441,15 +555,19 @@ class CppStdIOEvaluationTestCases(unittest.TestCase):
             'test_case_data': self.test_case_data,
         }
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
         lines_of_error = len(result.get('error').splitlines())
+
+        # Then
         self.assertFalse(result.get('success'))
         self.assertIn("Incorrect", result.get('error'))
         self.assertTrue(lines_of_error > 1)
     def test_cpp_error(self):
+        # Given
         user_answer = dedent("""
                              #include<iostream>
                              using namespace std;
@@ -467,13 +585,16 @@ class CppStdIOEvaluationTestCases(unittest.TestCase):
             'test_case_data': self.test_case_data,
         }
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
+        # Then
         self.assertFalse(result.get("success"))
         self.assertTrue("Compilation Error" in result.get("error"))
     def test_cpp_infinite_loop(self):
+        # Given
         user_answer = dedent("""
                              #include<iostream>
                              using namespace std;
@@ -491,13 +612,16 @@ class CppStdIOEvaluationTestCases(unittest.TestCase):
             'test_case_data': self.test_case_data,
         }
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
+        # Then
         self.assertFalse(result.get("success"))
         self.assertEqual(result.get("error"), self.timeout_msg)
     def test_cpp_only_stdout(self):
+        # Given
         self.test_case_data = [{'expected_output': '11',
                                 'expected_input': '',
                                 'weight': 0.0,
@@ -520,9 +644,11 @@ class CppStdIOEvaluationTestCases(unittest.TestCase):
             'test_case_data': self.test_case_data,
         }
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
+        # Then
         self.assertEqual(result.get('error'), "Correct answer\n")
         self.assertTrue(result.get('success'))
diff --git a/yaksh/evaluator_tests/test_java_evaluation.py b/yaksh/evaluator_tests/test_java_evaluation.py
index 36eb6a5..58d5432 100644
--- a/yaksh/evaluator_tests/test_java_evaluation.py
+++ b/yaksh/evaluator_tests/test_java_evaluation.py
@@ -14,11 +14,47 @@ from yaksh.java_stdio_evaluator import JavaStdIOEvaluator
 class JavaAssertionEvaluationTestCases(unittest.TestCase):
     def setUp(self):
-        with open('/tmp/test.txt', 'wb') as f:
+        self.f_path = os.path.join(tempfile.gettempdir(), "test.txt")
+        with open(self.f_path, 'wb') as f:
             f.write('2'.encode('ascii'))
         tmp_in_dir_path = tempfile.mkdtemp()
+        self.tc_data = dedent("""
+            class main
+            {
+                public static <E> void check(E expect, E result)
+                {
+                    if(result.equals(expect))
+                    {
+                        System.out.println("Correct:Output expected "+expect+" and got "+result);
+                    }
+                    else
+                    {
+                        System.out.println("Incorrect:Output expected "+expect+" but got "+result);
+                        System.exit(1);
+                    }
+                }
+                public static void main(String arg[])
+                {
+                    Test t = new Test();
+                    int result, input, output;
+                    input = 0; output = 0;
+                    result = t.square_num(input);
+                    System.out.println("Input submitted to the function: "+input);
+                    check(output, result);
+                    input = 5; output = 25;
+                    result = t.square_num(input);
+                    System.out.println("Input submitted to the function: "+input);
+                    check(output, result);
+                    input = 6; output = 36;
+                    result = t.square_num(input);
+                    System.out.println("Input submitted to the function: "+input);
+                    check(output, result);
+                }
+            }
+            """)
+
         self.test_case_data = [
-            {"test_case": "java_files/main_square.java",
+            {"test_case": self.tc_data,
              "test_case_type": "standardtestcase",
              "weight": 0.0
             }
@@ -33,10 +69,11 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase):
     def tearDown(self):
         gd.SERVER_TIMEOUT = 4
-        os.remove('/tmp/test.txt')
+        os.remove(self.f_path)
         shutil.rmtree(self.in_dir)
     def test_correct_answer(self):
+        # Given
         user_answer = "class Test {\n\tint square_num(int a) {\n\treturn a*a;\n\t}\n}"
         kwargs = {
             'metadata': {
@@ -48,13 +85,16 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase):
             'test_case_data': self.test_case_data,
         }
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
+        # Then
         self.assertEqual(result.get('error'), "Correct answer\n")
         self.assertTrue(result.get('success'))
     def test_incorrect_answer(self):
+        # Given
         user_answer = "class Test {\n\tint square_num(int a) {\n\treturn a;\n\t}\n}"
         kwargs = {
             'metadata': {
@@ -66,9 +106,11 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase):
             'test_case_data': self.test_case_data,
         }
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
+        # Then
         self.assertFalse(result.get('success'))
         lines_of_error = len(result.get('error').splitlines())
         self.assertFalse(result.get('success'))
@@ -76,6 +118,7 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase):
         self.assertTrue(lines_of_error > 1)
     def test_error(self):
+        # Given
         user_answer = "class Test {\n\tint square_num(int a) {\n\treturn a*a"
         kwargs = {
             'metadata': {
@@ -87,13 +130,16 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase):
             'test_case_data': self.test_case_data,
         }
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
+        # Then
         self.assertFalse(result.get("success"))
         self.assertTrue("Error" in result.get("error"))
     def test_infinite_loop(self):
+        # Given
         user_answer = "class Test {\n\tint square_num(int a) {\n\t\twhile(0==0){\n\t\t}\n\t}\n}"
         kwargs = {
             'metadata': {
@@ -105,16 +151,47 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase):
             'test_case_data': self.test_case_data,
         }
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
+        # Then
         self.assertFalse(result.get("success"))
         self.assertEqual(result.get("error"), self.timeout_msg)
     def test_file_based_assert(self):
-        self.file_paths = [("/tmp/test.txt", False)]
+        # Given
+        self.file_paths = [(self.f_path, False)]
+        self.tc_data = dedent("""
+            class main
+            {
+                public static <E> void check(E expect, E result)
+                {
+                    if(result.equals(expect))
+                    {
+                        System.out.println("Correct:Output expected "+expect+" and got "+result);
+                    }
+                    else
+                    {
+                        System.out.println("Incorrect:Output expected "+expect+" but got "+result);
+                        System.exit(1);
+                    }
+                }
+                public static void main(String arg[])
+                {
+                    String result = "";
+                    Test t = new Test();
+                    try{
+                        result = t.readFile();}
+                    catch(Exception e){
+                        System.out.print(e);
+                    }
+                    check("2", result);
+                }
+            }
+            """)
         self.test_case_data = [
-            {"test_case": "java_files/read_file.java",
+            {"test_case": self.tc_data,
              "test_case_type": "standardtestcase",
              "weight": 0.0
             }
@@ -147,15 +224,18 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase):
             'test_case_data': self.test_case_data,
         }
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
+        # Then
         self.assertTrue(result.get("success"))
         self.assertEqual(result.get("error"), "Correct answer\n")
 class JavaStdIOEvaluationTestCases(unittest.TestCase):
     def setUp(self):
-        with open('/tmp/test.txt', 'wb') as f:
+        self.f_path = os.path.join(tempfile.gettempdir(), "test.txt")
+        with open(self.f_path, 'wb') as f:
             f.write('2'.encode('ascii'))
         tmp_in_dir_path = tempfile.mkdtemp()
         self.in_dir = tmp_in_dir_path
@@ -172,10 +252,11 @@ class JavaStdIOEvaluationTestCases(unittest.TestCase):
     def tearDown(self):
         gd.SERVER_TIMEOUT = 4
-        os.remove('/tmp/test.txt')
+        os.remove(self.f_path)
         shutil.rmtree(self.in_dir)
     def test_correct_answer(self):
+        # Given
         user_answer = dedent("""
                              import java.util.Scanner;
                              class Test
@@ -195,13 +276,16 @@ class JavaStdIOEvaluationTestCases(unittest.TestCase):
             'test_case_data': self.test_case_data,
         }
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
+        # Then
         self.assertEqual(result.get('error'), "Correct answer\n")
         self.assertTrue(result.get('success'))
     def test_array_input(self):
+        # Given
         self.test_case_data = [{'expected_output': '561',
                                 'expected_input': '5\n6\n1',
                                 'test_case_type': 'stdiobasedtestcase',
                                 'weight': 0.0
@@ -227,13 +311,16 @@ class JavaStdIOEvaluationTestCases(unittest.TestCase):
             'test_case_data': self.test_case_data,
         }
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
+        # Then
         self.assertEqual(result.get('error'), "Correct answer\n")
         self.assertTrue(result.get('success'))
     def test_incorrect_answer(self):
+        # Given
         user_answer = dedent("""
                              import java.util.Scanner;
                              class Test
@@ -253,15 +340,19 @@ class JavaStdIOEvaluationTestCases(unittest.TestCase):
             'test_case_data': self.test_case_data,
         }
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
         lines_of_error = len(result.get('error').splitlines())
+
+        # Then
         self.assertFalse(result.get('success'))
         self.assertIn("Incorrect", result.get('error'))
         self.assertTrue(lines_of_error > 1)
     def test_error(self):
+        # Given
         user_answer = dedent("""
                              class Test
                              {
@@ -277,13 +368,16 @@ class JavaStdIOEvaluationTestCases(unittest.TestCase):
             'test_case_data': self.test_case_data,
         }
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
+        # Then
         self.assertFalse(result.get("success"))
         self.assertTrue("Compilation Error" in result.get("error"))
     def test_infinite_loop(self):
+        # Given
         user_answer = dedent("""
                              class Test
                              {public static void main(String[] args){
@@ -301,13 +395,16 @@ class JavaStdIOEvaluationTestCases(unittest.TestCase):
             'test_case_data': self.test_case_data,
         }
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
+        # Then
         self.assertFalse(result.get("success"))
         self.assertEqual(result.get("error"), self.timeout_msg)
     def test_only_stdout(self):
+        # Given
         self.test_case_data = [{'expected_output': '11',
                                 'expected_input': '',
                                 'test_case_type': 'stdiobasedtestcase',
                                 'weight': 0.0
@@ -330,13 +427,16 @@ class JavaStdIOEvaluationTestCases(unittest.TestCase):
             'test_case_data': self.test_case_data,
         }
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
+        # Then
         self.assertEqual(result.get('error'), "Correct answer\n")
         self.assertTrue(result.get('success'))
     def test_string_input(self):
+        # Given
         self.test_case_data = [{'expected_output': 'HelloWorld',
                                 'expected_input': 'Hello\nWorld',
                                 'test_case_type': 'stdiobasedtestcase',
                                 'weight': 0.0
@@ -361,14 +461,17 @@ class JavaStdIOEvaluationTestCases(unittest.TestCase):
             'test_case_data': self.test_case_data,
         }
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
+        # Then
         self.assertEqual(result.get('error'), "Correct answer\n")
         self.assertTrue(result.get('success'))
     def test_file_based_stdout(self):
-        self.file_paths = [("/tmp/test.txt", False)]
+        # Given
+        self.file_paths = [(self.f_path, False)]
         self.test_case_data = [{'expected_output': '2',
                                 'expected_input': '',
                                 'test_case_type': 'stdiobasedtestcase',
                                 'weight': 0.0
@@ -402,9 +505,11 @@ class JavaStdIOEvaluationTestCases(unittest.TestCase):
             'test_case_data': self.test_case_data,
         }
+        # When
         grader = Grader(self.in_dir)
         result = grader.evaluate(kwargs)
+        # Then
         self.assertTrue(result.get("success"))
         self.assertEqual(result.get("error"), "Correct answer\n")
diff --git a/yaksh/evaluator_tests/test_scilab_evaluation.py b/yaksh/evaluator_tests/test_scilab_evaluation.py
index 0275ee8..c6b3b54 100644
--- a/yaksh/evaluator_tests/test_scilab_evaluation.py
+++ b/yaksh/evaluator_tests/test_scilab_evaluation.py
@@ -3,7 +3,7 @@ import unittest
 import os
 import shutil
 import tempfile
-
+from textwrap import dedent
 from yaksh import grader as gd
 from yaksh.grader import Grader
 from yaksh.scilab_code_evaluator import ScilabCodeEvaluator
@@ -11,7 +11,38 @@ from yaksh.scilab_code_evaluator import ScilabCodeEvaluator
 class ScilabEvaluationTestCases(unittest.TestCase):
     def setUp(self):
         tmp_in_dir_path = tempfile.mkdtemp()
-        self.test_case_data = [{"test_case": "scilab_files/test_add.sce",
+        self.tc_data = dedent("""
+            mode(-1)
+            exec("function.sci",-1);
+            i = 0
+            p = add(3,5);
+            correct = (p == 8);
+            if correct then
+                i=i+1
+            end
+            disp("Input submitted 3 and 5")
+            disp("Expected output 8 got " + string(p))
+            p = add(22,-20);
+            correct = (p==2);
+            if correct then
+                i=i+1
+            end
+            disp("Input submitted 22 and -20")
+            disp("Expected output 2 got " + string(p))
+            p =add(91,0);
+            correct = (p==91);
+            if correct then
+                i=i+1
+            end
+            disp("Input submitted 91 and 0")
+            disp("Expected output 91 got " + string(p))
+            if i==3 then
+                exit(5);
+            else
+                exit(3);
+            end
+            """)
+        self.test_case_data = [{"test_case": self.tc_data,
                                 "test_case_type": "standardtestcase",
                                 "weight": 0.0
                                }]
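
For context, every test in this patch drives the grader the same way: the test-case list is packed into a kwargs dict next to the submission metadata and handed to Grader.evaluate(). A self-contained sketch of that flow follows; the stdio expected_input value, the C submission body, and the metadata keys ('user_answer', 'file_paths', 'partial_grading', 'language') are assumptions filled in for illustration based on the pattern these tests use, while the test-case dict fields and the success/error assertions mirror the tests above:

    import tempfile
    from textwrap import dedent
    from yaksh.grader import Grader

    # stdio-based test case, as used by the Cpp/Java StdIO tests in the diff;
    # the expected_input value is an assumed example.
    test_case_data = [{'expected_output': '11',
                       'expected_input': '5\n6',
                       'weight': 0.0,
                       'test_case_type': 'stdiobasedtestcase'}]

    # Illustrative submission (the diff truncates the real one).
    user_answer = dedent("""
        #include<stdio.h>
        int main(void){
            int a, b;
            scanf("%d%d", &a, &b);
            printf("%d", a + b);
            return 0;
        }
        """)

    kwargs = {
        'metadata': {
            'user_answer': user_answer,
            'file_paths': None,
            'partial_grading': False,   # assumed default, as in these tests
            'language': 'cpp'           # assumed language tag
        },
        'test_case_data': test_case_data,
    }

    grader = Grader(tempfile.mkdtemp())   # in_dir: scratch directory for the run
    result = grader.evaluate(kwargs)
    # The tests above assert on exactly these two keys.
    assert result.get('success')
    assert result.get('error') == "Correct answer\n"
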