author      adityacp    2018-05-24 12:07:39 +0530
committer   adityacp    2018-06-07 14:50:47 +0530
commit      97b657edc2a323f832c81f0e34ce5761bd21f7e9 (patch)
tree        09f4367f813fab99ffa8cd540d246cfc8c8fa00a /yaksh/evaluator_tests
parent      78ce1804d3a82327aa0da1510bb5c03d6bbff3ba (diff)
Pep8 changes
Diffstat (limited to 'yaksh/evaluator_tests')
-rw-r--r--  yaksh/evaluator_tests/test_bash_evaluation.py         | 178
-rw-r--r--  yaksh/evaluator_tests/test_c_cpp_evaluation.py        | 223
-rw-r--r--  yaksh/evaluator_tests/test_grader_evaluation.py       |  31
-rw-r--r--  yaksh/evaluator_tests/test_java_evaluation.py         | 213
-rw-r--r--  yaksh/evaluator_tests/test_python_stdio_evaluator.py  |   5
-rw-r--r--  yaksh/evaluator_tests/test_scilab_evaluation.py       |  27
-rw-r--r--  yaksh/evaluator_tests/test_simple_question_types.py   | 250
7 files changed, 417 insertions(+), 510 deletions(-)
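The bulk of the diff below is mechanical PEP 8 cleanup: unused imports are dropped, missing blank lines between classes are added, and the long string literals and recurring kwargs dictionaries in the evaluator tests are re-wrapped with consistent hanging indents. As an orientation aid, here is a minimal, hypothetical sketch of that kwargs layout after the reformatting; the fixture values are placeholders, not the project's real test data, and the call into the grader is outside the diff context so it is not reproduced here.

    # Illustrative sketch only: placeholder values standing in for the real
    # yaksh test fixtures, showing the hanging-indent style this commit
    # applies to the recurring kwargs dictionaries in the evaluator tests.
    user_answer = ("#!/bin/bash\n[[ $# -eq 2 ]]"
                   " && echo $(( $1 + $2 )) && exit $(( $1 + $2 ))")

    test_case_data = [{
        "test_case": "#!/bin/bash\necho $(( $1 + $2 ))",  # placeholder script
        "test_case_args": "1 2\n2 1",
        "test_case_type": "standardtestcase",
        "weight": 0.0
    }]

    kwargs = {
        'metadata': {
            'user_answer': user_answer,
            'file_paths': None,
            'partial_grading': False,
            'language': 'bash'
        }, 'test_case_data': test_case_data,
    }

    if __name__ == "__main__":
        import pprint
        pprint.pprint(kwargs)  # confirms the structure builds and prints cleanly
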
diff --git a/yaksh/evaluator_tests/test_bash_evaluation.py b/yaksh/evaluator_tests/test_bash_evaluation.py index 5542710..19f9fb2 100644 --- a/yaksh/evaluator_tests/test_bash_evaluation.py +++ b/yaksh/evaluator_tests/test_bash_evaluation.py @@ -3,11 +3,9 @@ import unittest import os import shutil import tempfile -from psutil import Process, pid_exists +from psutil import Process # Local Imports from yaksh.grader import Grader -from yaksh.bash_code_evaluator import BashCodeEvaluator -from yaksh.bash_stdio_evaluator import BashStdIOEvaluator from yaksh.evaluator_tests.test_python_evaluation import EvaluatorBaseTest from yaksh.settings import SERVER_TIMEOUT from textwrap import dedent @@ -25,15 +23,15 @@ class BashAssertionEvaluationTestCases(EvaluatorBaseTest): self.tc_data_args = "1 2\n2 1" self.test_case_data = [ {"test_case": self.tc_data, - "test_case_args": self.tc_data_args, - "test_case_type": "standardtestcase", - "weight": 0.0 - } + "test_case_args": self.tc_data_args, + "test_case_type": "standardtestcase", + "weight": 0.0 + } ] self.in_dir = tempfile.mkdtemp() self.timeout_msg = ("Code took more than {0} seconds to run. " - "You probably have an infinite loop in your" - " code.").format(SERVER_TIMEOUT) + "You probably have an infinite loop in your" + " code.").format(SERVER_TIMEOUT) self.file_paths = None def tearDown(self): @@ -43,16 +41,14 @@ class BashAssertionEvaluationTestCases(EvaluatorBaseTest): def test_correct_answer(self): # Given user_answer = ("#!/bin/bash\n[[ $# -eq 2 ]]" - " && echo $(( $1 + $2 )) && exit $(( $1 + $2 ))" - ) - kwargs = { - 'metadata': { - 'user_answer': user_answer, - 'file_paths': self.file_paths, - 'partial_grading': False, - 'language': 'bash' - }, - 'test_case_data': self.test_case_data, + " && echo $(( $1 + $2 )) && exit $(( $1 + $2 ))" + ) + kwargs = {'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'bash' + }, 'test_case_data': self.test_case_data, } # When @@ -65,15 +61,14 @@ class BashAssertionEvaluationTestCases(EvaluatorBaseTest): def test_error(self): # Given user_answer = ("#!/bin/bash\n[[ $# -eq 2 ]] " - "&& echo $(( $1 - $2 )) && exit $(( $1 - $2 ))") + "&& echo $(( $1 - $2 )) && exit $(( $1 - $2 ))") kwargs = { 'metadata': { 'user_answer': user_answer, 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'bash' - }, - 'test_case_data': self.test_case_data, + }, 'test_case_data': self.test_case_data, } # When @@ -87,15 +82,14 @@ class BashAssertionEvaluationTestCases(EvaluatorBaseTest): def test_infinite_loop(self): # Given user_answer = ("#!/bin/bash\nwhile [ 1 ] ;" - " do echo "" > /dev/null ; done") + " do echo "" > /dev/null ; done") kwargs = { 'metadata': { 'user_answer': user_answer, 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'bash' - }, - 'test_case_data': self.test_case_data, + }, 'test_case_data': self.test_case_data, } # When @@ -120,22 +114,19 @@ class BashAssertionEvaluationTestCases(EvaluatorBaseTest): cat $1 """) self.tc_data_args = "test.txt" - self.test_case_data = [ - {"test_case": self.tc_data, - "test_case_args": self.tc_data_args, - "test_case_type": "standardtestcase", - "weight": 0.0 - } - ] + self.test_case_data = [{ + "test_case": self.tc_data, + "test_case_args": self.tc_data_args, + "test_case_type": "standardtestcase", + "weight": 0.0 + }] user_answer = ("#!/bin/bash\ncat $1") - kwargs = { - 'metadata': { + kwargs = {'metadata': { 'user_answer': user_answer, 'file_paths': self.file_paths, 
'partial_grading': False, 'language': 'bash' - }, - 'test_case_data': self.test_case_data, + }, 'test_case_data': self.test_case_data, } # When @@ -145,6 +136,7 @@ class BashAssertionEvaluationTestCases(EvaluatorBaseTest): # Then self.assertTrue(result.get("success")) + class BashStdIOEvaluationTestCases(EvaluatorBaseTest): def setUp(self): self.in_dir = tempfile.mkdtemp() @@ -153,7 +145,6 @@ class BashStdIOEvaluationTestCases(EvaluatorBaseTest): " code.").format(SERVER_TIMEOUT) self.file_paths = None - def test_correct_answer(self): # Given user_answer = dedent(""" #!/bin/bash @@ -162,7 +153,8 @@ class BashStdIOEvaluationTestCases(EvaluatorBaseTest): echo -n `expr $A + $B` """ ) - test_case_data = [{'expected_output': '11', + test_case_data = [{ + 'expected_output': '11', 'expected_input': '5\n6', 'test_case_type': 'stdiobasedtestcase', 'weight': 0.0 @@ -173,8 +165,7 @@ class BashStdIOEvaluationTestCases(EvaluatorBaseTest): 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'bash' - }, - 'test_case_data': test_case_data, + }, 'test_case_data': test_case_data, } # When @@ -190,14 +181,14 @@ class BashStdIOEvaluationTestCases(EvaluatorBaseTest): COUNTER=0 while [ $COUNTER -lt 3 ]; do echo -n "${arr[$COUNTER]}" - let COUNTER=COUNTER+1 + let COUNTER=COUNTER+1 done """ ) test_case_data = [{'expected_output': '1 2 3\n4 5 6\n7 8 9\n', - 'expected_input': '1,2,3\n4,5,6\n7,8,9', - 'test_case_type': 'stdiobasedtestcase', - 'weight': 0.0 + 'expected_input': '1,2,3\n4,5,6\n7,8,9', + 'test_case_type': 'stdiobasedtestcase', + 'weight': 0.0 }] kwargs = { 'metadata': { @@ -205,8 +196,7 @@ class BashStdIOEvaluationTestCases(EvaluatorBaseTest): 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'bash' - }, - 'test_case_data': test_case_data, + }, 'test_case_data': test_case_data, } # When @@ -224,7 +214,8 @@ class BashStdIOEvaluationTestCases(EvaluatorBaseTest): echo -n `expr $A - $B` """ ) - test_case_data = [{'expected_output': '11', + test_case_data = [{ + 'expected_output': '11', 'expected_input': '5\n6', 'test_case_type': 'stdiobasedtestcase', 'weight': 0.0 @@ -235,8 +226,7 @@ class BashStdIOEvaluationTestCases(EvaluatorBaseTest): 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'bash' - }, - 'test_case_data': test_case_data, + }, 'test_case_data': test_case_data, } # When @@ -266,8 +256,7 @@ class BashStdIOEvaluationTestCases(EvaluatorBaseTest): 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'bash' - }, - 'test_case_data': test_case_data, + }, 'test_case_data': test_case_data, } # When @@ -286,8 +275,8 @@ class BashHookEvaluationTestCases(EvaluatorBaseTest): f.write('2'.encode('ascii')) self.in_dir = tempfile.mkdtemp() self.timeout_msg = ("Code took more than {0} seconds to run. 
" - "You probably have an infinite loop in your" - " code.").format(SERVER_TIMEOUT) + "You probably have an infinite loop in your" + " code.").format(SERVER_TIMEOUT) self.file_paths = None def tearDown(self): @@ -306,28 +295,26 @@ class BashHookEvaluationTestCases(EvaluatorBaseTest): success = False err = "Incorrect Answer" mark_fraction = 0.0 - proc = subprocess.Popen(user_answer, shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE - ) + proc = subprocess.Popen( + user_answer, shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE + ) stdout,stderr = proc.communicate() if stdout.decode("utf-8") == "Hello, world!": success, err, mark_fraction = True, "", 1.0 return success, err, mark_fraction - """ - ) + """) test_case_data = [{"test_case_type": "hooktestcase", - "hook_code": hook_code,"weight": 1.0 - }] + "hook_code": hook_code, "weight": 1.0}] kwargs = { 'metadata': { 'user_answer': user_answer, 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'bash' - }, - 'test_case_data': test_case_data, + }, 'test_case_data': test_case_data, } # When @@ -349,20 +336,18 @@ class BashHookEvaluationTestCases(EvaluatorBaseTest): success = False err = "Incorrect Answer" mark_fraction = 0.0 - proc = subprocess.Popen(user_answer, shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE - ) + proc = subprocess.Popen( + user_answer, shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE + ) stdout,stderr = proc.communicate() if stdout.decode("utf-8") == "Hello, world!": success, err, mark_fraction = True, "", 1.0 return success, err, mark_fraction - """ - ) - + """) test_case_data = [{"test_case_type": "hooktestcase", - "hook_code": hook_code,"weight": 1.0 - }] + "hook_code": hook_code, "weight": 1.0}] kwargs = { 'metadata': { @@ -370,8 +355,7 @@ class BashHookEvaluationTestCases(EvaluatorBaseTest): 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'bash' - }, - 'test_case_data': test_case_data, + }, 'test_case_data': test_case_data, } # When @@ -381,7 +365,7 @@ class BashHookEvaluationTestCases(EvaluatorBaseTest): # Then self.assertFalse(result.get('success')) self.assert_correct_output('Incorrect Answer', result.get('error')) - + def test_assert_with_hook(self): # Given user_answer = ("#!/bin/bash\n[[ $# -eq 2 ]]" @@ -393,7 +377,7 @@ class BashHookEvaluationTestCases(EvaluatorBaseTest): """) assert_test_case_args = "1 2\n2 1" - + hook_code = dedent("""\ def check_answer(user_answer): success = False @@ -402,13 +386,11 @@ class BashHookEvaluationTestCases(EvaluatorBaseTest): if "echo $(( $1 + $2 ))" in user_answer: success, err, mark_fraction = True, "", 1.0 return success, err, mark_fraction - """ - ) - + """) test_case_data = [{"test_case_type": "standardtestcase", "test_case": assert_test_case, - "test_case_args":assert_test_case_args, + "test_case_args": assert_test_case_args, 'weight': 1.0 }, {"test_case_type": "hooktestcase", @@ -420,8 +402,7 @@ class BashHookEvaluationTestCases(EvaluatorBaseTest): 'file_paths': self.file_paths, 'partial_grading': True, 'language': 'bash' - }, - 'test_case_data': test_case_data, + }, 'test_case_data': test_case_data, } # When @@ -438,7 +419,7 @@ class BashHookEvaluationTestCases(EvaluatorBaseTest): echo -n Hello, world! """ ) - + hook_code_1 = dedent("""\ def check_answer(user_answer): success = False @@ -447,8 +428,7 @@ class BashHookEvaluationTestCases(EvaluatorBaseTest): if "echo -n Hello, world!" 
in user_answer: success, err, mark_fraction = True, "", 0.5 return success, err, mark_fraction - """ - ) + """) hook_code_2 = dedent("""\ def check_answer(user_answer): import subprocess @@ -465,9 +445,7 @@ class BashHookEvaluationTestCases(EvaluatorBaseTest): if stdout.decode('utf-8') == "Hello, world!": success, err, mark_fraction = True, "", 1.0 return success, err, mark_fraction - """ - ) - + """) test_case_data = [{"test_case_type": "hooktestcase", "hook_code": hook_code_1, 'weight': 1.0}, @@ -480,8 +458,7 @@ class BashHookEvaluationTestCases(EvaluatorBaseTest): 'file_paths': self.file_paths, 'partial_grading': True, 'language': 'bash' - }, - 'test_case_data': test_case_data, + }, 'test_case_data': test_case_data, } # When @@ -491,7 +468,7 @@ class BashHookEvaluationTestCases(EvaluatorBaseTest): # Then self.assertTrue(result.get('success')) self.assertEqual(result.get("weight"), 1.5) - + def test_infinite_loop(self): # Given user_answer = ("#!/bin/bash\nwhile [ 1 ] ;" @@ -503,21 +480,19 @@ class BashHookEvaluationTestCases(EvaluatorBaseTest): success = False err = "Incorrect Answer" mark_fraction = 0.0 - proc = subprocess.Popen(user_answer, shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE - ) + proc = subprocess.Popen( + user_answer, shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE + ) stdout,stderr = proc.communicate() if stdout.decode("utf-8") == "Hello, world!": success, err, mark_fraction = True, "", 1.0 return success, err, mark_fraction - """ - ) + """) - test_case_data = [{"test_case_type": "hooktestcase", - "hook_code": hook_code,"weight": 1.0 - }] + "hook_code": hook_code, "weight": 1.0}] kwargs = { 'metadata': { @@ -525,8 +500,7 @@ class BashHookEvaluationTestCases(EvaluatorBaseTest): 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'bash' - }, - 'test_case_data': test_case_data, + }, 'test_case_data': test_case_data, } # When diff --git a/yaksh/evaluator_tests/test_c_cpp_evaluation.py b/yaksh/evaluator_tests/test_c_cpp_evaluation.py index 162d90c..877f708 100644 --- a/yaksh/evaluator_tests/test_c_cpp_evaluation.py +++ b/yaksh/evaluator_tests/test_c_cpp_evaluation.py @@ -8,8 +8,6 @@ from psutil import Process # Local import from yaksh.grader import Grader -from yaksh.cpp_code_evaluator import CppCodeEvaluator -from yaksh.cpp_stdio_evaluator import CppStdIOEvaluator from yaksh.evaluator_tests.test_python_evaluation import EvaluatorBaseTest from yaksh.settings import SERVER_TIMEOUT @@ -60,8 +58,8 @@ class CAssertionEvaluationTestCases(EvaluatorBaseTest): }] self.in_dir = tmp_in_dir_path self.timeout_msg = ("Code took more than {0} seconds to run. 
" - "You probably have an infinite loop in your" - " code.").format(SERVER_TIMEOUT) + "You probably have an infinite loop in your" + " code.").format(SERVER_TIMEOUT) self.file_paths = None def tearDown(self): @@ -77,8 +75,7 @@ class CAssertionEvaluationTestCases(EvaluatorBaseTest): 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'cpp' - }, - 'test_case_data': self.test_case_data, + }, 'test_case_data': self.test_case_data, } # When @@ -97,8 +94,7 @@ class CAssertionEvaluationTestCases(EvaluatorBaseTest): 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'cpp' - }, - 'test_case_data': self.test_case_data, + }, 'test_case_data': self.test_case_data, } # When @@ -120,8 +116,7 @@ class CAssertionEvaluationTestCases(EvaluatorBaseTest): 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'cpp' - }, - 'test_case_data': self.test_case_data, + }, 'test_case_data': self.test_case_data, } # When @@ -141,8 +136,7 @@ class CAssertionEvaluationTestCases(EvaluatorBaseTest): 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'cpp' - }, - 'test_case_data': self.test_case_data, + }, 'test_case_data': self.test_case_data, } # When @@ -211,8 +205,7 @@ class CAssertionEvaluationTestCases(EvaluatorBaseTest): 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'cpp' - }, - 'test_case_data': self.test_case_data, + }, 'test_case_data': self.test_case_data, } # When @@ -254,8 +247,7 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest): 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'cpp' - }, - 'test_case_data': self.test_case_data, + }, 'test_case_data': self.test_case_data, } # When @@ -287,8 +279,7 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest): 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'cpp' - }, - 'test_case_data': self.test_case_data, + }, 'test_case_data': self.test_case_data, } # When @@ -318,8 +309,7 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest): 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'cpp' - }, - 'test_case_data': self.test_case_data, + }, 'test_case_data': self.test_case_data, } # When @@ -343,8 +333,7 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest): 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'cpp' - }, - 'test_case_data': self.test_case_data, + }, 'test_case_data': self.test_case_data, } # When @@ -372,8 +361,7 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest): 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'cpp' - }, - 'test_case_data': self.test_case_data, + }, 'test_case_data': self.test_case_data, } # When @@ -398,8 +386,7 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest): 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'cpp' - }, - 'test_case_data': self.test_case_data, + }, 'test_case_data': self.test_case_data, } # When @@ -434,8 +421,7 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest): 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'cpp' - }, - 'test_case_data': self.test_case_data, + }, 'test_case_data': self.test_case_data, } # When @@ -461,8 +447,7 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest): 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'cpp' - }, - 'test_case_data': self.test_case_data, + }, 'test_case_data': self.test_case_data, } # When @@ -495,8 +480,7 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest): 'file_paths': self.file_paths, 
'partial_grading': False, 'language': 'cpp' - }, - 'test_case_data': self.test_case_data, + }, 'test_case_data': self.test_case_data, } # When @@ -527,8 +511,7 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest): 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'cpp' - }, - 'test_case_data': self.test_case_data, + }, 'test_case_data': self.test_case_data, } # When @@ -553,8 +536,7 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest): 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'cpp' - }, - 'test_case_data': self.test_case_data, + }, 'test_case_data': self.test_case_data, } # When @@ -583,8 +565,7 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest): 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'cpp' - }, - 'test_case_data': self.test_case_data, + }, 'test_case_data': self.test_case_data, } # When @@ -610,8 +591,7 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest): 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'cpp' - }, - 'test_case_data': self.test_case_data, + }, 'test_case_data': self.test_case_data, } # When @@ -644,8 +624,7 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest): 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'cpp' - }, - 'test_case_data': self.test_case_data, + }, 'test_case_data': self.test_case_data, } # When @@ -655,6 +634,7 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest): # Then self.assertTrue(result.get('success')) + class CppHookEvaluationTestCases(EvaluatorBaseTest): def setUp(self): @@ -664,8 +644,8 @@ class CppHookEvaluationTestCases(EvaluatorBaseTest): tmp_in_dir_path = tempfile.mkdtemp() self.in_dir = tmp_in_dir_path self.timeout_msg = ("Code took more than {0} seconds to run. " - "You probably have an infinite loop in your" - " code.").format(SERVER_TIMEOUT) + "You probably have an infinite loop in your" + " code.").format(SERVER_TIMEOUT) self.file_paths = None def tearDown(self): @@ -703,20 +683,17 @@ class CppHookEvaluationTestCases(EvaluatorBaseTest): if stdout.decode("utf-8") == "Hello, world!": success, err, mark_fraction = True, "", 1.0 return success, err, mark_fraction - """ - ) + """) test_case_data = [{"test_case_type": "hooktestcase", - "hook_code": hook_code,"weight": 1.0 - }] + "hook_code": hook_code, "weight": 1.0}] kwargs = { 'metadata': { 'user_answer': user_answer, 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'cpp' - }, - 'test_case_data': test_case_data, + }, 'test_case_data': test_case_data, } # When @@ -744,11 +721,11 @@ class CppHookEvaluationTestCases(EvaluatorBaseTest): err = "Incorrect Answer" mark_fraction = 0.0 def _run_command(cmd): - proc = subprocess.Popen("{}".format(cmd), - shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE - ) + proc = subprocess.Popen( + "{}".format(cmd), shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE + ) stdout,stderr = proc.communicate() return stdout,stderr cmds = ["gcc Test.c", "./a.out"] @@ -757,20 +734,17 @@ class CppHookEvaluationTestCases(EvaluatorBaseTest): if stdout.decode("utf-8") == "Hello, world!": success, err, mark_fraction = True, "", 1.0 return success, err, mark_fraction - """ - ) + """) test_case_data = [{"test_case_type": "hooktestcase", - "hook_code": hook_code,"weight": 1.0 - }] + "hook_code": hook_code, "weight": 1.0}] kwargs = { 'metadata': { 'user_answer': user_answer, 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'cpp' - }, - 'test_case_data': test_case_data, + }, 
'test_case_data': test_case_data, } # When @@ -780,46 +754,47 @@ class CppHookEvaluationTestCases(EvaluatorBaseTest): # Then self.assertFalse(result.get('success')) self.assert_correct_output('Incorrect Answer', result.get('error')) - + def test_assert_with_hook(self): # Given user_answer = "int add(int a, int b)\n{return a+b;}" - assert_test_case = dedent("""\ - #include <stdio.h> - #include <stdlib.h> - - extern int add(int, int); - - template <class T> - - void check(T expect, T result) - { - if (expect == result) - { - printf("Correct: Expected %d got %d ",expect,result); - } - else - { - printf("Incorrect: Expected %d got %d ",expect,result); - exit (1); - } - } - - int main(void) - { - int result; - result = add(0,0); - printf("Input submitted to the function: 0, 0"); - check(0, result); - result = add(2,3); - printf("Input submitted to the function: 2 3"); - check(5,result); - printf("All Correct"); - return 0; - } - """) + #include <stdio.h> + #include <stdlib.h> + + extern int add(int, int); + + template <class T> + + void check(T expect, T result) + { + if (expect == result) + { + printf("Correct: Expected %d got %d ", + expect,result); + } + else + { + printf("Incorrect: Expected %d got %d ", + expect,result); + exit (1); + } + } + + int main(void) + { + int result; + result = add(0,0); + printf("Input submitted 0, 0"); + check(0, result); + result = add(2,3); + printf("Input submitted 2 3"); + check(5,result); + printf("All Correct"); + return 0; + } + """) hook_code = dedent("""\ def check_answer(user_answer): @@ -829,9 +804,7 @@ class CppHookEvaluationTestCases(EvaluatorBaseTest): if "return a+b;" in user_answer: success, err, mark_fraction = True, "", 1.0 return success, err, mark_fraction - """ - ) - + """) test_case_data = [{"test_case_type": "standardtestcase", "test_case": assert_test_case, @@ -846,8 +819,7 @@ class CppHookEvaluationTestCases(EvaluatorBaseTest): 'file_paths': self.file_paths, 'partial_grading': True, 'language': 'cpp' - }, - 'test_case_data': test_case_data, + }, 'test_case_data': test_case_data, } # When @@ -867,7 +839,7 @@ class CppHookEvaluationTestCases(EvaluatorBaseTest): printf("Hello, world!"); } """) - + hook_code_1 = dedent("""\ def check_answer(user_answer): with open("Test.c", "w+") as f: @@ -877,11 +849,11 @@ class CppHookEvaluationTestCases(EvaluatorBaseTest): err = "Incorrect Answer" mark_fraction = 0.0 def _run_command(cmd): - proc = subprocess.Popen("{}".format(cmd), - shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE - ) + proc = subprocess.Popen( + "{}".format(cmd), shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE + ) stdout,stderr = proc.communicate() return stdout,stderr cmds = ["gcc Test.c", "./a.out"] @@ -890,8 +862,8 @@ class CppHookEvaluationTestCases(EvaluatorBaseTest): if stdout.decode("utf-8") == "Hello, world!": success, err, mark_fraction = True, "", 1.0 return success, err, mark_fraction - """ - ) + """) + hook_code_2 = dedent("""\ def check_answer(user_answer): success = False @@ -900,10 +872,7 @@ class CppHookEvaluationTestCases(EvaluatorBaseTest): if 'printf("Hello, world!");' in user_answer: success, err, mark_fraction = True, "", 0.5 return success, err, mark_fraction - """ - ) - - + """) test_case_data = [{"test_case_type": "hooktestcase", "hook_code": hook_code_1, 'weight': 1.0}, @@ -916,8 +885,7 @@ class CppHookEvaluationTestCases(EvaluatorBaseTest): 'file_paths': self.file_paths, 'partial_grading': True, 'language': 'cpp' - }, - 'test_case_data': test_case_data, + }, 'test_case_data': 
test_case_data, } # When @@ -927,7 +895,7 @@ class CppHookEvaluationTestCases(EvaluatorBaseTest): # Then self.assertTrue(result.get('success')) self.assertEqual(result.get("weight"), 1.5) - + def test_infinite_loop(self): # Given user_answer = dedent("""\ @@ -937,7 +905,7 @@ class CppHookEvaluationTestCases(EvaluatorBaseTest): printf("abc");} }""") - hook_code= dedent("""\ + hook_code = dedent("""\ def check_answer(user_answer): with open("Test.c", "w+") as f: f.write(user_answer) @@ -946,11 +914,11 @@ class CppHookEvaluationTestCases(EvaluatorBaseTest): err = "Incorrect Answer" mark_fraction = 0.0 def _run_command(cmd): - proc = subprocess.Popen("{}".format(cmd), - shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE - ) + proc = subprocess.Popen( + "{}".format(cmd), shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE + ) stdout,stderr = proc.communicate() return stdout,stderr cmds = ["gcc Test.c", "./a.out"] @@ -959,12 +927,10 @@ class CppHookEvaluationTestCases(EvaluatorBaseTest): if stdout.decode("utf-8") == "Hello, world!": success, err, mark_fraction = True, "", 1.0 return success, err, mark_fraction - """ - ) - + """) + test_case_data = [{"test_case_type": "hooktestcase", - "hook_code": hook_code,"weight": 1.0 - }] + "hook_code": hook_code, "weight": 1.0}] kwargs = { 'metadata': { @@ -972,8 +938,7 @@ class CppHookEvaluationTestCases(EvaluatorBaseTest): 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'cpp' - }, - 'test_case_data': test_case_data, + }, 'test_case_data': test_case_data, } # When diff --git a/yaksh/evaluator_tests/test_grader_evaluation.py b/yaksh/evaluator_tests/test_grader_evaluation.py index d11f4a0..e8b519a 100644 --- a/yaksh/evaluator_tests/test_grader_evaluation.py +++ b/yaksh/evaluator_tests/test_grader_evaluation.py @@ -1,9 +1,8 @@ from __future__ import unicode_literals import unittest -import os from yaksh import python_assertion_evaluator from yaksh.language_registry import _LanguageRegistry, get_registry -from yaksh.settings import SERVER_TIMEOUT, code_evaluators +from yaksh.settings import code_evaluators class RegistryTestCase(unittest.TestCase): @@ -11,27 +10,27 @@ class RegistryTestCase(unittest.TestCase): self.registry_object = get_registry() self.language_registry = _LanguageRegistry() assertion_evaluator_path = ("yaksh.python_assertion_evaluator" - ".PythonAssertionEvaluator" - ) + ".PythonAssertionEvaluator" + ) stdio_evaluator_path = ("yaksh.python_stdio_evaluator." - "PythonStdIOEvaluator" - ) + "PythonStdIOEvaluator" + ) hook_evaluator_path = ("yaksh.hook_evaluator." 
- "HookEvaluator" - ) + "HookEvaluator" + ) code_evaluators['python'] = \ - {"standardtestcase": assertion_evaluator_path, - "stdiobasedtestcase": stdio_evaluator_path, - "hooktestcase": hook_evaluator_path - } + {"standardtestcase": assertion_evaluator_path, + "stdiobasedtestcase": stdio_evaluator_path, + "hooktestcase": hook_evaluator_path + } def test_set_register(self): - evaluator_class = self.registry_object.get_class("python", - "standardtestcase" + evaluator_class = self.registry_object.get_class( + "python", "standardtestcase" ) - class_name = getattr(python_assertion_evaluator, - 'PythonAssertionEvaluator' + class_name = getattr( + python_assertion_evaluator, 'PythonAssertionEvaluator' ) self.assertEqual(evaluator_class, class_name) diff --git a/yaksh/evaluator_tests/test_java_evaluation.py b/yaksh/evaluator_tests/test_java_evaluation.py index 35b64d0..909d7ca 100644 --- a/yaksh/evaluator_tests/test_java_evaluation.py +++ b/yaksh/evaluator_tests/test_java_evaluation.py @@ -4,15 +4,12 @@ import os import shutil import tempfile from textwrap import dedent -from psutil import Process, pid_exists -import time +from psutil import Process # Local Import from yaksh import grader as gd from yaksh.grader import Grader -from yaksh.java_code_evaluator import JavaCodeEvaluator -from yaksh.java_stdio_evaluator import JavaStdIOEvaluator from yaksh.evaluator_tests.test_python_evaluation import EvaluatorBaseTest @@ -29,11 +26,13 @@ class JavaAssertionEvaluationTestCases(EvaluatorBaseTest): { if(result.equals(expect)) { - System.out.println("Correct:Output expected "+expect+" and got "+result); + System.out.println("Correct:Output expected "+expect+ + "and got "+result); } else { - System.out.println("Incorrect:Output expected "+expect+" but got "+result); + System.out.println("Incorrect:Output expected "+expect+ + "but got "+result); System.exit(1); } } @@ -43,15 +42,18 @@ class JavaAssertionEvaluationTestCases(EvaluatorBaseTest): int result, input, output; input = 0; output = 0; result = t.square_num(input); - System.out.println("Input submitted to the function: "+input); + System.out.println("Input submitted to the function: "+ + input); check(output, result); input = 5; output = 25; result = t.square_num(input); - System.out.println("Input submitted to the function: "+input); + System.out.println("Input submitted to the function: "+ + input); check(output, result); input = 6; output = 36; result = t.square_num(input); - System.out.println("Input submitted to the function: "+input); + System.out.println("Input submitted to the function: "+ + input); check(output, result); } } @@ -59,17 +61,16 @@ class JavaAssertionEvaluationTestCases(EvaluatorBaseTest): self.test_case_data = [ {"test_case": self.tc_data, - "test_case_type": "standardtestcase", - "weight": 0.0 - } + "test_case_type": "standardtestcase", + "weight": 0.0 + } ] self.in_dir = tmp_in_dir_path self.file_paths = None gd.SERVER_TIMEOUT = 9 self.timeout_msg = ("Code took more than {0} seconds to run. 
" - "You probably have an infinite loop in" - " your code.").format(gd.SERVER_TIMEOUT) - + "You probably have an infinite loop in" + " your code.").format(gd.SERVER_TIMEOUT) def tearDown(self): gd.SERVER_TIMEOUT = 4 @@ -78,15 +79,15 @@ class JavaAssertionEvaluationTestCases(EvaluatorBaseTest): def test_correct_answer(self): # Given - user_answer = "class Test {\n\tint square_num(int a) {\n\treturn a*a;\n\t}\n}" + user_answer = ("class Test {\n\tint square_num(int a)" + " {\n\treturn a*a;\n\t}\n}") kwargs = { 'metadata': { 'user_answer': user_answer, 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'java' - }, - 'test_case_data': self.test_case_data, + }, 'test_case_data': self.test_case_data, } # When @@ -98,15 +99,15 @@ class JavaAssertionEvaluationTestCases(EvaluatorBaseTest): def test_incorrect_answer(self): # Given - user_answer = "class Test {\n\tint square_num(int a) {\n\treturn a;\n\t}\n}" + user_answer = ("class Test {\n\tint square_num(int a) " + "{\n\treturn a;\n\t}\n}") kwargs = { 'metadata': { 'user_answer': user_answer, 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'java' - }, - 'test_case_data': self.test_case_data, + }, 'test_case_data': self.test_case_data, } # When @@ -122,15 +123,14 @@ class JavaAssertionEvaluationTestCases(EvaluatorBaseTest): def test_error(self): # Given - user_answer = "class Test {\n\tint square_num(int a) {\n\treturn a*a" + user_answer = "class Test {\n\tint square_num(int a) {\n\treturn a*a}" kwargs = { 'metadata': { 'user_answer': user_answer, 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'java' - }, - 'test_case_data': self.test_case_data, + }, 'test_case_data': self.test_case_data, } # When @@ -143,15 +143,15 @@ class JavaAssertionEvaluationTestCases(EvaluatorBaseTest): def test_infinite_loop(self): # Given - user_answer = "class Test {\n\tint square_num(int a) {\n\t\twhile(0==0){\n\t\t}\n\t}\n}" + user_answer = ("class Test {\n\tint square_num(int a)" + " {\n\t\twhile(0==0){\n\t\t}\n\t}\n}") kwargs = { 'metadata': { 'user_answer': user_answer, 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'java' - }, - 'test_case_data': self.test_case_data, + }, 'test_case_data': self.test_case_data, } # When @@ -178,11 +178,13 @@ class JavaAssertionEvaluationTestCases(EvaluatorBaseTest): { if(result.equals(expect)) { - System.out.println("Correct:Output expected "+expect+" and got "+result); + System.out.println("Correct:Output expected "+expect+ + " and got "+result); } else { - System.out.println("Incorrect:Output expected "+expect+" but got "+result); + System.out.println("Incorrect:Output expected "+expect+ + " but got "+result); System.exit(1); } } @@ -201,9 +203,9 @@ class JavaAssertionEvaluationTestCases(EvaluatorBaseTest): """) self.test_case_data = [ {"test_case": self.tc_data, - "test_case_type": "standardtestcase", - "weight": 0.0 - } + "test_case_type": "standardtestcase", + "weight": 0.0 + } ] user_answer = dedent(""" import java.io.BufferedReader; @@ -229,8 +231,7 @@ class JavaAssertionEvaluationTestCases(EvaluatorBaseTest): 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'java' - }, - 'test_case_data': self.test_case_data, + }, 'test_case_data': self.test_case_data, } # When @@ -240,6 +241,7 @@ class JavaAssertionEvaluationTestCases(EvaluatorBaseTest): # Then self.assertTrue(result.get("success")) + class JavaStdIOEvaluationTestCases(EvaluatorBaseTest): def setUp(self): self.f_path = os.path.join(tempfile.gettempdir(), "test.txt") @@ 
-251,7 +253,7 @@ class JavaStdIOEvaluationTestCases(EvaluatorBaseTest): 'expected_input': '5\n6', 'test_case_type': 'stdiobasedtestcase', 'weight': 0.0 - }] + }] self.file_paths = None gd.SERVER_TIMEOUT = 9 self.timeout_msg = ("Code took more than {0} seconds to run. " @@ -280,8 +282,7 @@ class JavaStdIOEvaluationTestCases(EvaluatorBaseTest): 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'java' - }, - 'test_case_data': self.test_case_data, + }, 'test_case_data': self.test_case_data, } # When @@ -314,8 +315,7 @@ class JavaStdIOEvaluationTestCases(EvaluatorBaseTest): 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'java' - }, - 'test_case_data': self.test_case_data, + }, 'test_case_data': self.test_case_data, } # When @@ -342,8 +342,7 @@ class JavaStdIOEvaluationTestCases(EvaluatorBaseTest): 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'java' - }, - 'test_case_data': self.test_case_data, + }, 'test_case_data': self.test_case_data, } # When @@ -370,8 +369,7 @@ class JavaStdIOEvaluationTestCases(EvaluatorBaseTest): 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'java' - }, - 'test_case_data': self.test_case_data, + }, 'test_case_data': self.test_case_data, } # When @@ -397,8 +395,7 @@ class JavaStdIOEvaluationTestCases(EvaluatorBaseTest): 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'java' - }, - 'test_case_data': self.test_case_data, + }, 'test_case_data': self.test_case_data, } # When @@ -420,7 +417,7 @@ class JavaStdIOEvaluationTestCases(EvaluatorBaseTest): self.test_case_data = [{'expected_output': '11', 'test_case_type': 'stdiobasedtestcase', 'weight': 0.0 - }] + }] user_answer = dedent(""" class Test {public static void main(String[] args){ @@ -434,8 +431,7 @@ class JavaStdIOEvaluationTestCases(EvaluatorBaseTest): 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'java' - }, - 'test_case_data': self.test_case_data, + }, 'test_case_data': self.test_case_data, } # When @@ -451,7 +447,7 @@ class JavaStdIOEvaluationTestCases(EvaluatorBaseTest): 'expected_input': 'Hello\nWorld', 'test_case_type': 'stdiobasedtestcase', 'weight': 0.0 - }] + }] user_answer = dedent(""" import java.util.Scanner; class Test @@ -467,8 +463,7 @@ class JavaStdIOEvaluationTestCases(EvaluatorBaseTest): 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'java' - }, - 'test_case_data': self.test_case_data, + }, 'test_case_data': self.test_case_data, } # When @@ -485,7 +480,7 @@ class JavaStdIOEvaluationTestCases(EvaluatorBaseTest): 'expected_input': '', 'test_case_type': 'stdiobasedtestcase', 'weight': 0.0 - }] + }] user_answer = dedent(""" import java.io.BufferedReader; import java.io.FileReader; @@ -510,8 +505,7 @@ class JavaStdIOEvaluationTestCases(EvaluatorBaseTest): 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'java' - }, - 'test_case_data': self.test_case_data, + }, 'test_case_data': self.test_case_data, } # When @@ -533,8 +527,8 @@ class JavaHookEvaluationTestCases(EvaluatorBaseTest): self.file_paths = None gd.SERVER_TIMEOUT = 9 self.timeout_msg = ("Code took more than {0} seconds to run. 
" - "You probably have an infinite loop in" - " your code.").format(gd.SERVER_TIMEOUT) + "You probably have an infinite loop in" + " your code.").format(gd.SERVER_TIMEOUT) def tearDown(self): gd.SERVER_TIMEOUT = 4 @@ -558,11 +552,11 @@ class JavaHookEvaluationTestCases(EvaluatorBaseTest): err = "Incorrect Answer" mark_fraction = 0.0 def _run_command(cmd): - proc = subprocess.Popen("{}".format(cmd), - shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE - ) + proc = subprocess.Popen( + "{}".format(cmd), shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE + ) stdout,stderr = proc.communicate() return stdout,stderr cmds = ["javac Test.java", "java Test"] @@ -571,20 +565,18 @@ class JavaHookEvaluationTestCases(EvaluatorBaseTest): if stdout.decode("utf-8") == "Hello, world!": success, err, mark_fraction = True, "", 1.0 return success, err, mark_fraction - """ - ) + """) test_case_data = [{"test_case_type": "hooktestcase", - "hook_code": hook_code,"weight": 1.0 - }] + "hook_code": hook_code, "weight": 1.0 + }] kwargs = { 'metadata': { 'user_answer': user_answer, 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'java' - }, - 'test_case_data': test_case_data, + }, 'test_case_data': test_case_data, } # When @@ -624,20 +616,18 @@ class JavaHookEvaluationTestCases(EvaluatorBaseTest): if stdout.decode("utf-8") == "Hello, world!": success, err, mark_fraction = True, "", 1.0 return success, err, mark_fraction - """ - ) + """) test_case_data = [{"test_case_type": "hooktestcase", - "hook_code": hook_code,"weight": 1.0 - }] + "hook_code": hook_code, "weight": 1.0 + }] kwargs = { 'metadata': { 'user_answer': user_answer, 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'java' - }, - 'test_case_data': test_case_data, + }, 'test_case_data': test_case_data, } # When @@ -647,10 +637,11 @@ class JavaHookEvaluationTestCases(EvaluatorBaseTest): # Then self.assertFalse(result.get('success')) self.assert_correct_output('Incorrect Answer', result.get('error')) - + def test_assert_with_hook(self): # Given - user_answer = "class Test {\n\tint square_num(int a) {\n\treturn a*a;\n\t}\n}" + user_answer = ("class Test {\n\tint square_num(int a)" + " {\n\treturn a*a;\n\t}\n}") assert_test_case = dedent(""" class main { @@ -658,11 +649,13 @@ class JavaHookEvaluationTestCases(EvaluatorBaseTest): { if(result.equals(expect)) { - System.out.println("Correct:Output expected "+expect+" and got "+result); + System.out.println("Correct:Output expected "+expect+ + " and got "+result); } else { - System.out.println("Incorrect:Output expected "+expect+" but got "+result); + System.out.println("Incorrect:Output expected "+expect+ + " but got "+result); System.exit(1); } } @@ -672,20 +665,23 @@ class JavaHookEvaluationTestCases(EvaluatorBaseTest): int result, input, output; input = 0; output = 0; result = t.square_num(input); - System.out.println("Input submitted to the function: "+input); + System.out.println("Input submitted to the function: "+ + input); check(output, result); input = 5; output = 25; result = t.square_num(input); - System.out.println("Input submitted to the function: "+input); + System.out.println("Input submitted to the function: "+ + input); check(output, result); input = 6; output = 36; result = t.square_num(input); - System.out.println("Input submitted to the function: "+input); + System.out.println("Input submitted to the function: "+ + input); check(output, result); } } """) - + hook_code = dedent("""\ def check_answer(user_answer): success = False @@ 
-694,9 +690,7 @@ class JavaHookEvaluationTestCases(EvaluatorBaseTest): if "return a*a" in user_answer: success, err, mark_fraction = True, "", 1.0 return success, err, mark_fraction - """ - ) - + """) test_case_data = [{"test_case_type": "standardtestcase", "test_case": assert_test_case, @@ -711,8 +705,7 @@ class JavaHookEvaluationTestCases(EvaluatorBaseTest): 'file_paths': self.file_paths, 'partial_grading': True, 'language': 'java' - }, - 'test_case_data': test_case_data, + }, 'test_case_data': test_case_data, } # When @@ -731,7 +724,7 @@ class JavaHookEvaluationTestCases(EvaluatorBaseTest): System.out.print("Hello, world!"); }} """) - + hook_code_1 = dedent("""\ def check_answer(user_answer): with open("Test.java", "w+") as f: @@ -741,11 +734,11 @@ class JavaHookEvaluationTestCases(EvaluatorBaseTest): err = "Incorrect Answer" mark_fraction = 0.0 def _run_command(cmd): - proc = subprocess.Popen("{}".format(cmd), - shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE - ) + proc = subprocess.Popen( + "{}".format(cmd), shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE + ) stdout,stderr = proc.communicate() return stdout,stderr cmds = ["javac Test.java", "java Test"] @@ -754,20 +747,18 @@ class JavaHookEvaluationTestCases(EvaluatorBaseTest): if stdout.decode("utf-8") == "Hello, world!": success, err, mark_fraction = True, "", 1.0 return success, err, mark_fraction - """ - ) + """) hook_code_2 = dedent("""\ def check_answer(user_answer): success = False err = "Incorrect Answer" mark_fraction = 0.0 - if 'System.out.print("Hello, world!");' in user_answer: + if ('System.out.print("Hello, world!");' in + user_answer): success, err, mark_fraction = True, "", 0.5 return success, err, mark_fraction - """ - ) - + """) test_case_data = [{"test_case_type": "hooktestcase", "hook_code": hook_code_1, 'weight': 1.0}, @@ -780,8 +771,7 @@ class JavaHookEvaluationTestCases(EvaluatorBaseTest): 'file_paths': self.file_paths, 'partial_grading': True, 'language': 'java' - }, - 'test_case_data': test_case_data, + }, 'test_case_data': test_case_data, } # When @@ -791,7 +781,7 @@ class JavaHookEvaluationTestCases(EvaluatorBaseTest): # Then self.assertTrue(result.get('success')) self.assertEqual(result.get("weight"), 1.5) - + def test_infinite_loop(self): # Given user_answer = dedent("""\ @@ -811,11 +801,11 @@ class JavaHookEvaluationTestCases(EvaluatorBaseTest): err = "Incorrect Answer" mark_fraction = 0.0 def _run_command(cmd): - proc = subprocess.Popen("{}".format(cmd), - shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE - ) + proc = subprocess.Popen( + "{}".format(cmd), shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE + ) stdout,stderr = proc.communicate() return stdout,stderr cmds = ["javac Test.java", "java Test"] @@ -824,13 +814,11 @@ class JavaHookEvaluationTestCases(EvaluatorBaseTest): if stdout.decode("utf-8") == "Hello, world!": success, err, mark_fraction = True, "", 1.0 return success, err, mark_fraction - """ - ) + """) - test_case_data = [{"test_case_type": "hooktestcase", - "hook_code": hook_code,"weight": 1.0 - }] + "hook_code": hook_code, "weight": 1.0 + }] kwargs = { 'metadata': { @@ -838,8 +826,7 @@ class JavaHookEvaluationTestCases(EvaluatorBaseTest): 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'java' - }, - 'test_case_data': test_case_data, + }, 'test_case_data': test_case_data, } # When diff --git a/yaksh/evaluator_tests/test_python_stdio_evaluator.py b/yaksh/evaluator_tests/test_python_stdio_evaluator.py index 
9b8d702..db2fd69 100644 --- a/yaksh/evaluator_tests/test_python_stdio_evaluator.py +++ b/yaksh/evaluator_tests/test_python_stdio_evaluator.py @@ -1,5 +1,6 @@ from yaksh.error_messages import compare_outputs + def test_compare_outputs(): exp = "5\n5\n" given = "5\n5\n" @@ -26,8 +27,8 @@ def test_compare_outputs(): success, msg = compare_outputs(given, exp) error_msg = msg.get('error_msg') assert not success - m = ("Incorrect Answer: We had expected 1 number of lines. " - + "We got 2 number of lines.") + m = ("Incorrect Answer: We had expected 1 number of lines. " + + "We got 2 number of lines.") assert m == error_msg exp = "5\n5\n" diff --git a/yaksh/evaluator_tests/test_scilab_evaluation.py b/yaksh/evaluator_tests/test_scilab_evaluation.py index f7a9925..d3f1dc8 100644 --- a/yaksh/evaluator_tests/test_scilab_evaluation.py +++ b/yaksh/evaluator_tests/test_scilab_evaluation.py @@ -6,12 +6,12 @@ import tempfile from psutil import Process from textwrap import dedent -#Local Import +# Local Import from yaksh import grader as gd from yaksh.grader import Grader -from yaksh.scilab_code_evaluator import ScilabCodeEvaluator from yaksh.evaluator_tests.test_python_evaluation import EvaluatorBaseTest + class ScilabEvaluationTestCases(EvaluatorBaseTest): def setUp(self): tmp_in_dir_path = tempfile.mkdtemp() @@ -54,7 +54,7 @@ class ScilabEvaluationTestCases(EvaluatorBaseTest): self.file_paths = None gd.SERVER_TIMEOUT = 9 self.timeout_msg = ("Code took more than {0} seconds to run. " - "You probably have an infinite loop" + "You probably have an infinite loop" " in your code.").format(gd.SERVER_TIMEOUT) def tearDown(self): @@ -63,15 +63,14 @@ class ScilabEvaluationTestCases(EvaluatorBaseTest): def test_correct_answer(self): user_answer = ("funcprot(0)\nfunction[c]=add(a,b)" - "\n\tc=a+b;\nendfunction") + "\n\tc=a+b;\nendfunction") kwargs = { 'metadata': { 'user_answer': user_answer, 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'scilab' - }, - 'test_case_data': self.test_case_data, + }, 'test_case_data': self.test_case_data, } grader = Grader(self.in_dir) @@ -81,15 +80,14 @@ class ScilabEvaluationTestCases(EvaluatorBaseTest): def test_error(self): user_answer = ("funcprot(0)\nfunction[c]=add(a,b)" - "\n\tc=a+b;\ndis(\tendfunction") + "\n\tc=a+b;\ndis(\tendfunction") kwargs = { 'metadata': { 'user_answer': user_answer, 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'scilab' - }, - 'test_case_data': self.test_case_data, + }, 'test_case_data': self.test_case_data, } grader = Grader(self.in_dir) @@ -98,18 +96,16 @@ class ScilabEvaluationTestCases(EvaluatorBaseTest): self.assertFalse(result.get("success")) self.assert_correct_output('error', result.get("error")) - def test_incorrect_answer(self): user_answer = ("funcprot(0)\nfunction[c]=add(a,b)" - "\n\tc=a-b;\nendfunction") + "\n\tc=a-b;\nendfunction") kwargs = { 'metadata': { 'user_answer': user_answer, 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'scilab' - }, - 'test_case_data': self.test_case_data, + }, 'test_case_data': self.test_case_data, } grader = Grader(self.in_dir) @@ -122,15 +118,14 @@ class ScilabEvaluationTestCases(EvaluatorBaseTest): def test_infinite_loop(self): user_answer = ("funcprot(0)\nfunction[c]=add(a,b)" - "\n\tc=a;\nwhile(1==1)\nend\nendfunction") + "\n\tc=a;\nwhile(1==1)\nend\nendfunction") kwargs = { 'metadata': { 'user_answer': user_answer, 'file_paths': self.file_paths, 'partial_grading': False, 'language': 'scilab' - }, - 'test_case_data': self.test_case_data, + 
}, 'test_case_data': self.test_case_data, } grader = Grader(self.in_dir) diff --git a/yaksh/evaluator_tests/test_simple_question_types.py b/yaksh/evaluator_tests/test_simple_question_types.py index dfb82a2..f7a6cf6 100644 --- a/yaksh/evaluator_tests/test_simple_question_types.py +++ b/yaksh/evaluator_tests/test_simple_question_types.py @@ -4,7 +4,7 @@ from django.utils import timezone from textwrap import dedent import pytz from yaksh.models import User, Profile, Question, Quiz, QuestionPaper,\ - QuestionSet, AnswerPaper, Answer, Course, IntegerTestCase, FloatTestCase,\ + AnswerPaper, Answer, Course, IntegerTestCase, FloatTestCase,\ StringTestCase, McqTestCase, ArrangeTestCase @@ -19,34 +19,30 @@ def setUpModule(): institute='IIT', department='Aerospace', position='Student') # Create User 2 - user2 = User.objects.create_user(username='demo_user_101', - password='demo', - email='demo@test.com') + user2 = User.objects.create_user( + username='demo_user_101', password='demo', + email='demo@test.com') Profile.objects.create(user=user2, roll_number=2, institute='IIT', department='Aerospace', position='Student') - + # Create a course - course = Course.objects.create(name="Python Course 100", - enrollment="Enroll Request", creator=user) - - quiz = Quiz.objects.create(start_date_time=datetime\ - (2015, 10, 9, 10, 8, 15, 0, - tzinfo=pytz.utc), - end_date_time=datetime\ - (2199, 10, 9, 10, 8, 15, 0, - tzinfo=pytz.utc), - duration=30, active=True, attempts_allowed=1, - time_between_attempts=0, pass_criteria=0, - description='demo quiz 100', - instructions="Demo Instructions", - creator=user - ) - question_paper = QuestionPaper.objects.create(quiz=quiz, - total_marks=1.0) - - + Course.objects.create(name="Python Course 100", + enrollment="Enroll Request", creator=user) + + quiz = Quiz.objects.create( + start_date_time=datetime(2015, 10, 9, 10, 8, 15, 0, tzinfo=pytz.utc), + end_date_time=datetime(2199, 10, 9, 10, 8, 15, 0, tzinfo=pytz.utc), + duration=30, active=True, attempts_allowed=1, + time_between_attempts=0, pass_criteria=0, + description='demo quiz 100', + instructions="Demo Instructions", + creator=user + ) + QuestionPaper.objects.create(quiz=quiz, total_marks=1.0) + + def tearDownModule(): User.objects.filter(username__in=["demo_user_100", "demo_user_101"])\ .delete() @@ -62,10 +58,10 @@ class IntegerQuestionTestCases(unittest.TestCase): # Creating Question paper self.question_paper = QuestionPaper.objects.get(quiz=self.quiz) - #Creating User + # Creating User self.user = User.objects.get(username='demo_user_100') - #Creating Question + # Creating Question self.question1 = Question.objects.create(summary='int1', points=1, type='code', user=self.user) self.question1.language = 'python' @@ -74,30 +70,26 @@ class IntegerQuestionTestCases(unittest.TestCase): self.question1.description = 'sum of 12+13?' 
self.question1.save() - #Creating answerpaper - - self.answerpaper = AnswerPaper.objects.create(user=self.user, - user_ip='101.0.0.1', - start_time=timezone.now(), - question_paper=self.question_paper, - end_time=timezone.now() - +timedelta(minutes=5), - attempt_number=1, - course=self.course - ) + # Creating answerpaper + + self.answerpaper = AnswerPaper.objects.create( + user=self.user, user_ip='101.0.0.1', start_time=timezone.now(), + question_paper=self.question_paper, course=self.course, + end_time=timezone.now()+timedelta(minutes=5), attempt_number=1 + ) self.answerpaper.questions.add(self.question1) self.answerpaper.save() - # For question + # For question self.integer_based_testcase = IntegerTestCase(question=self.question1, correct=25, - type = 'integertestcase', + type='integertestcase', ) self.integer_based_testcase.save() @classmethod def tearDownClass(self): - self.question1.delete() - self.answerpaper.delete() + self.question1.delete() + self.answerpaper.delete() def test_validate_regrade_integer_correct_answer(self): # Given @@ -118,7 +110,7 @@ class IntegerQuestionTestCases(unittest.TestCase): self.assertTrue(result['success']) # Regrade - # Given + # Given regrade_answer = Answer.objects.get(id=self.answer.id) regrade_answer.answer = 200 regrade_answer.save() @@ -134,7 +126,6 @@ class IntegerQuestionTestCases(unittest.TestCase): self.assertEqual(self.answer.marks, 0) self.assertFalse(self.answer.correct) - def test_validate_regrade_integer_incorrect_answer(self): # Given integer_answer = 26 @@ -154,7 +145,7 @@ class IntegerQuestionTestCases(unittest.TestCase): self.assertFalse(result['success']) # Regrade - # Given + # Given regrade_answer = Answer.objects.get(id=self.answer.id) regrade_answer.answer = 25 regrade_answer.save() @@ -180,9 +171,9 @@ class StringQuestionTestCases(unittest.TestCase): self.quiz = Quiz.objects.get(description="demo quiz 100") # Creating Question paper self.question_paper = QuestionPaper.objects.get(quiz=self.quiz) - #Creating User + # Creating User self.user = User.objects.get(username='demo_user_100') - #Creating Question + # Creating Question self.question1 = Question.objects.create(summary='str1', points=1, type='code', user=self.user) self.question1.language = 'python' @@ -199,45 +190,41 @@ class StringQuestionTestCases(unittest.TestCase): self.question2.description = 'Write Hello, EARTH!' 
self.question2.save() - #Creating answerpaper - - self.answerpaper = AnswerPaper.objects.create(user=self.user, - user_ip='101.0.0.1', - start_time=timezone.now(), - question_paper=self.question_paper, - end_time=timezone.now() - +timedelta(minutes=5), - attempt_number=1, - course=self.course - ) + # Creating answerpaper + + self.answerpaper = AnswerPaper.objects.create( + user=self.user, user_ip='101.0.0.1', start_time=timezone.now(), + question_paper=self.question_paper, course=self.course, + end_time=timezone.now()+timedelta(minutes=5), attempt_number=1 + ) self.answerpaper.questions.add(*[self.question1, self.question2]) self.answerpaper.save() - # For question + # For question self.lower_string_testcase = StringTestCase(question=self.question1, correct="Hello, EARTH!", string_check="lower", - type = 'stringtestcase', + type='stringtestcase', ) self.lower_string_testcase.save() self.exact_string_testcase = StringTestCase(question=self.question2, correct="Hello, EARTH!", string_check="exact", - type = 'stringtestcase', + type='stringtestcase', ) self.exact_string_testcase.save() @classmethod def tearDownClass(self): - self.question1.delete() - self.question2.delete() - self.answerpaper.delete() + self.question1.delete() + self.question2.delete() + self.answerpaper.delete() def test_validate_regrade_case_insensitive_string_correct_answer(self): # Given string_answer = "hello, earth!" - answer = Answer(question=self.question1,answer=string_answer) + answer = Answer(question=self.question1, answer=string_answer) answer.save() self.answerpaper.answers.add(answer) @@ -250,7 +237,7 @@ class StringQuestionTestCases(unittest.TestCase): self.assertTrue(result['success']) # Regrade - # Given + # Given regrade_answer = Answer.objects.get(id=answer.id) regrade_answer.answer = "hello, mars!" regrade_answer.save() @@ -259,8 +246,8 @@ class StringQuestionTestCases(unittest.TestCase): details = self.answerpaper.regrade(self.question1.id) # Then - answer = self.answerpaper.answers.filter(question=self.question1)\ - .last() + answer = self.answerpaper.answers.filter( + question=self.question1).last() self.assertEqual(answer, regrade_answer) self.assertTrue(details[0]) self.assertEqual(answer.marks, 0) @@ -269,7 +256,7 @@ class StringQuestionTestCases(unittest.TestCase): def test_validate_regrade_case_insensitive_string_incorrect_answer(self): # Given string_answer = "hello, mars!" - answer = Answer(question=self.question1,answer=string_answer) + answer = Answer(question=self.question1, answer=string_answer) answer.save() self.answerpaper.answers.add(answer) @@ -283,7 +270,7 @@ class StringQuestionTestCases(unittest.TestCase): self.assertFalse(result['success']) # Regrade - # Given + # Given regrade_answer = Answer.objects.get(id=answer.id) regrade_answer.answer = "hello, earth!" regrade_answer.save() @@ -292,8 +279,8 @@ class StringQuestionTestCases(unittest.TestCase): details = self.answerpaper.regrade(self.question1.id) # Then - answer = self.answerpaper.answers.filter(question=self.question1)\ - .last() + answer = self.answerpaper.answers.filter( + question=self.question1).last() self.assertEqual(answer, regrade_answer) self.assertTrue(details[0]) self.assertEqual(answer.marks, 1) @@ -302,7 +289,7 @@ class StringQuestionTestCases(unittest.TestCase): def test_validate_regrade_case_sensitive_string_correct_answer(self): # Given string_answer = "Hello, EARTH!" 
- answer = Answer(question=self.question2,answer=string_answer) + answer = Answer(question=self.question2, answer=string_answer) answer.save() self.answerpaper.answers.add(answer) @@ -315,7 +302,7 @@ class StringQuestionTestCases(unittest.TestCase): self.assertTrue(result['success']) # Regrade - # Given + # Given regrade_answer = Answer.objects.get(id=answer.id) regrade_answer.answer = "hello, earth!" regrade_answer.save() @@ -324,8 +311,8 @@ class StringQuestionTestCases(unittest.TestCase): details = self.answerpaper.regrade(self.question2.id) # Then - answer = self.answerpaper.answers.filter(question=self.question2)\ - .last() + answer = self.answerpaper.answers.filter( + question=self.question2).last() self.assertEqual(answer, regrade_answer) self.assertTrue(details[0]) self.assertEqual(answer.marks, 0) @@ -334,7 +321,7 @@ class StringQuestionTestCases(unittest.TestCase): def test_case_sensitive_string_incorrect_answer(self): # Given string_answer = "hello, earth!" - answer = Answer(question=self.question2,answer=string_answer) + answer = Answer(question=self.question2, answer=string_answer) answer.save() self.answerpaper.answers.add(answer) @@ -348,7 +335,7 @@ class StringQuestionTestCases(unittest.TestCase): self.assertFalse(result['success']) # Regrade - # Given + # Given regrade_answer = Answer.objects.get(id=answer.id) regrade_answer.answer = "Hello, EARTH!" regrade_answer.save() @@ -357,8 +344,8 @@ class StringQuestionTestCases(unittest.TestCase): details = self.answerpaper.regrade(self.question2.id) # Then - answer = self.answerpaper.answers.filter(question=self.question2)\ - .last() + answer = self.answerpaper.answers.filter( + question=self.question2).last() self.assertEqual(answer, regrade_answer) self.assertTrue(details[0]) self.assertEqual(answer.marks, 1) @@ -375,9 +362,9 @@ class FloatQuestionTestCases(unittest.TestCase): # Creating Question paper self.question_paper = QuestionPaper.objects.get(quiz=self.quiz) - #Creating User + # Creating User self.user = User.objects.get(username='demo_user_100') - #Creating Question + # Creating Question self.question1 = Question.objects.create(summary='flt1', points=1, type='code', user=self.user) self.question1.language = 'python' @@ -385,31 +372,28 @@ class FloatQuestionTestCases(unittest.TestCase): self.question1.test_case_type = 'floattestcase' self.question1.save() - #Creating answerpaper - - self.answerpaper = AnswerPaper.objects.create(user=self.user, - user_ip='101.0.0.1', - start_time=timezone.now(), - question_paper=self.question_paper, - end_time=timezone.now() - +timedelta(minutes=5), - attempt_number=1, - course=self.course - ) + # Creating answerpaper + + self.answerpaper = AnswerPaper.objects.create( + user=self.user, user_ip='101.0.0.1', start_time=timezone.now(), + question_paper=self.question_paper, course=self.course, + end_time=timezone.now()+timedelta(minutes=5), attempt_number=1, + ) + self.answerpaper.questions.add(self.question1) self.answerpaper.save() - # For question + # For question self.float_based_testcase = FloatTestCase(question=self.question1, correct=100, error_margin=0.1, - type = 'floattestcase', + type='floattestcase', ) self.float_based_testcase.save() @classmethod def tearDownClass(self): - self.question1.delete() - self.answerpaper.delete() + self.question1.delete() + self.answerpaper.delete() def test_validate_regrade_float_correct_answer(self): # Given @@ -430,7 +414,7 @@ class FloatQuestionTestCases(unittest.TestCase): self.assertTrue(result['success']) # Regrade with wrong answer - # Given + 
# Given regrade_answer = Answer.objects.get(id=self.answer.id) regrade_answer.answer = 0.0 regrade_answer.save() @@ -465,7 +449,7 @@ class FloatQuestionTestCases(unittest.TestCase): self.assertFalse(result['success']) # Regrade - # Given + # Given regrade_answer = Answer.objects.get(id=self.answer.id) regrade_answer.answer = 99.9 regrade_answer.save() @@ -480,15 +464,17 @@ class FloatQuestionTestCases(unittest.TestCase): self.assertTrue(details[0]) self.assertEqual(self.answer.marks, 1) self.assertTrue(self.answer.correct) + + class MCQQuestionTestCases(unittest.TestCase): @classmethod def setUpClass(self): - #Creating User + # Creating User self.user = User.objects.get(username='demo_user_100') self.user2 = User.objects.get(username='demo_user_101') self.user_ip = '127.0.0.1' - #Creating Course + # Creating Course self.course = Course.objects.get(name="Python Course 100") # Creating Quiz self.quiz = Quiz.objects.get(description="demo quiz 100") @@ -496,7 +482,7 @@ class MCQQuestionTestCases(unittest.TestCase): self.question_paper = QuestionPaper.objects.get(quiz=self.quiz) self.question_paper.shuffle_testcases = True self.question_paper.save() - #Creating Question + # Creating Question self.question1 = Question.objects.create(summary='mcq1', points=1, type='code', user=self.user, ) @@ -552,9 +538,9 @@ class MCQQuestionTestCases(unittest.TestCase): @classmethod def tearDownClass(self): - self.question1.delete() - self.answerpaper.delete() - self.answerpaper2.delete() + self.question1.delete() + self.answerpaper.delete() + self.answerpaper2.delete() def test_shuffle_test_cases(self): # Given @@ -576,7 +562,8 @@ class MCQQuestionTestCases(unittest.TestCase): course_id=self.course.id ) not_ordered_testcase = self.question1.get_ordered_test_cases( - answerpaper3 ) + answerpaper3 + ) get_test_cases = self.question1.get_test_cases() # Then self.assertNotEqual(order1, order2) @@ -594,9 +581,9 @@ class ArrangeQuestionTestCases(unittest.TestCase): self.question_paper = QuestionPaper.objects.get(quiz=self.quiz, total_marks=1.0) - #Creating User + # Creating User self.user = User.objects.get(username='demo_user_100') - #Creating Question + # Creating Question self.question1 = Question.objects.create(summary='arrange1', points=1.0, user=self.user @@ -607,42 +594,39 @@ class ArrangeQuestionTestCases(unittest.TestCase): self.question1.test_case_type = 'arrangetestcase' self.question1.save() - #Creating answerpaper - - self.answerpaper = AnswerPaper.objects.create(user=self.user, - user_ip='101.0.0.1', - start_time=timezone.now(), - question_paper=self.question_paper, - end_time=timezone.now() - +timedelta(minutes=5), - attempt_number=1, - course=self.course - ) + # Creating answerpaper + + self.answerpaper = AnswerPaper.objects.create( + user=self.user, user_ip='101.0.0.1', course=self.course, + start_time=timezone.now(), question_paper=self.question_paper, + end_time=timezone.now()+timedelta(minutes=5), attempt_number=1 + ) self.answerpaper.questions.add(self.question1) self.answerpaper.save() # For question self.arrange_testcase_1 = ArrangeTestCase(question=self.question1, options="A", - type = 'arrangetestcase', + type='arrangetestcase', ) self.arrange_testcase_1.save() self.testcase_1_id = self.arrange_testcase_1.id self.arrange_testcase_2 = ArrangeTestCase(question=self.question1, options="B", - type = 'arrangetestcase', + type='arrangetestcase', ) self.arrange_testcase_2.save() self.testcase_2_id = self.arrange_testcase_2.id self.arrange_testcase_3 = ArrangeTestCase(question=self.question1, 
options="C", - type = 'arrangetestcase', + type='arrangetestcase', ) self.arrange_testcase_3.save() self.testcase_3_id = self.arrange_testcase_3.id + @classmethod def tearDownClass(self): - self.question1.delete() - self.answerpaper.delete() + self.question1.delete() + self.answerpaper.delete() def test_validate_regrade_arrange_correct_answer(self): # Given @@ -681,11 +665,10 @@ class ArrangeQuestionTestCases(unittest.TestCase): self.quiz.description, self.question1.summary, self.question1.type - ) ) + )) self.assertFalse(details[0]) self.assertEqual(details[1], err_msg) - # Try regrade with incorrect answer # When regrade_answer.answer = [self.testcase_1_id, @@ -741,15 +724,17 @@ class ArrangeQuestionTestCases(unittest.TestCase): self.assertTrue(details[0]) self.assertEqual(self.answer.marks, 1) self.assertTrue(self.answer.correct) -class MCQQuestionTestCases(unittest.TestCase): + + +class MCQShuffleTestCases(unittest.TestCase): @classmethod def setUpClass(self): - #Creating User + # Creating User self.user = User.objects.get(username='demo_user_100') self.user2 = User.objects.get(username='demo_user_101') self.user_ip = '127.0.0.1' - #Creating Course + # Creating Course self.course = Course.objects.get(name="Python Course 100") # Creating Quiz self.quiz = Quiz.objects.get(description="demo quiz 100") @@ -757,7 +742,7 @@ class MCQQuestionTestCases(unittest.TestCase): self.question_paper = QuestionPaper.objects.get(quiz=self.quiz) self.question_paper.shuffle_testcases = True self.question_paper.save() - #Creating Question + # Creating Question self.question1 = Question.objects.create(summary='mcq1', points=1, type='code', user=self.user, ) @@ -810,11 +795,12 @@ class MCQQuestionTestCases(unittest.TestCase): attempt_num=1, course_id=self.course.id ) + @classmethod def tearDownClass(self): - self.question1.delete() - self.answerpaper.delete() - self.answerpaper2.delete() + self.question1.delete() + self.answerpaper.delete() + self.answerpaper2.delete() def test_shuffle_test_cases(self): # Given |