Diffstat (limited to 'yaksh')
-rw-r--r-- | yaksh/bash_code_evaluator.py | 176
-rwxr-xr-x | yaksh/code_server.py | 2
-rw-r--r-- | yaksh/evaluator_tests/test_bash_evaluation.py | 74
-rw-r--r-- | yaksh/evaluator_tests/test_c_cpp_evaluation.py | 13
-rw-r--r-- | yaksh/evaluator_tests/test_code_evaluation.py | 38
-rw-r--r-- | yaksh/evaluator_tests/test_java_evaluation.py | 96
-rw-r--r-- | yaksh/evaluator_tests/test_scilab_evaluation.py | 70
-rw-r--r-- | yaksh/java_code_evaluator.py | 188
-rw-r--r-- | yaksh/language_registry.py | 4
-rw-r--r-- | yaksh/scilab_code_evaluator.py | 72
-rw-r--r-- | yaksh/settings.py | 16
11 files changed, 572 insertions, 177 deletions
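The diff below reworks each code evaluator so that the constructor takes only the working directory, while the user's code and the test-case data are passed to evaluate() as keyword arguments; the old constructor signature survives only as commented-out code. A minimal sketch of driving an evaluator through the new interface, pieced together from the updated bash tests in this diff (the fixture paths are the ones those tests use):

    from yaksh.bash_code_evaluator import BashCodeEvaluator

    test_case_data = ["bash_files/sample.sh,bash_files/sample.args"]
    user_answer = ("#!/bin/bash\n"
                   "[[ $# -eq 2 ]] && echo $(( $1 + $2 )) && exit $(( $1 + $2 ))")

    # Only the sandbox directory goes to the constructor now.
    evaluator = BashCodeEvaluator("/tmp")
    # Code and test data are handed to evaluate() as keywords.
    result = evaluator.evaluate(user_answer=user_answer,
                                test_case_data=test_case_data)
    print(result.get("success"), result.get("error"))
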
diff --git a/yaksh/bash_code_evaluator.py b/yaksh/bash_code_evaluator.py index a468fd7..fbfa232 100644 --- a/yaksh/bash_code_evaluator.py +++ b/yaksh/bash_code_evaluator.py @@ -3,6 +3,7 @@ import traceback import pwd import os from os.path import join, isfile +import sys import subprocess import importlib @@ -11,34 +12,32 @@ from code_evaluator import CodeEvaluator class BashCodeEvaluator(CodeEvaluator): - """Tests the Bash code obtained from Code Server""" - def __init__(self, test_case_data, test, language, user_answer, - ref_code_path=None, in_dir=None): - super(BashCodeEvaluator, self).__init__(test_case_data, test, language, user_answer, - ref_code_path, in_dir) - self.test_case_args = self._setup() + # """Tests the Bash code obtained from Code Server""" + # def __init__(self, test_case_data, test, language, user_answer, + # ref_code_path=None, in_dir=None): + # super(BashCodeEvaluator, self).__init__(test_case_data, test, language, user_answer, + # ref_code_path, in_dir) + # self.test_case_args = self._setup() # Private Protocol ########## - def _setup(self): - super(BashCodeEvaluator, self)._setup() - - self.submit_path = self.create_submit_code_file('submit.sh') - self._set_file_as_executable(self.submit_path) - get_ref_path, get_test_case_path = self.ref_code_path.strip().split(',') - get_ref_path = get_ref_path.strip() - get_test_case_path = get_test_case_path.strip() - ref_path, test_case_path = self._set_test_code_file_path(get_ref_path, - get_test_case_path) - - return ref_path, self.submit_path, test_case_path - - def _teardown(self): + def setup(self): + super(BashCodeEvaluator, self).setup() + self.submit_code_path = self.create_submit_code_file('submit.sh') + self._set_file_as_executable(self.submit_code_path) + # get_ref_path, get_test_case_path = self.ref_code_path.strip().split(',') + # get_ref_path = get_ref_path.strip() + # get_test_case_path = get_test_case_path.strip() + # ref_path, test_case_path = self._set_test_code_file_path(get_ref_path, + # get_test_case_path) + + # return ref_path, self.submit_code_path, test_case_path + + def teardown(self): # Delete the created file. - super(BashCodeEvaluator, self)._teardown() - os.remove(self.submit_path) + super(BashCodeEvaluator, self).teardown() + os.remove(self.submit_code_path) - def _check_code(self, ref_path, submit_path, - test_case_path=None): + def check_code(self, user_answer, test_case_data): """ Function validates student script using instructor script as reference. Test cases can optionally be provided. The first argument ref_path, is the path to instructor script, it is assumed to @@ -61,23 +60,31 @@ class BashCodeEvaluator(CodeEvaluator): the required permissions are not given to the file(s). 
""" - if not isfile(ref_path): - return False, "No file at %s or Incorrect path" % ref_path - if not isfile(submit_path): - return False, "No file at %s or Incorrect path" % submit_path - if not os.access(ref_path, os.X_OK): - return False, "Script %s is not executable" % ref_path - if not os.access(submit_path, os.X_OK): - return False, "Script %s is not executable" % submit_path + ref_code_path = test_case_data[0] + get_ref_path, get_test_case_path = ref_code_path.strip().split(',') + get_ref_path = get_ref_path.strip() + get_test_case_path = get_test_case_path.strip() + clean_ref_code_path, clean_test_case_path = self._set_test_code_file_path(get_ref_path, + get_test_case_path) + + if not isfile(clean_ref_code_path): + return False, "No file at %s or Incorrect path" % clean_ref_code_path + if not isfile(self.submit_code_path): + return False, "No file at %s or Incorrect path" % self.submit_code_path + if not os.access(clean_ref_code_path, os.X_OK): + return False, "Script %s is not executable" % clean_ref_code_path + if not os.access(self.submit_code_path, os.X_OK): + return False, "Script %s is not executable" % self.submit_code_path success = False + self.write_to_submit_code_file(self.submit_code_path, user_answer) - if test_case_path is None or "": - ret = self._run_command(ref_path, stdin=None, + if clean_test_case_path is None or "": + ret = self._run_command(clean_ref_code_path, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE) proc, inst_stdout, inst_stderr = ret - ret = self._run_command(submit_path, stdin=None, + ret = self._run_command(self.submit_code_path, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE) proc, stdnt_stdout, stdnt_stderr = ret @@ -88,26 +95,26 @@ class BashCodeEvaluator(CodeEvaluator): stdnt_stderr) return False, err else: - if not isfile(test_case_path): - return False, "No test case at %s" % test_case_path - if not os.access(ref_path, os.R_OK): - return False, "Test script %s, not readable" % test_case_path + if not isfile(clean_test_case_path): + return False, "No test case at %s" % clean_test_case_path + if not os.access(clean_ref_code_path, os.R_OK): + return False, "Test script %s, not readable" % clean_test_case_path # valid_answer is True, so that we can stop once a test case fails valid_answer = True # loop_count has to be greater than or equal to one. # Useful for caching things like empty test files,etc. loop_count = 0 - test_cases = open(test_case_path).readlines() + test_cases = open(clean_test_case_path).readlines() num_lines = len(test_cases) for test_case in test_cases: loop_count += 1 if valid_answer: - args = [ref_path] + [x for x in test_case.split()] + args = [clean_ref_code_path] + [x for x in test_case.split()] ret = self._run_command(args, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE) proc, inst_stdout, inst_stderr = ret - args = [submit_path]+[x for x in test_case.split()] + args = [self.submit_code_path]+[x for x in test_case.split()] ret = self._run_command(args, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -120,3 +127,88 @@ class BashCodeEvaluator(CodeEvaluator): stdnt_stdout+stdnt_stderr) return False, err + + + # def _check_code(self, ref_path, submit_path, + # test_case_path=None): + # """ Function validates student script using instructor script as + # reference. Test cases can optionally be provided. The first argument + # ref_path, is the path to instructor script, it is assumed to + # have executable permission. 
The second argument submit_path, is + # the path to the student script, it is assumed to have executable + # permission. The Third optional argument is the path to test the + # scripts. Each line in this file is a test case and each test case is + # passed to the script as standard arguments. + + # Returns + # -------- + + # returns (True, "Correct answer") : If the student script passes all + # test cases/have same output, when compared to the instructor script + + # returns (False, error_msg): If the student script fails a single + # test/have dissimilar output, when compared to the instructor script. + + # Returns (False, error_msg): If mandatory arguments are not files or if + # the required permissions are not given to the file(s). + + # """ + # if not isfile(ref_path): + # return False, "No file at %s or Incorrect path" % ref_path + # if not isfile(submit_path): + # return False, "No file at %s or Incorrect path" % submit_path + # if not os.access(ref_path, os.X_OK): + # return False, "Script %s is not executable" % ref_path + # if not os.access(submit_path, os.X_OK): + # return False, "Script %s is not executable" % submit_path + + # success = False + + # if test_case_path is None or "": + # ret = self._run_command(ref_path, stdin=None, + # stdout=subprocess.PIPE, + # stderr=subprocess.PIPE) + # proc, inst_stdout, inst_stderr = ret + # ret = self._run_command(submit_path, stdin=None, + # stdout=subprocess.PIPE, + # stderr=subprocess.PIPE) + # proc, stdnt_stdout, stdnt_stderr = ret + # if inst_stdout == stdnt_stdout: + # return True, "Correct answer" + # else: + # err = "Error: expected %s, got %s" % (inst_stderr, + # stdnt_stderr) + # return False, err + # else: + # if not isfile(test_case_path): + # return False, "No test case at %s" % test_case_path + # if not os.access(ref_path, os.R_OK): + # return False, "Test script %s, not readable" % test_case_path + # # valid_answer is True, so that we can stop once a test case fails + # valid_answer = True + # # loop_count has to be greater than or equal to one. + # # Useful for caching things like empty test files,etc. + # loop_count = 0 + # test_cases = open(test_case_path).readlines() + # num_lines = len(test_cases) + # for test_case in test_cases: + # loop_count += 1 + # if valid_answer: + # args = [ref_path] + [x for x in test_case.split()] + # ret = self._run_command(args, stdin=None, + # stdout=subprocess.PIPE, + # stderr=subprocess.PIPE) + # proc, inst_stdout, inst_stderr = ret + # args = [submit_path]+[x for x in test_case.split()] + # ret = self._run_command(args, stdin=None, + # stdout=subprocess.PIPE, + # stderr=subprocess.PIPE) + # proc, stdnt_stdout, stdnt_stderr = ret + # valid_answer = inst_stdout == stdnt_stdout + # if valid_answer and (num_lines == loop_count): + # return True, "Correct answer" + # else: + # err = "Error:expected %s, got %s" % (inst_stdout+inst_stderr, + # stdnt_stdout+stdnt_stderr) + # return False, err + diff --git a/yaksh/code_server.py b/yaksh/code_server.py index 66c4271..48b97a7 100755 --- a/yaksh/code_server.py +++ b/yaksh/code_server.py @@ -31,7 +31,7 @@ import re import json # Local imports. 
from settings import SERVER_PORTS, SERVER_POOL_PORT -from language_registry import get_registry, create_evaluator_instance, unpack_json +from language_registry import create_evaluator_instance, unpack_json MY_DIR = abspath(dirname(__file__)) diff --git a/yaksh/evaluator_tests/test_bash_evaluation.py b/yaksh/evaluator_tests/test_bash_evaluation.py index c6a5408..5fa3d2d 100644 --- a/yaksh/evaluator_tests/test_bash_evaluation.py +++ b/yaksh/evaluator_tests/test_bash_evaluation.py @@ -5,37 +5,81 @@ from yaksh.settings import SERVER_TIMEOUT class BashEvaluationTestCases(unittest.TestCase): def setUp(self): - self.language = "bash" - self.ref_code_path = "bash_files/sample.sh,bash_files/sample.args" + self.test_case_data = ["bash_files/sample.sh,bash_files/sample.args"] self.in_dir = "/tmp" - self.test_case_data = [] self.timeout_msg = ("Code took more than {0} seconds to run. " "You probably have an infinite loop in your code.").format(SERVER_TIMEOUT) - self.test = None + + # def setUp(self): + # self.language = "bash" + # self.ref_code_path = "bash_files/sample.sh,bash_files/sample.args" + # self.in_dir = "/tmp" + # self.test_case_data = [] + # self.timeout_msg = ("Code took more than {0} seconds to run. " + # "You probably have an infinite loop in your code.").format(SERVER_TIMEOUT) + # self.test = None def test_correct_answer(self): user_answer = "#!/bin/bash\n[[ $# -eq 2 ]] && echo $(( $1 + $2 )) && exit $(( $1 + $2 ))" - get_class = BashCodeEvaluator(self.test_case_data, self.test, self.language, user_answer, self.ref_code_path, self.in_dir) - result = get_class.evaluate() - - self.assertTrue(result.get("success")) - self.assertEqual(result.get("error"), "Correct answer") + get_class = BashCodeEvaluator(self.in_dir) + kwargs = {'user_answer': user_answer, + 'test_case_data': self.test_case_data + } + result = get_class.evaluate(**kwargs) + self.assertTrue(result.get('success')) + self.assertEquals(result.get('error'), "Correct answer") def test_error(self): user_answer = "#!/bin/bash\n[[ $# -eq 2 ]] && echo $(( $1 - $2 )) && exit $(( $1 - $2 ))" - get_class = BashCodeEvaluator(self.test_case_data, self.test, self.language, user_answer, self.ref_code_path, self.in_dir) - result = get_class.evaluate() - + get_class = BashCodeEvaluator(self.in_dir) + kwargs = {'user_answer': user_answer, + 'test_case_data': self.test_case_data + } + result = get_class.evaluate(**kwargs) self.assertFalse(result.get("success")) self.assertTrue("Error" in result.get("error")) def test_infinite_loop(self): user_answer = "#!/bin/bash\nwhile [ 1 ] ; do echo "" > /dev/null ; done" - get_class = BashCodeEvaluator(self.test_case_data, self.test, self.language, user_answer, self.ref_code_path, self.in_dir) - result = get_class.evaluate() - + get_class = BashCodeEvaluator(self.in_dir) + kwargs = {'user_answer': user_answer, + 'test_case_data': self.test_case_data + } + result = get_class.evaluate(**kwargs) self.assertFalse(result.get("success")) self.assertEquals(result.get("error"), self.timeout_msg) + # def test_infinite_loop(self): + # user_answer = "#!/bin/bash\nwhile [ 1 ] ; do echo "" > /dev/null ; done" + # get_class = BashCodeEvaluator(self.test_case_data, self.test, self.language, user_answer, self.ref_code_path, self.in_dir) + # result = get_class.evaluate() + + # self.assertFalse(result.get("success")) + # self.assertEquals(result.get("error"), self.timeout_msg) + + # def test_correct_answer(self): + # user_answer = "#!/bin/bash\n[[ $# -eq 2 ]] && echo $(( $1 + $2 )) && exit $(( $1 + $2 ))" + # get_class = 
BashCodeEvaluator(self.test_case_data, self.test, self.language, user_answer, self.ref_code_path, self.in_dir) + # result = get_class.evaluate() + + # self.assertTrue(result.get("success")) + # self.assertEqual(result.get("error"), "Correct answer") + + # def test_error(self): + # user_answer = "#!/bin/bash\n[[ $# -eq 2 ]] && echo $(( $1 - $2 )) && exit $(( $1 - $2 ))" + # get_class = BashCodeEvaluator(self.test_case_data, self.test, self.language, user_answer, self.ref_code_path, self.in_dir) + # result = get_class.evaluate() + + # self.assertFalse(result.get("success")) + # self.assertTrue("Error" in result.get("error")) + + # def test_infinite_loop(self): + # user_answer = "#!/bin/bash\nwhile [ 1 ] ; do echo "" > /dev/null ; done" + # get_class = BashCodeEvaluator(self.test_case_data, self.test, self.language, user_answer, self.ref_code_path, self.in_dir) + # result = get_class.evaluate() + + # self.assertFalse(result.get("success")) + # self.assertEquals(result.get("error"), self.timeout_msg) + if __name__ == '__main__': unittest.main() diff --git a/yaksh/evaluator_tests/test_c_cpp_evaluation.py b/yaksh/evaluator_tests/test_c_cpp_evaluation.py index c5f8775..9d37fb4 100644 --- a/yaksh/evaluator_tests/test_c_cpp_evaluation.py +++ b/yaksh/evaluator_tests/test_c_cpp_evaluation.py @@ -18,7 +18,18 @@ class CEvaluationTestCases(unittest.TestCase): } result = get_class.evaluate(**kwargs) self.assertTrue(result.get('success')) - self.assertEqual(result.get('error'), "Correct answer") + self.assertEquals(result.get('error'), "Correct answer") + + def test_incorrect_answer(self): + user_answer = "int add(int a, int b)\n{return a-b;}" + get_class = CppCodeEvaluator(self.in_dir) + kwargs = {'user_answer': user_answer, + 'test_case_data': self.test_case_data + } + result = get_class.evaluate(**kwargs) + self.assertFalse(result.get('success')) + self.assertIn("Incorrect:", result.get('error')) + self.assertTrue(result.get('error').splitlines > 1) def test_compilation_error(self): user_answer = "int add(int a, int b)\n{return a+b}" diff --git a/yaksh/evaluator_tests/test_code_evaluation.py b/yaksh/evaluator_tests/test_code_evaluation.py index 84701fb..e4f129c 100644 --- a/yaksh/evaluator_tests/test_code_evaluation.py +++ b/yaksh/evaluator_tests/test_code_evaluation.py @@ -1,20 +1,21 @@ import unittest import os -from yaksh import python_code_evaluator -from yaksh.language_registry import _LanguageRegistry, set_registry, get_registry +from yaksh import python_assertion_evaluator +from yaksh.language_registry import _LanguageRegistry, get_registry from yaksh.settings import SERVER_TIMEOUT class RegistryTestCase(unittest.TestCase): def setUp(self): - set_registry() self.registry_object = get_registry() self.language_registry = _LanguageRegistry() def test_set_register(self): - class_name = getattr(python_code_evaluator, 'PythonCodeEvaluator') - self.registry_object.register("python", "yaksh.python_code_evaluator.PythonCodeEvaluator") - self.assertEquals(self.registry_object.get_class("python"), class_name) + class_name = getattr(python_assertion_evaluator, 'PythonAssertionEvaluator') + self.registry_object.register("python", {"standardtestcase": "yaksh.python_assertion_evaluator.PythonAssertionEvaluator", + "stdoutbasedtestcase": "python_stdout_evaluator.PythonStdoutEvaluator" + }) + self.assertEquals(self.registry_object.get_class("python", "standardtestcase"), class_name) def tearDown(self): self.registry_object = None @@ -22,3 +23,28 @@ class RegistryTestCase(unittest.TestCase): if __name__ == 
'__main__': unittest.main() + + +# import unittest +# import os +# from yaksh import cpp_code_evaluator +# from yaksh.language_registry import _LanguageRegistry, get_registry +# from yaksh.settings import SERVER_TIMEOUT + + +# class RegistryTestCase(unittest.TestCase): +# def setUp(self): +# self.registry_object = get_registry() +# self.language_registry = _LanguageRegistry() + +# def test_set_register(self): +# class_name = getattr(cpp_code_evaluator, 'CppCodeEvaluator') +# self.registry_object.register("c", {"standardtestcase": "cpp_code_evaluator.CppCodeEvaluator"}) +# self.assertEquals(self.registry_object.get_class("c", "standardtestcase"), class_name) + +# def tearDown(self): +# self.registry_object = None + + +# if __name__ == '__main__': +# unittest.main() diff --git a/yaksh/evaluator_tests/test_java_evaluation.py b/yaksh/evaluator_tests/test_java_evaluation.py index dfa1066..a5852a3 100644 --- a/yaksh/evaluator_tests/test_java_evaluation.py +++ b/yaksh/evaluator_tests/test_java_evaluation.py @@ -2,52 +2,104 @@ import unittest import os from yaksh import code_evaluator as evaluator from yaksh.java_code_evaluator import JavaCodeEvaluator +from yaksh.settings import SERVER_TIMEOUT class JavaEvaluationTestCases(unittest.TestCase): def setUp(self): - self.language = "java" - self.ref_code_path = "java_files/main_square.java" + self.test_case_data = ["java_files/main_square.java"] self.in_dir = "/tmp" - self.test_case_data = [] evaluator.SERVER_TIMEOUT = 9 self.timeout_msg = ("Code took more than {0} seconds to run. " - "You probably have an infinite loop in " - "your code.").format(evaluator.SERVER_TIMEOUT) - self.test = None + "You probably have an infinite loop in your code.").format(evaluator.SERVER_TIMEOUT) def tearDown(self): evaluator.SERVER_TIMEOUT = 2 def test_correct_answer(self): user_answer = "class Test {\n\tint square_num(int a) {\n\treturn a*a;\n\t}\n}" - get_class = JavaCodeEvaluator(self.test_case_data, self.test, - self.language, user_answer, - self.ref_code_path, self.in_dir) - result = get_class.evaluate() + get_class = JavaCodeEvaluator(self.in_dir) + kwargs = {'user_answer': user_answer, + 'test_case_data': self.test_case_data + } + result = get_class.evaluate(**kwargs) + self.assertEquals(result.get('error'), "Correct answer") + self.assertTrue(result.get('success')) - self.assertTrue(result.get("success")) - self.assertEqual(result.get("error"), "Correct answer") + def test_incorrect_answer(self): + user_answer = "class Test {\n\tint square_num(int a) {\n\treturn a;\n\t}\n}" + get_class = JavaCodeEvaluator(self.in_dir) + kwargs = {'user_answer': user_answer, + 'test_case_data': self.test_case_data + } + result = get_class.evaluate(**kwargs) + self.assertFalse(result.get('success')) + self.assertIn("Incorrect:", result.get('error')) + self.assertTrue(result.get('error').splitlines > 1) def test_error(self): user_answer = "class Test {\n\tint square_num(int a) {\n\treturn a*a" - get_class = JavaCodeEvaluator(self.test_case_data, self.test, - self.language, user_answer, - self.ref_code_path, self.in_dir) - result = get_class.evaluate() - + get_class = JavaCodeEvaluator(self.in_dir) + kwargs = {'user_answer': user_answer, + 'test_case_data': self.test_case_data + } + result = get_class.evaluate(**kwargs) self.assertFalse(result.get("success")) self.assertTrue("Error" in result.get("error")) def test_infinite_loop(self): user_answer = "class Test {\n\tint square_num(int a) {\n\t\twhile(0==0){\n\t\t}\n\t}\n}" - get_class = JavaCodeEvaluator(self.test_case_data, 
self.test, - self.language, user_answer, - self.ref_code_path, self.in_dir) - result = get_class.evaluate() - + get_class = JavaCodeEvaluator(self.in_dir) + kwargs = {'user_answer': user_answer, + 'test_case_data': self.test_case_data + } + result = get_class.evaluate(**kwargs) self.assertFalse(result.get("success")) self.assertEquals(result.get("error"), self.timeout_msg) + # def setUp(self): + # self.language = "java" + # self.ref_code_path = "java_files/main_square.java" + # self.in_dir = "/tmp" + # self.test_case_data = [] + # evaluator.SERVER_TIMEOUT = 9 + # self.timeout_msg = ("Code took more than {0} seconds to run. " + # "You probably have an infinite loop in " + # "your code.").format(evaluator.SERVER_TIMEOUT) + # self.test = None + + # def tearDown(self): + # evaluator.SERVER_TIMEOUT = 2 + + # def test_correct_answer(self): + # user_answer = "class Test {\n\tint square_num(int a) {\n\treturn a*a;\n\t}\n}" + # get_class = JavaCodeEvaluator(self.test_case_data, self.test, + # self.language, user_answer, + # self.ref_code_path, self.in_dir) + # result = get_class.evaluate() + + # self.assertTrue(result.get("success")) + # self.assertEqual(result.get("error"), "Correct answer") + + # def test_error(self): + # user_answer = "class Test {\n\tint square_num(int a) {\n\treturn a*a" + # get_class = JavaCodeEvaluator(self.test_case_data, self.test, + # self.language, user_answer, + # self.ref_code_path, self.in_dir) + # result = get_class.evaluate() + + # self.assertFalse(result.get("success")) + # self.assertTrue("Error" in result.get("error")) + + # def test_infinite_loop(self): + # user_answer = "class Test {\n\tint square_num(int a) {\n\t\twhile(0==0){\n\t\t}\n\t}\n}" + # get_class = JavaCodeEvaluator(self.test_case_data, self.test, + # self.language, user_answer, + # self.ref_code_path, self.in_dir) + # result = get_class.evaluate() + + # self.assertFalse(result.get("success")) + # self.assertEquals(result.get("error"), self.timeout_msg) + if __name__ == '__main__': unittest.main() diff --git a/yaksh/evaluator_tests/test_scilab_evaluation.py b/yaksh/evaluator_tests/test_scilab_evaluation.py index 30af041..2b459fc 100644 --- a/yaksh/evaluator_tests/test_scilab_evaluation.py +++ b/yaksh/evaluator_tests/test_scilab_evaluation.py @@ -1,45 +1,75 @@ import unittest import os +from yaksh import code_evaluator as evaluator from yaksh.scilab_code_evaluator import ScilabCodeEvaluator from yaksh.settings import SERVER_TIMEOUT class ScilabEvaluationTestCases(unittest.TestCase): def setUp(self): - self.language = "scilab" - self.ref_code_path = "scilab_files/test_add.sce" + self.test_case_data = ["scilab_files/test_add.sce"] self.in_dir = "/tmp" - self.test_case_data = [] self.timeout_msg = ("Code took more than {0} seconds to run. " "You probably have an infinite loop in your code.").format(SERVER_TIMEOUT) - self.test = None + + # def setUp(self): + # self.language = "scilab" + # self.ref_code_path = "scilab_files/test_add.sce" + # self.in_dir = "/tmp" + # self.test_case_data = [] + # self.timeout_msg = ("Code took more than {0} seconds to run. 
" + # "You probably have an infinite loop in your code.").format(SERVER_TIMEOUT) + # self.test = None + def test_correct_answer(self): user_answer = "funcprot(0)\nfunction[c]=add(a,b)\n\tc=a+b;\nendfunction" - get_class = ScilabCodeEvaluator(self.test_case_data, self.test, - self.language, user_answer, - self.ref_code_path, self.in_dir) - result = get_class.evaluate() + get_class = ScilabCodeEvaluator(self.in_dir) + kwargs = {'user_answer': user_answer, + 'test_case_data': self.test_case_data + } + result = get_class.evaluate(**kwargs) + self.assertEquals(result.get('error'), "Correct answer") + self.assertTrue(result.get('success')) + + # def test_correct_answer(self): + # user_answer = "funcprot(0)\nfunction[c]=add(a,b)\n\tc=a+b;\nendfunction" + # get_class = ScilabCodeEvaluator(self.test_case_data, self.test, + # self.language, user_answer, + # self.ref_code_path, self.in_dir) + # result = get_class.evaluate() - self.assertTrue(result.get("success")) - self.assertEqual(result.get("error"), "Correct answer") + # self.assertTrue(result.get("success")) + # self.assertEqual(result.get("error"), "Correct answer") def test_error(self): user_answer = "funcprot(0)\nfunction[c]=add(a,b)\n\tc=a+b;\ndis(\tendfunction" - get_class = ScilabCodeEvaluator(self.test_case_data, self.test, - self.language, user_answer, - self.ref_code_path, self.in_dir) - result = get_class.evaluate() - + get_class = ScilabCodeEvaluator(self.in_dir) + kwargs = {'user_answer': user_answer, + 'test_case_data': self.test_case_data + } + result = get_class.evaluate(**kwargs) self.assertFalse(result.get("success")) self.assertTrue("error" in result.get("error")) + + def test_incorrect_answer(self): + user_answer = "funcprot(0)\nfunction[c]=add(a,b)\n\tc=a-b;\nendfunction" + get_class = ScilabCodeEvaluator(self.in_dir) + kwargs = {'user_answer': user_answer, + 'test_case_data': self.test_case_data + } + result = get_class.evaluate(**kwargs) + self.assertFalse(result.get('success')) + self.assertIn("Message", result.get('error')) + self.assertTrue(result.get('error').splitlines > 1) + def test_infinite_loop(self): user_answer = "funcprot(0)\nfunction[c]=add(a,b)\n\tc=a;\nwhile(1==1)\nend\nendfunction" - get_class = ScilabCodeEvaluator(self.test_case_data, self.test, - self.language, user_answer, - self.ref_code_path, self.in_dir) - result = get_class.evaluate() - + get_class = ScilabCodeEvaluator(self.in_dir) + kwargs = {'user_answer': user_answer, + 'test_case_data': self.test_case_data + } + result = get_class.evaluate(**kwargs) self.assertFalse(result.get("success")) self.assertEquals(result.get("error"), self.timeout_msg) diff --git a/yaksh/java_code_evaluator.py b/yaksh/java_code_evaluator.py index 6f39d71..e99448c 100644 --- a/yaksh/java_code_evaluator.py +++ b/yaksh/java_code_evaluator.py @@ -12,47 +12,24 @@ from code_evaluator import CodeEvaluator class JavaCodeEvaluator(CodeEvaluator): """Tests the Java code obtained from Code Server""" - def __init__(self, test_case_data, test, language, user_answer, - ref_code_path=None, in_dir=None): - super(JavaCodeEvaluator, self).__init__(test_case_data, test, - language, user_answer, - ref_code_path, in_dir) - self.test_case_args = self.setup() - def setup(self): super(JavaCodeEvaluator, self).setup() - - ref_path, test_case_path = self._set_test_code_file_path(self.ref_code_path) - self.submit_path = self.create_submit_code_file('Test.java') - - # Set file paths - java_student_directory = os.getcwd() + '/' - java_ref_file_name = (ref_path.split('/')[-1]).split('.')[0] - - # Set 
command variables - compile_command = 'javac {0}'.format(self.submit_path), - compile_main = ('javac {0} -classpath ' - '{1} -d {2}').format(ref_path, - java_student_directory, - java_student_directory) - run_command_args = "java -cp {0} {1}".format(java_student_directory, - java_ref_file_name) - remove_user_output = "{0}{1}.class".format(java_student_directory, - 'Test') - remove_ref_output = "{0}{1}.class".format(java_student_directory, - java_ref_file_name) - - return (ref_path, self.submit_path, compile_command, compile_main, - run_command_args, remove_user_output, remove_ref_output) + self.submit_code_path = self.create_submit_code_file('Test.java') def teardown(self): - # Delete the created file. super(JavaCodeEvaluator, self).teardown() - os.remove(self.submit_path) + # Delete the created file. + os.remove(self.submit_code_path) + + def get_commands(self, clean_ref_code_path, user_code_directory): + compile_command = 'javac {0}'.format(self.submit_code_path), + compile_main = ('javac {0} -classpath ' + '{1} -d {2}').format(clean_ref_code_path, + user_code_directory, + user_code_directory) + return compile_command, compile_main - def check_code(self, ref_code_path, submit_code_path, compile_command, - compile_main, run_command_args, remove_user_output, - remove_ref_output): + def check_code(self, user_answer, test_case_data): """ Function validates student code using instructor code as reference.The first argument ref_code_path, is the path to instructor code, it is assumed to have executable permission. @@ -72,12 +49,26 @@ class JavaCodeEvaluator(CodeEvaluator): if the required permissions are not given to the file(s). """ - if not isfile(ref_code_path): - return False, "No file at %s or Incorrect path" % ref_code_path - if not isfile(submit_code_path): - return False, 'No file at %s or Incorrect path' % submit_code_path + ref_code_path = test_case_data[0] + clean_ref_code_path, clean_test_case_path = self._set_test_code_file_path(ref_code_path) + + if not isfile(clean_ref_code_path): + return False, "No file at %s or Incorrect path" % clean_ref_code_path + if not isfile(self.submit_code_path): + return False, 'No file at %s or Incorrect path' % self.submit_code_path success = False + user_code_directory = os.getcwd() + '/' + self.write_to_submit_code_file(self.submit_code_path, user_answer) + ref_file_name = (clean_ref_code_path.split('/')[-1]).split('.')[0] + user_output_path = "{0}{1}.class".format(user_code_directory, + 'Test') + ref_output_path = "{0}{1}.class".format(user_code_directory, + ref_file_name) + # user_output_path, ref_output_path = self.set_file_paths(user_code_directory, clean_ref_code_path) + compile_command, compile_main = self.get_commands(clean_ref_code_path, user_code_directory) + run_command_args = "java -cp {0} {1}".format(user_code_directory, + ref_file_name) ret = self._compile_command(compile_command) proc, stdnt_stderr = ret stdnt_stderr = self._remove_null_substitute_char(stdnt_stderr) @@ -99,7 +90,7 @@ class JavaCodeEvaluator(CodeEvaluator): success, err = True, "Correct answer" else: err = stdout + "\n" + stderr - os.remove(remove_ref_output) + os.remove(ref_output_path) else: err = "Error:" try: @@ -111,7 +102,7 @@ class JavaCodeEvaluator(CodeEvaluator): err = err + "\n" + e except: err = err + "\n" + main_err - os.remove(remove_user_output) + os.remove(user_output_path) else: err = "Compilation Error:" try: @@ -125,3 +116,118 @@ class JavaCodeEvaluator(CodeEvaluator): err = err + "\n" + stdnt_stderr return success, err + + + # def __init__(self, 
test_case_data, test, language, user_answer, + # ref_code_path=None, in_dir=None): + # super(JavaCodeEvaluator, self).__init__(test_case_data, test, + # language, user_answer, + # ref_code_path, in_dir) + # self.test_case_args = self.setup() + + # def setup(self): + # super(JavaCodeEvaluator, self).setup() + + # ref_path, test_case_path = self._set_test_code_file_path(self.ref_code_path) + # self.submit_path = self.create_submit_code_file('Test.java') + + # # Set file paths + # java_student_directory = os.getcwd() + '/' + # java_ref_file_name = (ref_path.split('/')[-1]).split('.')[0] + + # # Set command variables + # compile_command = 'javac {0}'.format(self.submit_path), + # compile_main = ('javac {0} -classpath ' + # '{1} -d {2}').format(ref_path, + # java_student_directory, + # java_student_directory) + # run_command_args = "java -cp {0} {1}".format(java_student_directory, + # java_ref_file_name) + # remove_user_output = "{0}{1}.class".format(java_student_directory, + # 'Test') + # remove_ref_output = "{0}{1}.class".format(java_student_directory, + # java_ref_file_name) + + # return (ref_path, self.submit_path, compile_command, compile_main, + # run_command_args, remove_user_output, remove_ref_output) + + # def teardown(self): + # # Delete the created file. + # super(JavaCodeEvaluator, self).teardown() + # os.remove(self.submit_path) + + # def check_code(self, ref_code_path, submit_code_path, compile_command, + # compile_main, run_command_args, remove_user_output, + # remove_ref_output): + # """ Function validates student code using instructor code as + # reference.The first argument ref_code_path, is the path to + # instructor code, it is assumed to have executable permission. + # The second argument submit_code_path, is the path to the student + # code, it is assumed to have executable permission. + + # Returns + # -------- + + # returns (True, "Correct answer") : If the student function returns + # expected output when called by reference code. + + # returns (False, error_msg): If the student function fails to return + # expected output when called by reference code. + + # Returns (False, error_msg): If mandatory arguments are not files or + # if the required permissions are not given to the file(s). 
+ + # """ + # if not isfile(ref_code_path): + # return False, "No file at %s or Incorrect path" % ref_code_path + # if not isfile(submit_code_path): + # return False, 'No file at %s or Incorrect path' % submit_code_path + + # success = False + # ret = self._compile_command(compile_command) + # proc, stdnt_stderr = ret + # stdnt_stderr = self._remove_null_substitute_char(stdnt_stderr) + + # # Only if compilation is successful, the program is executed + # # And tested with testcases + # if stdnt_stderr == '': + # ret = self._compile_command(compile_main) + # proc, main_err = ret + # main_err = self._remove_null_substitute_char(main_err) + + # if main_err == '': + # ret = self._run_command(run_command_args, shell=True, + # stdin=None, + # stdout=subprocess.PIPE, + # stderr=subprocess.PIPE) + # proc, stdout, stderr = ret + # if proc.returncode == 0: + # success, err = True, "Correct answer" + # else: + # err = stdout + "\n" + stderr + # os.remove(remove_ref_output) + # else: + # err = "Error:" + # try: + # error_lines = main_err.splitlines() + # for e in error_lines: + # if ':' in e: + # err = err + "\n" + e.split(":", 1)[1] + # else: + # err = err + "\n" + e + # except: + # err = err + "\n" + main_err + # os.remove(remove_user_output) + # else: + # err = "Compilation Error:" + # try: + # error_lines = stdnt_stderr.splitlines() + # for e in error_lines: + # if ':' in e: + # err = err + "\n" + e.split(":", 1)[1] + # else: + # err = err + "\n" + e + # except: + # err = err + "\n" + stdnt_stderr + + # return success, err diff --git a/yaksh/language_registry.py b/yaksh/language_registry.py index 512e2f5..3205279 100644 --- a/yaksh/language_registry.py +++ b/yaksh/language_registry.py @@ -48,7 +48,7 @@ class _LanguageRegistry(object): get_class = getattr(get_module, class_name) return get_class - def register(self, language, class_name): + def register(self, language, class_names): """ Register a new code evaluator class for language""" - self._register[language] = class_name + self._register[language] = class_names diff --git a/yaksh/scilab_code_evaluator.py b/yaksh/scilab_code_evaluator.py index a8bd4cd..6ddfa5a 100644 --- a/yaksh/scilab_code_evaluator.py +++ b/yaksh/scilab_code_evaluator.py @@ -12,40 +12,42 @@ from code_evaluator import CodeEvaluator class ScilabCodeEvaluator(CodeEvaluator): """Tests the Scilab code obtained from Code Server""" - def __init__(self, test_case_data, test, language, user_answer, - ref_code_path=None, in_dir=None): - super(ScilabCodeEvaluator, self).__init__(test_case_data, test, - language, user_answer, - ref_code_path, in_dir) + # def __init__(self, test_case_data, test, language, user_answer, + # ref_code_path=None, in_dir=None): + # super(ScilabCodeEvaluator, self).__init__(test_case_data, test, + # language, user_answer, + # ref_code_path, in_dir) - # Removes all the commands that terminates scilab - self.user_answer, self.terminate_commands = self._remove_scilab_exit(user_answer.lstrip()) - self.test_case_args = self.setup() + # # Removes all the commands that terminates scilab + # self.user_answer, self.terminate_commands = self._remove_scilab_exit(user_answer.lstrip()) + # self.test_case_args = self.setup() def setup(self): super(ScilabCodeEvaluator, self).setup() - - ref_path, test_case_path = self._set_test_code_file_path(self.ref_code_path) - self.submit_path = self.create_submit_code_file('function.sci') - - return ref_path, # Return as a tuple + # ref_path, test_case_path = self._set_test_code_file_path(self.ref_code_path) + self.submit_code_path = 
self.create_submit_code_file('function.sci') + # return ref_path, # Return as a tuple def teardown(self): - # Delete the created file. super(ScilabCodeEvaluator, self).teardown() - os.remove(self.submit_path) + # Delete the created file. + os.remove(self.submit_code_path) - def check_code(self, ref_path): - success = False + def check_code(self, user_answer, test_case_data): + ref_code_path = test_case_data[0] + clean_ref_path, clean_test_case_path = self._set_test_code_file_path(ref_code_path) + user_answer, terminate_commands = self._remove_scilab_exit(user_answer.lstrip()) + success = False + self.write_to_submit_code_file(self.submit_code_path, user_answer) # Throw message if there are commmands that terminates scilab add_err="" - if self.terminate_commands: + if terminate_commands: add_err = "Please do not use exit, quit and abort commands in your\ code.\n Otherwise your code will not be evaluated\ correctly.\n" - cmd = 'printf "lines(0)\nexec(\'{0}\',2);\nquit();"'.format(ref_path) + cmd = 'printf "lines(0)\nexec(\'{0}\',2);\nquit();"'.format(clean_ref_path) cmd += ' | timeout 8 scilab-cli -nb' ret = self._run_command(cmd, shell=True, @@ -67,6 +69,38 @@ class ScilabCodeEvaluator(CodeEvaluator): return success, err + # def check_code(self, ref_path): + # success = False + + # # Throw message if there are commmands that terminates scilab + # add_err="" + # if self.terminate_commands: + # add_err = "Please do not use exit, quit and abort commands in your\ + # code.\n Otherwise your code will not be evaluated\ + # correctly.\n" + + # cmd = 'printf "lines(0)\nexec(\'{0}\',2);\nquit();"'.format(ref_path) + # cmd += ' | timeout 8 scilab-cli -nb' + # ret = self._run_command(cmd, + # shell=True, + # stdout=subprocess.PIPE, + # stderr=subprocess.PIPE) + # proc, stdout, stderr = ret + + # # Get only the error. + # stderr = self._get_error(stdout) + # if stderr is None: + # # Clean output + # stdout = self._strip_output(stdout) + # if proc.returncode == 5: + # success, err = True, "Correct answer" + # else: + # err = add_err + stdout + # else: + # err = add_err + stderr + + # return success, err + def _remove_scilab_exit(self, string): """ Removes exit, quit and abort from the scilab code diff --git a/yaksh/settings.py b/yaksh/settings.py index 30fab0a..aaf6226 100644 --- a/yaksh/settings.py +++ b/yaksh/settings.py @@ -20,12 +20,12 @@ SERVER_TIMEOUT = 2 URL_ROOT = '' code_evaluators = { - "python": {"standardtestcase": "python_assertion_evaluator.PythonStandardEvaluator", - "stdoutbasedtestcase": "python_stdout_evaluator.PythonStdoutEvaluator" - }, - "c": "cpp_code_evaluator.CppCodeEvaluator", - "cpp": "cpp_code_evaluator.CppCodeEvaluator", - "java": "java_code_evaluator.JavaCodeEvaluator", - "bash": "bash_code_evaluator.BashCodeEvaluator", - "scilab": "scilab_code_evaluator.ScilabCodeEvaluator", + "python": {"standardtestcase": "yaksh.python_assertion_evaluator.PythonAssertionEvaluator", + "stdoutbasedtestcase": "yaksh.python_stdout_evaluator.PythonStdoutEvaluator" + }, + "c": {"standardtestcase": "yaksh.cpp_code_evaluator.CppCodeEvaluator"}, + "cpp": {"standardtestcase": "yaksh.cpp_code_evaluator.CppCodeEvaluator"}, + "java": {"standardtestcase": "yaksh.java_code_evaluator.JavaCodeEvaluator"}, + "bash": {"standardtestcase": "yaksh.bash_code_evaluator.BashCodeEvaluator"}, + "scilab": {"standardtestcase": "yaksh.scilab_code_evaluator.ScilabCodeEvaluator"}, } |
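
With this change, settings.code_evaluators maps every language to a dictionary keyed by test-case type instead of a single dotted path, and the registry resolves a class with get_class(language, test_case_type), as exercised by the updated registry test above. A standalone sketch of that two-level lookup; the resolve_evaluator helper and its importlib-based loading are illustrative rather than a copy of language_registry's internals:

    import importlib

    from yaksh.settings import code_evaluators

    def resolve_evaluator(language, test_case_type):
        # e.g. "yaksh.python_assertion_evaluator.PythonAssertionEvaluator"
        dotted_path = code_evaluators[language][test_case_type]
        module_name, class_name = dotted_path.rsplit(".", 1)
        module = importlib.import_module(module_name)
        return getattr(module, class_name)

    cls = resolve_evaluator("python", "standardtestcase")
    # cls is PythonAssertionEvaluator; instantiate it with the working
    # directory and call evaluate() as shown in the tests above.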