-rw-r--r--  yaksh/base_evaluator.py                              |   11
-rw-r--r--  yaksh/bash_code_evaluator.py                         |   54
-rw-r--r--  yaksh/bash_stdio_evaluator.py                        |   45
-rw-r--r--  yaksh/code_evaluator.py                              |   34
-rw-r--r--  yaksh/cpp_code_evaluator.py                          |   34
-rw-r--r--  yaksh/cpp_stdio_evaluator.py                         |   39
-rw-r--r--  yaksh/evaluator_tests/old_test_python_evaluation.py |  549
-rw-r--r--  yaksh/evaluator_tests/test_bash_evaluation.py       |  211
-rw-r--r--  yaksh/evaluator_tests/test_c_cpp_evaluation.py      |  468
-rw-r--r--  yaksh/evaluator_tests/test_code_evaluation.py       |   12
-rw-r--r--  yaksh/evaluator_tests/test_java_evaluation.py       |  371
-rw-r--r--  yaksh/evaluator_tests/test_python_evaluation.py     |    2
-rw-r--r--  yaksh/evaluator_tests/test_scilab_evaluation.py     |  107
-rw-r--r--  yaksh/java_code_evaluator.py                         |   46
-rw-r--r--  yaksh/java_stdio_evaluator.py                        |   41
-rwxr-xr-x  yaksh/old_code_server.py                             |  221
-rw-r--r--  yaksh/python_assertion_evaluator.py                  |   17
-rw-r--r--  yaksh/python_stdio_evaluator.py                      |    9
-rw-r--r--  yaksh/scilab_code_evaluator.py                       |   44
-rw-r--r--  yaksh/settings.py                                    |   14
-rw-r--r--  yaksh/stdio_evaluator.py                             |   12
21 files changed, 1132 insertions(+), 1209 deletions(-)
diff --git a/yaksh/base_evaluator.py b/yaksh/base_evaluator.py
index c8177b7..b290ba4 100644
--- a/yaksh/base_evaluator.py
+++ b/yaksh/base_evaluator.py
@@ -4,7 +4,13 @@ import traceback
import pwd
import os
from os.path import join, isfile
+from os.path import isdir, dirname, abspath, join, isfile, exists
import subprocess
+import stat
+
+
+# Local imports
+from .code_evaluator import MY_DIR, TimeoutException
class BaseEvaluator(object):
"""Base Evaluator class containing generic attributes and callable methods"""
@@ -64,3 +70,8 @@ class BaseEvaluator(object):
test_case_path = join(MY_DIR, test_case_path)
return ref_path, test_case_path
+
+ def _set_file_as_executable(self, fname):
+ os.chmod(fname, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
+ | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP
+ | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)
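Note: the permission mask assembled by this relocated helper is equivalent to mode 0o777. A minimal standalone sketch of the method as it now lives on BaseEvaluator:

    import os
    import stat

    def set_file_as_executable(fname):
        # rwx for user, group and others -- same bits as chmod 0o777
        os.chmod(fname, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
                 | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP
                 | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)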
diff --git a/yaksh/bash_code_evaluator.py b/yaksh/bash_code_evaluator.py
index b5974d2..03ec16a 100644
--- a/yaksh/bash_code_evaluator.py
+++ b/yaksh/bash_code_evaluator.py
@@ -9,26 +9,37 @@ import subprocess
import importlib
# local imports
-from .code_evaluator import CodeEvaluator
+from .base_evaluator import BaseEvaluator
from .file_utils import copy_files, delete_files
-class BashCodeEvaluator(CodeEvaluator):
+class BashCodeEvaluator(BaseEvaluator):
# Private Protocol ##########
- def setup(self):
- super(BashCodeEvaluator, self).setup()
+ def __init__(self, metadata, test_case_data):
self.files = []
- self.submit_code_path = self.create_submit_code_file('submit.sh')
- self._set_file_as_executable(self.submit_code_path)
+
+ # Set metadata values
+ self.user_answer = metadata.get('user_answer')
+ self.file_paths = metadata.get('file_paths')
+ self.partial_grading = metadata.get('partial_grading')
+
+ # Set test case data values
+ self.test_case = test_case_data.get('test_case')
+ self.weight = test_case_data.get('weight')
+
+ # def setup(self):
+ # super(BashCodeEvaluator, self).setup()
+ # self.files = []
+ # self.submit_code_path = self.create_submit_code_file('submit.sh')
+ # self._set_file_as_executable(self.submit_code_path)
def teardown(self):
# Delete the created file.
os.remove(self.submit_code_path)
if self.files:
delete_files(self.files)
- super(BashCodeEvaluator, self).teardown()
- def check_code(self, user_answer, file_paths, partial_grading, test_case, weight):
+ def check_code(self):
""" Function validates student script using instructor script as
reference. Test cases can optionally be provided. The first argument
ref_path, is the path to instructor script, it is assumed to
@@ -53,18 +64,21 @@ class BashCodeEvaluator(CodeEvaluator):
Returns (False, error_msg, 0.0): If mandatory arguments are not files or if
the required permissions are not given to the file(s).
"""
- ref_code_path = test_case
+ ref_code_path = self.test_case
success = False
test_case_weight = 0.0
+ self.submit_code_path = self.create_submit_code_file('submit.sh')
+ self._set_file_as_executable(self.submit_code_path)
+
get_ref_path, get_test_case_path = ref_code_path.strip().split(',')
get_ref_path = get_ref_path.strip()
get_test_case_path = get_test_case_path.strip()
clean_ref_code_path, clean_test_case_path = \
self._set_test_code_file_path(get_ref_path, get_test_case_path)
- if file_paths:
- self.files = copy_files(file_paths)
+ if self.file_paths:
+ self.files = copy_files(self.file_paths)
if not isfile(clean_ref_code_path):
msg = "No file at %s or Incorrect path" % clean_ref_code_path
return False, msg, 0.0
@@ -78,8 +92,8 @@ class BashCodeEvaluator(CodeEvaluator):
msg = "Script %s is not executable" % self.submit_code_path
return False, msg, 0.0
- user_answer = user_answer.replace("\r", "")
- self.write_to_submit_code_file(self.submit_code_path, user_answer)
+ self.user_answer = self.user_answer.replace("\r", "")
+ self.write_to_submit_code_file(self.submit_code_path, self.user_answer)
 if not clean_test_case_path:
ret = self._run_command(clean_ref_code_path,
@@ -95,7 +109,7 @@ class BashCodeEvaluator(CodeEvaluator):
)
proc, stdnt_stdout, stdnt_stderr = ret
if inst_stdout == stdnt_stdout:
- test_case_weight = float(weight) if partial_grading else 0.0
+ test_case_weight = float(self.weight) if self.partial_grading else 0.0
return True, "Correct answer", test_case_weight
else:
err = "Error: expected %s, got %s" % (inst_stderr,
@@ -116,21 +130,21 @@ class BashCodeEvaluator(CodeEvaluator):
loop_count = 0
test_cases = open(clean_test_case_path).readlines()
num_lines = len(test_cases)
- for test_case in test_cases:
+ for tc in test_cases:
loop_count += 1
if valid_answer:
args = [clean_ref_code_path] + \
- [x for x in test_case.split()]
+ [x for x in tc.split()]
ret = self._run_command(args,
stdin=None,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
proc, inst_stdout, inst_stderr = ret
- if file_paths:
- self.files = copy_files(file_paths)
+ if self.file_paths:
+ self.files = copy_files(self.file_paths)
args = [self.submit_code_path] + \
- [x for x in test_case.split()]
+ [x for x in tc.split()]
ret = self._run_command(args,
stdin=None,
stdout=subprocess.PIPE,
@@ -138,7 +152,7 @@ class BashCodeEvaluator(CodeEvaluator):
proc, stdnt_stdout, stdnt_stderr = ret
valid_answer = inst_stdout == stdnt_stdout
if valid_answer and (num_lines == loop_count):
- test_case_weight = float(weight) if partial_grading else 0.0
+ test_case_weight = float(self.weight) if self.partial_grading else 0.0
return True, "Correct answer", test_case_weight
else:
err = ("Error:expected"
diff --git a/yaksh/bash_stdio_evaluator.py b/yaksh/bash_stdio_evaluator.py
index 1dd9fd5..3344c57 100644
--- a/yaksh/bash_stdio_evaluator.py
+++ b/yaksh/bash_stdio_evaluator.py
@@ -11,43 +11,54 @@ from .file_utils import copy_files, delete_files
class BashStdioEvaluator(StdIOEvaluator):
"""Evaluates Bash StdIO based code"""
-
- def setup(self):
- super(BashStdioEvaluator, self).setup()
+ def __init__(self, metadata, test_case_data):
self.files = []
- self.submit_code_path = self.create_submit_code_file('Test.sh')
+
+ # Set metadata values
+ self.user_answer = metadata.get('user_answer')
+ self.file_paths = metadata.get('file_paths')
+ self.partial_grading = metadata.get('partial_grading')
+
+ # Set test case data values
+ self.expected_input = test_case_data.get('expected_input')
+ self.expected_output = test_case_data.get('expected_output')
+ self.weight = test_case_data.get('weight')
+
+ # def setup(self):
+ # super(BashStdioEvaluator, self).setup()
+ # self.files = []
+ # self.submit_code_path = self.create_submit_code_file('Test.sh')
def teardown(self):
os.remove(self.submit_code_path)
if self.files:
delete_files(self.files)
- super(BashStdioEvaluator, self).teardown()
- def compile_code(self, user_answer, file_paths, expected_input, expected_output, weight):
- if file_paths:
- self.files = copy_files(file_paths)
+ def compile_code(self):
+ self.submit_code_path = self.create_submit_code_file('Test.sh')
+ if self.file_paths:
+ self.files = copy_files(self.file_paths)
if not isfile(self.submit_code_path):
msg = "No file at %s or Incorrect path" % self.submit_code_path
return False, msg
user_code_directory = os.getcwd() + '/'
- user_answer = user_answer.replace("\r", "")
- self.write_to_submit_code_file(self.submit_code_path, user_answer)
+ self.user_answer = self.user_answer.replace("\r", "")
+ self.write_to_submit_code_file(self.submit_code_path, self.user_answer)
- def check_code(self, user_answer, file_paths, partial_grading,
- expected_input, expected_output, weight):
+ def check_code(self):
success = False
test_case_weight = 0.0
- expected_input = str(expected_input).replace('\r', '')
+ self.expected_input = str(self.expected_input).replace('\r', '')
proc = subprocess.Popen("bash ./Test.sh",
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
- success, err = self.evaluate_stdio(user_answer, proc,
- expected_input,
- expected_output
+ success, err = self.evaluate_stdio(self.user_answer, proc,
+ self.expected_input,
+ self.expected_output
)
- test_case_weight = float(weight) if partial_grading and success else 0.0
+ test_case_weight = float(self.weight) if self.partial_grading and success else 0.0
return success, err, test_case_weight
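The stdio evaluators keep a two-phase protocol: compile_code() now creates Test.sh in the working directory and check_code() pipes expected_input to it. A sketch under the same assumptions as above, with expected_input/expected_output/weight in test_case_data:

    evaluator = BashStdioEvaluator(metadata, test_case_data)
    evaluator.compile_code()                       # writes Test.sh into the cwd
    success, err, weight = evaluator.check_code()  # feeds stdin, compares stdout
    evaluator.teardown()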
diff --git a/yaksh/code_evaluator.py b/yaksh/code_evaluator.py
index e5b8853..f1ac5b7 100644
--- a/yaksh/code_evaluator.py
+++ b/yaksh/code_evaluator.py
@@ -4,6 +4,7 @@ import sys
import pwd
import os
import stat
+import contextlib
from os.path import isdir, dirname, abspath, join, isfile, exists
import signal
import traceback
@@ -31,6 +32,15 @@ registry = None
class TimeoutException(Exception):
pass
+@contextlib.contextmanager
+def change_dir(path):
+ cur_dir = os.getcwd()
+ os.chdir(path)
+ try:
+ yield
+ finally:
+ os.chdir(cur_dir)
+
def timeout_handler(signum, frame):
"""A handler for the ALARM signal."""
@@ -89,9 +99,10 @@ class CodeEvaluator(object):
A tuple: (success, error message, weight).
"""
- test_case_instances = self.get_evaluator_objects(kwargs)
self.setup()
- success, error, weight = self.safe_evaluate(test_case_instances)
+ test_case_instances = self.get_evaluator_objects(kwargs)
+ with change_dir(self.in_dir):
+ success, error, weight = self.safe_evaluate(test_case_instances)
self.teardown()
result = {'success': success, 'error': error, 'weight': weight}
@@ -102,7 +113,7 @@ class CodeEvaluator(object):
if self.in_dir:
if not os.path.exists(self.in_dir):
os.makedirs(self.in_dir)
- self._change_dir(self.in_dir)
+ # self._change_dir(self.in_dir)
def get_evaluator_objects(self, kwargs):
metadata = kwargs.get('metadata') # metadata contains user_answer, language, partial_grading, file_paths
@@ -140,6 +151,7 @@ class CodeEvaluator(object):
test_case_success = False
test_case_instance.compile_code() #user_answer, file_paths, test_case
test_case_success, err, test_case_weight = test_case_instance.check_code() #**kwargs
+ test_case_instance.teardown()
# self.teardown()
# user_answer,
# file_paths,
@@ -223,7 +235,7 @@ class CodeEvaluator(object):
def teardown(self):
# Cancel the signal
delete_signal_handler()
- self._change_dir(dirname(MY_DIR))
+ # self._change_dir(dirname(MY_DIR))
# def check_code(self):
# raise NotImplementedError("check_code method not implemented")
@@ -246,10 +258,10 @@ class CodeEvaluator(object):
# submit_f.write(user_answer.lstrip())
# submit_f.close()
- def _set_file_as_executable(self, fname):
- os.chmod(fname, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
- | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP
- | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)
+ # def _set_file_as_executable(self, fname):
+ # os.chmod(fname, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
+ # | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP
+ # | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)
# def _set_test_code_file_path(self, ref_path=None, test_case_path=None):
# if ref_path and not ref_path.startswith('/'):
@@ -275,9 +287,9 @@ class CodeEvaluator(object):
# raise
# return proc, stdout.decode('utf-8'), stderr.decode('utf-8')
- def _change_dir(self, in_dir):
- if in_dir is not None and isdir(in_dir):
- os.chdir(in_dir)
+ # def _change_dir(self, in_dir):
+ # if in_dir is not None and isdir(in_dir):
+ # os.chdir(in_dir)
# def _remove_null_substitute_char(self, string):
# """Returns a string without any null and substitute characters"""
diff --git a/yaksh/cpp_code_evaluator.py b/yaksh/cpp_code_evaluator.py
index 806fe67..c6f5a7e 100644
--- a/yaksh/cpp_code_evaluator.py
+++ b/yaksh/cpp_code_evaluator.py
@@ -39,17 +39,15 @@ class CppCodeEvaluator(BaseEvaluator):
# self.user_output_path = ""
# self.ref_output_path = ""
- # def teardown(self):
- # # Delete the created file.
- # os.remove(self.submit_code_path)
- # if os.path.exists(self.ref_output_path):
- # os.remove(self.ref_output_path)
- # if os.path.exists(self.user_output_path):
- # os.remove(self.user_output_path)
- # if self.files:
- # delete_files(self.files)
- # super(CppCodeEvaluator, self).teardown()
-
+ def teardown(self):
+ # Delete the created file.
+ os.remove(self.submit_code_path)
+ if os.path.exists(self.ref_output_path):
+ os.remove(self.ref_output_path)
+ if os.path.exists(self.user_output_path):
+ os.remove(self.user_output_path)
+ if self.files:
+ delete_files(self.files)
def set_file_paths(self):
user_output_path = os.getcwd() + '/output_file'
@@ -66,15 +64,15 @@ class CppCodeEvaluator(BaseEvaluator):
ref_output_path)
return compile_command, compile_main
- def compile_code(self, user_answer, file_paths, test_case, weight):
+ def compile_code(self):
if self.compiled_user_answer and self.compiled_test_code:
return None
else:
- ref_code_path = test_case
+ ref_code_path = self.test_case
clean_ref_code_path, clean_test_case_path = \
self._set_test_code_file_path(ref_code_path)
- if file_paths:
- self.files = copy_files(file_paths)
+ if self.file_paths:
+ self.files = copy_files(self.file_paths)
if not isfile(clean_ref_code_path):
msg = "No file at %s or Incorrect path" % clean_ref_code_path
return False, msg
@@ -82,7 +80,7 @@ class CppCodeEvaluator(BaseEvaluator):
msg = "No file at %s or Incorrect path" % self.submit_code_path
return False, msg
- self.write_to_submit_code_file(self.submit_code_path, user_answer)
+ self.write_to_submit_code_file(self.submit_code_path, self.user_answer)
self.user_output_path, self.ref_output_path = self.set_file_paths()
self.compile_command, self.compile_main = self.get_commands(
clean_ref_code_path,
@@ -105,7 +103,7 @@ class CppCodeEvaluator(BaseEvaluator):
return self.compiled_user_answer, self.compiled_test_code
- def check_code(self, user_answer, file_paths, partial_grading, test_case, weight):
+ def check_code(self):
""" Function validates student code using instructor code as
reference.The first argument ref_code_path, is the path to
instructor code, it is assumed to have executable permission.
@@ -145,7 +143,7 @@ class CppCodeEvaluator(BaseEvaluator):
proc, stdout, stderr = ret
if proc.returncode == 0:
success, err = True, "Correct answer"
- test_case_weight = float(weight) if partial_grading else 0.0
+ test_case_weight = float(self.weight) if self.partial_grading else 0.0
else:
err = "{0} \n {1}".format(stdout, stderr)
else:
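compile_code() is now idempotent: once self.compiled_user_answer and self.compiled_test_code are set it returns early, so repeated calls are cheap. Sketch of the resulting flow, assuming CppCodeEvaluator takes the same (metadata, test_case_data) constructor as the other evaluators in this change:

    evaluator = CppCodeEvaluator(metadata, test_case_data)
    evaluator.compile_code()                       # compiles submission and reference
    success, err, weight = evaluator.check_code()  # runs the linked test binary
    evaluator.teardown()                           # removes submit/output files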
diff --git a/yaksh/cpp_stdio_evaluator.py b/yaksh/cpp_stdio_evaluator.py
index 00fad92..a57afbe 100644
--- a/yaksh/cpp_stdio_evaluator.py
+++ b/yaksh/cpp_stdio_evaluator.py
@@ -11,17 +11,29 @@ from .file_utils import copy_files, delete_files
class CppStdioEvaluator(StdIOEvaluator):
"""Evaluates C StdIO based code"""
-
- def setup(self):
- super(CppStdioEvaluator, self).setup()
+ def __init__(self, metadata, test_case_data):
self.files = []
- self.submit_code_path = self.create_submit_code_file('main.c')
+ self.submit_code_path = self.create_submit_code_file('submit.c')
+
+ # Set metadata values
+ self.user_answer = metadata.get('user_answer')
+ self.file_paths = metadata.get('file_paths')
+ self.partial_grading = metadata.get('partial_grading')
+
+ # Set test case data values
+ self.expected_input = test_case_data.get('expected_input')
+ self.expected_output = test_case_data.get('expected_output')
+ self.weight = test_case_data.get('weight')
+
+ # def setup(self):
+ # super(CppStdioEvaluator, self).setup()
+ # self.files = []
+ # self.submit_code_path = self.create_submit_code_file('main.c')
def teardown(self):
os.remove(self.submit_code_path)
if self.files:
delete_files(self.files)
- super(CppStdioEvaluator, self).teardown()
def set_file_paths(self):
user_output_path = os.getcwd() + '/output_file'
@@ -35,13 +47,13 @@ class CppStdioEvaluator(StdIOEvaluator):
ref_output_path)
return compile_command, compile_main
- def compile_code(self, user_answer, file_paths, expected_input, expected_output, weight):
- if file_paths:
+ def compile_code(self):
+ if self.file_paths:
 self.files = copy_files(self.file_paths)
if not isfile(self.submit_code_path):
msg = "No file at %s or Incorrect path" % self.submit_code_path
return False, msg
- self.write_to_submit_code_file(self.submit_code_path, user_answer)
+ self.write_to_submit_code_file(self.submit_code_path, self.user_answer)
self.user_output_path, self.ref_output_path = self.set_file_paths()
self.compile_command, self.compile_main = self.get_commands(
self.user_output_path,
@@ -61,8 +73,7 @@ class CppStdioEvaluator(StdIOEvaluator):
)
return self.compiled_user_answer, self.compiled_test_code
- def check_code(self, user_answer, file_paths, partial_grading,
- expected_input, expected_output, weight):
+ def check_code(self):
success = False
test_case_weight = 0.0
@@ -78,9 +89,9 @@ class CppStdioEvaluator(StdIOEvaluator):
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
- success, err = self.evaluate_stdio(user_answer, proc,
- expected_input,
- expected_output
+ success, err = self.evaluate_stdio(self.user_answer, proc,
+ self.expected_input,
+ self.expected_output
)
os.remove(self.ref_output_path)
else:
@@ -106,5 +117,5 @@ class CppStdioEvaluator(StdIOEvaluator):
err = err + "\n" + e
except:
err = err + "\n" + stdnt_stderr
- test_case_weight = float(weight) if partial_grading and success else 0.0
+ test_case_weight = float(self.weight) if self.partial_grading and success else 0.0
return success, err, test_case_weight
diff --git a/yaksh/evaluator_tests/old_test_python_evaluation.py b/yaksh/evaluator_tests/old_test_python_evaluation.py
deleted file mode 100644
index 9796fa2..0000000
--- a/yaksh/evaluator_tests/old_test_python_evaluation.py
+++ /dev/null
@@ -1,549 +0,0 @@
-from __future__ import unicode_literals
-import unittest
-import os
-import tempfile
-import shutil
-from textwrap import dedent
-
-# Local import
-from yaksh.python_assertion_evaluator import PythonAssertionEvaluator
-from yaksh.python_stdio_evaluator import PythonStdioEvaluator
-from yaksh.settings import SERVER_TIMEOUT
-
-
-class PythonAssertionEvaluationTestCases(unittest.TestCase):
- def setUp(self):
- with open('/tmp/test.txt', 'wb') as f:
- f.write('2'.encode('ascii'))
- tmp_in_dir_path = tempfile.mkdtemp()
- self.in_dir = tmp_in_dir_path
- self.test_case_data = [{"test_case": 'assert(add(1,2)==3)', 'weight': 0.0},
- {"test_case": 'assert(add(-1,2)==1)', 'weight': 0.0},
- {"test_case": 'assert(add(-1,-2)==-3)', 'weight': 0.0},
- ]
- self.timeout_msg = ("Code took more than {0} seconds to run. "
- "You probably have an infinite loop in"
- " your code.").format(SERVER_TIMEOUT)
- self.file_paths = None
-
- def tearDown(self):
- os.remove('/tmp/test.txt')
- shutil.rmtree(self.in_dir)
-
- def test_correct_answer(self):
- # Given
- user_answer = "def add(a,b):\n\treturn a + b"
- kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths,
- 'partial_grading': False
- }
-
- # When
- evaluator = PythonAssertionEvaluator()
- result = evaluator.evaluate(**kwargs)
-
- # Then
- self.assertTrue(result.get('success'))
- self.assertIn("Correct answer", result.get('error'))
-
- def test_incorrect_answer(self):
- # Given
- user_answer = "def add(a,b):\n\treturn a - b"
- kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths,
- 'partial_grading': False
- }
-
- # When
- evaluator = PythonAssertionEvaluator()
- result = evaluator.evaluate(**kwargs)
-
- # Then
- self.assertFalse(result.get('success'))
- self.assertIn('AssertionError in: assert(add(1,2)==3)',
- result.get('error')
- )
- self.assertIn('AssertionError in: assert(add(-1,2)==1)',
- result.get('error')
- )
- self.assertIn('AssertionError in: assert(add(-1,-2)==-3)',
- result.get('error')
- )
-
- def test_partial_incorrect_answer(self):
- # Given
- user_answer = "def add(a,b):\n\treturn abs(a) + abs(b)"
- test_case_data = [{"test_case": 'assert(add(-1,2)==1)', 'weight': 1.0},
- {"test_case": 'assert(add(-1,-2)==-3)', 'weight': 1.0},
- {"test_case": 'assert(add(1,2)==3)', 'weight': 2.0}
- ]
- kwargs = {'user_answer': user_answer,
- 'test_case_data': test_case_data,
- 'file_paths': self.file_paths,
- 'partial_grading': True
- }
-
- # When
- evaluator = PythonAssertionEvaluator()
- result = evaluator.evaluate(**kwargs)
-
- # Then
- self.assertFalse(result.get('success'))
- self.assertEqual(result.get('weight'), 2.0)
- self.assertIn('AssertionError in: assert(add(-1,2)==1)',
- result.get('error')
- )
- self.assertIn('AssertionError in: assert(add(-1,-2)==-3)',
- result.get('error')
- )
-
- def test_infinite_loop(self):
- # Given
- user_answer = "def add(a, b):\n\twhile True:\n\t\tpass"
- kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths,
- 'partial_grading': False
- }
-
- # When
- evaluator = PythonAssertionEvaluator()
- result = evaluator.evaluate(**kwargs)
-
- # Then
- self.assertFalse(result.get('success'))
- self.assertEqual(result.get('error'), self.timeout_msg)
-
- def test_syntax_error(self):
- # Given
- user_answer = dedent("""
- def add(a, b);
- return a + b
- """)
- syntax_error_msg = ["Traceback",
- "call",
- "File",
- "line",
- "<string>",
- "SyntaxError",
- "invalid syntax"
- ]
- kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths,
- 'partial_grading': False
- }
-
- # When
- evaluator = PythonAssertionEvaluator()
- result = evaluator.evaluate(**kwargs)
- err = result.get("error").splitlines()
-
- # Then
- self.assertFalse(result.get("success"))
- self.assertEqual(5, len(err))
- for msg in syntax_error_msg:
- self.assertIn(msg, result.get("error"))
-
- def test_indent_error(self):
- # Given
- user_answer = dedent("""
- def add(a, b):
- return a + b
- """)
- indent_error_msg = ["Traceback", "call",
- "File",
- "line",
- "<string>",
- "IndentationError",
- "indented block"
- ]
- kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths,
- 'partial_grading': False
-
- }
-
- # When
- evaluator = PythonAssertionEvaluator()
- result = evaluator.evaluate(**kwargs)
- err = result.get("error").splitlines()
-
- # Then
- self.assertFalse(result.get("success"))
- self.assertEqual(5, len(err))
- for msg in indent_error_msg:
- self.assertIn(msg, result.get("error"))
-
- def test_name_error(self):
- # Given
- user_answer = ""
- name_error_msg = ["Traceback",
- "call",
- "NameError",
- "name",
- "defined"
- ]
- kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths,
- 'partial_grading': False
- }
-
- # When
- evaluator = PythonAssertionEvaluator()
- result = evaluator.evaluate(**kwargs)
- err = result.get("error").splitlines()
-
- # Then
- self.assertFalse(result.get("success"))
- self.assertEqual(3, len(err))
- for msg in name_error_msg:
- self.assertIn(msg, result.get("error"))
-
- def test_recursion_error(self):
- # Given
- user_answer = dedent("""
- def add(a, b):
- return add(3, 3)
- """)
- recursion_error_msg = ["Traceback",
- "call",
- "maximum recursion depth exceeded"
- ]
- kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths,
- 'partial_grading': False
- }
-
- # When
- evaluator = PythonAssertionEvaluator()
- result = evaluator.evaluate(**kwargs)
- err = result.get("error").splitlines()
-
- # Then
- self.assertFalse(result.get("success"))
- for msg in recursion_error_msg:
- self.assertIn(msg, result.get("error"))
-
- def test_type_error(self):
- # Given
- user_answer = dedent("""
- def add(a):
- return a + b
- """)
- type_error_msg = ["Traceback",
- "call",
- "TypeError",
- "argument"
- ]
- kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths,
- 'partial_grading': False
- }
-
- # When
- evaluator = PythonAssertionEvaluator()
- result = evaluator.evaluate(**kwargs)
- err = result.get("error").splitlines()
-
- # Then
- self.assertFalse(result.get("success"))
- self.assertEqual(3, len(err))
- for msg in type_error_msg:
- self.assertIn(msg, result.get("error"))
-
- def test_value_error(self):
- # Given
- user_answer = dedent("""
- def add(a, b):
- c = 'a'
- return int(a) + int(b) + int(c)
- """)
- value_error_msg = ["Traceback",
- "call",
- "ValueError",
- "invalid literal",
- "base"
- ]
- kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths,
- 'partial_grading': False
- }
-
- # When
- evaluator = PythonAssertionEvaluator()
- result = evaluator.evaluate(**kwargs)
- err = result.get("error").splitlines()
-
- # Then
- self.assertFalse(result.get("success"))
- self.assertEqual(4, len(err))
- for msg in value_error_msg:
- self.assertIn(msg, result.get("error"))
-
- def test_file_based_assert(self):
- # Given
- self.test_case_data = [{"test_case": "assert(ans()=='2')", "weight": 0.0}]
- self.file_paths = [('/tmp/test.txt', False)]
- user_answer = dedent("""
- def ans():
- with open("test.txt") as f:
- return f.read()[0]
- """)
- kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths,
- 'partial_grading': False
- }
-
- # When
- evaluator = PythonAssertionEvaluator()
- result = evaluator.evaluate(**kwargs)
-
- # Then
- self.assertIn("Correct answer", result.get('error'))
- self.assertTrue(result.get('success'))
-
- def test_single_testcase_error(self):
- # Given
- """ Tests the user answer with just an incorrect test case """
-
- user_answer = "def palindrome(a):\n\treturn a == a[::-1]"
- test_case_data = [{"test_case": 's="abbb"\nasert palindrome(s)==False',
- "weight": 0.0
- }
- ]
- syntax_error_msg = ["Traceback",
- "call",
- "File",
- "line",
- "<string>",
- "SyntaxError",
- "invalid syntax"
- ]
- kwargs = {'user_answer': user_answer,
- 'test_case_data': test_case_data,
- 'file_paths': self.file_paths,
- 'partial_grading': False
- }
-
- # When
- evaluator = PythonAssertionEvaluator()
- result = evaluator.evaluate(**kwargs)
- err = result.get("error").splitlines()
-
- # Then
- self.assertFalse(result.get("success"))
- self.assertEqual(5, len(err))
- for msg in syntax_error_msg:
- self.assertIn(msg, result.get("error"))
-
-
- def test_multiple_testcase_error(self):
- """ Tests the user answer with an correct test case
- first and then with an incorrect test case """
- # Given
- user_answer = "def palindrome(a):\n\treturn a == a[::-1]"
- test_case_data = [{"test_case": 'assert(palindrome("abba")==True)',
- "weight": 0.0
- },
- {"test_case": 's="abbb"\nassert palindrome(S)==False',
- "weight": 0.0
- }
- ]
- name_error_msg = ["Traceback",
- "call",
- "File",
- "line",
- "<string>",
- "NameError",
- "name 'S' is not defined"
- ]
- kwargs = {'user_answer': user_answer,
- 'test_case_data': test_case_data,
- 'file_paths': self.file_paths,
- 'partial_grading': False
- }
-
- # When
- evaluator = PythonAssertionEvaluator()
- result = evaluator.evaluate(**kwargs)
- err = result.get("error").splitlines()
-
- # Then
- self.assertFalse(result.get("success"))
- self.assertEqual(3, len(err))
- for msg in name_error_msg:
- self.assertIn(msg, result.get("error"))
-
-
-class PythonStdIOEvaluationTestCases(unittest.TestCase):
- def setUp(self):
- with open('/tmp/test.txt', 'wb') as f:
- f.write('2'.encode('ascii'))
- self.file_paths = None
-
- def test_correct_answer_integer(self):
- # Given
- self.test_case_data = [{"expected_input": "1\n2",
- "expected_output": "3",
- "weight": 0.0
- }]
- user_answer = dedent("""
- a = int(input())
- b = int(input())
- print(a+b)
- """
- )
- kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data,
- 'partial_grading': False
- }
-
- # When
- evaluator = PythonStdioEvaluator()
- result = evaluator.evaluate(**kwargs)
-
- # Then
- self.assertTrue(result.get('success'))
- self.assertIn("Correct answer", result.get('error'))
-
- def test_correct_answer_list(self):
- # Given
- self.test_case_data = [{"expected_input": "1,2,3\n5,6,7",
- "expected_output": "[1, 2, 3, 5, 6, 7]",
- "weight": 0.0
- }]
- user_answer = dedent("""
- from six.moves import input
- input_a = input()
- input_b = input()
- a = [int(i) for i in input_a.split(',')]
- b = [int(i) for i in input_b.split(',')]
- print(a+b)
- """
- )
- kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data,
- 'partial_grading': False
- }
-
- # When
- evaluator = PythonStdioEvaluator()
- result = evaluator.evaluate(**kwargs)
-
- # Then
- self.assertTrue(result.get('success'))
- self.assertIn("Correct answer", result.get('error'))
-
- def test_correct_answer_string(self):
- # Given
- self.test_case_data = [{"expected_input": ("the quick brown fox jumps over the lazy dog\nthe"),
- "expected_output": "2",
- "weight": 0.0
- }]
- user_answer = dedent("""
- from six.moves import input
- a = str(input())
- b = str(input())
- print(a.count(b))
- """
- )
- kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data,
- 'partial_grading': False
- }
-
- # When
- evaluator = PythonStdioEvaluator()
- result = evaluator.evaluate(**kwargs)
-
- # Then
- self.assertTrue(result.get('success'))
- self.assertIn("Correct answer", result.get('error'))
-
- def test_incorrect_answer_integer(self):
- # Given
- self.test_case_data = [{"expected_input": "1\n2",
- "expected_output": "3",
- "weight": 0.0
- }]
- user_answer = dedent("""
- a = int(input())
- b = int(input())
- print(a-b)
- """
- )
- kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data,
- 'partial_grading': False
- }
-
- # When
- evaluator = PythonStdioEvaluator()
- result = evaluator.evaluate(**kwargs)
-
- # Then
- self.assertFalse(result.get('success'))
- self.assertIn("Incorrect answer", result.get('error'))
-
- def test_file_based_answer(self):
- # Given
- self.test_case_data = [{"expected_input": "",
- "expected_output": "2",
- "weight": 0.0
- }]
- self.file_paths = [('/tmp/test.txt', False)]
-
- user_answer = dedent("""
- with open("test.txt") as f:
- a = f.read()
- print(a[0])
- """
- )
- kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths,
- 'partial_grading': False
- }
-
- # When
- evaluator = PythonStdioEvaluator()
- result = evaluator.evaluate(**kwargs)
-
- # Then
- self.assertEqual(result.get('error'), "Correct answer\n")
- self.assertTrue(result.get('success'))
-
- def test_infinite_loop(self):
- # Given
- test_case_data = [{"expected_input": "1\n2",
- "expected_output": "3",
- "weight": 0.0
- }]
- timeout_msg = ("Code took more than {0} seconds to run. "
- "You probably have an infinite loop in"
- " your code.").format(SERVER_TIMEOUT)
- user_answer = "while True:\n\tpass"
- kwargs = {'user_answer': user_answer,
- 'test_case_data': test_case_data,
- 'partial_grading': False
- }
-
- # When
- evaluator = PythonStdioEvaluator()
- result = evaluator.evaluate(**kwargs)
-
- # Then
- self.assertEqual(result.get('error'), timeout_msg)
- self.assertFalse(result.get('success'))
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/yaksh/evaluator_tests/test_bash_evaluation.py b/yaksh/evaluator_tests/test_bash_evaluation.py
index 99e5122..8888ee6 100644
--- a/yaksh/evaluator_tests/test_bash_evaluation.py
+++ b/yaksh/evaluator_tests/test_bash_evaluation.py
@@ -3,6 +3,7 @@ import unittest
import os
import shutil
import tempfile
+from yaksh.code_evaluator import CodeEvaluator
from yaksh.bash_code_evaluator import BashCodeEvaluator
from yaksh.bash_stdio_evaluator import BashStdioEvaluator
from yaksh.settings import SERVER_TIMEOUT
@@ -15,6 +16,7 @@ class BashAssertionEvaluationTestCases(unittest.TestCase):
f.write('2'.encode('ascii'))
self.test_case_data = [
{"test_case": "bash_files/sample.sh,bash_files/sample.args",
+ "test_case_type": "standardtestcase",
"weight": 0.0
}
]
@@ -32,39 +34,78 @@ class BashAssertionEvaluationTestCases(unittest.TestCase):
user_answer = ("#!/bin/bash\n[[ $# -eq 2 ]]"
" && echo $(( $1 + $2 )) && exit $(( $1 + $2 ))"
)
- get_class = BashCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
- 'partial_grading': True,
+ # get_class = BashCodeEvaluator(self.in_dir)
+ # kwargs = {'user_answer': user_answer,
+ # 'partial_grading': True,
+ # 'test_case_data': self.test_case_data,
+ # 'file_paths': self.file_paths
+ # }
+ # result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'bash'
+ },
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
- }
- result = get_class.evaluate(**kwargs)
+ }
+
+ evaluator = CodeEvaluator(self.in_dir)
+ result = evaluator.evaluate(kwargs)
+
self.assertTrue(result.get('success'))
self.assertEqual(result.get('error'), "Correct answer\n")
def test_error(self):
user_answer = ("#!/bin/bash\n[[ $# -eq 2 ]] "
"&& echo $(( $1 - $2 )) && exit $(( $1 - $2 ))")
- get_class = BashCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
- 'partial_grading': True,
+ # get_class = BashCodeEvaluator(self.in_dir)
+ # kwargs = {'user_answer': user_answer,
+ # 'partial_grading': True,
+ # 'test_case_data': self.test_case_data,
+ # 'file_paths': self.file_paths
+ # }
+ # result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'bash'
+ },
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
- }
- result = get_class.evaluate(**kwargs)
+ }
+
+ evaluator = CodeEvaluator(self.in_dir)
+ result = evaluator.evaluate(kwargs)
+
self.assertFalse(result.get("success"))
self.assertTrue("Error" in result.get("error"))
def test_infinite_loop(self):
user_answer = ("#!/bin/bash\nwhile [ 1 ] ;"
" do echo "" > /dev/null ; done")
- get_class = BashCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
- 'partial_grading': True,
+ # get_class = BashCodeEvaluator(self.in_dir)
+ # kwargs = {'user_answer': user_answer,
+ # 'partial_grading': True,
+ # 'test_case_data': self.test_case_data,
+ # 'file_paths': self.file_paths
+ # }
+ # result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'bash'
+ },
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
- }
- result = get_class.evaluate(**kwargs)
+ }
+
+ evaluator = CodeEvaluator(self.in_dir)
+ result = evaluator.evaluate(kwargs)
+
self.assertFalse(result.get("success"))
self.assertEqual(result.get("error"), self.timeout_msg)
@@ -72,17 +113,31 @@ class BashAssertionEvaluationTestCases(unittest.TestCase):
self.file_paths = [('/tmp/test.txt', False)]
self.test_case_data = [
{"test_case": "bash_files/sample1.sh,bash_files/sample1.args",
+ "test_case_type": "standardtestcase",
"weight": 0.0
}
]
user_answer = ("#!/bin/bash\ncat $1")
- get_class = BashCodeEvaluator()
- kwargs = {'user_answer': user_answer,
- 'partial_grading': True,
+ # get_class = BashCodeEvaluator()
+ # kwargs = {'user_answer': user_answer,
+ # 'partial_grading': True,
+ # 'test_case_data': self.test_case_data,
+ # 'file_paths': self.file_paths
+ # }
+ # result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'bash'
+ },
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
- }
- result = get_class.evaluate(**kwargs)
+ }
+
+ evaluator = CodeEvaluator(self.in_dir)
+ result = evaluator.evaluate(kwargs)
+
self.assertTrue(result.get("success"))
self.assertEqual(result.get("error"), "Correct answer\n")
@@ -92,6 +147,8 @@ class BashStdioEvaluationTestCases(unittest.TestCase):
self.timeout_msg = ("Code took more than {0} seconds to run. "
"You probably have an infinite loop in your"
" code.").format(SERVER_TIMEOUT)
+ self.file_paths = None
+
def test_correct_answer(self):
user_answer = dedent(""" #!/bin/bash
@@ -102,14 +159,28 @@ class BashStdioEvaluationTestCases(unittest.TestCase):
)
test_case_data = [{'expected_output': '11',
'expected_input': '5\n6',
+ 'test_case_type': 'stdiobasedtestcase',
'weight': 0.0
}]
- get_class = BashStdioEvaluator()
- kwargs = {"user_answer": user_answer,
- "partial_grading": True,
- "test_case_data": test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ # get_class = BashStdioEvaluator()
+ # kwargs = {"user_answer": user_answer,
+ # "partial_grading": True,
+ # "test_case_data": test_case_data
+ # }
+ # result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'bash'
+ },
+ 'test_case_data': test_case_data,
+ }
+
+ evaluator = CodeEvaluator(self.in_dir)
+ result = evaluator.evaluate(kwargs)
+
self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
@@ -123,15 +194,29 @@ class BashStdioEvaluationTestCases(unittest.TestCase):
"""
)
test_case_data = [{'expected_output': '1 2 3\n4 5 6\n7 8 9\n',
- 'expected_input': '1,2,3\n4,5,6\n7,8,9',
- 'weight': 0.0
+ 'expected_input': '1,2,3\n4,5,6\n7,8,9',
+ 'test_case_type': 'stdiobasedtestcase',
+ 'weight': 0.0
}]
- get_class = BashStdioEvaluator()
- kwargs = {"user_answer": user_answer,
- "partial_grading": True,
- "test_case_data": test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ # get_class = BashStdioEvaluator()
+ # kwargs = {"user_answer": user_answer,
+ # "partial_grading": True,
+ # "test_case_data": test_case_data
+ # }
+ # result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'bash'
+ },
+ 'test_case_data': test_case_data,
+ }
+
+ evaluator = CodeEvaluator(self.in_dir)
+ result = evaluator.evaluate(kwargs)
+
self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
@@ -144,14 +229,27 @@ class BashStdioEvaluationTestCases(unittest.TestCase):
)
test_case_data = [{'expected_output': '11',
'expected_input': '5\n6',
+ 'test_case_type': 'stdiobasedtestcase',
'weight': 0.0
}]
- get_class = BashStdioEvaluator()
- kwargs = {"user_answer": user_answer,
- "partial_grading": True,
- "test_case_data": test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ # get_class = BashStdioEvaluator()
+ # kwargs = {"user_answer": user_answer,
+ # "partial_grading": True,
+ # "test_case_data": test_case_data
+ # }
+ # result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'bash'
+ },
+ 'test_case_data': test_case_data,
+ }
+
+ evaluator = CodeEvaluator(self.in_dir)
+ result = evaluator.evaluate(kwargs)
self.assertIn("Incorrect", result.get('error'))
self.assertFalse(result.get('success'))
@@ -164,14 +262,27 @@ class BashStdioEvaluationTestCases(unittest.TestCase):
)
test_case_data = [{'expected_output': '10',
'expected_input': '',
+ 'test_case_type': 'stdiobasedtestcase',
'weight': 0.0
}]
- get_class = BashStdioEvaluator()
- kwargs = {"user_answer": user_answer,
- "partial_grading": True,
- "test_case_data": test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ # get_class = BashStdioEvaluator()
+ # kwargs = {"user_answer": user_answer,
+ # "partial_grading": True,
+ # "test_case_data": test_case_data
+ # }
+ # result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'bash'
+ },
+ 'test_case_data': test_case_data,
+ }
+
+ evaluator = CodeEvaluator(self.in_dir)
+ result = evaluator.evaluate(kwargs)
self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
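All of these tests now funnel through the dispatcher with a nested dict instead of instantiating evaluator classes directly; 'test_case_type' in each test case tells CodeEvaluator which class to build. The pattern repeated throughout, in one place (result shape from code_evaluator.py above):

    kwargs = {
        'metadata': {
            'user_answer': user_answer,
            'file_paths': None,
            'partial_grading': False,
            'language': 'bash',
        },
        'test_case_data': [{'expected_output': '11',
                            'expected_input': '5\n6',
                            'test_case_type': 'stdiobasedtestcase',
                            'weight': 0.0}],
    }
    result = CodeEvaluator(in_dir).evaluate(kwargs)
    # result == {'success': ..., 'error': ..., 'weight': ...}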
diff --git a/yaksh/evaluator_tests/test_c_cpp_evaluation.py b/yaksh/evaluator_tests/test_c_cpp_evaluation.py
index d5193d3..9080e88 100644
--- a/yaksh/evaluator_tests/test_c_cpp_evaluation.py
+++ b/yaksh/evaluator_tests/test_c_cpp_evaluation.py
@@ -3,10 +3,14 @@ import unittest
import os
import shutil
import tempfile
+from textwrap import dedent
+
+# Local import
+from yaksh.code_evaluator import CodeEvaluator
from yaksh.cpp_code_evaluator import CppCodeEvaluator
from yaksh.cpp_stdio_evaluator import CppStdioEvaluator
from yaksh.settings import SERVER_TIMEOUT
-from textwrap import dedent
+
class CAssertionEvaluationTestCases(unittest.TestCase):
@@ -15,6 +19,7 @@ class CAssertionEvaluationTestCases(unittest.TestCase):
f.write('2'.encode('ascii'))
tmp_in_dir_path = tempfile.mkdtemp()
self.test_case_data = [{"test_case": "c_cpp_files/main.cpp",
+ "test_case_type": "standardtestcase",
"weight": 0.0
}]
self.in_dir = tmp_in_dir_path
@@ -29,25 +34,49 @@ class CAssertionEvaluationTestCases(unittest.TestCase):
def test_correct_answer(self):
user_answer = "int add(int a, int b)\n{return a+b;}"
- get_class = CppCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
- 'partial_grading': False,
- 'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
- }
- result = get_class.evaluate(**kwargs)
+ # kwargs = {'user_answer': user_answer,
+ # 'partial_grading': False,
+ # 'test_case_data': self.test_case_data,
+ # 'file_paths': self.file_paths
+ # }
+ # result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'cpp'
+ },
+ 'test_case_data': self.test_case_data,
+ }
+
+ evaluator = CodeEvaluator(self.in_dir)
+ result = evaluator.evaluate(kwargs)
+
self.assertTrue(result.get('success'))
self.assertEqual(result.get('error'), "Correct answer\n")
def test_incorrect_answer(self):
user_answer = "int add(int a, int b)\n{return a-b;}"
- get_class = CppCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
+ # kwargs = {'user_answer': user_answer,
+ # 'partial_grading': False,
+ # 'test_case_data': self.test_case_data,
+ # 'file_paths': self.file_paths
+ # }
+ # result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
'partial_grading': False,
+ 'language': 'cpp'
+ },
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
- }
- result = get_class.evaluate(**kwargs)
+ }
+
+ evaluator = CodeEvaluator(self.in_dir)
+ result = evaluator.evaluate(kwargs)
+
lines_of_error = len(result.get('error').splitlines())
self.assertFalse(result.get('success'))
self.assertIn("Incorrect:", result.get('error'))
@@ -55,31 +84,57 @@ class CAssertionEvaluationTestCases(unittest.TestCase):
def test_compilation_error(self):
user_answer = "int add(int a, int b)\n{return a+b}"
- get_class = CppCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
+ # kwargs = {'user_answer': user_answer,
+ # 'partial_grading': False,
+ # 'test_case_data': self.test_case_data,
+ # 'file_paths': self.file_paths
+ # }
+ # result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
'partial_grading': False,
+ 'language': 'cpp'
+ },
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
- }
- result = get_class.evaluate(**kwargs)
+ }
+
+ evaluator = CodeEvaluator(self.in_dir)
+ result = evaluator.evaluate(kwargs)
+
self.assertFalse(result.get("success"))
self.assertTrue("Compilation Error" in result.get("error"))
def test_infinite_loop(self):
user_answer = "int add(int a, int b)\n{while(1>0){}}"
- get_class = CppCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
+ # get_class = CppCodeEvaluator(self.in_dir)
+ # kwargs = {'user_answer': user_answer,
+ # 'partial_grading': False,
+ # 'test_case_data': self.test_case_data,
+ # 'file_paths': self.file_paths
+ # }
+ # result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
'partial_grading': False,
+ 'language': 'cpp'
+ },
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
- }
- result = get_class.evaluate(**kwargs)
+ }
+
+ evaluator = CodeEvaluator(self.in_dir)
+ result = evaluator.evaluate(kwargs)
+
self.assertFalse(result.get("success"))
self.assertEqual(result.get("error"), self.timeout_msg)
def test_file_based_assert(self):
self.file_paths = [('/tmp/test.txt', False)]
self.test_case_data = [{"test_case": "c_cpp_files/file_data.c",
+ "test_case_type": "standardtestcase",
"weight": 0.0
}]
user_answer = dedent("""
@@ -94,13 +149,26 @@ class CAssertionEvaluationTestCases(unittest.TestCase):
return buff[0];
}
""")
- get_class = CppCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
+ # get_class = CppCodeEvaluator(self.in_dir)
+ # kwargs = {'user_answer': user_answer,
+ # 'partial_grading': False,
+ # 'test_case_data': self.test_case_data,
+ # 'file_paths': self.file_paths
+ # }
+ # result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
'partial_grading': False,
+ 'language': 'cpp'
+ },
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
- }
- result = get_class.evaluate(**kwargs)
+ }
+
+ evaluator = CodeEvaluator(self.in_dir)
+ result = evaluator.evaluate(kwargs)
+
self.assertTrue(result.get('success'))
self.assertEqual(result.get('error'), "Correct answer\n")
@@ -108,12 +176,14 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
def setUp(self):
self.test_case_data = [{'expected_output': '11',
'expected_input': '5\n6',
- 'weight': 0.0
+ 'weight': 0.0,
+ 'test_case_type': 'stdiobasedtestcase',
}]
self.in_dir = tempfile.mkdtemp()
self.timeout_msg = ("Code took more than {0} seconds to run. "
"You probably have an infinite loop in"
" your code.").format(SERVER_TIMEOUT)
+ self.file_paths = None
def test_correct_answer(self):
user_answer = dedent("""
@@ -123,19 +193,33 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
scanf("%d%d",&a,&b);
printf("%d",a+b);
}""")
- get_class = CppStdioEvaluator()
- kwargs = {'user_answer': user_answer,
+ # get_class = CppStdioEvaluator()
+ # kwargs = {'user_answer': user_answer,
+ # 'partial_grading': False,
+ # 'test_case_data': self.test_case_data
+ # }
+ # result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
'partial_grading': False,
- 'test_case_data': self.test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ 'language': 'cpp'
+ },
+ 'test_case_data': self.test_case_data,
+ }
+
+ evaluator = CodeEvaluator(self.in_dir)
+ result = evaluator.evaluate(kwargs)
+
self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_array_input(self):
self.test_case_data = [{'expected_output': '561',
'expected_input': '5\n6\n1',
- 'weight': 0.0
+ 'weight': 0.0,
+ 'test_case_type': 'stdiobasedtestcase',
}]
user_answer = dedent("""
#include<stdio.h>
@@ -146,19 +230,33 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
for(i=0;i<3;i++){
printf("%d",a[i]);}
}""")
- get_class = CppStdioEvaluator()
- kwargs = {'user_answer': user_answer,
+ # get_class = CppStdioEvaluator()
+ # kwargs = {'user_answer': user_answer,
+ # 'partial_grading': False,
+ # 'test_case_data': self.test_case_data
+ # }
+ # result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
'partial_grading': False,
- 'test_case_data': self.test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ 'language': 'cpp'
+ },
+ 'test_case_data': self.test_case_data,
+ }
+
+ evaluator = CodeEvaluator(self.in_dir)
+ result = evaluator.evaluate(kwargs)
+
self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_string_input(self):
self.test_case_data = [{'expected_output': 'abc',
'expected_input': 'abc',
- 'weight': 0.0
+ 'weight': 0.0,
+ 'test_case_type': 'stdiobasedtestcase',
}]
user_answer = dedent("""
#include<stdio.h>
@@ -167,12 +265,25 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
scanf("%s",a);
printf("%s",a);
}""")
- get_class = CppStdioEvaluator()
- kwargs = {'user_answer': user_answer,
+ # get_class = CppStdioEvaluator()
+ # kwargs = {'user_answer': user_answer,
+ # 'partial_grading': False,
+ # 'test_case_data': self.test_case_data
+ # }
+ # result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
'partial_grading': False,
- 'test_case_data': self.test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ 'language': 'cpp'
+ },
+ 'test_case_data': self.test_case_data,
+ }
+
+ evaluator = CodeEvaluator(self.in_dir)
+ result = evaluator.evaluate(kwargs)
+
self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
@@ -183,12 +294,25 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
int a=10;
printf("%d",a);
}""")
- get_class = CppStdioEvaluator()
- kwargs = {'user_answer': user_answer,
+ # get_class = CppStdioEvaluator()
+ # kwargs = {'user_answer': user_answer,
+ # 'partial_grading': False,
+ # 'test_case_data': self.test_case_data
+ # }
+ # result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
'partial_grading': False,
- 'test_case_data': self.test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ 'language': 'cpp'
+ },
+ 'test_case_data': self.test_case_data,
+ }
+
+ evaluator = CodeEvaluator(self.in_dir)
+ result = evaluator.evaluate(kwargs)
+
lines_of_error = len(result.get('error').splitlines())
self.assertFalse(result.get('success'))
self.assertIn("Incorrect", result.get('error'))
@@ -201,12 +325,25 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
int a=10;
printf("%d",a)
}""")
- get_class = CppStdioEvaluator()
- kwargs = {'user_answer': user_answer,
+ # get_class = CppStdioEvaluator()
+ # kwargs = {'user_answer': user_answer,
+ # 'partial_grading': False,
+ # 'test_case_data': self.test_case_data
+ # }
+ # result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
'partial_grading': False,
- 'test_case_data': self.test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ 'language': 'cpp'
+ },
+ 'test_case_data': self.test_case_data,
+ }
+
+ evaluator = CodeEvaluator(self.in_dir)
+ result = evaluator.evaluate(kwargs)
+
self.assertFalse(result.get("success"))
self.assertTrue("Compilation Error" in result.get("error"))
@@ -217,19 +354,33 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
while(0==0){
printf("abc");}
}""")
- get_class = CppStdioEvaluator()
- kwargs = {'user_answer': user_answer,
+ # get_class = CppStdioEvaluator()
+ # kwargs = {'user_answer': user_answer,
+ # 'partial_grading': False,
+ # 'test_case_data': self.test_case_data
+ # }
+ # result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
'partial_grading': False,
- 'test_case_data': self.test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ 'language': 'cpp'
+ },
+ 'test_case_data': self.test_case_data,
+ }
+
+ evaluator = CodeEvaluator(self.in_dir)
+ result = evaluator.evaluate(kwargs)
+
self.assertFalse(result.get("success"))
self.assertEqual(result.get("error"), self.timeout_msg)
def test_only_stdout(self):
self.test_case_data = [{'expected_output': '11',
'expected_input': '',
- 'weight': 0.0
+ 'weight': 0.0,
+ 'test_case_type': 'stdiobasedtestcase',
}]
user_answer = dedent("""
#include<stdio.h>
@@ -237,12 +388,25 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
int a=5,b=6;
printf("%d",a+b);
}""")
- get_class = CppStdioEvaluator()
- kwargs = {'user_answer': user_answer,
+ # get_class = CppStdioEvaluator()
+ # kwargs = {'user_answer': user_answer,
+ # 'partial_grading': False,
+ # 'test_case_data': self.test_case_data
+ # }
+ # result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
'partial_grading': False,
- 'test_case_data': self.test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ 'language': 'cpp'
+ },
+ 'test_case_data': self.test_case_data,
+ }
+
+ evaluator = CodeEvaluator(self.in_dir)
+ result = evaluator.evaluate(kwargs)
+
self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
@@ -255,19 +419,33 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
cin>>a>>b;
cout<<a+b;
}""")
- get_class = CppStdioEvaluator()
- kwargs = {'user_answer': user_answer,
+ # get_class = CppStdioEvaluator()
+ # kwargs = {'user_answer': user_answer,
+ # 'partial_grading': False,
+ # 'test_case_data': self.test_case_data
+ # }
+ # result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
'partial_grading': False,
- 'test_case_data': self.test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ 'language': 'cpp'
+ },
+ 'test_case_data': self.test_case_data,
+ }
+
+ evaluator = CodeEvaluator(self.in_dir)
+ result = evaluator.evaluate(kwargs)
+
self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_cpp_array_input(self):
self.test_case_data = [{'expected_output': '561',
'expected_input': '5\n6\n1',
- 'weight': 0.0
+ 'weight': 0.0,
+ 'test_case_type': 'stdiobasedtestcase',
}]
user_answer = dedent("""
#include<iostream>
@@ -279,19 +457,33 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
for(i=0;i<3;i++){
cout<<a[i];}
}""")
- get_class = CppStdioEvaluator()
- kwargs = {'user_answer': user_answer,
+ # get_class = CppStdioEvaluator()
+ # kwargs = {'user_answer': user_answer,
+ # 'partial_grading': False,
+ # 'test_case_data': self.test_case_data
+ # }
+ # result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
'partial_grading': False,
- 'test_case_data': self.test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ 'language': 'cpp'
+ },
+ 'test_case_data': self.test_case_data,
+ }
+
+ evaluator = CodeEvaluator(self.in_dir)
+ result = evaluator.evaluate(kwargs)
+
self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_cpp_string_input(self):
self.test_case_data = [{'expected_output': 'abc',
'expected_input': 'abc',
- 'weight': 0.0
+ 'weight': 0.0,
+ 'test_case_type': 'stdiobasedtestcase',
}]
user_answer = dedent("""
#include<iostream>
@@ -301,12 +493,25 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
cin>>a;
cout<<a;
}""")
- get_class = CppStdioEvaluator()
- kwargs = {'user_answer': user_answer,
+ # get_class = CppStdioEvaluator()
+ # kwargs = {'user_answer': user_answer,
+ # 'partial_grading': False,
+ # 'test_case_data': self.test_case_data
+ # }
+ # result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
'partial_grading': False,
- 'test_case_data': self.test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ 'language': 'cpp'
+ },
+ 'test_case_data': self.test_case_data,
+ }
+
+ evaluator = CodeEvaluator(self.in_dir)
+ result = evaluator.evaluate(kwargs)
+
self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
@@ -318,12 +523,25 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
int a=10;
cout<<a;
}""")
- get_class = CppStdioEvaluator()
- kwargs = {'user_answer': user_answer,
+ # get_class = CppStdioEvaluator()
+ # kwargs = {'user_answer': user_answer,
+ # 'partial_grading': False,
+ # 'test_case_data': self.test_case_data
+ # }
+ # result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
'partial_grading': False,
- 'test_case_data': self.test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ 'language': 'cpp'
+ },
+ 'test_case_data': self.test_case_data,
+ }
+
+ evaluator = CodeEvaluator(self.in_dir)
+ result = evaluator.evaluate(kwargs)
+
lines_of_error = len(result.get('error').splitlines())
self.assertFalse(result.get('success'))
self.assertIn("Incorrect", result.get('error'))
@@ -337,12 +555,25 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
int a=10;
cout<<a
}""")
- get_class = CppStdioEvaluator()
- kwargs = {'user_answer': user_answer,
+ # get_class = CppStdioEvaluator()
+ # kwargs = {'user_answer': user_answer,
+ # 'partial_grading': False,
+ # 'test_case_data': self.test_case_data
+ # }
+ # result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
'partial_grading': False,
- 'test_case_data': self.test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ 'language': 'cpp'
+ },
+ 'test_case_data': self.test_case_data,
+ }
+
+ evaluator = CodeEvaluator(self.in_dir)
+ result = evaluator.evaluate(kwargs)
+
self.assertFalse(result.get("success"))
self.assertTrue("Compilation Error" in result.get("error"))
@@ -354,19 +585,33 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
while(0==0){
cout<<"abc";}
}""")
- get_class = CppStdioEvaluator()
- kwargs = {'user_answer': user_answer,
+ # get_class = CppStdioEvaluator()
+ # kwargs = {'user_answer': user_answer,
+ # 'partial_grading': False,
+ # 'test_case_data': self.test_case_data
+ # }
+ # result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
'partial_grading': False,
- 'test_case_data': self.test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ 'language': 'cpp'
+ },
+ 'test_case_data': self.test_case_data,
+ }
+
+ evaluator = CodeEvaluator(self.in_dir)
+ result = evaluator.evaluate(kwargs)
+
self.assertFalse(result.get("success"))
self.assertEqual(result.get("error"), self.timeout_msg)
def test_cpp_only_stdout(self):
self.test_case_data = [{'expected_output': '11',
'expected_input': '',
- 'weight': 0.0
+ 'weight': 0.0,
+ 'test_case_type': 'stdiobasedtestcase',
}]
user_answer = dedent("""
#include<iostream>
@@ -375,12 +620,25 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
int a=5,b=6;
cout<<a+b;
}""")
- get_class = CppStdioEvaluator()
- kwargs = {'user_answer': user_answer,
+ # get_class = CppStdioEvaluator()
+ # kwargs = {'user_answer': user_answer,
+ # 'partial_grading': False,
+ # 'test_case_data': self.test_case_data
+ # }
+ # result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
'partial_grading': False,
- 'test_case_data': self.test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ 'language': 'cpp'
+ },
+ 'test_case_data': self.test_case_data,
+ }
+
+ evaluator = CodeEvaluator(self.in_dir)
+ result = evaluator.evaluate(kwargs)
+
self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
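
Note: every migrated test above converges on one calling convention: the old per-class kwargs collapse into a two-level payload, and a single CodeEvaluator drives the run. A minimal sketch of that convention, using only names that appear in this diff (the literal answer and expected output are illustrative):

    from yaksh.code_evaluator import CodeEvaluator

    kwargs = {
        'metadata': {
            'user_answer': 'print("hello")',   # submission under test (illustrative)
            'file_paths': None,                # optional files to copy in
            'partial_grading': False,
            'language': 'python'               # key into settings.code_evaluators
        },
        'test_case_data': [{
            'test_case_type': 'stdiobasedtestcase',  # selects the evaluator class
            'expected_input': '',
            'expected_output': 'hello',
            'weight': 0.0
        }],
    }

    evaluator = CodeEvaluator('/tmp')          # in_dir: the working directory
    result = evaluator.evaluate(kwargs)
    # result is a dict; the tests read result.get('success') and result.get('error')
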
diff --git a/yaksh/evaluator_tests/test_code_evaluation.py b/yaksh/evaluator_tests/test_code_evaluation.py
index 88e0253..f664200 100644
--- a/yaksh/evaluator_tests/test_code_evaluation.py
+++ b/yaksh/evaluator_tests/test_code_evaluation.py
@@ -13,12 +13,12 @@ class RegistryTestCase(unittest.TestCase):
assertion_evaluator_path = ("yaksh.python_assertion_evaluator"
".PythonAssertionEvaluator"
)
- stdout_evaluator_path = ("yaksh.python_stdout_evaluator."
- "PythonStdoutEvaluator"
+ stdio_evaluator_path = ("yaksh.python_stdio_evaluator."
+ "PythonStdioEvaluator"
)
code_evaluators['python'] = \
{"standardtestcase": assertion_evaluator_path,
- "stdiobasedtestcase": stdout_evaluator_path
+ "stdiobasedtestcase": stdio_evaluator_path
}
def test_set_register(self):
@@ -28,15 +28,15 @@ class RegistryTestCase(unittest.TestCase):
assertion_evaluator_path = ("yaksh.python_assertion_evaluator"
".PythonAssertionEvaluator"
)
- stdout_evaluator_path = ("yaksh.python_stdout_evaluator."
- "PythonStdoutEvaluator"
+ stdio_evaluator_path = ("yaksh.python_stdio_evaluator."
+ "PythonStdioEvaluator"
)
class_name = getattr(python_assertion_evaluator,
'PythonAssertionEvaluator'
)
self.registry_object.register("python",
{"standardtestcase": assertion_evaluator_path,
- "stdiobasedtestcase": stdout_evaluator_path
+ "stdiobasedtestcase": stdio_evaluator_path
}
)
self.assertEqual(evaluator_class, class_name)
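
The registry test above exercises dotted-path registration per (language, test_case_type) pair. The evaluator modules in this patch import importlib, so resolution presumably looks something like the helper below; resolve_evaluator is a hypothetical name, a plausible sketch rather than the exact code in yaksh.language_registry:

    import importlib

    def resolve_evaluator(dotted_path):
        """Turn 'yaksh.python_stdio_evaluator.PythonStdioEvaluator' into a class."""
        module_path, class_name = dotted_path.rsplit('.', 1)
        module = importlib.import_module(module_path)
        return getattr(module, class_name)

The test compares the registered path's class against getattr(python_assertion_evaluator, 'PythonAssertionEvaluator') in exactly this spirit.
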
diff --git a/yaksh/evaluator_tests/test_java_evaluation.py b/yaksh/evaluator_tests/test_java_evaluation.py
index f7ecd97..a66b6d6 100644
--- a/yaksh/evaluator_tests/test_java_evaluation.py
+++ b/yaksh/evaluator_tests/test_java_evaluation.py
@@ -3,7 +3,8 @@ import unittest
import os
import shutil
import tempfile
-from yaksh import code_evaluator as evaluator
+from yaksh import code_evaluator
+from yaksh.code_evaluator import CodeEvaluator
from yaksh.java_code_evaluator import JavaCodeEvaluator
from yaksh.java_stdio_evaluator import JavaStdioEvaluator
from yaksh.settings import SERVER_TIMEOUT
@@ -17,41 +18,70 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase):
tmp_in_dir_path = tempfile.mkdtemp()
self.test_case_data = [
{"test_case": "java_files/main_square.java",
+ "test_case_type": "standardtestcase",
"weight": 0.0
}
]
self.in_dir = tmp_in_dir_path
- evaluator.SERVER_TIMEOUT = 9
+ code_evaluator.SERVER_TIMEOUT = 9
self.timeout_msg = ("Code took more than {0} seconds to run. "
"You probably have an infinite loop in"
- " your code.").format(evaluator.SERVER_TIMEOUT)
+ " your code.").format(code_evaluator.SERVER_TIMEOUT)
self.file_paths = None
def tearDown(self):
+ code_evaluator.SERVER_TIMEOUT = 4
os.remove('/tmp/test.txt')
shutil.rmtree(self.in_dir)
def test_correct_answer(self):
user_answer = "class Test {\n\tint square_num(int a) {\n\treturn a*a;\n\t}\n}"
- get_class = JavaCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
- 'partial_grading': True,
+
+ # get_class = JavaCodeEvaluator(self.in_dir)
+ # kwargs = {'user_answer': user_answer,
+ # 'partial_grading': True,
+ # 'test_case_data': self.test_case_data,
+ # 'file_paths': self.file_paths
+ # }
+ # result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'java'
+ },
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
- }
- result = get_class.evaluate(**kwargs)
+ }
+
+ evaluator = CodeEvaluator(self.in_dir)
+ result = evaluator.evaluate(kwargs)
+
self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_incorrect_answer(self):
user_answer = "class Test {\n\tint square_num(int a) {\n\treturn a;\n\t}\n}"
- get_class = JavaCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
- 'partial_grading': True,
+ # get_class = JavaCodeEvaluator(self.in_dir)
+ # kwargs = {'user_answer': user_answer,
+ # 'partial_grading': True,
+ # 'test_case_data': self.test_case_data,
+ # 'file_paths': self.file_paths
+ # }
+ # result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'java'
+ },
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
- }
- result = get_class.evaluate(**kwargs)
+ }
+
+ evaluator = CodeEvaluator(self.in_dir)
+ result = evaluator.evaluate(kwargs)
+
self.assertFalse(result.get('success'))
lines_of_error = len(result.get('error').splitlines())
self.assertFalse(result.get('success'))
@@ -60,25 +90,52 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase):
def test_error(self):
user_answer = "class Test {\n\tint square_num(int a) {\n\treturn a*a"
- get_class = JavaCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
- 'partial_grading': True,
+ # get_class = JavaCodeEvaluator(self.in_dir)
+ # kwargs = {'user_answer': user_answer,
+ # 'partial_grading': True,
+ # 'test_case_data': self.test_case_data,
+ # 'file_paths': self.file_paths
+ # }
+ # result = get_class.evaluate(**kwargs)
+
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'java'
+ },
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
- }
- result = get_class.evaluate(**kwargs)
+ }
+
+ evaluator = CodeEvaluator(self.in_dir)
+ result = evaluator.evaluate(kwargs)
+
self.assertFalse(result.get("success"))
self.assertTrue("Error" in result.get("error"))
def test_infinite_loop(self):
user_answer = "class Test {\n\tint square_num(int a) {\n\t\twhile(0==0){\n\t\t}\n\t}\n}"
- get_class = JavaCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
- 'partial_grading': True,
+ # get_class = JavaCodeEvaluator(self.in_dir)
+ # kwargs = {'user_answer': user_answer,
+ # 'partial_grading': True,
+ # 'test_case_data': self.test_case_data,
+ # 'file_paths': self.file_paths
+ # }
+ # result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'java'
+ },
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
- }
- result = get_class.evaluate(**kwargs)
+ }
+
+ evaluator = CodeEvaluator(self.in_dir)
+ result = evaluator.evaluate(kwargs)
+
self.assertFalse(result.get("success"))
self.assertEqual(result.get("error"), self.timeout_msg)
@@ -86,6 +143,7 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase):
self.file_paths = [("/tmp/test.txt", False)]
self.test_case_data = [
{"test_case": "java_files/read_file.java",
+ "test_case_type": "standardtestcase",
"weight": 0.0
}
]
@@ -107,34 +165,48 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase):
br.close();
}}}
""")
- get_class = JavaCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
- 'partial_grading': True,
+ # get_class = JavaCodeEvaluator(self.in_dir)
+ # kwargs = {'user_answer': user_answer,
+ # 'partial_grading': True,
+ # 'test_case_data': self.test_case_data,
+ # 'file_paths': self.file_paths
+ # }
+ # result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'java'
+ },
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
- }
- result = get_class.evaluate(**kwargs)
+ }
+
+ evaluator = CodeEvaluator(self.in_dir)
+ result = evaluator.evaluate(kwargs)
+
self.assertTrue(result.get("success"))
self.assertEqual(result.get("error"), "Correct answer\n")
class JavaStdioEvaluationTestCases(unittest.TestCase):
-
def setUp(self):
with open('/tmp/test.txt', 'wb') as f:
f.write('2'.encode('ascii'))
tmp_in_dir_path = tempfile.mkdtemp()
self.in_dir = tmp_in_dir_path
self.test_case_data = [{'expected_output': '11',
- 'expected_input': '5\n6',
- 'weight': 0.0
- }]
- evaluator.SERVER_TIMEOUT = 4
+ 'expected_input': '5\n6',
+ 'test_case_type': 'stdiobasedtestcase',
+ 'weight': 0.0
+ }]
+ code_evaluator.SERVER_TIMEOUT = 4
self.timeout_msg = ("Code took more than {0} seconds to run. "
"You probably have an infinite loop in"
- " your code.").format(evaluator.SERVER_TIMEOUT)
+ " your code.").format(code_evaluator.SERVER_TIMEOUT)
+ self.file_paths = None
def tearDown(self):
- evaluator.SERVER_TIMEOUT = 4
+ code_evaluator.SERVER_TIMEOUT = 4
os.remove('/tmp/test.txt')
shutil.rmtree(self.in_dir)
@@ -148,19 +220,32 @@ class JavaStdioEvaluationTestCases(unittest.TestCase):
int b = s.nextInt();
System.out.print(a+b);
}}""")
- get_class = JavaStdioEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
- 'partial_grading': True,
- 'test_case_data': self.test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ # get_class = JavaStdioEvaluator(self.in_dir)
+ # kwargs = {'user_answer': user_answer,
+ # 'partial_grading': True,
+ # 'test_case_data': self.test_case_data
+ # }
+ # result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'java'
+ },
+ 'test_case_data': self.test_case_data,
+ }
+
+ evaluator = CodeEvaluator(self.in_dir)
+ result = evaluator.evaluate(kwargs)
+
self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_array_input(self):
-
self.test_case_data = [{'expected_output': '561',
'expected_input': '5\n6\n1',
+ 'test_case_type': 'stdiobasedtestcase',
'weight': 0.0
}]
user_answer = dedent("""
@@ -173,17 +258,29 @@ class JavaStdioEvaluationTestCases(unittest.TestCase):
a[i] = s.nextInt();
System.out.print(a[i]);}
}}""")
- get_class = JavaStdioEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
- 'partial_grading': True,
- 'test_case_data': self.test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ # get_class = JavaStdioEvaluator(self.in_dir)
+ # kwargs = {'user_answer': user_answer,
+ # 'partial_grading': True,
+ # 'test_case_data': self.test_case_data
+ # }
+ # result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'java'
+ },
+ 'test_case_data': self.test_case_data,
+ }
+
+ evaluator = CodeEvaluator(self.in_dir)
+ result = evaluator.evaluate(kwargs)
+
self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_incorrect_answer(self):
-
user_answer = dedent("""
import java.util.Scanner;
class Test
@@ -193,30 +290,55 @@ class JavaStdioEvaluationTestCases(unittest.TestCase):
int b = s.nextInt();
System.out.print(a);
}}""")
- get_class = JavaStdioEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
- 'partial_grading': True,
- 'test_case_data': self.test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ # get_class = JavaStdioEvaluator(self.in_dir)
+ # kwargs = {'user_answer': user_answer,
+ # 'partial_grading': True,
+ # 'test_case_data': self.test_case_data
+ # }
+ # result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'java'
+ },
+ 'test_case_data': self.test_case_data,
+ }
+
+ evaluator = CodeEvaluator(self.in_dir)
+ result = evaluator.evaluate(kwargs)
+
lines_of_error = len(result.get('error').splitlines())
self.assertFalse(result.get('success'))
self.assertIn("Incorrect", result.get('error'))
self.assertTrue(lines_of_error > 1)
def test_error(self):
-
user_answer = dedent("""
class Test
{
System.out.print("a");
}""")
- get_class = JavaStdioEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
- 'partial_grading': True,
- 'test_case_data': self.test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ # get_class = JavaStdioEvaluator(self.in_dir)
+ # kwargs = {'user_answer': user_answer,
+ # 'partial_grading': True,
+ # 'test_case_data': self.test_case_data
+ # }
+ # result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'java'
+ },
+ 'test_case_data': self.test_case_data,
+ }
+
+ evaluator = CodeEvaluator(self.in_dir)
+ result = evaluator.evaluate(kwargs)
+
self.assertFalse(result.get("success"))
self.assertTrue("Compilation Error" in result.get("error"))
@@ -229,19 +351,33 @@ class JavaStdioEvaluationTestCases(unittest.TestCase):
{
System.out.print("a");}
}}""")
- get_class = JavaStdioEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
- 'partial_grading': True,
- 'test_case_data': self.test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ # get_class = JavaStdioEvaluator(self.in_dir)
+ # kwargs = {'user_answer': user_answer,
+ # 'partial_grading': True,
+ # 'test_case_data': self.test_case_data
+ # }
+ # result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'java'
+ },
+ 'test_case_data': self.test_case_data,
+ }
+
+ evaluator = CodeEvaluator(self.in_dir)
+ result = evaluator.evaluate(kwargs)
+
self.assertFalse(result.get("success"))
self.assertEqual(result.get("error"), self.timeout_msg)
def test_only_stdout(self):
self.test_case_data = [{'expected_output': '11',
- 'expected_input': '',
- 'weight': 0.0
+ 'expected_input': '',
+ 'test_case_type': 'stdiobasedtestcase',
+ 'weight': 0.0
}]
user_answer = dedent("""
class Test
@@ -250,19 +386,33 @@ class JavaStdioEvaluationTestCases(unittest.TestCase):
int b = 6;
System.out.print(a+b);
}}""")
- get_class = JavaStdioEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
- 'partial_grading': True,
- 'test_case_data': self.test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ # get_class = JavaStdioEvaluator(self.in_dir)
+ # kwargs = {'user_answer': user_answer,
+ # 'partial_grading': True,
+ # 'test_case_data': self.test_case_data
+ # }
+ # result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'java'
+ },
+ 'test_case_data': self.test_case_data,
+ }
+
+ evaluator = CodeEvaluator(self.in_dir)
+ result = evaluator.evaluate(kwargs)
+
self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_string_input(self):
self.test_case_data = [{'expected_output': 'HelloWorld',
- 'expected_input': 'Hello\nWorld',
- 'weight': 0.0
+ 'expected_input': 'Hello\nWorld',
+ 'test_case_type': 'stdiobasedtestcase',
+ 'weight': 0.0
}]
user_answer = dedent("""
import java.util.Scanner;
@@ -273,20 +423,34 @@ class JavaStdioEvaluationTestCases(unittest.TestCase):
String b = s.nextLine();
System.out.print(a+b);
}}""")
- get_class = JavaStdioEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
- 'partial_grading': True,
- 'test_case_data': self.test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ # get_class = JavaStdioEvaluator(self.in_dir)
+ # kwargs = {'user_answer': user_answer,
+ # 'partial_grading': True,
+ # 'test_case_data': self.test_case_data
+ # }
+ # result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'java'
+ },
+ 'test_case_data': self.test_case_data,
+ }
+
+ evaluator = CodeEvaluator(self.in_dir)
+ result = evaluator.evaluate(kwargs)
+
self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_file_based_stdout(self):
self.file_paths = [("/tmp/test.txt", False)]
self.test_case_data = [{'expected_output': '2',
- 'expected_input': '',
- 'weight': 0.0
+ 'expected_input': '',
+ 'test_case_type': 'stdiobasedtestcase',
+ 'weight': 0.0
}]
user_answer = dedent("""
import java.io.BufferedReader;
@@ -306,13 +470,26 @@ class JavaStdioEvaluationTestCases(unittest.TestCase):
br.close();
}}}
""")
- get_class = JavaStdioEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
- 'partial_grading': True,
+ # get_class = JavaStdioEvaluator(self.in_dir)
+ # kwargs = {'user_answer': user_answer,
+ # 'partial_grading': True,
+ # 'test_case_data': self.test_case_data,
+ # 'file_paths': self.file_paths
+ # }
+ # result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'java'
+ },
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
- }
- result = get_class.evaluate(**kwargs)
+ }
+
+ evaluator = CodeEvaluator(self.in_dir)
+ result = evaluator.evaluate(kwargs)
+
self.assertTrue(result.get("success"))
self.assertEqual(result.get("error"), "Correct answer\n")
diff --git a/yaksh/evaluator_tests/test_python_evaluation.py b/yaksh/evaluator_tests/test_python_evaluation.py
index 4bf0032..fb762f9 100644
--- a/yaksh/evaluator_tests/test_python_evaluation.py
+++ b/yaksh/evaluator_tests/test_python_evaluation.py
@@ -25,8 +25,6 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
"You probably have an infinite loop in"
" your code.").format(SERVER_TIMEOUT)
self.file_paths = None
- self.language = 'python'
- self.test_case_type = 'standardtestcase'
def tearDown(self):
os.remove('/tmp/test.txt')
diff --git a/yaksh/evaluator_tests/test_scilab_evaluation.py b/yaksh/evaluator_tests/test_scilab_evaluation.py
index c30f652..de7368f 100644
--- a/yaksh/evaluator_tests/test_scilab_evaluation.py
+++ b/yaksh/evaluator_tests/test_scilab_evaluation.py
@@ -4,7 +4,8 @@ import os
import shutil
import tempfile
-from yaksh import code_evaluator as evaluator
+from yaksh import code_evaluator
+from yaksh.code_evaluator import CodeEvaluator
from yaksh.scilab_code_evaluator import ScilabCodeEvaluator
from yaksh.settings import SERVER_TIMEOUT
@@ -12,40 +13,69 @@ class ScilabEvaluationTestCases(unittest.TestCase):
def setUp(self):
tmp_in_dir_path = tempfile.mkdtemp()
self.test_case_data = [{"test_case": "scilab_files/test_add.sce",
+ "test_case_type": "standardtestcase",
"weight": 0.0
}]
self.in_dir = tmp_in_dir_path
self.timeout_msg = ("Code took more than {0} seconds to run. "
"You probably have an infinite loop"
" in your code.").format(SERVER_TIMEOUT)
+ code_evaluator.SERVER_TIMEOUT = 9
self.file_paths = None
def tearDown(self):
+ code_evaluator.SERVER_TIMEOUT = 4
shutil.rmtree(self.in_dir)
def test_correct_answer(self):
user_answer = ("funcprot(0)\nfunction[c]=add(a,b)"
"\n\tc=a+b;\nendfunction")
- get_class = ScilabCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
- 'partial_grading': True,
+ # get_class = ScilabCodeEvaluator(self.in_dir)
+ # kwargs = {'user_answer': user_answer,
+ # 'partial_grading': True,
+ # 'test_case_data': self.test_case_data,
+ # 'file_paths': self.file_paths
+ # }
+ # result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'scilab'
+ },
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
- }
- result = get_class.evaluate(**kwargs)
+ }
+
+ evaluator = CodeEvaluator(self.in_dir)
+ result = evaluator.evaluate(kwargs)
+
self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_error(self):
user_answer = ("funcprot(0)\nfunction[c]=add(a,b)"
"\n\tc=a+b;\ndis(\tendfunction")
- get_class = ScilabCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
- 'partial_grading': True,
+ # get_class = ScilabCodeEvaluator(self.in_dir)
+ # kwargs = {'user_answer': user_answer,
+ # 'partial_grading': True,
+ # 'test_case_data': self.test_case_data,
+ # 'file_paths': self.file_paths
+ # }
+ # result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'scilab'
+ },
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
- }
- result = get_class.evaluate(**kwargs)
+ }
+
+ evaluator = CodeEvaluator(self.in_dir)
+ result = evaluator.evaluate(kwargs)
+
self.assertFalse(result.get("success"))
self.assertTrue('error' in result.get("error"))
@@ -53,28 +83,55 @@ class ScilabEvaluationTestCases(unittest.TestCase):
def test_incorrect_answer(self):
user_answer = ("funcprot(0)\nfunction[c]=add(a,b)"
"\n\tc=a-b;\nendfunction")
- get_class = ScilabCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
- 'partial_grading': True,
+ # get_class = ScilabCodeEvaluator(self.in_dir)
+ # kwargs = {'user_answer': user_answer,
+ # 'partial_grading': True,
+ # 'test_case_data': self.test_case_data,
+ # 'file_paths': self.file_paths
+ # }
+ # result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'scilab'
+ },
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
- }
- result = get_class.evaluate(**kwargs)
+ }
+
+ evaluator = CodeEvaluator(self.in_dir)
+ result = evaluator.evaluate(kwargs)
+
lines_of_error = len(result.get('error').splitlines())
self.assertFalse(result.get('success'))
self.assertIn("Message", result.get('error'))
self.assertTrue(lines_of_error > 1)
def test_infinite_loop(self):
+ code_evaluator.SERVER_TIMEOUT = 4
user_answer = ("funcprot(0)\nfunction[c]=add(a,b)"
"\n\tc=a;\nwhile(1==1)\nend\nendfunction")
- get_class = ScilabCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
- 'partial_grading': True,
+ # get_class = ScilabCodeEvaluator(self.in_dir)
+ # kwargs = {'user_answer': user_answer,
+ # 'partial_grading': True,
+ # 'test_case_data': self.test_case_data,
+ # 'file_paths': self.file_paths
+ # }
+ # result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'scilab'
+ },
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
- }
- result = get_class.evaluate(**kwargs)
+ }
+
+ evaluator = CodeEvaluator(self.in_dir)
+ result = evaluator.evaluate(kwargs)
+
self.assertFalse(result.get("success"))
self.assertEqual(result.get("error"), self.timeout_msg)
diff --git a/yaksh/java_code_evaluator.py b/yaksh/java_code_evaluator.py
index d87e6e3..f2ca53b 100644
--- a/yaksh/java_code_evaluator.py
+++ b/yaksh/java_code_evaluator.py
@@ -8,21 +8,38 @@ import subprocess
import importlib
# Local imports
-from .code_evaluator import CodeEvaluator
+from .base_evaluator import BaseEvaluator
from .file_utils import copy_files, delete_files
-class JavaCodeEvaluator(CodeEvaluator):
+class JavaCodeEvaluator(BaseEvaluator):
"""Tests the Java code obtained from Code Server"""
- def setup(self):
- super(JavaCodeEvaluator, self).setup()
+ def __init__(self, metadata, test_case_data):
self.files = []
- self.submit_code_path = self.create_submit_code_file('Test.java')
+ # self.submit_code_path = self.create_submit_code_file('Test.java')
self.compiled_user_answer = None
self.compiled_test_code = None
self.user_output_path = ""
self.ref_output_path = ""
+ # Set metadata values
+ self.user_answer = metadata.get('user_answer')
+ self.file_paths = metadata.get('file_paths')
+ self.partial_grading = metadata.get('partial_grading')
+
+ # Set test case data values
+ self.test_case = test_case_data.get('test_case')
+ self.weight = test_case_data.get('weight')
+
+ # def setup(self):
+ # super(JavaCodeEvaluator, self).setup()
+ # self.files = []
+ # self.submit_code_path = self.create_submit_code_file('Test.java')
+ # self.compiled_user_answer = None
+ # self.compiled_test_code = None
+ # self.user_output_path = ""
+ # self.ref_output_path = ""
+
def teardown(self):
# Delete the created file.
os.remove(self.submit_code_path)
@@ -32,8 +49,6 @@ class JavaCodeEvaluator(CodeEvaluator):
os.remove(self.ref_output_path)
if self.files:
delete_files(self.files)
- super(JavaCodeEvaluator, self).teardown()
-
def get_commands(self, clean_ref_code_path, user_code_directory):
compile_command = 'javac {0}'.format(self.submit_code_path),
@@ -47,15 +62,16 @@ class JavaCodeEvaluator(CodeEvaluator):
output_path = "{0}{1}.class".format(directory, file_name)
return output_path
- def compile_code(self, user_answer, file_paths, test_case, weight):
+ def compile_code(self): # , user_answer, file_paths, test_case, weight
if self.compiled_user_answer and self.compiled_test_code:
return None
else:
- ref_code_path = test_case
+ self.submit_code_path = self.create_submit_code_file('Test.java')
+ ref_code_path = self.test_case
clean_ref_code_path, clean_test_case_path = \
self._set_test_code_file_path(ref_code_path)
- if file_paths:
- self.files = copy_files(file_paths)
+ if self.file_paths:
+ self.files = copy_files(self.file_paths)
if not isfile(clean_ref_code_path):
msg = "No file at %s or Incorrect path" % clean_ref_code_path
return False, msg
@@ -65,7 +81,7 @@ class JavaCodeEvaluator(CodeEvaluator):
user_code_directory = os.getcwd() + '/'
self.write_to_submit_code_file(self.submit_code_path,
- user_answer
+ self.user_answer
)
ref_file_name = (clean_ref_code_path.split('/')[-1]).split('.')[0]
self.user_output_path = self.set_file_paths(user_code_directory,
@@ -82,6 +98,7 @@ class JavaCodeEvaluator(CodeEvaluator):
user_code_directory,
ref_file_name
)
+
self.compiled_user_answer = self._run_command(compile_command,
shell=True,
stdout=subprocess.PIPE,
@@ -96,7 +113,7 @@ class JavaCodeEvaluator(CodeEvaluator):
return self.compiled_user_answer, self.compiled_test_code
- def check_code(self, user_answer, file_paths, partial_grading, test_case, weight):
+ def check_code(self): # user_answer, file_paths, partial_grading, test_case, weight
""" Function validates student code using instructor code as
reference.The first argument ref_code_path, is the path to
instructor code, it is assumed to have executable permission.
@@ -136,7 +153,7 @@ class JavaCodeEvaluator(CodeEvaluator):
proc, stdout, stderr = ret
if proc.returncode == 0:
success, err = True, "Correct answer"
- test_case_weight = float(weight) if partial_grading else 0.0
+ test_case_weight = float(self.weight) if self.partial_grading else 0.0
else:
err = stdout + "\n" + stderr
else:
@@ -161,4 +178,5 @@ class JavaCodeEvaluator(CodeEvaluator):
err = err + "\n" + e
except:
err = err + "\n" + stdnt_stderr
+
return success, err, test_case_weight
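
The shape of the refactor is the same in every evaluator in this patch: per-call arguments move into __init__(metadata, test_case_data), and compile_code/check_code/teardown become no-argument methods that read self.*. The driving loop lives in CodeEvaluator.evaluate; a sketch of the per-test-case contract it implies (the loop itself is assumed, not quoted from the source):

    from yaksh.java_code_evaluator import JavaCodeEvaluator

    metadata = {'user_answer': '...', 'file_paths': None,
                'partial_grading': False, 'language': 'java'}
    test_case_data = [{'test_case': 'java_files/main_square.java',
                       'test_case_type': 'standardtestcase', 'weight': 0.0}]

    results = []
    for test_case in test_case_data:              # one evaluator instance per case
        inst = JavaCodeEvaluator(metadata, test_case)
        inst.compile_code()                       # no-arg: reads self.user_answer etc.
        success, err, weight = inst.check_code()
        inst.teardown()                           # removes generated files
        results.append((success, err, weight))
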
diff --git a/yaksh/java_stdio_evaluator.py b/yaksh/java_stdio_evaluator.py
index 88d4c88..78bc20e 100644
--- a/yaksh/java_stdio_evaluator.py
+++ b/yaksh/java_stdio_evaluator.py
@@ -11,17 +11,28 @@ from .file_utils import copy_files, delete_files
class JavaStdioEvaluator(StdIOEvaluator):
"""Evaluates Java StdIO based code"""
-
- def setup(self):
- super(JavaStdioEvaluator, self).setup()
+ def __init__(self, metadata, test_case_data):
self.files = []
- self.submit_code_path = self.create_submit_code_file('Test.java')
+
+ # Set metadata values
+ self.user_answer = metadata.get('user_answer')
+ self.file_paths = metadata.get('file_paths')
+ self.partial_grading = metadata.get('partial_grading')
+
+ # Set test case data values
+ self.expected_input = test_case_data.get('expected_input')
+ self.expected_output = test_case_data.get('expected_output')
+ self.weight = test_case_data.get('weight')
+
+ # def setup(self):
+ # super(JavaStdioEvaluator, self).setup()
+ # self.files = []
+ # self.submit_code_path = self.create_submit_code_file('Test.java')
def teardown(self):
os.remove(self.submit_code_path)
if self.files:
delete_files(self.files)
- super(JavaStdioEvaluator, self).teardown()
def set_file_paths(self, directory, file_name):
output_path = "{0}{1}.class".format(directory, file_name)
@@ -31,14 +42,15 @@ class JavaStdioEvaluator(StdIOEvaluator):
compile_command = 'javac {0}'.format(self.submit_code_path)
return compile_command
- def compile_code(self, user_answer, file_paths, expected_input, expected_output, weight):
+ def compile_code(self):
+ self.submit_code_path = self.create_submit_code_file('Test.java')
if not isfile(self.submit_code_path):
msg = "No file at %s or Incorrect path" % self.submit_code_path
return False, msg
- if file_paths:
- self.files = copy_files(file_paths)
+ if self.file_paths:
+ self.files = copy_files(self.file_paths)
user_code_directory = os.getcwd() + '/'
- self.write_to_submit_code_file(self.submit_code_path, user_answer)
+ self.write_to_submit_code_file(self.submit_code_path, self.user_answer)
self.user_output_path = self.set_file_paths(user_code_directory,
'Test'
)
@@ -50,8 +62,7 @@ class JavaStdioEvaluator(StdIOEvaluator):
)
return self.compiled_user_answer
- def check_code(self, user_answer, file_paths, partial_grading,
- expected_input, expected_output, weight):
+ def check_code(self):
success = False
test_case_weight = 0.0
proc, stdnt_out, stdnt_stderr = self.compiled_user_answer
@@ -63,9 +74,9 @@ class JavaStdioEvaluator(StdIOEvaluator):
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
- success, err = self.evaluate_stdio(user_answer, proc,
- expected_input,
- expected_output
+ success, err = self.evaluate_stdio(self.user_answer, proc,
+ self.expected_input,
+ self.expected_output
)
os.remove(self.user_output_path)
else:
@@ -79,5 +90,5 @@ class JavaStdioEvaluator(StdIOEvaluator):
err = err + "\n" + e
except:
err = err + "\n" + stdnt_stderr
- test_case_weight = float(weight) if partial_grading and success else 0.0
+ test_case_weight = float(self.weight) if self.partial_grading and success else 0.0
return success, err, test_case_weight
diff --git a/yaksh/old_code_server.py b/yaksh/old_code_server.py
deleted file mode 100755
index b3c9c30..0000000
--- a/yaksh/old_code_server.py
+++ /dev/null
@@ -1,221 +0,0 @@
-#!/usr/bin/env python
-
-"""This server runs an HTTP server (using tornado) and several code servers
-using XMLRPC that can be submitted code
-and tests and returns the output. It *should* be run as root and will run as
-the user 'nobody' so as to minimize any damange by errant code. This can be
-configured by editing settings.py to run as many servers as desired. One can
-also specify the ports on the command line. Here are examples::
-
- $ sudo ./code_server.py
- # Runs servers based on settings.py:SERVER_PORTS one server per port given.
-
-or::
-
- $ sudo ./code_server.py 8001 8002 8003 8004 8005
- # Runs 5 servers on ports specified.
-
-All these servers should be running as nobody. This will also start a server
-pool that defaults to port 50000 and is configurable in
-settings.py:SERVER_POOL_PORT. This port exposes a `get_server_port` function
-that returns an available server port.
-
-"""
-
-# Standard library imports
-from __future__ import unicode_literals
-import json
-from multiprocessing import Process, Queue
-import os
-from os.path import isdir, dirname, abspath, join, isfile
-import pwd
-import re
-import signal
-import stat
-import subprocess
-import sys
-
-try:
- from SimpleXMLRPCServer import SimpleXMLRPCServer
-except ImportError:
- # The above import will not work on Python-3.x.
- from xmlrpc.server import SimpleXMLRPCServer
-
-try:
- from urllib import unquote
-except ImportError:
- # The above import will not work on Python-3.x.
- from urllib.parse import unquote
-
-# Library imports
-from tornado.ioloop import IOLoop
-from tornado.web import Application, RequestHandler
-
-# Local imports
-from .settings import SERVER_PORTS, SERVER_POOL_PORT
-from .language_registry import create_evaluator_instance, unpack_json
-
-
-MY_DIR = abspath(dirname(__file__))
-
-
-# Private Protocol ##########
-def run_as_nobody():
- """Runs the current process as nobody."""
- # Set the effective uid and to that of nobody.
- nobody = pwd.getpwnam('nobody')
- os.setegid(nobody.pw_gid)
- os.seteuid(nobody.pw_uid)
-
-
-###############################################################################
-# `CodeServer` class.
-###############################################################################
-class CodeServer(object):
- """A code server that executes user submitted test code, tests it and
- reports if the code was correct or not.
- """
- def __init__(self, port, queue):
- self.port = port
- self.queue = queue
-
- # Public Protocol ##########
- def check_code(self, language, test_case_type, json_data, in_dir=None):
- """Calls relevant EvaluateCode class based on language to check the
- answer code
- """
- code_evaluator = create_evaluator_instance(language,
- test_case_type,
- json_data,
- in_dir
- )
- data = unpack_json(json_data)
- result = code_evaluator.evaluate(**data)
-
- # Put us back into the server pool queue since we are free now.
- self.queue.put(self.port)
-
- return json.dumps(result)
-
- def run(self):
- """Run XMLRPC server, serving our methods."""
- server = SimpleXMLRPCServer(("0.0.0.0", self.port))
- self.server = server
- server.register_instance(self)
- self.queue.put(self.port)
- server.serve_forever()
-
-
-###############################################################################
-# `ServerPool` class.
-###############################################################################
-class ServerPool(object):
- """Manages a pool of CodeServer objects."""
- def __init__(self, ports, pool_port=50000):
- """Create a pool of servers. Uses a shared Queue to get available
- servers.
-
- Parameters
- ----------
-
- ports : list(int)
- List of ports at which the CodeServers should run.
-
- pool_port : int
- Port at which the server pool should serve.
- """
- self.my_port = pool_port
- self.ports = ports
- queue = Queue(maxsize=len(self.ports))
- self.queue = queue
- servers = []
- processes = []
- for port in self.ports:
- server = CodeServer(port, queue)
- servers.append(server)
- p = Process(target=server.run)
- processes.append(p)
- self.servers = servers
- self.processes = processes
- self.app = self._make_app()
-
- def _make_app(self):
- app = Application([
- (r"/.*", MainHandler, dict(server=self)),
- ])
- app.listen(self.my_port)
- return app
-
- def _start_code_servers(self):
- for proc in self.processes:
- if proc.pid is None:
- proc.start()
-
- # Public Protocol ##########
-
- def get_server_port(self):
- """Get available server port from ones in the pool. This will block
- till it gets an available server.
- """
- return self.queue.get()
-
- def get_status(self):
- """Returns current queue size and total number of ports used."""
- try:
- qs = self.queue.qsize()
- except NotImplementedError:
- # May not work on OS X so we return a dummy.
- qs = len(self.ports)
-
- return qs, len(self.ports)
-
- def run(self):
- """Run server which returns an available server port where code
- can be executed.
- """
- # We start the code servers here to ensure they are run as nobody.
- self._start_code_servers()
- IOLoop.current().start()
-
- def stop(self):
- """Stop all the code server processes.
- """
- for proc in self.processes:
- proc.terminate()
- IOLoop.current().stop()
-
-
-class MainHandler(RequestHandler):
- def initialize(self, server):
- self.server = server
-
- def get(self):
- path = self.request.path[1:]
- if len(path) == 0:
- port = self.server.get_server_port()
- self.write(str(port))
- elif path == "status":
- q_size, total = self.server.get_status()
- result = "%d servers out of %d are free.\n"%(q_size, total)
- load = float(total - q_size)/total*100
- result += "Load: %s%%\n"%load
- self.write(result)
-
-
-###############################################################################
-def main(args=None):
- if args:
- ports = [int(x) for x in args]
- else:
- ports = SERVER_PORTS
-
- server_pool = ServerPool(ports=ports, pool_port=SERVER_POOL_PORT)
- # This is done *after* the server pool is created because when the tornado
- # app calls listen(), it cannot be nobody.
- run_as_nobody()
-
- server_pool.run()
-
-if __name__ == '__main__':
- args = sys.argv[1:]
- main(args)
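
The deleted old_code_server.py documents its own protocol well enough to reconstruct a client: an HTTP GET on the pool port returns a free XMLRPC port, and check_code(language, test_case_type, json_data, in_dir) on that port returns a JSON result. A sketch of that retired flow; localhost and the payload values are assumptions:

    import json
    try:                                          # py3 / py2 imports, mirroring
        from xmlrpc.client import ServerProxy     # the server's own fallbacks
        from urllib.request import urlopen
    except ImportError:
        from xmlrpclib import ServerProxy
        from urllib2 import urlopen

    json_data = json.dumps({'user_answer': '...',
                            'test_case_data': []})    # old-style payload (illustrative)
    port = int(urlopen('http://localhost:50000/').read())  # blocks until a server is free
    server = ServerProxy('http://localhost:%d' % port)
    result = json.loads(server.check_code('python', 'standardtestcase',
                                          json_data, None))
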
diff --git a/yaksh/python_assertion_evaluator.py b/yaksh/python_assertion_evaluator.py
index eb13f53..003e001 100644
--- a/yaksh/python_assertion_evaluator.py
+++ b/yaksh/python_assertion_evaluator.py
@@ -39,19 +39,10 @@ class PythonAssertionEvaluator(BaseEvaluator):
# self.files = []
- # def teardown(self):
- # # Delete the created file.
- # if self.files:
- # delete_files(self.files)
- # super(PythonAssertionEvaluator, self).teardown()
-
- # def teardown(self):
- # # Delete the created file.
- # if self.files:
- # delete_files(self.files)
- # # Cancel the signal
- # delete_signal_handler()
- # self._change_dir(dirname(MY_DIR))
+ def teardown(self):
+ # Delete the created file.
+ if self.files:
+ delete_files(self.files)
def compile_code(self):
if self.file_paths:
diff --git a/yaksh/python_stdio_evaluator.py b/yaksh/python_stdio_evaluator.py
index 7ef3a7c..991fd63 100644
--- a/yaksh/python_stdio_evaluator.py
+++ b/yaksh/python_stdio_evaluator.py
@@ -35,11 +35,6 @@ class PythonStdioEvaluator(BaseEvaluator):
# super(PythonStdioEvaluator, self).setup()
# self.files = []
- # def teardown(self):
- # # Delete the created file.
- # if self.files:
- # delete_files(self.files)
- # super(PythonStdioEvaluator, self).teardown()
def __init__(self, metadata, test_case_data):
self.files = []
@@ -53,6 +48,10 @@ class PythonStdioEvaluator(BaseEvaluator):
self.expected_output = test_case_data.get('expected_output')
self.weight = test_case_data.get('weight')
+ def teardown(self):
+ # Delete the created file.
+ if self.files:
+ delete_files(self.files)
def compile_code(self): # user_answer, file_paths, expected_input, expected_output, weight):
if self.file_paths:
diff --git a/yaksh/scilab_code_evaluator.py b/yaksh/scilab_code_evaluator.py
index 3c2d44c..97e40a8 100644
--- a/yaksh/scilab_code_evaluator.py
+++ b/yaksh/scilab_code_evaluator.py
@@ -8,38 +8,49 @@ import re
import importlib
# Local imports
-from .code_evaluator import CodeEvaluator
+from .base_evaluator import BaseEvaluator
from .file_utils import copy_files, delete_files
-class ScilabCodeEvaluator(CodeEvaluator):
+class ScilabCodeEvaluator(BaseEvaluator):
"""Tests the Scilab code obtained from Code Server"""
- def setup(self):
- super(ScilabCodeEvaluator, self).setup()
+ def __init__(self, metadata, test_case_data):
self.files = []
- self.submit_code_path = \
- self.create_submit_code_file('function.sci')
+
+ # Set metadata values
+ self.user_answer = metadata.get('user_answer')
+ self.file_paths = metadata.get('file_paths')
+ self.partial_grading = metadata.get('partial_grading')
+
+ # Set test case data values
+ self.test_case = test_case_data.get('test_case')
+ self.weight = test_case_data.get('weight')
+
+ # def setup(self):
+ # super(ScilabCodeEvaluator, self).setup()
+ # self.files = []
+ # self.submit_code_path = \
+ # self.create_submit_code_file('function.sci')
def teardown(self):
# Delete the created file.
os.remove(self.submit_code_path)
if self.files:
delete_files(self.files)
- super(ScilabCodeEvaluator, self).teardown()
- def check_code(self, user_answer, file_paths, partial_grading, test_case, weight):
- if file_paths:
- self.files = copy_files(file_paths)
- ref_code_path = test_case
+ def check_code(self):
+ self.submit_code_path = self.create_submit_code_file('function.sci')
+ if self.file_paths:
+ self.files = copy_files(self.file_paths)
+ ref_code_path = self.test_case
clean_ref_path, clean_test_case_path = \
self._set_test_code_file_path(ref_code_path)
- user_answer, terminate_commands = \
- self._remove_scilab_exit(user_answer.lstrip())
+ self.user_answer, terminate_commands = \
+ self._remove_scilab_exit(self.user_answer.lstrip())
success = False
test_case_weight = 0.0
-
- self.write_to_submit_code_file(self.submit_code_path, user_answer)
+ self.write_to_submit_code_file(self.submit_code_path, self.user_answer)
# Throw message if there are commands that terminate scilab
add_err = ""
if terminate_commands:
@@ -65,11 +76,12 @@ class ScilabCodeEvaluator(CodeEvaluator):
stdout = self._strip_output(stdout)
if proc.returncode == 5:
success, err = True, "Correct answer"
- test_case_weight = float(weight) if partial_grading else 0.0
+ test_case_weight = float(self.weight) if self.partial_grading else 0.0
else:
err = add_err + stdout
else:
err = add_err + stderr
+
return success, err, test_case_weight
def _remove_scilab_exit(self, string):
diff --git a/yaksh/settings.py b/yaksh/settings.py
index 690ddb1..e18d310 100644
--- a/yaksh/settings.py
+++ b/yaksh/settings.py
@@ -42,5 +42,17 @@ URL_ROOT = ''
code_evaluators = {
"python": {"standardtestcase": "yaksh.python_assertion_evaluator.PythonAssertionEvaluator",
"stdiobasedtestcase": "yaksh.python_stdio_evaluator.PythonStdioEvaluator"
- }
+ },
+ "c": {"standardtestcase": "yaksh.cpp_code_evaluator.CppCodeEvaluator",
+ "stdiobasedtestcase": "yaksh.cpp_stdio_evaluator.CppStdioEvaluator"
+ },
+ "cpp": {"standardtestcase": "yaksh.cpp_code_evaluator.CppCodeEvaluator",
+ "stdiobasedtestcase": "yaksh.cpp_stdio_evaluator.CppStdioEvaluator"
+ },
+ "java": {"standardtestcase": "yaksh.java_code_evaluator.JavaCodeEvaluator",
+ "stdiobasedtestcase": "yaksh.java_stdio_evaluator.JavaStdioEvaluator"},
+ "bash": {"standardtestcase": "yaksh.bash_code_evaluator.BashCodeEvaluator",
+ "stdiobasedtestcase": "yaksh.bash_stdio_evaluator.BashStdioEvaluator"
+ },
+ "scilab": {"standardtestcase": "yaksh.scilab_code_evaluator.ScilabCodeEvaluator"},
}
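
With these additions, settings.code_evaluators is keyed first by language, then by test_case_type, which is why every migrated test case above now carries a test_case_type field. Lookup is a plain two-level dict access; note that 'scilab' registers only 'standardtestcase':

    from yaksh.settings import code_evaluators

    path = code_evaluators['cpp']['stdiobasedtestcase']
    # -> 'yaksh.cpp_stdio_evaluator.CppStdioEvaluator'

    code_evaluators['scilab'].get('stdiobasedtestcase')   # -> None: not registered
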
diff --git a/yaksh/stdio_evaluator.py b/yaksh/stdio_evaluator.py
index 7530b96..106facd 100644
--- a/yaksh/stdio_evaluator.py
+++ b/yaksh/stdio_evaluator.py
@@ -1,18 +1,10 @@
from __future__ import unicode_literals
# Local imports
-from .code_evaluator import CodeEvaluator
+from .base_evaluator import BaseEvaluator
-class StdIOEvaluator(CodeEvaluator):
- def setup(self):
- super(StdIOEvaluator, self).setup()
- pass
-
- def teardown(self):
- super(StdIOEvaluator, self).teardown()
- pass
-
+class StdIOEvaluator(BaseEvaluator):
def evaluate_stdio(self, user_answer, proc, expected_input, expected_output):
success = False
ip = expected_input.replace(",", " ")