author     Prabhu Ramachandran    2016-12-20 16:53:17 +0530
committer  GitHub                 2016-12-20 16:53:17 +0530
commit     77e8a6c1cde9190daf9075d71caf6017dc1380e7 (patch)
tree       c0d4a002bba428269c2f7ba62eb68d24b8cbec5f /yaksh
parent     1400eeb1d5af1cd1d69e015a19a319ab35d357c4 (diff)
parent     bf5b4e7607bae0b81ceeb99e8bf5d750433e92e8 (diff)
download   online_test-77e8a6c1cde9190daf9075d71caf6017dc1380e7.tar.gz
           online_test-77e8a6c1cde9190daf9075d71caf6017dc1380e7.tar.bz2
           online_test-77e8a6c1cde9190daf9075d71caf6017dc1380e7.zip
Merge pull request #163 from ankitjavalkar/code-server-refactor2016-form
Code Evaluator refactoring
Diffstat (limited to 'yaksh')
-rw-r--r--  yaksh/admin.py | 4
-rw-r--r--  yaksh/base_evaluator.py | 77
-rw-r--r--  yaksh/bash_code_evaluator.py | 54
-rw-r--r--  yaksh/bash_stdio_evaluator.py | 46
-rw-r--r-- [-rwxr-xr-x]  yaksh/code_server.py | 13
-rw-r--r--  yaksh/cpp_code_evaluator.py | 37
-rw-r--r--  yaksh/cpp_stdio_evaluator.py | 40
-rw-r--r--  yaksh/evaluator_tests/test_bash_evaluation.py | 163
-rw-r--r--  yaksh/evaluator_tests/test_c_cpp_evaluation.py | 356
-rw-r--r--  yaksh/evaluator_tests/test_code_evaluation.py | 12
-rw-r--r--  yaksh/evaluator_tests/test_java_evaluation.py | 298
-rw-r--r--  yaksh/evaluator_tests/test_python_evaluation.py | 378
-rw-r--r--  yaksh/evaluator_tests/test_scilab_evaluation.py | 83
-rw-r--r--  yaksh/forms.py | 4
-rw-r--r--  yaksh/grader.py (renamed from yaksh/code_evaluator.py) | 125
-rw-r--r--  yaksh/java_code_evaluator.py | 40
-rw-r--r--  yaksh/java_stdio_evaluator.py | 42
-rw-r--r--  yaksh/language_registry.py | 12
-rw-r--r--  yaksh/models.py | 65
-rw-r--r--  yaksh/python_assertion_evaluator.py | 49
-rw-r--r--  yaksh/python_stdio_evaluator.py | 47
-rw-r--r--  yaksh/scilab_code_evaluator.py | 40
-rw-r--r--  yaksh/settings.py | 12
-rw-r--r--  yaksh/stdio_evaluator.py | 12
-rw-r--r--  yaksh/test_models.py | 49
-rw-r--r--  yaksh/test_views.py | 2
-rw-r--r--  yaksh/tests/test_code_server.py | 48
-rw-r--r--  yaksh/views.py | 2
28 files changed, 1320 insertions, 790 deletions
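
The refactoring below replaces the per-evaluator evaluate(**kwargs) entry point with a single Grader class that takes one dict of 'metadata' and 'test_case_data', as the updated evaluator tests show. A minimal sketch of the new calling convention, assuming only the payload keys that appear in those tests; the answer and test case values here are illustrative:

# Sketch only: invoking the refactored grading pipeline the way the
# updated evaluator tests in this diff do; values are illustrative.
from yaksh.grader import Grader

payload = {
    'metadata': {
        'user_answer': 'def add(a, b):\n    return a + b',
        'file_paths': None,
        'partial_grading': False,
        'language': 'python',
    },
    'test_case_data': [
        {'test_case_type': 'standardtestcase',
         'test_case': 'assert(add(1, 2) == 3)',
         'weight': 0.0},
    ],
}

grader = Grader('/tmp/grader_in_dir')   # in_dir: working directory, as in the tests
result = grader.evaluate(payload)       # dict with at least 'success' and 'error'
print(result.get('success'), result.get('error'))
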
diff --git a/yaksh/admin.py b/yaksh/admin.py
index 58af7b2..c525ba3 100644
--- a/yaksh/admin.py
+++ b/yaksh/admin.py
@@ -1,11 +1,11 @@
from yaksh.models import Question, Quiz, QuestionPaper
-from yaksh.models import TestCase, StandardTestCase, StdioBasedTestCase, Course, AnswerPaper
+from yaksh.models import TestCase, StandardTestCase, StdIOBasedTestCase, Course, AnswerPaper
from django.contrib import admin
admin.site.register(Question)
admin.site.register(TestCase)
admin.site.register(StandardTestCase)
-admin.site.register(StdioBasedTestCase)
+admin.site.register(StdIOBasedTestCase)
admin.site.register(Course)
admin.site.register(Quiz)
admin.site.register(QuestionPaper)
diff --git a/yaksh/base_evaluator.py b/yaksh/base_evaluator.py
new file mode 100644
index 0000000..ce1647f
--- /dev/null
+++ b/yaksh/base_evaluator.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+from __future__ import unicode_literals
+import traceback
+import pwd
+import os
+from os.path import join, isfile
+from os.path import isdir, dirname, abspath, join, isfile, exists
+import subprocess
+import stat
+
+
+# Local imports
+from .grader import MY_DIR, TimeoutException
+
+class BaseEvaluator(object):
+ """Base Evaluator class containing generic attributes and callable methods"""
+
+ def __init__(self):
+ pass
+
+ def check_code(self):
+ raise NotImplementedError("check_code method not implemented")
+
+ def compile_code(self):
+ pass
+
+ def _run_command(self, cmd_args, *args, **kw):
+ """Run a command in a subprocess while blocking, the process is killed
+ if it takes more than 2 seconds to run. Return the Popen object, the
+ stdout and stderr.
+ """
+ try:
+ proc = subprocess.Popen(cmd_args, *args, **kw)
+ stdout, stderr = proc.communicate()
+ except TimeoutException:
+ # Runaway code, so kill it.
+ proc.kill()
+ # Re-raise exception.
+ raise
+ return proc, stdout.decode('utf-8'), stderr.decode('utf-8')
+
+ def _remove_null_substitute_char(self, string):
+ """Returns a string without any null and substitute characters"""
+ stripped = ""
+ for c in string:
+ if ord(c) != 26 and ord(c) != 0:
+ stripped = stripped + c
+ return ''.join(stripped)
+
+ def create_submit_code_file(self, file_name):
+ """ Set the file path for code (`answer`)"""
+ submit_path = abspath(file_name)
+ if not exists(submit_path):
+ submit_f = open(submit_path, 'w')
+ submit_f.close()
+
+ return submit_path
+
+ def write_to_submit_code_file(self, file_path, user_answer):
+ """ Write the code (`answer`) to a file"""
+ submit_f = open(file_path, 'w')
+ submit_f.write(user_answer.lstrip())
+ submit_f.close()
+
+ def _set_test_code_file_path(self, ref_path=None, test_case_path=None):
+ if ref_path and not ref_path.startswith('/'):
+ ref_path = join(MY_DIR, ref_path)
+
+ if test_case_path and not test_case_path.startswith('/'):
+ test_case_path = join(MY_DIR, test_case_path)
+
+ return ref_path, test_case_path
+
+ def _set_file_as_executable(self, fname):
+ os.chmod(fname, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
+ | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP
+ | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)
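
BaseEvaluator above supplies only the shared plumbing (_run_command, create_submit_code_file, write_to_submit_code_file, path helpers); concrete evaluators now receive metadata and test_case_data in __init__ and implement check_code(), as the BashCodeEvaluator changes below illustrate. A hypothetical minimal subclass following the same pattern (EchoEvaluator is illustrative and not part of this change):

# Illustrative only: a minimal evaluator following the refactored pattern
# (per-test-case construction; check_code() returns success, message, mark).
from yaksh.base_evaluator import BaseEvaluator


class EchoEvaluator(BaseEvaluator):  # hypothetical class, not in this diff
    def __init__(self, metadata, test_case_data):
        self.user_answer = metadata.get('user_answer')
        self.partial_grading = metadata.get('partial_grading')
        self.expected = test_case_data.get('test_case')
        self.weight = test_case_data.get('weight')

    def check_code(self):
        if self.user_answer.strip() == self.expected.strip():
            mark_fraction = float(self.weight) if self.partial_grading else 0.0
            return True, "Correct answer", mark_fraction
        return False, "Error: output did not match the expected test case", 0.0

    def teardown(self):
        pass  # nothing to clean up for this illustrative evaluator
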
diff --git a/yaksh/bash_code_evaluator.py b/yaksh/bash_code_evaluator.py
index b5974d2..1e6fc9c 100644
--- a/yaksh/bash_code_evaluator.py
+++ b/yaksh/bash_code_evaluator.py
@@ -9,26 +9,31 @@ import subprocess
import importlib
# local imports
-from .code_evaluator import CodeEvaluator
+from .base_evaluator import BaseEvaluator
from .file_utils import copy_files, delete_files
-class BashCodeEvaluator(CodeEvaluator):
+class BashCodeEvaluator(BaseEvaluator):
# Private Protocol ##########
- def setup(self):
- super(BashCodeEvaluator, self).setup()
+ def __init__(self, metadata, test_case_data):
self.files = []
- self.submit_code_path = self.create_submit_code_file('submit.sh')
- self._set_file_as_executable(self.submit_code_path)
+
+ # Set metadata values
+ self.user_answer = metadata.get('user_answer')
+ self.file_paths = metadata.get('file_paths')
+ self.partial_grading = metadata.get('partial_grading')
+
+ # Set test case data values
+ self.test_case = test_case_data.get('test_case')
+ self.weight = test_case_data.get('weight')
def teardown(self):
# Delete the created file.
os.remove(self.submit_code_path)
if self.files:
delete_files(self.files)
- super(BashCodeEvaluator, self).teardown()
- def check_code(self, user_answer, file_paths, partial_grading, test_case, weight):
+ def check_code(self):
""" Function validates student script using instructor script as
reference. Test cases can optionally be provided. The first argument
ref_path, is the path to instructor script, it is assumed to
@@ -53,9 +58,12 @@ class BashCodeEvaluator(CodeEvaluator):
Returns (False, error_msg, 0.0): If mandatory arguments are not files or if
the required permissions are not given to the file(s).
"""
- ref_code_path = test_case
+ ref_code_path = self.test_case
success = False
- test_case_weight = 0.0
+ mark_fraction = 0.0
+
+ self.submit_code_path = self.create_submit_code_file('submit.sh')
+ self._set_file_as_executable(self.submit_code_path)
get_ref_path, get_test_case_path = ref_code_path.strip().split(',')
get_ref_path = get_ref_path.strip()
@@ -63,8 +71,8 @@ class BashCodeEvaluator(CodeEvaluator):
clean_ref_code_path, clean_test_case_path = \
self._set_test_code_file_path(get_ref_path, get_test_case_path)
- if file_paths:
- self.files = copy_files(file_paths)
+ if self.file_paths:
+ self.files = copy_files(self.file_paths)
if not isfile(clean_ref_code_path):
msg = "No file at %s or Incorrect path" % clean_ref_code_path
return False, msg, 0.0
@@ -78,8 +86,8 @@ class BashCodeEvaluator(CodeEvaluator):
msg = "Script %s is not executable" % self.submit_code_path
return False, msg, 0.0
- user_answer = user_answer.replace("\r", "")
- self.write_to_submit_code_file(self.submit_code_path, user_answer)
+ self.user_answer = self.user_answer.replace("\r", "")
+ self.write_to_submit_code_file(self.submit_code_path, self.user_answer)
if clean_test_case_path is None or "":
ret = self._run_command(clean_ref_code_path,
@@ -95,8 +103,8 @@ class BashCodeEvaluator(CodeEvaluator):
)
proc, stdnt_stdout, stdnt_stderr = ret
if inst_stdout == stdnt_stdout:
- test_case_weight = float(weight) if partial_grading else 0.0
- return True, "Correct answer", test_case_weight
+ mark_fraction = float(self.weight) if self.partial_grading else 0.0
+ return True, "Correct answer", mark_fraction
else:
err = "Error: expected %s, got %s" % (inst_stderr,
stdnt_stderr
@@ -116,21 +124,21 @@ class BashCodeEvaluator(CodeEvaluator):
loop_count = 0
test_cases = open(clean_test_case_path).readlines()
num_lines = len(test_cases)
- for test_case in test_cases:
+ for tc in test_cases:
loop_count += 1
if valid_answer:
args = [clean_ref_code_path] + \
- [x for x in test_case.split()]
+ [x for x in tc.split()]
ret = self._run_command(args,
stdin=None,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
proc, inst_stdout, inst_stderr = ret
- if file_paths:
- self.files = copy_files(file_paths)
+ if self.file_paths:
+ self.files = copy_files(self.file_paths)
args = [self.submit_code_path] + \
- [x for x in test_case.split()]
+ [x for x in tc.split()]
ret = self._run_command(args,
stdin=None,
stdout=subprocess.PIPE,
@@ -138,8 +146,8 @@ class BashCodeEvaluator(CodeEvaluator):
proc, stdnt_stdout, stdnt_stderr = ret
valid_answer = inst_stdout == stdnt_stdout
if valid_answer and (num_lines == loop_count):
- test_case_weight = float(weight) if partial_grading else 0.0
- return True, "Correct answer", test_case_weight
+ mark_fraction = float(self.weight) if self.partial_grading else 0.0
+ return True, "Correct answer", mark_fraction
else:
err = ("Error:expected"
" {0}, got {1}").format(inst_stdout+inst_stderr,
diff --git a/yaksh/bash_stdio_evaluator.py b/yaksh/bash_stdio_evaluator.py
index 1dd9fd5..50ee0d6 100644
--- a/yaksh/bash_stdio_evaluator.py
+++ b/yaksh/bash_stdio_evaluator.py
@@ -9,45 +9,51 @@ from .stdio_evaluator import StdIOEvaluator
from .file_utils import copy_files, delete_files
-class BashStdioEvaluator(StdIOEvaluator):
+class BashStdIOEvaluator(StdIOEvaluator):
"""Evaluates Bash StdIO based code"""
-
- def setup(self):
- super(BashStdioEvaluator, self).setup()
+ def __init__(self, metadata, test_case_data):
self.files = []
- self.submit_code_path = self.create_submit_code_file('Test.sh')
+
+ # Set metadata values
+ self.user_answer = metadata.get('user_answer')
+ self.file_paths = metadata.get('file_paths')
+ self.partial_grading = metadata.get('partial_grading')
+
+ # Set test case data values
+ self.expected_input = test_case_data.get('expected_input')
+ self.expected_output = test_case_data.get('expected_output')
+ self.weight = test_case_data.get('weight')
def teardown(self):
os.remove(self.submit_code_path)
if self.files:
delete_files(self.files)
- super(BashStdioEvaluator, self).teardown()
- def compile_code(self, user_answer, file_paths, expected_input, expected_output, weight):
- if file_paths:
- self.files = copy_files(file_paths)
+ def compile_code(self):
+ self.submit_code_path = self.create_submit_code_file('Test.sh')
+ if self.file_paths:
+ self.files = copy_files(self.file_paths)
if not isfile(self.submit_code_path):
msg = "No file at %s or Incorrect path" % self.submit_code_path
return False, msg
user_code_directory = os.getcwd() + '/'
- user_answer = user_answer.replace("\r", "")
- self.write_to_submit_code_file(self.submit_code_path, user_answer)
+ self.user_answer = self.user_answer.replace("\r", "")
+ self.write_to_submit_code_file(self.submit_code_path, self.user_answer)
- def check_code(self, user_answer, file_paths, partial_grading,
- expected_input, expected_output, weight):
+ def check_code(self):
success = False
- test_case_weight = 0.0
+ mark_fraction = 0.0
- expected_input = str(expected_input).replace('\r', '')
+ self.expected_input = str(self.expected_input).replace('\r', '')
proc = subprocess.Popen("bash ./Test.sh",
shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
- success, err = self.evaluate_stdio(user_answer, proc,
- expected_input,
- expected_output
+ success, err = self.evaluate_stdio(self.user_answer, proc,
+ self.expected_input,
+ self.expected_output
)
- test_case_weight = float(weight) if partial_grading and success else 0.0
- return success, err, test_case_weight
+ mark_fraction = float(self.weight) if self.partial_grading and success else 0.0
+ return success, err, mark_fraction
diff --git a/yaksh/code_server.py b/yaksh/code_server.py
index b3c9c30..4db5810 100755..100644
--- a/yaksh/code_server.py
+++ b/yaksh/code_server.py
@@ -53,7 +53,8 @@ from tornado.web import Application, RequestHandler
# Local imports
from .settings import SERVER_PORTS, SERVER_POOL_PORT
-from .language_registry import create_evaluator_instance, unpack_json
+from .language_registry import create_evaluator_instance
+from .grader import Grader
MY_DIR = abspath(dirname(__file__))
@@ -84,13 +85,9 @@ class CodeServer(object):
"""Calls relevant EvaluateCode class based on language to check the
answer code
"""
- code_evaluator = create_evaluator_instance(language,
- test_case_type,
- json_data,
- in_dir
- )
- data = unpack_json(json_data)
- result = code_evaluator.evaluate(**data)
+ data = json.loads(json_data)
+ grader = Grader(in_dir)
+ result = grader.evaluate(data)
# Put us back into the server pool queue since we are free now.
self.queue.put(self.port)
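
After this hunk the code server no longer unpacks evaluator-specific keyword arguments; it decodes the JSON body and delegates to Grader.evaluate(). A hedged sketch of that hand-off, assuming the request body mirrors the payload format used by the tests elsewhere in this diff (transport details are outside this hunk and the values are illustrative):

# Sketch of the server-side hand-off shown above; json_data stands in for
# the request body the handler receives.
import json

from yaksh.grader import Grader

json_data = json.dumps({
    'metadata': {
        'user_answer': '#!/bin/bash\necho hello',
        'file_paths': None,
        'partial_grading': False,
        'language': 'bash',
    },
    'test_case_data': [
        {'test_case_type': 'stdiobasedtestcase',
         'expected_input': '',
         'expected_output': 'hello',
         'weight': 0.0},
    ],
})

data = json.loads(json_data)            # as in check_code() above
result = Grader('/tmp').evaluate(data)  # '/tmp' stands in for in_dir
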
diff --git a/yaksh/cpp_code_evaluator.py b/yaksh/cpp_code_evaluator.py
index 716a522..f0c2029 100644
--- a/yaksh/cpp_code_evaluator.py
+++ b/yaksh/cpp_code_evaluator.py
@@ -5,17 +5,15 @@ import pwd
import os
from os.path import join, isfile
import subprocess
-import importlib
# Local imports
-from .code_evaluator import CodeEvaluator
from .file_utils import copy_files, delete_files
+from .base_evaluator import BaseEvaluator
-class CppCodeEvaluator(CodeEvaluator):
+class CppCodeEvaluator(BaseEvaluator):
"""Tests the C code obtained from Code Server"""
- def setup(self):
- super(CppCodeEvaluator, self).setup()
+ def __init__(self, metadata, test_case_data):
self.files = []
self.submit_code_path = self.create_submit_code_file('submit.c')
self.compiled_user_answer = None
@@ -23,6 +21,15 @@ class CppCodeEvaluator(CodeEvaluator):
self.user_output_path = ""
self.ref_output_path = ""
+ # Set metadata values
+ self.user_answer = metadata.get('user_answer')
+ self.file_paths = metadata.get('file_paths')
+ self.partial_grading = metadata.get('partial_grading')
+
+ # Set test case data values
+ self.test_case = test_case_data.get('test_case')
+ self.weight = test_case_data.get('weight')
+
def teardown(self):
# Delete the created file.
os.remove(self.submit_code_path)
@@ -32,8 +39,6 @@ class CppCodeEvaluator(CodeEvaluator):
os.remove(self.user_output_path)
if self.files:
delete_files(self.files)
- super(CppCodeEvaluator, self).teardown()
-
def set_file_paths(self):
user_output_path = os.getcwd() + '/output_file'
@@ -50,15 +55,15 @@ class CppCodeEvaluator(CodeEvaluator):
ref_output_path)
return compile_command, compile_main
- def compile_code(self, user_answer, file_paths, test_case, weight):
+ def compile_code(self):
if self.compiled_user_answer and self.compiled_test_code:
return None
else:
- ref_code_path = test_case
+ ref_code_path = self.test_case
clean_ref_code_path, clean_test_case_path = \
self._set_test_code_file_path(ref_code_path)
- if file_paths:
- self.files = copy_files(file_paths)
+ if self.file_paths:
+ self.files = copy_files(self.file_paths)
if not isfile(clean_ref_code_path):
msg = "No file at %s or Incorrect path" % clean_ref_code_path
return False, msg
@@ -66,7 +71,7 @@ class CppCodeEvaluator(CodeEvaluator):
msg = "No file at %s or Incorrect path" % self.submit_code_path
return False, msg
- self.write_to_submit_code_file(self.submit_code_path, user_answer)
+ self.write_to_submit_code_file(self.submit_code_path, self.user_answer)
self.user_output_path, self.ref_output_path = self.set_file_paths()
self.compile_command, self.compile_main = self.get_commands(
clean_ref_code_path,
@@ -89,7 +94,7 @@ class CppCodeEvaluator(CodeEvaluator):
return self.compiled_user_answer, self.compiled_test_code
- def check_code(self, user_answer, file_paths, partial_grading, test_case, weight):
+ def check_code(self):
""" Function validates student code using instructor code as
reference.The first argument ref_code_path, is the path to
instructor code, it is assumed to have executable permission.
@@ -109,7 +114,7 @@ class CppCodeEvaluator(CodeEvaluator):
if the required permissions are not given to the file(s).
"""
success = False
- test_case_weight = 0.0
+ mark_fraction = 0.0
proc, stdnt_out, stdnt_stderr = self.compiled_user_answer
stdnt_stderr = self._remove_null_substitute_char(stdnt_stderr)
@@ -129,7 +134,7 @@ class CppCodeEvaluator(CodeEvaluator):
proc, stdout, stderr = ret
if proc.returncode == 0:
success, err = True, "Correct answer"
- test_case_weight = float(weight) if partial_grading else 0.0
+ mark_fraction = float(self.weight) if self.partial_grading else 0.0
else:
err = "{0} \n {1}".format(stdout, stderr)
else:
@@ -155,4 +160,4 @@ class CppCodeEvaluator(CodeEvaluator):
except:
err = "{0} \n {1}".format(err, stdnt_stderr)
- return success, err, test_case_weight
+ return success, err, mark_fraction
diff --git a/yaksh/cpp_stdio_evaluator.py b/yaksh/cpp_stdio_evaluator.py
index 00fad92..c318a82 100644
--- a/yaksh/cpp_stdio_evaluator.py
+++ b/yaksh/cpp_stdio_evaluator.py
@@ -9,19 +9,26 @@ from .stdio_evaluator import StdIOEvaluator
from .file_utils import copy_files, delete_files
-class CppStdioEvaluator(StdIOEvaluator):
+class CppStdIOEvaluator(StdIOEvaluator):
"""Evaluates C StdIO based code"""
-
- def setup(self):
- super(CppStdioEvaluator, self).setup()
+ def __init__(self, metadata, test_case_data):
self.files = []
- self.submit_code_path = self.create_submit_code_file('main.c')
+ self.submit_code_path = self.create_submit_code_file('submit.c')
+
+ # Set metadata values
+ self.user_answer = metadata.get('user_answer')
+ self.file_paths = metadata.get('file_paths')
+ self.partial_grading = metadata.get('partial_grading')
+
+ # Set test case data values
+ self.expected_input = test_case_data.get('expected_input')
+ self.expected_output = test_case_data.get('expected_output')
+ self.weight = test_case_data.get('weight')
def teardown(self):
os.remove(self.submit_code_path)
if self.files:
delete_files(self.files)
- super(CppStdioEvaluator, self).teardown()
def set_file_paths(self):
user_output_path = os.getcwd() + '/output_file'
@@ -35,13 +42,13 @@ class CppStdioEvaluator(StdIOEvaluator):
ref_output_path)
return compile_command, compile_main
- def compile_code(self, user_answer, file_paths, expected_input, expected_output, weight):
- if file_paths:
+ def compile_code(self):
+ if self.file_paths:
self.files = copy_files(self.file_paths)
if not isfile(self.submit_code_path):
msg = "No file at %s or Incorrect path" % self.submit_code_path
return False, msg
- self.write_to_submit_code_file(self.submit_code_path, user_answer)
+ self.write_to_submit_code_file(self.submit_code_path, self.user_answer)
self.user_output_path, self.ref_output_path = self.set_file_paths()
self.compile_command, self.compile_main = self.get_commands(
self.user_output_path,
@@ -61,10 +68,9 @@ class CppStdioEvaluator(StdIOEvaluator):
)
return self.compiled_user_answer, self.compiled_test_code
- def check_code(self, user_answer, file_paths, partial_grading,
- expected_input, expected_output, weight):
+ def check_code(self):
success = False
- test_case_weight = 0.0
+ mark_fraction = 0.0
proc, stdnt_out, stdnt_stderr = self.compiled_user_answer
stdnt_stderr = self._remove_null_substitute_char(stdnt_stderr)
@@ -78,9 +84,9 @@ class CppStdioEvaluator(StdIOEvaluator):
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
- success, err = self.evaluate_stdio(user_answer, proc,
- expected_input,
- expected_output
+ success, err = self.evaluate_stdio(self.user_answer, proc,
+ self.expected_input,
+ self.expected_output
)
os.remove(self.ref_output_path)
else:
@@ -106,5 +112,5 @@ class CppStdioEvaluator(StdIOEvaluator):
err = err + "\n" + e
except:
err = err + "\n" + stdnt_stderr
- test_case_weight = float(weight) if partial_grading and success else 0.0
- return success, err, test_case_weight
+ mark_fraction = float(self.weight) if self.partial_grading and success else 0.0
+ return success, err, mark_fraction
diff --git a/yaksh/evaluator_tests/test_bash_evaluation.py b/yaksh/evaluator_tests/test_bash_evaluation.py
index 99e5122..06a56e4 100644
--- a/yaksh/evaluator_tests/test_bash_evaluation.py
+++ b/yaksh/evaluator_tests/test_bash_evaluation.py
@@ -3,8 +3,9 @@ import unittest
import os
import shutil
import tempfile
+from yaksh.grader import Grader
from yaksh.bash_code_evaluator import BashCodeEvaluator
-from yaksh.bash_stdio_evaluator import BashStdioEvaluator
+from yaksh.bash_stdio_evaluator import BashStdIOEvaluator
from yaksh.settings import SERVER_TIMEOUT
from textwrap import dedent
@@ -15,6 +16,7 @@ class BashAssertionEvaluationTestCases(unittest.TestCase):
f.write('2'.encode('ascii'))
self.test_case_data = [
{"test_case": "bash_files/sample.sh,bash_files/sample.args",
+ "test_case_type": "standardtestcase",
"weight": 0.0
}
]
@@ -32,39 +34,57 @@ class BashAssertionEvaluationTestCases(unittest.TestCase):
user_answer = ("#!/bin/bash\n[[ $# -eq 2 ]]"
" && echo $(( $1 + $2 )) && exit $(( $1 + $2 ))"
)
- get_class = BashCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
- 'partial_grading': True,
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'bash'
+ },
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
- }
- result = get_class.evaluate(**kwargs)
+ }
+
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
self.assertTrue(result.get('success'))
self.assertEqual(result.get('error'), "Correct answer\n")
def test_error(self):
user_answer = ("#!/bin/bash\n[[ $# -eq 2 ]] "
"&& echo $(( $1 - $2 )) && exit $(( $1 - $2 ))")
- get_class = BashCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
- 'partial_grading': True,
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'bash'
+ },
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
- }
- result = get_class.evaluate(**kwargs)
+ }
+
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
self.assertFalse(result.get("success"))
self.assertTrue("Error" in result.get("error"))
def test_infinite_loop(self):
user_answer = ("#!/bin/bash\nwhile [ 1 ] ;"
" do echo "" > /dev/null ; done")
- get_class = BashCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
- 'partial_grading': True,
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'bash'
+ },
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
- }
- result = get_class.evaluate(**kwargs)
+ }
+
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
self.assertFalse(result.get("success"))
self.assertEqual(result.get("error"), self.timeout_msg)
@@ -72,26 +92,35 @@ class BashAssertionEvaluationTestCases(unittest.TestCase):
self.file_paths = [('/tmp/test.txt', False)]
self.test_case_data = [
{"test_case": "bash_files/sample1.sh,bash_files/sample1.args",
+ "test_case_type": "standardtestcase",
"weight": 0.0
}
]
user_answer = ("#!/bin/bash\ncat $1")
- get_class = BashCodeEvaluator()
- kwargs = {'user_answer': user_answer,
- 'partial_grading': True,
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'bash'
+ },
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
- }
- result = get_class.evaluate(**kwargs)
+ }
+
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
self.assertTrue(result.get("success"))
self.assertEqual(result.get("error"), "Correct answer\n")
-class BashStdioEvaluationTestCases(unittest.TestCase):
+class BashStdIOEvaluationTestCases(unittest.TestCase):
def setUp(self):
self.in_dir = tempfile.mkdtemp()
self.timeout_msg = ("Code took more than {0} seconds to run. "
"You probably have an infinite loop in your"
" code.").format(SERVER_TIMEOUT)
+ self.file_paths = None
+
def test_correct_answer(self):
user_answer = dedent(""" #!/bin/bash
@@ -102,14 +131,22 @@ class BashStdioEvaluationTestCases(unittest.TestCase):
)
test_case_data = [{'expected_output': '11',
'expected_input': '5\n6',
+ 'test_case_type': 'stdiobasedtestcase',
'weight': 0.0
}]
- get_class = BashStdioEvaluator()
- kwargs = {"user_answer": user_answer,
- "partial_grading": True,
- "test_case_data": test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'bash'
+ },
+ 'test_case_data': test_case_data,
+ }
+
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
@@ -123,15 +160,23 @@ class BashStdioEvaluationTestCases(unittest.TestCase):
"""
)
test_case_data = [{'expected_output': '1 2 3\n4 5 6\n7 8 9\n',
- 'expected_input': '1,2,3\n4,5,6\n7,8,9',
- 'weight': 0.0
+ 'expected_input': '1,2,3\n4,5,6\n7,8,9',
+ 'test_case_type': 'stdiobasedtestcase',
+ 'weight': 0.0
}]
- get_class = BashStdioEvaluator()
- kwargs = {"user_answer": user_answer,
- "partial_grading": True,
- "test_case_data": test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'bash'
+ },
+ 'test_case_data': test_case_data,
+ }
+
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
@@ -144,14 +189,21 @@ class BashStdioEvaluationTestCases(unittest.TestCase):
)
test_case_data = [{'expected_output': '11',
'expected_input': '5\n6',
+ 'test_case_type': 'stdiobasedtestcase',
'weight': 0.0
}]
- get_class = BashStdioEvaluator()
- kwargs = {"user_answer": user_answer,
- "partial_grading": True,
- "test_case_data": test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'bash'
+ },
+ 'test_case_data': test_case_data,
+ }
+
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
self.assertIn("Incorrect", result.get('error'))
self.assertFalse(result.get('success'))
@@ -164,14 +216,21 @@ class BashStdioEvaluationTestCases(unittest.TestCase):
)
test_case_data = [{'expected_output': '10',
'expected_input': '',
+ 'test_case_type': 'stdiobasedtestcase',
'weight': 0.0
}]
- get_class = BashStdioEvaluator()
- kwargs = {"user_answer": user_answer,
- "partial_grading": True,
- "test_case_data": test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'bash'
+ },
+ 'test_case_data': test_case_data,
+ }
+
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
diff --git a/yaksh/evaluator_tests/test_c_cpp_evaluation.py b/yaksh/evaluator_tests/test_c_cpp_evaluation.py
index d5193d3..dc6fdc9 100644
--- a/yaksh/evaluator_tests/test_c_cpp_evaluation.py
+++ b/yaksh/evaluator_tests/test_c_cpp_evaluation.py
@@ -3,10 +3,14 @@ import unittest
import os
import shutil
import tempfile
+from textwrap import dedent
+
+# Local import
+from yaksh.grader import Grader
from yaksh.cpp_code_evaluator import CppCodeEvaluator
-from yaksh.cpp_stdio_evaluator import CppStdioEvaluator
+from yaksh.cpp_stdio_evaluator import CppStdIOEvaluator
from yaksh.settings import SERVER_TIMEOUT
-from textwrap import dedent
+
class CAssertionEvaluationTestCases(unittest.TestCase):
@@ -15,6 +19,7 @@ class CAssertionEvaluationTestCases(unittest.TestCase):
f.write('2'.encode('ascii'))
tmp_in_dir_path = tempfile.mkdtemp()
self.test_case_data = [{"test_case": "c_cpp_files/main.cpp",
+ "test_case_type": "standardtestcase",
"weight": 0.0
}]
self.in_dir = tmp_in_dir_path
@@ -29,25 +34,37 @@ class CAssertionEvaluationTestCases(unittest.TestCase):
def test_correct_answer(self):
user_answer = "int add(int a, int b)\n{return a+b;}"
- get_class = CppCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
- 'partial_grading': False,
- 'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
- }
- result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'cpp'
+ },
+ 'test_case_data': self.test_case_data,
+ }
+
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
self.assertTrue(result.get('success'))
self.assertEqual(result.get('error'), "Correct answer\n")
def test_incorrect_answer(self):
user_answer = "int add(int a, int b)\n{return a-b;}"
- get_class = CppCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
'partial_grading': False,
+ 'language': 'cpp'
+ },
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
- }
- result = get_class.evaluate(**kwargs)
+ }
+
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
lines_of_error = len(result.get('error').splitlines())
self.assertFalse(result.get('success'))
self.assertIn("Incorrect:", result.get('error'))
@@ -55,31 +72,44 @@ class CAssertionEvaluationTestCases(unittest.TestCase):
def test_compilation_error(self):
user_answer = "int add(int a, int b)\n{return a+b}"
- get_class = CppCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
'partial_grading': False,
+ 'language': 'cpp'
+ },
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
- }
- result = get_class.evaluate(**kwargs)
+ }
+
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
self.assertFalse(result.get("success"))
self.assertTrue("Compilation Error" in result.get("error"))
def test_infinite_loop(self):
user_answer = "int add(int a, int b)\n{while(1>0){}}"
- get_class = CppCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
'partial_grading': False,
+ 'language': 'cpp'
+ },
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
- }
- result = get_class.evaluate(**kwargs)
+ }
+
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
self.assertFalse(result.get("success"))
self.assertEqual(result.get("error"), self.timeout_msg)
def test_file_based_assert(self):
self.file_paths = [('/tmp/test.txt', False)]
self.test_case_data = [{"test_case": "c_cpp_files/file_data.c",
+ "test_case_type": "standardtestcase",
"weight": 0.0
}]
user_answer = dedent("""
@@ -94,26 +124,34 @@ class CAssertionEvaluationTestCases(unittest.TestCase):
return buff[0];
}
""")
- get_class = CppCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
'partial_grading': False,
+ 'language': 'cpp'
+ },
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
- }
- result = get_class.evaluate(**kwargs)
+ }
+
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
self.assertTrue(result.get('success'))
self.assertEqual(result.get('error'), "Correct answer\n")
-class CppStdioEvaluationTestCases(unittest.TestCase):
+class CppStdIOEvaluationTestCases(unittest.TestCase):
def setUp(self):
self.test_case_data = [{'expected_output': '11',
'expected_input': '5\n6',
- 'weight': 0.0
+ 'weight': 0.0,
+ 'test_case_type': 'stdiobasedtestcase',
}]
self.in_dir = tempfile.mkdtemp()
self.timeout_msg = ("Code took more than {0} seconds to run. "
"You probably have an infinite loop in"
" your code.").format(SERVER_TIMEOUT)
+ self.file_paths = None
def test_correct_answer(self):
user_answer = dedent("""
@@ -123,19 +161,27 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
scanf("%d%d",&a,&b);
printf("%d",a+b);
}""")
- get_class = CppStdioEvaluator()
- kwargs = {'user_answer': user_answer,
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
'partial_grading': False,
- 'test_case_data': self.test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ 'language': 'cpp'
+ },
+ 'test_case_data': self.test_case_data,
+ }
+
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_array_input(self):
self.test_case_data = [{'expected_output': '561',
'expected_input': '5\n6\n1',
- 'weight': 0.0
+ 'weight': 0.0,
+ 'test_case_type': 'stdiobasedtestcase',
}]
user_answer = dedent("""
#include<stdio.h>
@@ -146,19 +192,27 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
for(i=0;i<3;i++){
printf("%d",a[i]);}
}""")
- get_class = CppStdioEvaluator()
- kwargs = {'user_answer': user_answer,
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
'partial_grading': False,
- 'test_case_data': self.test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ 'language': 'cpp'
+ },
+ 'test_case_data': self.test_case_data,
+ }
+
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_string_input(self):
self.test_case_data = [{'expected_output': 'abc',
'expected_input': 'abc',
- 'weight': 0.0
+ 'weight': 0.0,
+ 'test_case_type': 'stdiobasedtestcase',
}]
user_answer = dedent("""
#include<stdio.h>
@@ -167,12 +221,19 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
scanf("%s",a);
printf("%s",a);
}""")
- get_class = CppStdioEvaluator()
- kwargs = {'user_answer': user_answer,
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
'partial_grading': False,
- 'test_case_data': self.test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ 'language': 'cpp'
+ },
+ 'test_case_data': self.test_case_data,
+ }
+
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
@@ -183,12 +244,19 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
int a=10;
printf("%d",a);
}""")
- get_class = CppStdioEvaluator()
- kwargs = {'user_answer': user_answer,
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
'partial_grading': False,
- 'test_case_data': self.test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ 'language': 'cpp'
+ },
+ 'test_case_data': self.test_case_data,
+ }
+
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
lines_of_error = len(result.get('error').splitlines())
self.assertFalse(result.get('success'))
self.assertIn("Incorrect", result.get('error'))
@@ -201,12 +269,19 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
int a=10;
printf("%d",a)
}""")
- get_class = CppStdioEvaluator()
- kwargs = {'user_answer': user_answer,
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
'partial_grading': False,
- 'test_case_data': self.test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ 'language': 'cpp'
+ },
+ 'test_case_data': self.test_case_data,
+ }
+
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
self.assertFalse(result.get("success"))
self.assertTrue("Compilation Error" in result.get("error"))
@@ -217,19 +292,27 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
while(0==0){
printf("abc");}
}""")
- get_class = CppStdioEvaluator()
- kwargs = {'user_answer': user_answer,
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
'partial_grading': False,
- 'test_case_data': self.test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ 'language': 'cpp'
+ },
+ 'test_case_data': self.test_case_data,
+ }
+
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
self.assertFalse(result.get("success"))
self.assertEqual(result.get("error"), self.timeout_msg)
def test_only_stdout(self):
self.test_case_data = [{'expected_output': '11',
'expected_input': '',
- 'weight': 0.0
+ 'weight': 0.0,
+ 'test_case_type': 'stdiobasedtestcase',
}]
user_answer = dedent("""
#include<stdio.h>
@@ -237,12 +320,19 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
int a=5,b=6;
printf("%d",a+b);
}""")
- get_class = CppStdioEvaluator()
- kwargs = {'user_answer': user_answer,
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
'partial_grading': False,
- 'test_case_data': self.test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ 'language': 'cpp'
+ },
+ 'test_case_data': self.test_case_data,
+ }
+
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
@@ -255,19 +345,27 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
cin>>a>>b;
cout<<a+b;
}""")
- get_class = CppStdioEvaluator()
- kwargs = {'user_answer': user_answer,
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
'partial_grading': False,
- 'test_case_data': self.test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ 'language': 'cpp'
+ },
+ 'test_case_data': self.test_case_data,
+ }
+
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_cpp_array_input(self):
self.test_case_data = [{'expected_output': '561',
'expected_input': '5\n6\n1',
- 'weight': 0.0
+ 'weight': 0.0,
+ 'test_case_type': 'stdiobasedtestcase',
}]
user_answer = dedent("""
#include<iostream>
@@ -279,19 +377,27 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
for(i=0;i<3;i++){
cout<<a[i];}
}""")
- get_class = CppStdioEvaluator()
- kwargs = {'user_answer': user_answer,
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
'partial_grading': False,
- 'test_case_data': self.test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ 'language': 'cpp'
+ },
+ 'test_case_data': self.test_case_data,
+ }
+
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_cpp_string_input(self):
self.test_case_data = [{'expected_output': 'abc',
'expected_input': 'abc',
- 'weight': 0.0
+ 'weight': 0.0,
+ 'test_case_type': 'stdiobasedtestcase',
}]
user_answer = dedent("""
#include<iostream>
@@ -301,12 +407,19 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
cin>>a;
cout<<a;
}""")
- get_class = CppStdioEvaluator()
- kwargs = {'user_answer': user_answer,
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
'partial_grading': False,
- 'test_case_data': self.test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ 'language': 'cpp'
+ },
+ 'test_case_data': self.test_case_data,
+ }
+
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
@@ -318,12 +431,19 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
int a=10;
cout<<a;
}""")
- get_class = CppStdioEvaluator()
- kwargs = {'user_answer': user_answer,
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
'partial_grading': False,
- 'test_case_data': self.test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ 'language': 'cpp'
+ },
+ 'test_case_data': self.test_case_data,
+ }
+
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
lines_of_error = len(result.get('error').splitlines())
self.assertFalse(result.get('success'))
self.assertIn("Incorrect", result.get('error'))
@@ -337,12 +457,19 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
int a=10;
cout<<a
}""")
- get_class = CppStdioEvaluator()
- kwargs = {'user_answer': user_answer,
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
'partial_grading': False,
- 'test_case_data': self.test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ 'language': 'cpp'
+ },
+ 'test_case_data': self.test_case_data,
+ }
+
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
self.assertFalse(result.get("success"))
self.assertTrue("Compilation Error" in result.get("error"))
@@ -354,19 +481,27 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
while(0==0){
cout<<"abc";}
}""")
- get_class = CppStdioEvaluator()
- kwargs = {'user_answer': user_answer,
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
'partial_grading': False,
- 'test_case_data': self.test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ 'language': 'cpp'
+ },
+ 'test_case_data': self.test_case_data,
+ }
+
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
self.assertFalse(result.get("success"))
self.assertEqual(result.get("error"), self.timeout_msg)
def test_cpp_only_stdout(self):
self.test_case_data = [{'expected_output': '11',
'expected_input': '',
- 'weight': 0.0
+ 'weight': 0.0,
+ 'test_case_type': 'stdiobasedtestcase',
}]
user_answer = dedent("""
#include<iostream>
@@ -375,12 +510,19 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
int a=5,b=6;
cout<<a+b;
}""")
- get_class = CppStdioEvaluator()
- kwargs = {'user_answer': user_answer,
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
'partial_grading': False,
- 'test_case_data': self.test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ 'language': 'cpp'
+ },
+ 'test_case_data': self.test_case_data,
+ }
+
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
diff --git a/yaksh/evaluator_tests/test_code_evaluation.py b/yaksh/evaluator_tests/test_code_evaluation.py
index 88e0253..cb783b0 100644
--- a/yaksh/evaluator_tests/test_code_evaluation.py
+++ b/yaksh/evaluator_tests/test_code_evaluation.py
@@ -13,12 +13,12 @@ class RegistryTestCase(unittest.TestCase):
assertion_evaluator_path = ("yaksh.python_assertion_evaluator"
".PythonAssertionEvaluator"
)
- stdout_evaluator_path = ("yaksh.python_stdout_evaluator."
- "PythonStdoutEvaluator"
+ stdio_evaluator_path = ("yaksh.python_stdio_evaluator."
+ "PythonStdIOEvaluator"
)
code_evaluators['python'] = \
{"standardtestcase": assertion_evaluator_path,
- "stdiobasedtestcase": stdout_evaluator_path
+ "stdiobasedtestcase": stdio_evaluator_path
}
def test_set_register(self):
@@ -28,15 +28,15 @@ class RegistryTestCase(unittest.TestCase):
assertion_evaluator_path = ("yaksh.python_assertion_evaluator"
".PythonAssertionEvaluator"
)
- stdout_evaluator_path = ("yaksh.python_stdout_evaluator."
- "PythonStdoutEvaluator"
+ stdio_evaluator_path = ("yaksh.python_stdio_evaluator."
+ "PythonStdIOEvaluator"
)
class_name = getattr(python_assertion_evaluator,
'PythonAssertionEvaluator'
)
self.registry_object.register("python",
{"standardtestcase": assertion_evaluator_path,
- "stdiobasedtestcase": stdout_evaluator_path
+ "stdiobasedtestcase": stdio_evaluator_path
}
)
self.assertEqual(evaluator_class, class_name)
diff --git a/yaksh/evaluator_tests/test_java_evaluation.py b/yaksh/evaluator_tests/test_java_evaluation.py
index f7ecd97..36eb6a5 100644
--- a/yaksh/evaluator_tests/test_java_evaluation.py
+++ b/yaksh/evaluator_tests/test_java_evaluation.py
@@ -3,12 +3,14 @@ import unittest
import os
import shutil
import tempfile
-from yaksh import code_evaluator as evaluator
-from yaksh.java_code_evaluator import JavaCodeEvaluator
-from yaksh.java_stdio_evaluator import JavaStdioEvaluator
-from yaksh.settings import SERVER_TIMEOUT
from textwrap import dedent
+# Local Import
+from yaksh import grader as gd
+from yaksh.grader import Grader
+from yaksh.java_code_evaluator import JavaCodeEvaluator
+from yaksh.java_stdio_evaluator import JavaStdIOEvaluator
+
class JavaAssertionEvaluationTestCases(unittest.TestCase):
def setUp(self):
@@ -17,41 +19,56 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase):
tmp_in_dir_path = tempfile.mkdtemp()
self.test_case_data = [
{"test_case": "java_files/main_square.java",
+ "test_case_type": "standardtestcase",
"weight": 0.0
}
]
self.in_dir = tmp_in_dir_path
- evaluator.SERVER_TIMEOUT = 9
+ self.file_paths = None
+ gd.SERVER_TIMEOUT = 9
self.timeout_msg = ("Code took more than {0} seconds to run. "
"You probably have an infinite loop in"
- " your code.").format(evaluator.SERVER_TIMEOUT)
- self.file_paths = None
+ " your code.").format(gd.SERVER_TIMEOUT)
+
def tearDown(self):
+ gd.SERVER_TIMEOUT = 4
os.remove('/tmp/test.txt')
shutil.rmtree(self.in_dir)
def test_correct_answer(self):
user_answer = "class Test {\n\tint square_num(int a) {\n\treturn a*a;\n\t}\n}"
- get_class = JavaCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
- 'partial_grading': True,
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'java'
+ },
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
- }
- result = get_class.evaluate(**kwargs)
+ }
+
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_incorrect_answer(self):
user_answer = "class Test {\n\tint square_num(int a) {\n\treturn a;\n\t}\n}"
- get_class = JavaCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
- 'partial_grading': True,
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'java'
+ },
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
- }
- result = get_class.evaluate(**kwargs)
+ }
+
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
self.assertFalse(result.get('success'))
lines_of_error = len(result.get('error').splitlines())
self.assertFalse(result.get('success'))
@@ -60,25 +77,37 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase):
def test_error(self):
user_answer = "class Test {\n\tint square_num(int a) {\n\treturn a*a"
- get_class = JavaCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
- 'partial_grading': True,
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'java'
+ },
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
- }
- result = get_class.evaluate(**kwargs)
+ }
+
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
self.assertFalse(result.get("success"))
self.assertTrue("Error" in result.get("error"))
def test_infinite_loop(self):
user_answer = "class Test {\n\tint square_num(int a) {\n\t\twhile(0==0){\n\t\t}\n\t}\n}"
- get_class = JavaCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
- 'partial_grading': True,
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'java'
+ },
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
- }
- result = get_class.evaluate(**kwargs)
+ }
+
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
self.assertFalse(result.get("success"))
self.assertEqual(result.get("error"), self.timeout_msg)
@@ -86,6 +115,7 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase):
self.file_paths = [("/tmp/test.txt", False)]
self.test_case_data = [
{"test_case": "java_files/read_file.java",
+ "test_case_type": "standardtestcase",
"weight": 0.0
}
]
@@ -107,34 +137,41 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase):
br.close();
}}}
""")
- get_class = JavaCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
- 'partial_grading': True,
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'java'
+ },
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
- }
- result = get_class.evaluate(**kwargs)
+ }
+
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
self.assertTrue(result.get("success"))
self.assertEqual(result.get("error"), "Correct answer\n")
-class JavaStdioEvaluationTestCases(unittest.TestCase):
-
+class JavaStdIOEvaluationTestCases(unittest.TestCase):
def setUp(self):
with open('/tmp/test.txt', 'wb') as f:
f.write('2'.encode('ascii'))
tmp_in_dir_path = tempfile.mkdtemp()
self.in_dir = tmp_in_dir_path
self.test_case_data = [{'expected_output': '11',
- 'expected_input': '5\n6',
- 'weight': 0.0
- }]
- evaluator.SERVER_TIMEOUT = 4
+ 'expected_input': '5\n6',
+ 'test_case_type': 'stdiobasedtestcase',
+ 'weight': 0.0
+ }]
+ self.file_paths = None
+ gd.SERVER_TIMEOUT = 9
self.timeout_msg = ("Code took more than {0} seconds to run. "
"You probably have an infinite loop in"
- " your code.").format(evaluator.SERVER_TIMEOUT)
+ " your code.").format(gd.SERVER_TIMEOUT)
def tearDown(self):
- evaluator.SERVER_TIMEOUT = 4
+ gd.SERVER_TIMEOUT = 4
os.remove('/tmp/test.txt')
shutil.rmtree(self.in_dir)
@@ -148,19 +185,26 @@ class JavaStdioEvaluationTestCases(unittest.TestCase):
int b = s.nextInt();
System.out.print(a+b);
}}""")
- get_class = JavaStdioEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
- 'partial_grading': True,
- 'test_case_data': self.test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'java'
+ },
+ 'test_case_data': self.test_case_data,
+ }
+
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_array_input(self):
-
self.test_case_data = [{'expected_output': '561',
'expected_input': '5\n6\n1',
+ 'test_case_type': 'stdiobasedtestcase',
'weight': 0.0
}]
user_answer = dedent("""
@@ -173,17 +217,23 @@ class JavaStdioEvaluationTestCases(unittest.TestCase):
a[i] = s.nextInt();
System.out.print(a[i]);}
}}""")
- get_class = JavaStdioEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
- 'partial_grading': True,
- 'test_case_data': self.test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'java'
+ },
+ 'test_case_data': self.test_case_data,
+ }
+
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_incorrect_answer(self):
-
user_answer = dedent("""
import java.util.Scanner;
class Test
@@ -193,35 +243,47 @@ class JavaStdioEvaluationTestCases(unittest.TestCase):
int b = s.nextInt();
System.out.print(a);
}}""")
- get_class = JavaStdioEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
- 'partial_grading': True,
- 'test_case_data': self.test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'java'
+ },
+ 'test_case_data': self.test_case_data,
+ }
+
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
lines_of_error = len(result.get('error').splitlines())
self.assertFalse(result.get('success'))
self.assertIn("Incorrect", result.get('error'))
self.assertTrue(lines_of_error > 1)
def test_error(self):
-
user_answer = dedent("""
class Test
{
System.out.print("a");
}""")
- get_class = JavaStdioEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
- 'partial_grading': True,
- 'test_case_data': self.test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'java'
+ },
+ 'test_case_data': self.test_case_data,
+ }
+
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
self.assertFalse(result.get("success"))
self.assertTrue("Compilation Error" in result.get("error"))
def test_infinite_loop(self):
-
user_answer = dedent("""
class Test
{public static void main(String[] args){
@@ -229,19 +291,27 @@ class JavaStdioEvaluationTestCases(unittest.TestCase):
{
System.out.print("a");}
}}""")
- get_class = JavaStdioEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
- 'partial_grading': True,
- 'test_case_data': self.test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'java'
+ },
+ 'test_case_data': self.test_case_data,
+ }
+
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
self.assertFalse(result.get("success"))
self.assertEqual(result.get("error"), self.timeout_msg)
def test_only_stdout(self):
self.test_case_data = [{'expected_output': '11',
- 'expected_input': '',
- 'weight': 0.0
+ 'expected_input': '',
+ 'test_case_type': 'stdiobasedtestcase',
+ 'weight': 0.0
}]
user_answer = dedent("""
class Test
@@ -250,19 +320,27 @@ class JavaStdioEvaluationTestCases(unittest.TestCase):
int b = 6;
System.out.print(a+b);
}}""")
- get_class = JavaStdioEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
- 'partial_grading': True,
- 'test_case_data': self.test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'java'
+ },
+ 'test_case_data': self.test_case_data,
+ }
+
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_string_input(self):
self.test_case_data = [{'expected_output': 'HelloWorld',
- 'expected_input': 'Hello\nWorld',
- 'weight': 0.0
+ 'expected_input': 'Hello\nWorld',
+ 'test_case_type': 'stdiobasedtestcase',
+ 'weight': 0.0
}]
user_answer = dedent("""
import java.util.Scanner;
@@ -273,20 +351,28 @@ class JavaStdioEvaluationTestCases(unittest.TestCase):
String b = s.nextLine();
System.out.print(a+b);
}}""")
- get_class = JavaStdioEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
- 'partial_grading': True,
- 'test_case_data': self.test_case_data
- }
- result = get_class.evaluate(**kwargs)
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'java'
+ },
+ 'test_case_data': self.test_case_data,
+ }
+
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_file_based_stdout(self):
self.file_paths = [("/tmp/test.txt", False)]
self.test_case_data = [{'expected_output': '2',
- 'expected_input': '',
- 'weight': 0.0
+ 'expected_input': '',
+ 'test_case_type': 'stdiobasedtestcase',
+ 'weight': 0.0
}]
user_answer = dedent("""
import java.io.BufferedReader;
@@ -306,13 +392,19 @@ class JavaStdioEvaluationTestCases(unittest.TestCase):
br.close();
}}}
""")
- get_class = JavaStdioEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
- 'partial_grading': True,
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'java'
+ },
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
- }
- result = get_class.evaluate(**kwargs)
+ }
+
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
self.assertTrue(result.get("success"))
self.assertEqual(result.get("error"), "Correct answer\n")
diff --git a/yaksh/evaluator_tests/test_python_evaluation.py b/yaksh/evaluator_tests/test_python_evaluation.py
index acf5d0a..e638049 100644
--- a/yaksh/evaluator_tests/test_python_evaluation.py
+++ b/yaksh/evaluator_tests/test_python_evaluation.py
@@ -6,20 +6,20 @@ import shutil
from textwrap import dedent
# Local import
+from yaksh.grader import Grader
from yaksh.python_assertion_evaluator import PythonAssertionEvaluator
-from yaksh.python_stdio_evaluator import PythonStdioEvaluator
+from yaksh.python_stdio_evaluator import PythonStdIOEvaluator
from yaksh.settings import SERVER_TIMEOUT
-
class PythonAssertionEvaluationTestCases(unittest.TestCase):
def setUp(self):
with open('/tmp/test.txt', 'wb') as f:
f.write('2'.encode('ascii'))
tmp_in_dir_path = tempfile.mkdtemp()
self.in_dir = tmp_in_dir_path
- self.test_case_data = [{"test_case": 'assert(add(1,2)==3)', 'weight': 0.0},
- {"test_case": 'assert(add(-1,2)==1)', 'weight': 0.0},
- {"test_case": 'assert(add(-1,-2)==-3)', 'weight': 0.0},
+ self.test_case_data = [{"test_case_type": "standardtestcase", "test_case": 'assert(add(1,2)==3)', 'weight': 0.0},
+ {"test_case_type": "standardtestcase", "test_case": 'assert(add(-1,2)==1)', 'weight': 0.0},
+ {"test_case_type": "standardtestcase", "test_case": 'assert(add(-1,-2)==-3)', 'weight': 0.0},
]
self.timeout_msg = ("Code took more than {0} seconds to run. "
"You probably have an infinite loop in"
@@ -33,15 +33,19 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
def test_correct_answer(self):
# Given
user_answer = "def add(a,b):\n\treturn a + b"
- kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths,
- 'partial_grading': False
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'python'
+ },
+ 'test_case_data': self.test_case_data,
}
# When
- evaluator = PythonAssertionEvaluator()
- result = evaluator.evaluate(**kwargs)
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
# Then
self.assertTrue(result.get('success'))
@@ -50,15 +54,19 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
def test_incorrect_answer(self):
# Given
user_answer = "def add(a,b):\n\treturn a - b"
- kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths,
- 'partial_grading': False
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'python'
+ },
+ 'test_case_data': self.test_case_data,
}
# When
- evaluator = PythonAssertionEvaluator()
- result = evaluator.evaluate(**kwargs)
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
# Then
self.assertFalse(result.get('success'))
@@ -75,19 +83,23 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
def test_partial_incorrect_answer(self):
# Given
user_answer = "def add(a,b):\n\treturn abs(a) + abs(b)"
- test_case_data = [{"test_case": 'assert(add(-1,2)==1)', 'weight': 1.0},
- {"test_case": 'assert(add(-1,-2)==-3)', 'weight': 1.0},
- {"test_case": 'assert(add(1,2)==3)', 'weight': 2.0}
+ test_case_data = [{"test_case_type": "standardtestcase", "test_case": 'assert(add(-1,2)==1)', 'weight': 1.0},
+ {"test_case_type": "standardtestcase", "test_case": 'assert(add(-1,-2)==-3)', 'weight': 1.0},
+ {"test_case_type": "standardtestcase", "test_case": 'assert(add(1,2)==3)', 'weight': 2.0}
]
- kwargs = {'user_answer': user_answer,
- 'test_case_data': test_case_data,
- 'file_paths': self.file_paths,
- 'partial_grading': True
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': True,
+ 'language': 'python'
+ },
+ 'test_case_data': test_case_data,
}
# When
- evaluator = PythonAssertionEvaluator()
- result = evaluator.evaluate(**kwargs)
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
# Then
self.assertFalse(result.get('success'))
@@ -102,15 +114,19 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
def test_infinite_loop(self):
# Given
user_answer = "def add(a, b):\n\twhile True:\n\t\tpass"
- kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths,
- 'partial_grading': False
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'python'
+ },
+ 'test_case_data': self.test_case_data,
}
# When
- evaluator = PythonAssertionEvaluator()
- result = evaluator.evaluate(**kwargs)
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
# Then
self.assertFalse(result.get('success'))
@@ -130,15 +146,19 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
"SyntaxError",
"invalid syntax"
]
- kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths,
- 'partial_grading': False
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'python'
+ },
+ 'test_case_data': self.test_case_data,
}
# When
- evaluator = PythonAssertionEvaluator()
- result = evaluator.evaluate(**kwargs)
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
err = result.get("error").splitlines()
# Then
@@ -160,16 +180,19 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
"IndentationError",
"indented block"
]
- kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths,
- 'partial_grading': False
-
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'python'
+ },
+ 'test_case_data': self.test_case_data,
}
# When
- evaluator = PythonAssertionEvaluator()
- result = evaluator.evaluate(**kwargs)
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
err = result.get("error").splitlines()
# Then
@@ -187,15 +210,20 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
"name",
"defined"
]
- kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths,
- 'partial_grading': False
- }
+
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'python'
+ },
+ 'test_case_data': self.test_case_data,
+ }
# When
- evaluator = PythonAssertionEvaluator()
- result = evaluator.evaluate(**kwargs)
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
err = result.get("error").splitlines()
# Then
@@ -214,15 +242,20 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
"call",
"maximum recursion depth exceeded"
]
- kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths,
- 'partial_grading': False
- }
+
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'python'
+ },
+ 'test_case_data': self.test_case_data,
+ }
# When
- evaluator = PythonAssertionEvaluator()
- result = evaluator.evaluate(**kwargs)
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
err = result.get("error").splitlines()
# Then
@@ -241,15 +274,20 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
"TypeError",
"argument"
]
- kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths,
- 'partial_grading': False
- }
+
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'python'
+ },
+ 'test_case_data': self.test_case_data,
+ }
# When
- evaluator = PythonAssertionEvaluator()
- result = evaluator.evaluate(**kwargs)
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
err = result.get("error").splitlines()
# Then
@@ -271,15 +309,20 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
"invalid literal",
"base"
]
- kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths,
- 'partial_grading': False
- }
+
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'python'
+ },
+ 'test_case_data': self.test_case_data,
+ }
# When
- evaluator = PythonAssertionEvaluator()
- result = evaluator.evaluate(**kwargs)
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
err = result.get("error").splitlines()
# Then
@@ -290,22 +333,27 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
def test_file_based_assert(self):
# Given
- self.test_case_data = [{"test_case": "assert(ans()=='2')", "weight": 0.0}]
+ self.test_case_data = [{"test_case_type": "standardtestcase", "test_case": "assert(ans()=='2')", "weight": 0.0}]
self.file_paths = [('/tmp/test.txt', False)]
user_answer = dedent("""
def ans():
with open("test.txt") as f:
return f.read()[0]
""")
- kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths,
- 'partial_grading': False
- }
+
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'python'
+ },
+ 'test_case_data': self.test_case_data,
+ }
# When
- evaluator = PythonAssertionEvaluator()
- result = evaluator.evaluate(**kwargs)
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
# Then
self.assertIn("Correct answer", result.get('error'))
@@ -316,7 +364,8 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
""" Tests the user answer with just an incorrect test case """
user_answer = "def palindrome(a):\n\treturn a == a[::-1]"
- test_case_data = [{"test_case": 's="abbb"\nasert palindrome(s)==False',
+ test_case_data = [{"test_case_type": "standardtestcase",
+ "test_case": 's="abbb"\nasert palindrome(s)==False',
"weight": 0.0
}
]
@@ -328,15 +377,20 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
"SyntaxError",
"invalid syntax"
]
- kwargs = {'user_answer': user_answer,
- 'test_case_data': test_case_data,
- 'file_paths': self.file_paths,
- 'partial_grading': False
- }
+
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'python'
+ },
+ 'test_case_data': test_case_data,
+ }
# When
- evaluator = PythonAssertionEvaluator()
- result = evaluator.evaluate(**kwargs)
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
err = result.get("error").splitlines()
# Then
@@ -351,10 +405,12 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
first and then with an incorrect test case """
# Given
user_answer = "def palindrome(a):\n\treturn a == a[::-1]"
- test_case_data = [{"test_case": 'assert(palindrome("abba")==True)',
+ test_case_data = [{"test_case_type": "standardtestcase",
+ "test_case": 'assert(palindrome("abba")==True)',
"weight": 0.0
},
- {"test_case": 's="abbb"\nassert palindrome(S)==False',
+ {"test_case_type": "standardtestcase",
+ "test_case": 's="abbb"\nassert palindrome(S)==False',
"weight": 0.0
}
]
@@ -363,15 +419,19 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
"NameError",
"name 'S' is not defined"
]
- kwargs = {'user_answer': user_answer,
- 'test_case_data': test_case_data,
- 'file_paths': self.file_paths,
- 'partial_grading': False
- }
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'python'
+ },
+ 'test_case_data': test_case_data,
+ }
# When
- evaluator = PythonAssertionEvaluator()
- result = evaluator.evaluate(**kwargs)
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
err = result.get("error").splitlines()
# Then
@@ -380,16 +440,18 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
for msg in name_error_msg:
self.assertIn(msg, result.get("error"))
-
class PythonStdIOEvaluationTestCases(unittest.TestCase):
def setUp(self):
with open('/tmp/test.txt', 'wb') as f:
f.write('2'.encode('ascii'))
self.file_paths = None
+ tmp_in_dir_path = tempfile.mkdtemp()
+ self.in_dir = tmp_in_dir_path
def test_correct_answer_integer(self):
# Given
- self.test_case_data = [{"expected_input": "1\n2",
+ self.test_case_data = [{"test_case_type": "stdiobasedtestcase",
+ "expected_input": "1\n2",
"expected_output": "3",
"weight": 0.0
}]
@@ -399,14 +461,18 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase):
print(a+b)
"""
)
- kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data,
- 'partial_grading': False
- }
+ kwargs = {'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'python'
+ },
+ 'test_case_data': self.test_case_data
+ }
# When
- evaluator = PythonStdioEvaluator()
- result = evaluator.evaluate(**kwargs)
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
# Then
self.assertTrue(result.get('success'))
@@ -414,7 +480,8 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase):
def test_correct_answer_list(self):
# Given
- self.test_case_data = [{"expected_input": "1,2,3\n5,6,7",
+ self.test_case_data = [{"test_case_type": "stdiobasedtestcase",
+ "expected_input": "1,2,3\n5,6,7",
"expected_output": "[1, 2, 3, 5, 6, 7]",
"weight": 0.0
}]
@@ -427,14 +494,19 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase):
print(a+b)
"""
)
- kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data,
- 'partial_grading': False
- }
+
+ kwargs = {'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'python'
+ },
+ 'test_case_data': self.test_case_data
+ }
# When
- evaluator = PythonStdioEvaluator()
- result = evaluator.evaluate(**kwargs)
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
# Then
self.assertTrue(result.get('success'))
@@ -442,7 +514,8 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase):
def test_correct_answer_string(self):
# Given
- self.test_case_data = [{"expected_input": ("the quick brown fox jumps over the lazy dog\nthe"),
+ self.test_case_data = [{"test_case_type": "stdiobasedtestcase",
+ "expected_input": ("the quick brown fox jumps over the lazy dog\nthe"),
"expected_output": "2",
"weight": 0.0
}]
@@ -453,14 +526,19 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase):
print(a.count(b))
"""
)
- kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data,
- 'partial_grading': False
- }
+
+ kwargs = {'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'python'
+ },
+ 'test_case_data': self.test_case_data
+ }
# When
- evaluator = PythonStdioEvaluator()
- result = evaluator.evaluate(**kwargs)
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
# Then
self.assertTrue(result.get('success'))
@@ -468,7 +546,8 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase):
def test_incorrect_answer_integer(self):
# Given
- self.test_case_data = [{"expected_input": "1\n2",
+ self.test_case_data = [{"test_case_type": "stdiobasedtestcase",
+ "expected_input": "1\n2",
"expected_output": "3",
"weight": 0.0
}]
@@ -478,14 +557,18 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase):
print(a-b)
"""
)
- kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data,
- 'partial_grading': False
- }
+ kwargs = {'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'python'
+ },
+ 'test_case_data': self.test_case_data
+ }
# When
- evaluator = PythonStdioEvaluator()
- result = evaluator.evaluate(**kwargs)
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
# Then
self.assertFalse(result.get('success'))
@@ -493,7 +576,8 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase):
def test_file_based_answer(self):
# Given
- self.test_case_data = [{"expected_input": "",
+ self.test_case_data = [{"test_case_type": "stdiobasedtestcase",
+ "expected_input": "",
"expected_output": "2",
"weight": 0.0
}]
@@ -505,15 +589,18 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase):
print(a[0])
"""
)
- kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths,
- 'partial_grading': False
- }
+ kwargs = {'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'python'
+ },
+ 'test_case_data': self.test_case_data
+ }
# When
- evaluator = PythonStdioEvaluator()
- result = evaluator.evaluate(**kwargs)
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
# Then
self.assertEqual(result.get('error'), "Correct answer\n")
@@ -521,7 +608,8 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase):
def test_infinite_loop(self):
# Given
- test_case_data = [{"expected_input": "1\n2",
+ self.test_case_data = [{"test_case_type": "stdiobasedtestcase",
+ "expected_input": "1\n2",
"expected_output": "3",
"weight": 0.0
}]
@@ -529,18 +617,24 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase):
"You probably have an infinite loop in"
" your code.").format(SERVER_TIMEOUT)
user_answer = "while True:\n\tpass"
- kwargs = {'user_answer': user_answer,
- 'test_case_data': test_case_data,
- 'partial_grading': False
- }
+
+ kwargs = {'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'python'
+ },
+ 'test_case_data': self.test_case_data
+ }
# When
- evaluator = PythonStdioEvaluator()
- result = evaluator.evaluate(**kwargs)
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
# Then
self.assertEqual(result.get('error'), timeout_msg)
self.assertFalse(result.get('success'))
+
if __name__ == '__main__':
unittest.main()
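
The assertion tests above exercise the pattern the Python evaluator applies internally: compile and exec the user answer once, then exec each assert-style test case against that namespace, treating an AssertionError as a failed case. A self-contained sketch of the idea (a simplified stand-in, not the yaksh class):

    def run_assertion_case(user_answer, test_case):
        """Run one assert-style test case against the submitted code."""
        scope = {}
        exec(compile(user_answer, '<string>', 'exec'), scope)    # define user code
        try:
            exec(compile(test_case, '<string>', 'exec'), scope)  # run the assertion
        except AssertionError as exc:
            return False, 'Failed: {0} ({1})'.format(test_case, exc)
        return True, 'Correct answer'

    print(run_assertion_case('def add(a, b):\n    return a + b',
                             'assert add(1, 2) == 3'))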
diff --git a/yaksh/evaluator_tests/test_scilab_evaluation.py b/yaksh/evaluator_tests/test_scilab_evaluation.py
index c30f652..0275ee8 100644
--- a/yaksh/evaluator_tests/test_scilab_evaluation.py
+++ b/yaksh/evaluator_tests/test_scilab_evaluation.py
@@ -4,48 +4,63 @@ import os
import shutil
import tempfile
-from yaksh import code_evaluator as evaluator
+from yaksh import grader as gd
+from yaksh.grader import Grader
from yaksh.scilab_code_evaluator import ScilabCodeEvaluator
-from yaksh.settings import SERVER_TIMEOUT
class ScilabEvaluationTestCases(unittest.TestCase):
def setUp(self):
tmp_in_dir_path = tempfile.mkdtemp()
self.test_case_data = [{"test_case": "scilab_files/test_add.sce",
+ "test_case_type": "standardtestcase",
"weight": 0.0
}]
self.in_dir = tmp_in_dir_path
+ self.file_paths = None
+ gd.SERVER_TIMEOUT = 9
self.timeout_msg = ("Code took more than {0} seconds to run. "
"You probably have an infinite loop"
- " in your code.").format(SERVER_TIMEOUT)
- self.file_paths = None
+ " in your code.").format(gd.SERVER_TIMEOUT)
def tearDown(self):
+ gd.SERVER_TIMEOUT = 4
shutil.rmtree(self.in_dir)
def test_correct_answer(self):
user_answer = ("funcprot(0)\nfunction[c]=add(a,b)"
"\n\tc=a+b;\nendfunction")
- get_class = ScilabCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
- 'partial_grading': True,
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'scilab'
+ },
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
- }
- result = get_class.evaluate(**kwargs)
+ }
+
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_error(self):
user_answer = ("funcprot(0)\nfunction[c]=add(a,b)"
"\n\tc=a+b;\ndis(\tendfunction")
- get_class = ScilabCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
- 'partial_grading': True,
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'scilab'
+ },
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
- }
- result = get_class.evaluate(**kwargs)
+ }
+
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
self.assertFalse(result.get("success"))
self.assertTrue('error' in result.get("error"))
@@ -53,13 +68,19 @@ class ScilabEvaluationTestCases(unittest.TestCase):
def test_incorrect_answer(self):
user_answer = ("funcprot(0)\nfunction[c]=add(a,b)"
"\n\tc=a-b;\nendfunction")
- get_class = ScilabCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
- 'partial_grading': True,
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'scilab'
+ },
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
- }
- result = get_class.evaluate(**kwargs)
+ }
+
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
lines_of_error = len(result.get('error').splitlines())
self.assertFalse(result.get('success'))
self.assertIn("Message", result.get('error'))
@@ -68,13 +89,19 @@ class ScilabEvaluationTestCases(unittest.TestCase):
def test_infinite_loop(self):
user_answer = ("funcprot(0)\nfunction[c]=add(a,b)"
"\n\tc=a;\nwhile(1==1)\nend\nendfunction")
- get_class = ScilabCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
- 'partial_grading': True,
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'scilab'
+ },
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
- }
- result = get_class.evaluate(**kwargs)
+ }
+
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
self.assertFalse(result.get("success"))
self.assertEqual(result.get("error"), self.timeout_msg)
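
These Scilab tests now import the grader module itself and raise gd.SERVER_TIMEOUT for the duration of each test, restoring it in tearDown. A small sketch of that override pattern on a stand-in settings object (the names here are placeholders, not yaksh's); saving the old value instead of hard-coding the default is a slightly safer variant:

    import types
    import unittest

    settings = types.SimpleNamespace(SERVER_TIMEOUT=4)   # stand-in for a settings module

    class SlowBackendTests(unittest.TestCase):
        def setUp(self):
            self._old_timeout = settings.SERVER_TIMEOUT
            settings.SERVER_TIMEOUT = 9                  # slow interpreter needs more time

        def tearDown(self):
            settings.SERVER_TIMEOUT = self._old_timeout  # never leak into other tests

        def test_timeout_is_in_effect_for_this_test(self):
            self.assertEqual(settings.SERVER_TIMEOUT, 9)

    if __name__ == '__main__':
        unittest.main()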
diff --git a/yaksh/forms.py b/yaksh/forms.py
index 1931fad..6fbaf5d 100644
--- a/yaksh/forms.py
+++ b/yaksh/forms.py
@@ -1,6 +1,6 @@
from django import forms
from yaksh.models import get_model_class, Profile, Quiz, Question, TestCase, Course,\
- QuestionPaper, StandardTestCase, StdioBasedTestCase
+ QuestionPaper, StandardTestCase, StdIOBasedTestCase
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
@@ -37,7 +37,7 @@ question_types = (
test_case_types = (
("standardtestcase", "Standard Testcase"),
- ("stdiobasedtestcase", "Stdio Based Testcase"),
+ ("stdiobasedtestcase", "StdIO Based Testcase"),
("mcqtestcase", "MCQ Testcase"),
)
diff --git a/yaksh/code_evaluator.py b/yaksh/grader.py
index afe18c3..ef349e0 100644
--- a/yaksh/code_evaluator.py
+++ b/yaksh/grader.py
@@ -4,6 +4,7 @@ import sys
import pwd
import os
import stat
+import contextlib
from os.path import isdir, dirname, abspath, join, isfile, exists
import signal
import traceback
@@ -19,15 +20,27 @@ except ImportError:
# Local imports
from .settings import SERVER_TIMEOUT
+from .language_registry import create_evaluator_instance
+
MY_DIR = abspath(dirname(__file__))
+registry = None
# Raised when the code times-out.
# c.f. http://pguides.net/python/timeout-a-function
class TimeoutException(Exception):
pass
+@contextlib.contextmanager
+def change_dir(path):
+ cur_dir = os.getcwd()
+ os.chdir(path)
+ try:
+ yield
+ finally:
+ os.chdir(cur_dir)
+
def timeout_handler(signum, frame):
"""A handler for the ALARM signal."""
@@ -55,15 +68,16 @@ def delete_signal_handler():
return
-class CodeEvaluator(object):
+class Grader(object):
"""Tests the code obtained from Code Server"""
def __init__(self, in_dir=None):
msg = 'Code took more than %s seconds to run. You probably '\
'have an infinite loop in your code.' % SERVER_TIMEOUT
self.timeout_msg = msg
- self.in_dir = in_dir
+ self.in_dir = in_dir if in_dir else MY_DIR
+
- def evaluate(self, **kwargs):
+    def evaluate(self, kwargs):
"""Evaluates given code with the test cases based on
given arguments in test_case_data.
@@ -86,10 +100,12 @@ class CodeEvaluator(object):
"""
self.setup()
- success, error, weight = self.safe_evaluate(**kwargs)
+ test_case_instances = self.get_evaluator_objects(kwargs)
+ with change_dir(self.in_dir):
+ success, error, mark = self.safe_evaluate(test_case_instances)
self.teardown()
- result = {'success': success, 'error': error, 'weight': weight}
+ result = {'success': success, 'error': error, 'weight': mark}
return result
# Private Protocol ##########
@@ -97,45 +113,55 @@ class CodeEvaluator(object):
if self.in_dir:
if not os.path.exists(self.in_dir):
os.makedirs(self.in_dir)
- self._change_dir(self.in_dir)
- def safe_evaluate(self, user_answer, partial_grading, test_case_data, file_paths=None):
+ def get_evaluator_objects(self, kwargs):
+ metadata = kwargs.get('metadata')
+ test_case_data = kwargs.get('test_case_data')
+ test_case_instances = []
+
+ for test_case in test_case_data:
+ test_case_instance = create_evaluator_instance(metadata, test_case)
+ test_case_instances.append(test_case_instance)
+
+ return test_case_instances
+
+
+ def safe_evaluate(self, test_case_instances):
"""
Handles code evaluation along with compilation, signal handling
and Exception handling
"""
-
# Add a new signal handler for the execution of this code.
prev_handler = create_signal_handler()
success = False
- test_case_success_status = [False] * len(test_case_data)
+ test_case_success_status = [False] * len(test_case_instances)
error = ""
weight = 0.0
# Do whatever testing needed.
try:
- for idx, test_case in enumerate(test_case_data):
+ # Run evaluator selection registry here
+ for idx, test_case_instance in enumerate(test_case_instances):
test_case_success = False
- self.compile_code(user_answer, file_paths, **test_case)
- test_case_success, err, test_case_weight = self.check_code(user_answer,
- file_paths,
- partial_grading,
- **test_case
- )
+ test_case_instance.compile_code()
+ test_case_success, err, mark_fraction = test_case_instance.check_code()
if test_case_success:
- weight += test_case_weight
+ weight += mark_fraction
error += err + "\n"
test_case_success_status[idx] = test_case_success
success = all(test_case_success_status)
+ for test_case_instance in test_case_instances:
+ test_case_instance.teardown()
+
except TimeoutException:
error = self.timeout_msg
except OSError:
msg = traceback.format_exc(limit=0)
error = "Error: {0}".format(msg)
- except Exception:
+ except Exception as e:
exc_type, exc_value, exc_tb = sys.exc_info()
tb_list = traceback.format_exception(exc_type, exc_value, exc_tb)
if len(tb_list) > 2:
@@ -150,66 +176,3 @@ class CodeEvaluator(object):
def teardown(self):
# Cancel the signal
delete_signal_handler()
- self._change_dir(dirname(MY_DIR))
-
- def check_code(self):
- raise NotImplementedError("check_code method not implemented")
-
- def compile_code(self, user_answer, file_paths, **kwargs):
- pass
-
- def create_submit_code_file(self, file_name):
- """ Set the file path for code (`answer`)"""
- submit_path = abspath(file_name)
- if not exists(submit_path):
- submit_f = open(submit_path, 'w')
- submit_f.close()
-
- return submit_path
-
- def write_to_submit_code_file(self, file_path, user_answer):
- """ Write the code (`answer`) to a file"""
- submit_f = open(file_path, 'w')
- submit_f.write(user_answer.lstrip())
- submit_f.close()
-
- def _set_file_as_executable(self, fname):
- os.chmod(fname, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
- | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP
- | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)
-
- def _set_test_code_file_path(self, ref_path=None, test_case_path=None):
- if ref_path and not ref_path.startswith('/'):
- ref_path = join(MY_DIR, ref_path)
-
- if test_case_path and not test_case_path.startswith('/'):
- test_case_path = join(MY_DIR, test_case_path)
-
- return ref_path, test_case_path
-
- def _run_command(self, cmd_args, *args, **kw):
- """Run a command in a subprocess while blocking, the process is killed
- if it takes more than 2 seconds to run. Return the Popen object, the
- stdout and stderr.
- """
- try:
- proc = subprocess.Popen(cmd_args, *args, **kw)
- stdout, stderr = proc.communicate()
- except TimeoutException:
- # Runaway code, so kill it.
- proc.kill()
- # Re-raise exception.
- raise
- return proc, stdout.decode('utf-8'), stderr.decode('utf-8')
-
- def _change_dir(self, in_dir):
- if in_dir is not None and isdir(in_dir):
- os.chdir(in_dir)
-
- def _remove_null_substitute_char(self, string):
- """Returns a string without any null and substitute characters"""
- stripped = ""
- for c in string:
- if ord(c) is not 26 and ord(c) is not 0:
- stripped = stripped + c
- return ''.join(stripped)
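
The heart of the renamed Grader is the loop in safe_evaluate: one evaluator instance per test case, compile_code then check_code on each inside a temporary working directory, mark fractions summed only for passing cases, and teardown called on every instance afterwards. A dependency-free sketch of that control flow using dummy case objects:

    import contextlib
    import os
    import tempfile

    @contextlib.contextmanager
    def change_dir(path):
        cur = os.getcwd()
        os.chdir(path)
        try:
            yield
        finally:
            os.chdir(cur)

    class DummyCase(object):
        """Stand-in for a BaseEvaluator subclass: compile_code/check_code/teardown."""
        def __init__(self, passes, weight):
            self.passes, self.weight = passes, weight
        def compile_code(self):
            pass
        def check_code(self):
            return self.passes, 'ok' if self.passes else 'failed', self.weight
        def teardown(self):
            pass

    def evaluate(cases, in_dir):
        error, total, statuses = '', 0.0, []
        with change_dir(in_dir):
            for case in cases:
                case.compile_code()
                ok, err, mark = case.check_code()
                if ok:
                    total += mark
                error += err + '\n'
                statuses.append(ok)
            for case in cases:
                case.teardown()
        return {'success': all(statuses), 'error': error, 'weight': total}

    print(evaluate([DummyCase(True, 1.0), DummyCase(False, 2.0)], tempfile.mkdtemp()))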
diff --git a/yaksh/java_code_evaluator.py b/yaksh/java_code_evaluator.py
index d87e6e3..5d3fd28 100644
--- a/yaksh/java_code_evaluator.py
+++ b/yaksh/java_code_evaluator.py
@@ -8,21 +8,28 @@ import subprocess
import importlib
# Local imports
-from .code_evaluator import CodeEvaluator
+from .base_evaluator import BaseEvaluator
from .file_utils import copy_files, delete_files
-class JavaCodeEvaluator(CodeEvaluator):
+class JavaCodeEvaluator(BaseEvaluator):
"""Tests the Java code obtained from Code Server"""
- def setup(self):
- super(JavaCodeEvaluator, self).setup()
+ def __init__(self, metadata, test_case_data):
self.files = []
- self.submit_code_path = self.create_submit_code_file('Test.java')
self.compiled_user_answer = None
self.compiled_test_code = None
self.user_output_path = ""
self.ref_output_path = ""
+ # Set metadata values
+ self.user_answer = metadata.get('user_answer')
+ self.file_paths = metadata.get('file_paths')
+ self.partial_grading = metadata.get('partial_grading')
+
+ # Set test case data values
+ self.test_case = test_case_data.get('test_case')
+ self.weight = test_case_data.get('weight')
+
def teardown(self):
# Delete the created file.
os.remove(self.submit_code_path)
@@ -32,8 +39,6 @@ class JavaCodeEvaluator(CodeEvaluator):
os.remove(self.ref_output_path)
if self.files:
delete_files(self.files)
- super(JavaCodeEvaluator, self).teardown()
-
def get_commands(self, clean_ref_code_path, user_code_directory):
compile_command = 'javac {0}'.format(self.submit_code_path),
@@ -47,15 +52,16 @@ class JavaCodeEvaluator(CodeEvaluator):
output_path = "{0}{1}.class".format(directory, file_name)
return output_path
- def compile_code(self, user_answer, file_paths, test_case, weight):
+ def compile_code(self):
if self.compiled_user_answer and self.compiled_test_code:
return None
else:
- ref_code_path = test_case
+ self.submit_code_path = self.create_submit_code_file('Test.java')
+ ref_code_path = self.test_case
clean_ref_code_path, clean_test_case_path = \
self._set_test_code_file_path(ref_code_path)
- if file_paths:
- self.files = copy_files(file_paths)
+ if self.file_paths:
+ self.files = copy_files(self.file_paths)
if not isfile(clean_ref_code_path):
msg = "No file at %s or Incorrect path" % clean_ref_code_path
return False, msg
@@ -65,7 +71,7 @@ class JavaCodeEvaluator(CodeEvaluator):
user_code_directory = os.getcwd() + '/'
self.write_to_submit_code_file(self.submit_code_path,
- user_answer
+ self.user_answer
)
ref_file_name = (clean_ref_code_path.split('/')[-1]).split('.')[0]
self.user_output_path = self.set_file_paths(user_code_directory,
@@ -82,6 +88,7 @@ class JavaCodeEvaluator(CodeEvaluator):
user_code_directory,
ref_file_name
)
+
self.compiled_user_answer = self._run_command(compile_command,
shell=True,
stdout=subprocess.PIPE,
@@ -96,7 +103,7 @@ class JavaCodeEvaluator(CodeEvaluator):
return self.compiled_user_answer, self.compiled_test_code
- def check_code(self, user_answer, file_paths, partial_grading, test_case, weight):
+ def check_code(self):
""" Function validates student code using instructor code as
reference.The first argument ref_code_path, is the path to
instructor code, it is assumed to have executable permission.
@@ -117,7 +124,7 @@ class JavaCodeEvaluator(CodeEvaluator):
"""
success = False
- test_case_weight = 0.0
+ mark_fraction = 0.0
proc, stdnt_out, stdnt_stderr = self.compiled_user_answer
stdnt_stderr = self._remove_null_substitute_char(stdnt_stderr)
@@ -136,7 +143,7 @@ class JavaCodeEvaluator(CodeEvaluator):
proc, stdout, stderr = ret
if proc.returncode == 0:
success, err = True, "Correct answer"
- test_case_weight = float(weight) if partial_grading else 0.0
+                        mark_fraction = float(self.weight) if self.partial_grading else 0.0
else:
err = stdout + "\n" + stderr
else:
@@ -161,4 +168,5 @@ class JavaCodeEvaluator(CodeEvaluator):
err = err + "\n" + e
except:
err = err + "\n" + stdnt_stderr
- return success, err, test_case_weight
+
+ return success, err, mark_fraction
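
Evaluators no longer receive their inputs through setup() and keyword arguments to check_code(); the constructor now takes (metadata, test_case_data) and stores the per-submission and per-case fields on the instance. A schematic subclass showing that interface (illustrative only, not the Java evaluator itself):

    class BaseEvaluatorSketch(object):
        """Schematic of the refactored evaluator interface."""
        def compile_code(self):
            pass
        def check_code(self):
            raise NotImplementedError
        def teardown(self):
            pass

    class EchoEvaluator(BaseEvaluatorSketch):
        def __init__(self, metadata, test_case_data):
            # per-submission fields
            self.user_answer = metadata.get('user_answer')
            self.partial_grading = metadata.get('partial_grading')
            # per-case fields
            self.expected_output = test_case_data.get('expected_output')
            self.weight = test_case_data.get('weight')

        def check_code(self):
            success = self.user_answer.strip() == self.expected_output
            mark = float(self.weight) if self.partial_grading and success else 0.0
            return success, 'Correct answer' if success else 'Incorrect answer', mark

    case = EchoEvaluator({'user_answer': '42', 'partial_grading': True},
                         {'expected_output': '42', 'weight': 1.0})
    print(case.check_code())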
diff --git a/yaksh/java_stdio_evaluator.py b/yaksh/java_stdio_evaluator.py
index 88d4c88..a854847 100644
--- a/yaksh/java_stdio_evaluator.py
+++ b/yaksh/java_stdio_evaluator.py
@@ -9,19 +9,25 @@ from .stdio_evaluator import StdIOEvaluator
from .file_utils import copy_files, delete_files
-class JavaStdioEvaluator(StdIOEvaluator):
+class JavaStdIOEvaluator(StdIOEvaluator):
"""Evaluates Java StdIO based code"""
-
- def setup(self):
- super(JavaStdioEvaluator, self).setup()
+ def __init__(self, metadata, test_case_data):
self.files = []
- self.submit_code_path = self.create_submit_code_file('Test.java')
+
+ # Set metadata values
+ self.user_answer = metadata.get('user_answer')
+ self.file_paths = metadata.get('file_paths')
+ self.partial_grading = metadata.get('partial_grading')
+
+ # Set test case data values
+ self.expected_input = test_case_data.get('expected_input')
+ self.expected_output = test_case_data.get('expected_output')
+ self.weight = test_case_data.get('weight')
def teardown(self):
os.remove(self.submit_code_path)
if self.files:
delete_files(self.files)
- super(JavaStdioEvaluator, self).teardown()
def set_file_paths(self, directory, file_name):
output_path = "{0}{1}.class".format(directory, file_name)
@@ -31,14 +37,15 @@ class JavaStdioEvaluator(StdIOEvaluator):
compile_command = 'javac {0}'.format(self.submit_code_path)
return compile_command
- def compile_code(self, user_answer, file_paths, expected_input, expected_output, weight):
+ def compile_code(self):
+ self.submit_code_path = self.create_submit_code_file('Test.java')
if not isfile(self.submit_code_path):
msg = "No file at %s or Incorrect path" % self.submit_code_path
return False, msg
- if file_paths:
- self.files = copy_files(file_paths)
+ if self.file_paths:
+ self.files = copy_files(self.file_paths)
user_code_directory = os.getcwd() + '/'
- self.write_to_submit_code_file(self.submit_code_path, user_answer)
+ self.write_to_submit_code_file(self.submit_code_path, self.user_answer)
self.user_output_path = self.set_file_paths(user_code_directory,
'Test'
)
@@ -50,10 +57,9 @@ class JavaStdioEvaluator(StdIOEvaluator):
)
return self.compiled_user_answer
- def check_code(self, user_answer, file_paths, partial_grading,
- expected_input, expected_output, weight):
+ def check_code(self):
success = False
- test_case_weight = 0.0
+ mark_fraction = 0.0
proc, stdnt_out, stdnt_stderr = self.compiled_user_answer
stdnt_stderr = self._remove_null_substitute_char(stdnt_stderr)
if stdnt_stderr == '' or "error" not in stdnt_stderr:
@@ -63,9 +69,9 @@ class JavaStdioEvaluator(StdIOEvaluator):
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
- success, err = self.evaluate_stdio(user_answer, proc,
- expected_input,
- expected_output
+ success, err = self.evaluate_stdio(self.user_answer, proc,
+ self.expected_input,
+ self.expected_output
)
os.remove(self.user_output_path)
else:
@@ -79,5 +85,5 @@ class JavaStdioEvaluator(StdIOEvaluator):
err = err + "\n" + e
except:
err = err + "\n" + stdnt_stderr
- test_case_weight = float(weight) if partial_grading and success else 0.0
- return success, err, test_case_weight
+ mark_fraction = float(self.weight) if self.partial_grading and success else 0.0
+ return success, err, mark_fraction
diff --git a/yaksh/language_registry.py b/yaksh/language_registry.py
index 0e0140b..994e9ed 100644
--- a/yaksh/language_registry.py
+++ b/yaksh/language_registry.py
@@ -14,15 +14,11 @@ def get_registry():
registry = _LanguageRegistry()
return registry
-def unpack_json(json_data):
- data = json.loads(json_data)
- return data
-
-def create_evaluator_instance(language, test_case_type, json_data, in_dir):
+def create_evaluator_instance(metadata, test_case):
"""Create instance of relevant EvaluateCode class based on language"""
registry = get_registry()
- cls = registry.get_class(language, test_case_type)
- instance = cls(in_dir)
+ cls = registry.get_class(metadata.get('language'), test_case.get('test_case_type'))
+ instance = cls(metadata, test_case)
return instance
class _LanguageRegistry(object):
@@ -36,8 +32,8 @@ class _LanguageRegistry(object):
""" Get the code evaluator class for the given language """
if not self._register.get(language):
self._register[language] = code_evaluators.get(language)
-
test_case_register = self._register[language]
+
cls = test_case_register.get(test_case_type)
module_name, class_name = cls.rsplit(".", 1)
# load the module, will raise ImportError if module cannot be loaded
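
create_evaluator_instance now looks the class up by language and test_case_type and resolves the configured dotted path at runtime. The import mechanics reduce to importlib plus getattr; the sketch below demonstrates them on a stdlib class so it runs without yaksh:

    import importlib

    def class_from_dotted_path(path):
        """Resolve 'package.module.ClassName' to the class object."""
        module_name, class_name = path.rsplit('.', 1)
        module = importlib.import_module(module_name)
        return getattr(module, class_name)

    # Demonstrated on a stdlib class; yaksh's registry maps
    # (language, test_case_type) to dotted paths like the ones in settings.py.
    OrderedDict = class_from_dotted_path('collections.OrderedDict')
    print(OrderedDict([('a', 1)]))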
diff --git a/yaksh/models.py b/yaksh/models.py
index 7fae305..6e1744c 100644
--- a/yaksh/models.py
+++ b/yaksh/models.py
@@ -255,6 +255,7 @@ class Question(models.Model):
def consolidate_answer_data(self, user_answer):
question_data = {}
+ metadata = {}
test_case_data = []
test_cases = self.get_test_cases()
@@ -264,12 +265,15 @@ class Question(models.Model):
test_case_data.append(test_case_as_dict)
question_data['test_case_data'] = test_case_data
- question_data['user_answer'] = user_answer
- question_data['partial_grading'] = self.partial_grading
+ metadata['user_answer'] = user_answer
+ metadata['language'] = self.language
+ metadata['partial_grading'] = self.partial_grading
files = FileUpload.objects.filter(question=self)
if files:
- question_data['file_paths'] = [(file.file.path, file.extract)
+ metadata['file_paths'] = [(file.file.path, file.extract)
for file in files]
+ question_data['metadata'] = metadata
+
return json.dumps(question_data)
@@ -302,31 +306,40 @@ class Question(models.Model):
que, result = Question.objects.get_or_create(**question)
if file_names:
que._add_files_to_db(file_names, file_path)
- model_class = get_model_class(que.test_case_type)
for test_case in test_cases:
- model_class.objects.get_or_create(question=que, **test_case)
+ test_case_type = test_case.pop('test_case_type')
+ model_class = get_model_class(test_case_type)
+ new_test_case, obj_create_status = model_class.objects.get_or_create(question=que, **test_case)
+ new_test_case.type = test_case_type
+ new_test_case.save()
if files_list:
delete_files(files_list, file_path)
def get_test_cases(self, **kwargs):
- test_case_ctype = ContentType.objects.get(app_label="yaksh",
- model=self.test_case_type
- )
- test_cases = test_case_ctype.get_all_objects_for_this_type(
- question=self,
- **kwargs
- )
-
- return test_cases
+ tc_list = []
+ for tc in self.testcase_set.all():
+ test_case_type = tc.type
+ test_case_ctype = ContentType.objects.get(app_label="yaksh",
+ model=test_case_type
+ )
+ test_case = test_case_ctype.get_object_for_this_type(
+ question=self,
+ **kwargs
+ )
+ tc_list.append(test_case)
+
+ return tc_list
def get_test_case(self, **kwargs):
- test_case_ctype = ContentType.objects.get(app_label="yaksh",
- model=self.test_case_type
- )
- test_case = test_case_ctype.get_object_for_this_type(
- question=self,
- **kwargs
- )
+ for tc in self.testcase_set.all():
+ test_case_type = tc.type
+ test_case_ctype = ContentType.objects.get(app_label="yaksh",
+                                                       model=test_case_type
+ )
+ test_case = test_case_ctype.get_object_for_this_type(
+ question=self,
+ **kwargs
+ )
return test_case
@@ -1137,7 +1150,8 @@ class StandardTestCase(TestCase):
weight = models.FloatField(default=1.0)
def get_field_value(self):
- return {"test_case": self.test_case,
+ return {"test_case_type": "standardtestcase",
+ "test_case": self.test_case,
"weight": self.weight}
def __str__(self):
@@ -1146,13 +1160,14 @@ class StandardTestCase(TestCase):
)
-class StdioBasedTestCase(TestCase):
+class StdIOBasedTestCase(TestCase):
expected_input = models.CharField(max_length=100, blank=True)
expected_output = models.CharField(max_length=100)
weight = models.IntegerField(default=1.0)
def get_field_value(self):
- return {"expected_output": self.expected_output,
+ return {"test_case_type": "stdiobasedtestcase",
+ "expected_output": self.expected_output,
"expected_input": self.expected_input,
"weight": self.weight}
@@ -1167,7 +1182,7 @@ class McqTestCase(TestCase):
correct = models.BooleanField(default=False)
def get_field_value(self):
- return {"options": self.options, "correct": self.correct}
+ return {"test_case_type": "mcqtestcase", "options": self.options, "correct": self.correct}
def __str__(self):
return u'Question: {0} | Correct: {1}'.format(self.question,
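
consolidate_answer_data now nests the per-submission fields under 'metadata' before JSON-encoding, which is the shape the code server and Grader consume. A short sketch of assembling and round-tripping that payload (the helper name and values are illustrative):

    import json

    def consolidate(user_answer, language, partial_grading, test_cases, file_paths=None):
        """Assemble an evaluation payload in the shape used after this refactor."""
        metadata = {
            'user_answer': user_answer,
            'language': language,
            'partial_grading': partial_grading,
        }
        if file_paths:
            metadata['file_paths'] = file_paths
        return json.dumps({'metadata': metadata, 'test_case_data': test_cases})

    packet = consolidate('def f(): return 1', 'python', False,
                         [{'test_case_type': 'standardtestcase',
                           'test_case': 'assert f() == 1',
                           'weight': 0.0}])
    print(json.loads(packet)['metadata']['language'])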
diff --git a/yaksh/python_assertion_evaluator.py b/yaksh/python_assertion_evaluator.py
index 986dbf2..4d44838 100644
--- a/yaksh/python_assertion_evaluator.py
+++ b/yaksh/python_assertion_evaluator.py
@@ -7,36 +7,44 @@ from os.path import join
import importlib
# Local imports
-from .code_evaluator import CodeEvaluator, TimeoutException
from .file_utils import copy_files, delete_files
+from .base_evaluator import BaseEvaluator
+from .grader import TimeoutException
-class PythonAssertionEvaluator(CodeEvaluator):
+class PythonAssertionEvaluator(BaseEvaluator):
"""Tests the Python code obtained from Code Server"""
- def setup(self):
- super(PythonAssertionEvaluator, self).setup()
+ def __init__(self, metadata, test_case_data):
self.exec_scope = None
self.files = []
+ # Set metadata values
+ self.user_answer = metadata.get('user_answer')
+ self.file_paths = metadata.get('file_paths')
+ self.partial_grading = metadata.get('partial_grading')
+
+ # Set test case data values
+ self.test_case = test_case_data.get('test_case')
+ self.weight = test_case_data.get('weight')
+
def teardown(self):
# Delete the created file.
if self.files:
delete_files(self.files)
- super(PythonAssertionEvaluator, self).teardown()
- def compile_code(self, user_answer, file_paths, test_case, weight):
- if file_paths:
- self.files = copy_files(file_paths)
+ def compile_code(self):
+ if self.file_paths:
+ self.files = copy_files(self.file_paths)
if self.exec_scope:
return None
else:
- submitted = compile(user_answer, '<string>', mode='exec')
+ submitted = compile(self.user_answer, '<string>', mode='exec')
self.exec_scope = {}
exec(submitted, self.exec_scope)
return self.exec_scope
- def check_code(self, user_answer, file_paths, partial_grading, test_case, weight):
+ def check_code(self):
""" Function validates user answer by running an assertion based test case
against it
@@ -58,29 +66,26 @@ class PythonAssertionEvaluator(CodeEvaluator):
the required permissions are not given to the file(s).
"""
success = False
- test_case_weight = 0.0
+ mark_fraction = 0.0
try:
tb = None
- _tests = compile(test_case, '<string>', mode='exec')
+ _tests = compile(self.test_case, '<string>', mode='exec')
exec(_tests, self.exec_scope)
except AssertionError:
type, value, tb = sys.exc_info()
info = traceback.extract_tb(tb)
fname, lineno, func, text = info[-1]
- text = str(test_case).splitlines()[lineno-1]
+ text = str(self.test_case).splitlines()[lineno-1]
err = ("-----\nExpected Test Case:\n{0}\n"
- "Error - {1} {2} in: {3}\n-----").format(test_case,
- type.__name__,
- str(value), text
- )
+ "Error - {1} {2} in: {3}\n-----").format(self.test_case, type.__name__, str(value), text)
except TimeoutException:
raise
except Exception:
- msg = traceback.format_exc(limit=0)
- err = "Error in Test case: {0}".format(msg)
+ msg = traceback.format_exc(limit=0)
+ err = "Error in Test case: {0}".format(msg)
else:
success = True
- err = '-----\nCorrect answer\nTest Case: {0}\n-----'.format(test_case)
- test_case_weight = float(weight) if partial_grading else 0.0
+ err = '-----\nCorrect answer\nTest Case: {0}\n-----'.format(self.test_case)
+ mark_fraction = float(self.weight) if self.partial_grading else 0.0
del tb
- return success, err, test_case_weight
+ return success, err, mark_fraction
diff --git a/yaksh/python_stdio_evaluator.py b/yaksh/python_stdio_evaluator.py
index 1506685..da0c954 100644
--- a/yaksh/python_stdio_evaluator.py
+++ b/yaksh/python_stdio_evaluator.py
@@ -14,8 +14,8 @@ except ImportError:
from io import StringIO
# Local imports
-from .code_evaluator import CodeEvaluator
from .file_utils import copy_files, delete_files
+from .base_evaluator import BaseEvaluator
@contextmanager
@@ -28,27 +28,33 @@ def redirect_stdout():
sys.stdout = old_target # restore to the previous value
-class PythonStdioEvaluator(CodeEvaluator):
+class PythonStdIOEvaluator(BaseEvaluator):
"""Tests the Python code obtained from Code Server"""
-
- def setup(self):
- super(PythonStdioEvaluator, self).setup()
+ def __init__(self, metadata, test_case_data):
self.files = []
+ # Set metadata values
+ self.user_answer = metadata.get('user_answer')
+ self.file_paths = metadata.get('file_paths')
+ self.partial_grading = metadata.get('partial_grading')
+
+ # Set test case data values
+ self.expected_input = test_case_data.get('expected_input')
+ self.expected_output = test_case_data.get('expected_output')
+ self.weight = test_case_data.get('weight')
+
def teardown(self):
# Delete the created file.
if self.files:
delete_files(self.files)
- super(PythonStdioEvaluator, self).teardown()
-
- def compile_code(self, user_answer, file_paths, expected_input, expected_output, weight):
- if file_paths:
- self.files = copy_files(file_paths)
- submitted = compile(user_answer, '<string>', mode='exec')
- if expected_input:
+ def compile_code(self):
+ if self.file_paths:
+ self.files = copy_files(self.file_paths)
+ submitted = compile(self.user_answer, '<string>', mode='exec')
+ if self.expected_input:
input_buffer = StringIO()
- input_buffer.write(expected_input)
+ input_buffer.write(self.expected_input)
input_buffer.seek(0)
sys.stdin = input_buffer
with redirect_stdout() as output_buffer:
@@ -57,16 +63,15 @@ class PythonStdioEvaluator(CodeEvaluator):
self.output_value = output_buffer.getvalue().rstrip("\n")
return self.output_value
- def check_code(self, user_answer, file_paths, partial_grading, expected_input,
- expected_output, weight):
+ def check_code(self):
success = False
- test_case_weight = 0.0
+ mark_fraction = 0.0
tb = None
- if self.output_value == expected_output:
+ if self.output_value == self.expected_output:
success = True
err = "Correct answer"
- test_case_weight = weight
+ mark_fraction = self.weight
else:
success = False
err = dedent("""
@@ -74,10 +79,10 @@ class PythonStdioEvaluator(CodeEvaluator):
Given input - {0}
Expected output - {1}
Your output - {2}
- """.format(expected_input,
- expected_output,
+ """.format(self.expected_input,
+ self.expected_output,
self.output_value
)
)
del tb
- return success, err, test_case_weight
+ return success, err, mark_fraction
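
The Python StdIO evaluator feeds expected_input to the submission through sys.stdin, captures what it prints, and compares the result with expected_output. A self-contained sketch of that in-process redirection:

    import sys
    from io import StringIO
    from contextlib import contextmanager

    @contextmanager
    def captured_stdout():
        old, sys.stdout = sys.stdout, StringIO()
        try:
            yield sys.stdout
        finally:
            sys.stdout = old

    def run_stdio_case(user_answer, expected_input, expected_output):
        old_stdin = sys.stdin
        sys.stdin = StringIO(expected_input)          # becomes the program's input()
        try:
            code = compile(user_answer, '<string>', 'exec')
            with captured_stdout() as out:
                exec(code, {})
        finally:
            sys.stdin = old_stdin
        return out.getvalue().rstrip('\n') == expected_output

    print(run_stdio_case('a = int(input())\nb = int(input())\nprint(a + b)',
                         '1\n2', '3'))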
diff --git a/yaksh/scilab_code_evaluator.py b/yaksh/scilab_code_evaluator.py
index 3c2d44c..cc3c401 100644
--- a/yaksh/scilab_code_evaluator.py
+++ b/yaksh/scilab_code_evaluator.py
@@ -8,38 +8,43 @@ import re
import importlib
# Local imports
-from .code_evaluator import CodeEvaluator
+from .base_evaluator import BaseEvaluator
from .file_utils import copy_files, delete_files
-class ScilabCodeEvaluator(CodeEvaluator):
+class ScilabCodeEvaluator(BaseEvaluator):
"""Tests the Scilab code obtained from Code Server"""
- def setup(self):
- super(ScilabCodeEvaluator, self).setup()
+ def __init__(self, metadata, test_case_data):
self.files = []
- self.submit_code_path = \
- self.create_submit_code_file('function.sci')
+
+ # Set metadata values
+ self.user_answer = metadata.get('user_answer')
+ self.file_paths = metadata.get('file_paths')
+ self.partial_grading = metadata.get('partial_grading')
+
+ # Set test case data values
+ self.test_case = test_case_data.get('test_case')
+ self.weight = test_case_data.get('weight')
def teardown(self):
# Delete the created file.
os.remove(self.submit_code_path)
if self.files:
delete_files(self.files)
- super(ScilabCodeEvaluator, self).teardown()
- def check_code(self, user_answer, file_paths, partial_grading, test_case, weight):
- if file_paths:
- self.files = copy_files(file_paths)
- ref_code_path = test_case
+ def check_code(self):
+ self.submit_code_path = self.create_submit_code_file('function.sci')
+ if self.file_paths:
+ self.files = copy_files(self.file_paths)
+ ref_code_path = self.test_case
clean_ref_path, clean_test_case_path = \
self._set_test_code_file_path(ref_code_path)
- user_answer, terminate_commands = \
- self._remove_scilab_exit(user_answer.lstrip())
+ self.user_answer, terminate_commands = \
+ self._remove_scilab_exit(self.user_answer.lstrip())
success = False
test_case_weight = 0.0
-
- self.write_to_submit_code_file(self.submit_code_path, user_answer)
+ self.write_to_submit_code_file(self.submit_code_path, self.user_answer)
         # Throw message if there are commands that terminate scilab
add_err = ""
if terminate_commands:
@@ -50,7 +55,7 @@ class ScilabCodeEvaluator(CodeEvaluator):
cmd = 'printf "lines(0)\nexec(\'{0}\',2);\nquit();"'.format(
clean_ref_path
)
- cmd += ' | timeout 8 scilab-cli -nb'
+ cmd += ' | scilab-cli -nb'
ret = self._run_command(cmd,
shell=True,
stdout=subprocess.PIPE,
@@ -65,11 +70,12 @@ class ScilabCodeEvaluator(CodeEvaluator):
stdout = self._strip_output(stdout)
if proc.returncode == 5:
success, err = True, "Correct answer"
- test_case_weight = float(weight) if partial_grading else 0.0
+ test_case_weight = float(self.weight) if self.partial_grading else 0.0
else:
err = add_err + stdout
else:
err = add_err + stderr
+
return success, err, test_case_weight
def _remove_scilab_exit(self, string):
diff --git a/yaksh/settings.py b/yaksh/settings.py
index 6383999..0e432cf 100644
--- a/yaksh/settings.py
+++ b/yaksh/settings.py
@@ -21,20 +21,18 @@ URL_ROOT = ''
code_evaluators = {
"python": {"standardtestcase": "yaksh.python_assertion_evaluator.PythonAssertionEvaluator",
- "stdiobasedtestcase": "yaksh.python_stdio_evaluator.PythonStdioEvaluator"
+ "stdiobasedtestcase": "yaksh.python_stdio_evaluator.PythonStdIOEvaluator"
},
"c": {"standardtestcase": "yaksh.cpp_code_evaluator.CppCodeEvaluator",
- "stdiobasedtestcase": "yaksh.cpp_stdio_evaluator.CppStdioEvaluator"
+ "stdiobasedtestcase": "yaksh.cpp_stdio_evaluator.CppStdIOEvaluator"
},
"cpp": {"standardtestcase": "yaksh.cpp_code_evaluator.CppCodeEvaluator",
- "stdiobasedtestcase": "yaksh.cpp_stdio_evaluator.CppStdioEvaluator"
+ "stdiobasedtestcase": "yaksh.cpp_stdio_evaluator.CppStdIOEvaluator"
},
"java": {"standardtestcase": "yaksh.java_code_evaluator.JavaCodeEvaluator",
- "stdiobasedtestcase": "yaksh.java_stdio_evaluator.JavaStdioEvaluator"},
-
+ "stdiobasedtestcase": "yaksh.java_stdio_evaluator.JavaStdIOEvaluator"},
"bash": {"standardtestcase": "yaksh.bash_code_evaluator.BashCodeEvaluator",
- "stdiobasedtestcase": "yaksh.bash_stdio_evaluator.BashStdioEvaluator"
+ "stdiobasedtestcase": "yaksh.bash_stdio_evaluator.BashStdIOEvaluator"
},
-
"scilab": {"standardtestcase": "yaksh.scilab_code_evaluator.ScilabCodeEvaluator"},
}
diff --git a/yaksh/stdio_evaluator.py b/yaksh/stdio_evaluator.py
index 7530b96..106facd 100644
--- a/yaksh/stdio_evaluator.py
+++ b/yaksh/stdio_evaluator.py
@@ -1,18 +1,10 @@
from __future__ import unicode_literals
# Local imports
-from .code_evaluator import CodeEvaluator
+from .base_evaluator import BaseEvaluator
-class StdIOEvaluator(CodeEvaluator):
- def setup(self):
- super(StdIOEvaluator, self).setup()
- pass
-
- def teardown(self):
- super(StdIOEvaluator, self).teardown()
- pass
-
+class StdIOEvaluator(BaseEvaluator):
def evaluate_stdio(self, user_answer, proc, expected_input, expected_output):
success = False
ip = expected_input.replace(",", " ")
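
For compiled languages the same comparison runs against a real child process: evaluate_stdio writes the expected input to the process's stdin and checks its stdout. A small sketch of that round trip, using the Python interpreter as the child so it works without a compiler (the helper name is made up):

    import subprocess
    import sys

    def check_stdio(cmd_args, expected_input, expected_output):
        """Feed expected_input to a child process and compare its stdout."""
        proc = subprocess.Popen(cmd_args,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        stdout, stderr = proc.communicate(expected_input.encode('utf-8'))
        output = stdout.decode('utf-8').strip()
        return proc.returncode == 0 and output == expected_output

    child = [sys.executable, '-c', 'a = int(input()); b = int(input()); print(a + b)']
    print(check_stdio(child, '1\n2\n', '3'))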
diff --git a/yaksh/test_models.py b/yaksh/test_models.py
index 522da89..317c832 100644
--- a/yaksh/test_models.py
+++ b/yaksh/test_models.py
@@ -1,7 +1,7 @@
import unittest
from yaksh.models import User, Profile, Question, Quiz, QuestionPaper,\
QuestionSet, AnswerPaper, Answer, Course, StandardTestCase,\
- StdioBasedTestCase, FileUpload, McqTestCase
+ StdIOBasedTestCase, FileUpload, McqTestCase
import json
from datetime import datetime, timedelta
from django.utils import timezone
@@ -134,21 +134,23 @@ class QuestionTestCases(unittest.TestCase):
self.question1.tags.add('python', 'function')
self.assertion_testcase = StandardTestCase(question=self.question1,
- test_case='assert myfunc(12, 13) == 15'
+ test_case='assert myfunc(12, 13) == 15',
+ type='standardtestcase'
)
self.upload_test_case = StandardTestCase(question=self.question2,
- test_case='assert fact(3) == 6'
+ test_case='assert fact(3) == 6',
+ type='standardtestcase'
)
self.upload_test_case.save()
self.user_answer = "demo_answer"
self.test_case_upload_data = [{"test_case": "assert fact(3)==6",
+ "test_case_type": "standardtestcase",
"weight": 1.0
}]
questions_data = [{"snippet": "def fact()", "active": True,
"points": 1.0,
"description": "factorial of a no",
"language": "Python", "type": "Code",
- "test_case_type": "standardtestcase",
"testcase": self.test_case_upload_data,
"files": [[file1, 0]],
"summary": "Json Demo"}]
@@ -213,7 +215,6 @@ class QuestionTestCases(unittest.TestCase):
self.assertEqual(question_data.points, 1.0)
self.assertTrue(question_data.active)
self.assertEqual(question_data.snippet, 'def fact()')
- self.assertEqual(question_data.test_case_type, 'standardtestcase')
self.assertEqual(os.path.basename(file.file.path), "test.txt")
self.assertEqual([case.get_field_value() for case in test_case], self.test_case_upload_data)
@@ -511,19 +512,24 @@ class AnswerPaperTestCases(unittest.TestCase):
self.question3.save()
self.assertion_testcase = StandardTestCase(
question=self.question1,
- test_case='assert add(1, 3) == 4'
+ test_case='assert add(1, 3) == 4',
+            type = 'standardtestcase'
)
self.assertion_testcase.save()
self.mcq_based_testcase = McqTestCase(
options = 'a',
question=self.question2,
- correct = True
+ correct = True,
+            type = 'mcqtestcase'
)
self.mcq_based_testcase.save()
self.mcc_based_testcase = McqTestCase(
question=self.question3,
options = 'a',
- correct = True
+ correct = True,
+ type = 'mcqtestcase'
)
self.mcc_based_testcase.save()
@@ -870,21 +876,26 @@ class TestCaseTestCases(unittest.TestCase):
self.question2.save()
self.assertion_testcase = StandardTestCase(
question=self.question1,
- test_case='assert myfunc(12, 13) == 15'
+ test_case='assert myfunc(12, 13) == 15',
+ type='standardtestcase'
)
- self.stdout_based_testcase = StdioBasedTestCase(
+ self.stdout_based_testcase = StdIOBasedTestCase(
question=self.question2,
- expected_output='Hello World'
+ expected_output='Hello World',
+            type='stdiobasedtestcase'
)
self.assertion_testcase.save()
self.stdout_based_testcase.save()
- answer_data = {"user_answer": "demo_answer",
- "test_case_data": [
- {"test_case": "assert myfunc(12, 13) == 15",
- "weight": 1.0
- }
- ]
- }
+ answer_data = {'metadata': { 'user_answer': 'demo_answer',
+ 'language': 'python',
+ 'partial_grading': False
+ },
+ 'test_case_data': [{'test_case': 'assert myfunc(12, 13) == 15',
+ 'test_case_type': 'standardtestcase',
+ 'weight': 1.0
+ }]
+ }
self.answer_data_json = json.dumps(answer_data)
def test_assertion_testcase(self):
@@ -907,5 +918,5 @@ class TestCaseTestCases(unittest.TestCase):
)
actual_data = json.loads(result)
exp_data = json.loads(self.answer_data_json)
- self.assertEqual(actual_data['user_answer'], exp_data['user_answer'])
+ self.assertEqual(actual_data['metadata']['user_answer'], exp_data['metadata']['user_answer'])
self.assertEqual(actual_data['test_case_data'], exp_data['test_case_data'])
diff --git a/yaksh/test_views.py b/yaksh/test_views.py
index 30ebcaa..2419591 100644
--- a/yaksh/test_views.py
+++ b/yaksh/test_views.py
@@ -9,7 +9,7 @@ from django.utils import timezone
from yaksh.models import User, Profile, Question, Quiz, QuestionPaper,\
QuestionSet, AnswerPaper, Answer, Course, StandardTestCase,\
- StdioBasedTestCase, has_profile
+ StdIOBasedTestCase, has_profile
class TestProfile(TestCase):
diff --git a/yaksh/tests/test_code_server.py b/yaksh/tests/test_code_server.py
index 7efd20b..d46c9dd 100644
--- a/yaksh/tests/test_code_server.py
+++ b/yaksh/tests/test_code_server.py
@@ -37,12 +37,15 @@ class TestCodeServer(unittest.TestCase):
def test_infinite_loop(self):
# Given
- testdata = {'user_answer': 'while True: pass',
- 'partial_grading': False,
+ testdata = {'metadata': {'user_answer': 'while True: pass',
+ 'language': 'python',
+ 'partial_grading': False
+ },
'test_case_data': [{'test_case':'assert 1==2',
+ 'test_case_type': 'standardtestcase',
'weight': 0.0
- }
- ]}
+ }]
+ }
# When
result = self.code_server.run_code(
@@ -56,12 +59,15 @@ class TestCodeServer(unittest.TestCase):
def test_correct_answer(self):
# Given
- testdata = {'user_answer': 'def f(): return 1',
- 'partial_grading': False,
+ testdata = {'metadata': { 'user_answer': 'def f(): return 1',
+ 'language': 'python',
+ 'partial_grading': False
+ },
'test_case_data': [{'test_case':'assert f() == 1',
+ 'test_case_type': 'standardtestcase',
'weight': 0.0
- }
- ]}
+ }]
+ }
# When
result = self.code_server.run_code(
@@ -75,12 +81,15 @@ class TestCodeServer(unittest.TestCase):
def test_wrong_answer(self):
# Given
- testdata = {'user_answer': 'def f(): return 1',
- 'partial_grading': False,
+ testdata = {'metadata': { 'user_answer': 'def f(): return 1',
+ 'language': 'python',
+ 'partial_grading': False
+ },
'test_case_data': [{'test_case':'assert f() == 2',
+ 'test_case_type': 'standardtestcase',
'weight': 0.0
- }
- ]}
+ }]
+ }
# When
result = self.code_server.run_code(
@@ -98,12 +107,15 @@ class TestCodeServer(unittest.TestCase):
def run_code():
"""Run an infinite loop."""
- testdata = {'user_answer': 'while True: pass',
- 'partial_grading': False,
- 'test_case_data': [{'test_case':'assert 1==2',
- 'weight': 0.0
- }
- ]}
+ testdata = {'metadata': { 'user_answer': 'while True: pass',
+ 'language': 'python',
+ 'partial_grading': False
+ },
+ 'test_case_data': [{'test_case':'assert 1==2',
+ 'test_case_type': 'standardtestcase',
+ 'weight': 0.0
+ }]
+ }
result = self.code_server.run_code(
'python', 'standardtestcase', json.dumps(testdata), ''
)
diff --git a/yaksh/views.py b/yaksh/views.py
index 0d77426..89274df 100644
--- a/yaksh/views.py
+++ b/yaksh/views.py
@@ -25,7 +25,7 @@ import six
# Local imports.
from yaksh.models import get_model_class, Quiz, Question, QuestionPaper, QuestionSet, Course
from yaksh.models import Profile, Answer, AnswerPaper, User, TestCase, FileUpload,\
- has_profile, StandardTestCase, McqTestCase, StdioBasedTestCase, HookTestCase
+ has_profile, StandardTestCase, McqTestCase, StdIOBasedTestCase, HookTestCase
from yaksh.forms import UserRegisterForm, UserLoginForm, QuizForm,\
QuestionForm, RandomQuestionForm,\
QuestionFilterForm, CourseForm, ProfileForm, UploadFileForm,\