author     prathamesh   2016-11-17 13:58:51 +0530
committer  prathamesh   2016-11-17 13:58:51 +0530
commit     93b16753ded5f3ef07187e9413c94bd52f2e77f7 (patch)
tree       f137a0fd2a3036c5a6e87d323d8d14ee3d480c0b /yaksh
parent     6ac9e99453543e6a5533f5ab77c7db3c08cc0cf9 (diff)
parent     aa6ed71496c4a36faed9b42104c8426345bdc2e3 (diff)
download   online_test-93b16753ded5f3ef07187e9413c94bd52f2e77f7.tar.gz
           online_test-93b16753ded5f3ef07187e9413c94bd52f2e77f7.tar.bz2
           online_test-93b16753ded5f3ef07187e9413c94bd52f2e77f7.zip
Merge branch 'master' of https://github.com/FOSSEE/online_test into student-interface
Resolved conflicts: yaksh/templates/yaksh/question.html (added the textarea back for now).
Diffstat (limited to 'yaksh')
-rw-r--r--  yaksh/bash_code_evaluator.py  40
-rw-r--r--  yaksh/bash_stdio_evaluator.py  12
-rw-r--r--  yaksh/code_evaluator.py  38
-rw-r--r--  yaksh/cpp_code_evaluator.py  11
-rw-r--r--  yaksh/cpp_stdio_evaluator.py  13
-rw-r--r--  yaksh/evaluator_tests/test_bash_evaluation.py  69
-rw-r--r--  yaksh/evaluator_tests/test_c_cpp_evaluation.py  145
-rw-r--r--  yaksh/evaluator_tests/test_java_evaluation.py  103
-rw-r--r--  yaksh/evaluator_tests/test_python_evaluation.py  140
-rw-r--r--  yaksh/evaluator_tests/test_scilab_evaluation.py  18
-rw-r--r--  yaksh/java_code_evaluator.py  11
-rw-r--r--  yaksh/java_stdio_evaluator.py  11
-rw-r--r--  yaksh/models.py  44
-rw-r--r--  yaksh/python_assertion_evaluator.py  35
-rw-r--r--  yaksh/python_stdio_evaluator.py  24
-rw-r--r--  yaksh/scilab_code_evaluator.py  9
-rw-r--r--  yaksh/templates/yaksh/add_question.html  1
-rw-r--r--  yaksh/templates/yaksh/grade_user.html  2
-rw-r--r--  yaksh/templates/yaksh/question.html  2
-rw-r--r--  yaksh/templates/yaksh/user_data.html  2
-rw-r--r--  yaksh/test_models.py  8
-rw-r--r--  yaksh/tests/test_code_server.py  28
-rw-r--r--  yaksh/views.py  7
-rw-r--r--  yaksh/xmlrpc_clients.py  11
24 files changed, 531 insertions(+), 253 deletions(-)
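
The common thread across the files below: each evaluator's check_code() now returns a (success, error, test_case_weight) triple instead of a (success, error) pair, CodeEvaluator.safe_evaluate() sums the weights of the passing test cases, and evaluate() exposes the total under a new 'weight' key; callers must also pass a partial_grading flag and a per-test-case "weight". A minimal illustrative sketch of the resulting call contract (the kwargs and test-case keys mirror the diffs below; the paths and answer string are assumed for the example):

    # Illustrative sketch only, not part of this commit.
    from yaksh.bash_code_evaluator import BashCodeEvaluator

    user_script = "#!/bin/bash\n[[ $# -eq 2 ]] && echo $(( $1 + $2 ))"
    evaluator = BashCodeEvaluator("/tmp/eval_dir")          # in_dir, as in the tests
    result = evaluator.evaluate(
        user_answer=user_script,
        partial_grading=True,                               # new required kwarg
        test_case_data=[{"test_case": "bash_files/sample.sh,bash_files/sample.args",
                         "weight": 1.0}],
        file_paths=None,
    )
    # result == {'success': <bool>, 'error': <str>, 'weight': <float>}
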
diff --git a/yaksh/bash_code_evaluator.py b/yaksh/bash_code_evaluator.py
index e4b961c..b5974d2 100644
--- a/yaksh/bash_code_evaluator.py
+++ b/yaksh/bash_code_evaluator.py
@@ -17,6 +17,7 @@ class BashCodeEvaluator(CodeEvaluator):
# Private Protocol ##########
def setup(self):
super(BashCodeEvaluator, self).setup()
+ self.files = []
self.submit_code_path = self.create_submit_code_file('submit.sh')
self._set_file_as_executable(self.submit_code_path)
@@ -27,7 +28,7 @@ class BashCodeEvaluator(CodeEvaluator):
delete_files(self.files)
super(BashCodeEvaluator, self).teardown()
- def check_code(self, user_answer, file_paths, test_case):
+ def check_code(self, user_answer, file_paths, partial_grading, test_case, weight):
""" Function validates student script using instructor script as
reference. Test cases can optionally be provided. The first argument
ref_path, is the path to instructor script, it is assumed to
@@ -39,41 +40,44 @@ class BashCodeEvaluator(CodeEvaluator):
Returns
--------
+ success - Boolean, indicating whether the code executed successfully and gave the correct output
+ weight - Float, indicating total weight of all successful test cases
+ error - String, error message if success is false
- returns (True, "Correct answer") : If the student script passes all
+ returns (True, "Correct answer", 1.0) : If the student script passes all
test cases/have same output, when compared to the instructor script
- returns (False, error_msg): If the student script fails a single
+ returns (False, error_msg, 0.0): If the student script fails a single
test/have dissimilar output, when compared to the instructor script.
- Returns (False, error_msg): If mandatory arguments are not files or if
+ Returns (False, error_msg, 0.0): If mandatory arguments are not files or if
the required permissions are not given to the file(s).
-
"""
ref_code_path = test_case
+ success = False
+ test_case_weight = 0.0
+
get_ref_path, get_test_case_path = ref_code_path.strip().split(',')
get_ref_path = get_ref_path.strip()
get_test_case_path = get_test_case_path.strip()
clean_ref_code_path, clean_test_case_path = \
self._set_test_code_file_path(get_ref_path, get_test_case_path)
- self.files = []
if file_paths:
self.files = copy_files(file_paths)
if not isfile(clean_ref_code_path):
msg = "No file at %s or Incorrect path" % clean_ref_code_path
- return False, msg
+ return False, msg, 0.0
if not isfile(self.submit_code_path):
msg = "No file at %s or Incorrect path" % self.submit_code_path
- return False, msg
+ return False, msg, 0.0
if not os.access(clean_ref_code_path, os.X_OK):
msg = "Script %s is not executable" % clean_ref_code_path
- return False, msg
+ return False, msg, 0.0
if not os.access(self.submit_code_path, os.X_OK):
msg = "Script %s is not executable" % self.submit_code_path
- return False, msg
+ return False, msg, 0.0
- success = False
user_answer = user_answer.replace("\r", "")
self.write_to_submit_code_file(self.submit_code_path, user_answer)
@@ -91,19 +95,20 @@ class BashCodeEvaluator(CodeEvaluator):
)
proc, stdnt_stdout, stdnt_stderr = ret
if inst_stdout == stdnt_stdout:
- return True, "Correct answer"
+ test_case_weight = float(weight) if partial_grading else 0.0
+ return True, "Correct answer", test_case_weight
else:
err = "Error: expected %s, got %s" % (inst_stderr,
stdnt_stderr
)
- return False, err
+ return False, err, 0.0
else:
if not isfile(clean_test_case_path):
msg = "No test case at %s" % clean_test_case_path
- return False, msg
+ return False, msg, 0.0
if not os.access(clean_ref_code_path, os.R_OK):
msg = "Test script %s, not readable" % clean_test_case_path
- return False, msg
+ return False, msg, 0.0
# valid_answer is True, so that we can stop once a test case fails
valid_answer = True
# loop_count has to be greater than or equal to one.
@@ -133,10 +138,11 @@ class BashCodeEvaluator(CodeEvaluator):
proc, stdnt_stdout, stdnt_stderr = ret
valid_answer = inst_stdout == stdnt_stdout
if valid_answer and (num_lines == loop_count):
- return True, "Correct answer"
+ test_case_weight = float(weight) if partial_grading else 0.0
+ return True, "Correct answer", test_case_weight
else:
err = ("Error:expected"
" {0}, got {1}").format(inst_stdout+inst_stderr,
stdnt_stdout+stdnt_stderr
)
- return False, err
+ return False, err, 0.0
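
Every evaluator in this merge awards a test case's weight only when that test case passes. The bash evaluator assigns the weight inside its success branch, so its `float(weight) if partial_grading else 0.0` is equivalent to the `partial_grading and success` guard used by the stdio evaluators further down. A condensed sketch of the shared rule (the helper name is illustrative, not part of the codebase):

    # Hypothetical helper summarising the per-test-case weight rule.
    def awarded_weight(success, partial_grading, weight):
        """Return the weight earned by a single test case."""
        return float(weight) if (partial_grading and success) else 0.0

    assert awarded_weight(True, True, 2.0) == 2.0    # passing case, partial grading on
    assert awarded_weight(True, False, 2.0) == 0.0   # partial grading disabled
    assert awarded_weight(False, True, 2.0) == 0.0   # failing case earns nothing
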
diff --git a/yaksh/bash_stdio_evaluator.py b/yaksh/bash_stdio_evaluator.py
index a7ea1a4..1dd9fd5 100644
--- a/yaksh/bash_stdio_evaluator.py
+++ b/yaksh/bash_stdio_evaluator.py
@@ -14,6 +14,7 @@ class BashStdioEvaluator(StdIOEvaluator):
def setup(self):
super(BashStdioEvaluator, self).setup()
+ self.files = []
self.submit_code_path = self.create_submit_code_file('Test.sh')
def teardown(self):
@@ -22,8 +23,7 @@ class BashStdioEvaluator(StdIOEvaluator):
delete_files(self.files)
super(BashStdioEvaluator, self).teardown()
- def compile_code(self, user_answer, file_paths, expected_input, expected_output):
- self.files = []
+ def compile_code(self, user_answer, file_paths, expected_input, expected_output, weight):
if file_paths:
self.files = copy_files(file_paths)
if not isfile(self.submit_code_path):
@@ -33,8 +33,11 @@ class BashStdioEvaluator(StdIOEvaluator):
user_answer = user_answer.replace("\r", "")
self.write_to_submit_code_file(self.submit_code_path, user_answer)
- def check_code(self, user_answer, file_paths, expected_input, expected_output):
+ def check_code(self, user_answer, file_paths, partial_grading,
+ expected_input, expected_output, weight):
success = False
+ test_case_weight = 0.0
+
expected_input = str(expected_input).replace('\r', '')
proc = subprocess.Popen("bash ./Test.sh",
shell=True,
@@ -46,4 +49,5 @@ class BashStdioEvaluator(StdIOEvaluator):
expected_input,
expected_output
)
- return success, err
+ test_case_weight = float(weight) if partial_grading and success else 0.0
+ return success, err, test_case_weight
diff --git a/yaksh/code_evaluator.py b/yaksh/code_evaluator.py
index 79f616d..afe18c3 100644
--- a/yaksh/code_evaluator.py
+++ b/yaksh/code_evaluator.py
@@ -82,14 +82,14 @@ class CodeEvaluator(object):
Returns
-------
- A tuple: (success, error message).
+ A tuple: (success, error message, weight).
"""
self.setup()
- success, err = self.safe_evaluate(**kwargs)
+ success, error, weight = self.safe_evaluate(**kwargs)
self.teardown()
- result = {'success': success, 'error': err}
+ result = {'success': success, 'error': error, 'weight': weight}
return result
# Private Protocol ##########
@@ -99,7 +99,7 @@ class CodeEvaluator(object):
os.makedirs(self.in_dir)
self._change_dir(self.in_dir)
- def safe_evaluate(self, user_answer, test_case_data, file_paths=None):
+ def safe_evaluate(self, user_answer, partial_grading, test_case_data, file_paths=None):
"""
Handles code evaluation along with compilation, signal handling
and Exception handling
@@ -108,32 +108,44 @@ class CodeEvaluator(object):
# Add a new signal handler for the execution of this code.
prev_handler = create_signal_handler()
success = False
+ test_case_success_status = [False] * len(test_case_data)
+ error = ""
+ weight = 0.0
# Do whatever testing needed.
try:
- for test_case in test_case_data:
- success = False
+ for idx, test_case in enumerate(test_case_data):
+ test_case_success = False
self.compile_code(user_answer, file_paths, **test_case)
- success, err = self.check_code(user_answer, file_paths, **test_case)
- if not success:
- break
+ test_case_success, err, test_case_weight = self.check_code(user_answer,
+ file_paths,
+ partial_grading,
+ **test_case
+ )
+ if test_case_success:
+ weight += test_case_weight
+
+ error += err + "\n"
+ test_case_success_status[idx] = test_case_success
+
+ success = all(test_case_success_status)
except TimeoutException:
- err = self.timeout_msg
+ error = self.timeout_msg
except OSError:
msg = traceback.format_exc(limit=0)
- err = "Error: {0}".format(msg)
+ error = "Error: {0}".format(msg)
except Exception:
exc_type, exc_value, exc_tb = sys.exc_info()
tb_list = traceback.format_exception(exc_type, exc_value, exc_tb)
if len(tb_list) > 2:
del tb_list[1:3]
- err = "Error: {0}".format("".join(tb_list))
+ error = "Error: {0}".format("".join(tb_list))
finally:
# Set back any original signal handler.
set_original_signal_handler(prev_handler)
- return success, err
+ return success, error, weight
def teardown(self):
# Cancel the signal
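
The reworked safe_evaluate() above no longer stops at the first failing test case: it runs every test case, appends each error message with a trailing newline, sums the weights of the passing cases, and reports success only when all cases pass. A condensed, standalone sketch of that loop (check_code stands in for the real per-evaluator method and is called with the test-case dict only):

    # Illustrative version of the new aggregation in safe_evaluate.
    def aggregate(test_case_data, check_code):
        statuses, error, weight = [], "", 0.0
        for test_case in test_case_data:
            ok, err, tc_weight = check_code(**test_case)
            if ok:
                weight += tc_weight
            error += err + "\n"        # messages accumulate, newline-terminated
            statuses.append(ok)
        return all(statuses), error, weight
    # e.g. two passing cases yield error == "Correct answer\nCorrect answer\n"
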
diff --git a/yaksh/cpp_code_evaluator.py b/yaksh/cpp_code_evaluator.py
index 5380dea..716a522 100644
--- a/yaksh/cpp_code_evaluator.py
+++ b/yaksh/cpp_code_evaluator.py
@@ -16,6 +16,7 @@ class CppCodeEvaluator(CodeEvaluator):
"""Tests the C code obtained from Code Server"""
def setup(self):
super(CppCodeEvaluator, self).setup()
+ self.files = []
self.submit_code_path = self.create_submit_code_file('submit.c')
self.compiled_user_answer = None
self.compiled_test_code = None
@@ -49,8 +50,7 @@ class CppCodeEvaluator(CodeEvaluator):
ref_output_path)
return compile_command, compile_main
- def compile_code(self, user_answer, file_paths, test_case):
- self.files = []
+ def compile_code(self, user_answer, file_paths, test_case, weight):
if self.compiled_user_answer and self.compiled_test_code:
return None
else:
@@ -89,7 +89,7 @@ class CppCodeEvaluator(CodeEvaluator):
return self.compiled_user_answer, self.compiled_test_code
- def check_code(self, user_answer, file_paths, test_case):
+ def check_code(self, user_answer, file_paths, partial_grading, test_case, weight):
""" Function validates student code using instructor code as
reference.The first argument ref_code_path, is the path to
instructor code, it is assumed to have executable permission.
@@ -109,6 +109,8 @@ class CppCodeEvaluator(CodeEvaluator):
if the required permissions are not given to the file(s).
"""
success = False
+ test_case_weight = 0.0
+
proc, stdnt_out, stdnt_stderr = self.compiled_user_answer
stdnt_stderr = self._remove_null_substitute_char(stdnt_stderr)
@@ -127,6 +129,7 @@ class CppCodeEvaluator(CodeEvaluator):
proc, stdout, stderr = ret
if proc.returncode == 0:
success, err = True, "Correct answer"
+ test_case_weight = float(weight) if partial_grading else 0.0
else:
err = "{0} \n {1}".format(stdout, stderr)
else:
@@ -152,4 +155,4 @@ class CppCodeEvaluator(CodeEvaluator):
except:
err = "{0} \n {1}".format(err, stdnt_stderr)
- return success, err
+ return success, err, test_case_weight
diff --git a/yaksh/cpp_stdio_evaluator.py b/yaksh/cpp_stdio_evaluator.py
index 9d2b969..00fad92 100644
--- a/yaksh/cpp_stdio_evaluator.py
+++ b/yaksh/cpp_stdio_evaluator.py
@@ -14,6 +14,7 @@ class CppStdioEvaluator(StdIOEvaluator):
def setup(self):
super(CppStdioEvaluator, self).setup()
+ self.files = []
self.submit_code_path = self.create_submit_code_file('main.c')
def teardown(self):
@@ -34,9 +35,7 @@ class CppStdioEvaluator(StdIOEvaluator):
ref_output_path)
return compile_command, compile_main
- def compile_code(self, user_answer, file_paths, expected_input, expected_output):
-
- self.files = []
+ def compile_code(self, user_answer, file_paths, expected_input, expected_output, weight):
if file_paths:
self.files = copy_files(file_paths)
if not isfile(self.submit_code_path):
@@ -62,8 +61,11 @@ class CppStdioEvaluator(StdIOEvaluator):
)
return self.compiled_user_answer, self.compiled_test_code
- def check_code(self, user_answer, file_paths, expected_input, expected_output):
+ def check_code(self, user_answer, file_paths, partial_grading,
+ expected_input, expected_output, weight):
success = False
+ test_case_weight = 0.0
+
proc, stdnt_out, stdnt_stderr = self.compiled_user_answer
stdnt_stderr = self._remove_null_substitute_char(stdnt_stderr)
if stdnt_stderr == '':
@@ -104,4 +106,5 @@ class CppStdioEvaluator(StdIOEvaluator):
err = err + "\n" + e
except:
err = err + "\n" + stdnt_stderr
- return success, err
+ test_case_weight = float(weight) if partial_grading and success else 0.0
+ return success, err, test_case_weight
diff --git a/yaksh/evaluator_tests/test_bash_evaluation.py b/yaksh/evaluator_tests/test_bash_evaluation.py
index 66ade19..99e5122 100644
--- a/yaksh/evaluator_tests/test_bash_evaluation.py
+++ b/yaksh/evaluator_tests/test_bash_evaluation.py
@@ -13,12 +13,12 @@ class BashAssertionEvaluationTestCases(unittest.TestCase):
def setUp(self):
with open('/tmp/test.txt', 'wb') as f:
f.write('2'.encode('ascii'))
- tmp_in_dir_path = tempfile.mkdtemp()
self.test_case_data = [
- {"test_case": "bash_files/sample.sh,bash_files/sample.args"}
+ {"test_case": "bash_files/sample.sh,bash_files/sample.args",
+ "weight": 0.0
+ }
]
- tmp_in_dir_path = tempfile.mkdtemp()
- self.in_dir = tmp_in_dir_path
+ self.in_dir = tempfile.mkdtemp()
self.timeout_msg = ("Code took more than {0} seconds to run. "
"You probably have an infinite loop in your"
" code.").format(SERVER_TIMEOUT)
@@ -33,19 +33,21 @@ class BashAssertionEvaluationTestCases(unittest.TestCase):
" && echo $(( $1 + $2 )) && exit $(( $1 + $2 ))"
)
get_class = BashCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
+ kwargs = {'user_answer': user_answer,
+ 'partial_grading': True,
'test_case_data': self.test_case_data,
'file_paths': self.file_paths
}
result = get_class.evaluate(**kwargs)
self.assertTrue(result.get('success'))
- self.assertEqual(result.get('error'), "Correct answer")
+ self.assertEqual(result.get('error'), "Correct answer\n")
def test_error(self):
user_answer = ("#!/bin/bash\n[[ $# -eq 2 ]] "
"&& echo $(( $1 - $2 )) && exit $(( $1 - $2 ))")
get_class = BashCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
+ kwargs = {'user_answer': user_answer,
+ 'partial_grading': True,
'test_case_data': self.test_case_data,
'file_paths': self.file_paths
}
@@ -57,7 +59,8 @@ class BashAssertionEvaluationTestCases(unittest.TestCase):
user_answer = ("#!/bin/bash\nwhile [ 1 ] ;"
" do echo "" > /dev/null ; done")
get_class = BashCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
+ kwargs = {'user_answer': user_answer,
+ 'partial_grading': True,
'test_case_data': self.test_case_data,
'file_paths': self.file_paths
}
@@ -68,20 +71,24 @@ class BashAssertionEvaluationTestCases(unittest.TestCase):
def test_file_based_assert(self):
self.file_paths = [('/tmp/test.txt', False)]
self.test_case_data = [
- {"test_case": "bash_files/sample1.sh,bash_files/sample1.args"}
+ {"test_case": "bash_files/sample1.sh,bash_files/sample1.args",
+ "weight": 0.0
+ }
]
user_answer = ("#!/bin/bash\ncat $1")
get_class = BashCodeEvaluator()
kwargs = {'user_answer': user_answer,
+ 'partial_grading': True,
'test_case_data': self.test_case_data,
'file_paths': self.file_paths
}
result = get_class.evaluate(**kwargs)
self.assertTrue(result.get("success"))
- self.assertEqual(result.get("error"), "Correct answer")
+ self.assertEqual(result.get("error"), "Correct answer\n")
class BashStdioEvaluationTestCases(unittest.TestCase):
def setUp(self):
+ self.in_dir = tempfile.mkdtemp()
self.timeout_msg = ("Code took more than {0} seconds to run. "
"You probably have an infinite loop in your"
" code.").format(SERVER_TIMEOUT)
@@ -93,13 +100,17 @@ class BashStdioEvaluationTestCases(unittest.TestCase):
echo -n `expr $A + $B`
"""
)
- test_case_data = [{'expected_output': '11', 'expected_input': '5\n6'}]
+ test_case_data = [{'expected_output': '11',
+ 'expected_input': '5\n6',
+ 'weight': 0.0
+ }]
get_class = BashStdioEvaluator()
kwargs = {"user_answer": user_answer,
- "test_case_data": test_case_data
- }
+ "partial_grading": True,
+ "test_case_data": test_case_data
+ }
result = get_class.evaluate(**kwargs)
- self.assertEqual(result.get('error'), "Correct answer")
+ self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_array_input(self):
@@ -112,14 +123,16 @@ class BashStdioEvaluationTestCases(unittest.TestCase):
"""
)
test_case_data = [{'expected_output': '1 2 3\n4 5 6\n7 8 9\n',
- 'expected_input': '1,2,3\n4,5,6\n7,8,9'
+ 'expected_input': '1,2,3\n4,5,6\n7,8,9',
+ 'weight': 0.0
}]
get_class = BashStdioEvaluator()
kwargs = {"user_answer": user_answer,
- "test_case_data": test_case_data
- }
+ "partial_grading": True,
+ "test_case_data": test_case_data
+ }
result = get_class.evaluate(**kwargs)
- self.assertEqual(result.get('error'), "Correct answer")
+ self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_incorrect_answer(self):
@@ -129,11 +142,15 @@ class BashStdioEvaluationTestCases(unittest.TestCase):
echo -n `expr $A - $B`
"""
)
- test_case_data = [{'expected_output': '11', 'expected_input': '5\n6'}]
+ test_case_data = [{'expected_output': '11',
+ 'expected_input': '5\n6',
+ 'weight': 0.0
+ }]
get_class = BashStdioEvaluator()
kwargs = {"user_answer": user_answer,
- "test_case_data": test_case_data
- }
+ "partial_grading": True,
+ "test_case_data": test_case_data
+ }
result = get_class.evaluate(**kwargs)
self.assertIn("Incorrect", result.get('error'))
self.assertFalse(result.get('success'))
@@ -146,14 +163,16 @@ class BashStdioEvaluationTestCases(unittest.TestCase):
"""
)
test_case_data = [{'expected_output': '10',
- 'expected_input': ''
+ 'expected_input': '',
+ 'weight': 0.0
}]
get_class = BashStdioEvaluator()
kwargs = {"user_answer": user_answer,
- "test_case_data": test_case_data
- }
+ "partial_grading": True,
+ "test_case_data": test_case_data
+ }
result = get_class.evaluate(**kwargs)
- self.assertEqual(result.get('error'), "Correct answer")
+ self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
if __name__ == '__main__':
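
Because safe_evaluate() now appends "\n" after every per-test-case message (see code_evaluator.py above), a fully correct single-test-case submission reports the error string "Correct answer\n" rather than "Correct answer", and questions with several test cases report a concatenation of messages. The assertions in these tests change accordingly, using assertEqual against the newline-terminated string or assertIn where messages are concatenated. A tiny illustrative check:

    # Illustrative: message accumulation for one and for three passing cases.
    assert "".join(m + "\n" for m in ["Correct answer"]) == "Correct answer\n"
    assert "Correct answer" in "".join(m + "\n" for m in ["Correct answer"] * 3)
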
diff --git a/yaksh/evaluator_tests/test_c_cpp_evaluation.py b/yaksh/evaluator_tests/test_c_cpp_evaluation.py
index c990436..d5193d3 100644
--- a/yaksh/evaluator_tests/test_c_cpp_evaluation.py
+++ b/yaksh/evaluator_tests/test_c_cpp_evaluation.py
@@ -14,7 +14,9 @@ class CAssertionEvaluationTestCases(unittest.TestCase):
with open('/tmp/test.txt', 'wb') as f:
f.write('2'.encode('ascii'))
tmp_in_dir_path = tempfile.mkdtemp()
- self.test_case_data = [{"test_case": "c_cpp_files/main.cpp"}]
+ self.test_case_data = [{"test_case": "c_cpp_files/main.cpp",
+ "weight": 0.0
+ }]
self.in_dir = tmp_in_dir_path
self.timeout_msg = ("Code took more than {0} seconds to run. "
"You probably have an infinite loop in your"
@@ -29,17 +31,19 @@ class CAssertionEvaluationTestCases(unittest.TestCase):
user_answer = "int add(int a, int b)\n{return a+b;}"
get_class = CppCodeEvaluator(self.in_dir)
kwargs = {'user_answer': user_answer,
+ 'partial_grading': False,
'test_case_data': self.test_case_data,
'file_paths': self.file_paths
}
result = get_class.evaluate(**kwargs)
self.assertTrue(result.get('success'))
- self.assertEqual(result.get('error'), "Correct answer")
+ self.assertEqual(result.get('error'), "Correct answer\n")
def test_incorrect_answer(self):
user_answer = "int add(int a, int b)\n{return a-b;}"
get_class = CppCodeEvaluator(self.in_dir)
kwargs = {'user_answer': user_answer,
+ 'partial_grading': False,
'test_case_data': self.test_case_data,
'file_paths': self.file_paths
}
@@ -52,7 +56,8 @@ class CAssertionEvaluationTestCases(unittest.TestCase):
def test_compilation_error(self):
user_answer = "int add(int a, int b)\n{return a+b}"
get_class = CppCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
+ kwargs = {'user_answer': user_answer,
+ 'partial_grading': False,
'test_case_data': self.test_case_data,
'file_paths': self.file_paths
}
@@ -63,7 +68,8 @@ class CAssertionEvaluationTestCases(unittest.TestCase):
def test_infinite_loop(self):
user_answer = "int add(int a, int b)\n{while(1>0){}}"
get_class = CppCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
+ kwargs = {'user_answer': user_answer,
+ 'partial_grading': False,
'test_case_data': self.test_case_data,
'file_paths': self.file_paths
}
@@ -73,7 +79,9 @@ class CAssertionEvaluationTestCases(unittest.TestCase):
def test_file_based_assert(self):
self.file_paths = [('/tmp/test.txt', False)]
- self.test_case_data = [{"test_case": "c_cpp_files/file_data.c"}]
+ self.test_case_data = [{"test_case": "c_cpp_files/file_data.c",
+ "weight": 0.0
+ }]
user_answer = dedent("""
#include<stdio.h>
char ans()
@@ -88,18 +96,21 @@ class CAssertionEvaluationTestCases(unittest.TestCase):
""")
get_class = CppCodeEvaluator(self.in_dir)
kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
- }
+ 'partial_grading': False,
+ 'test_case_data': self.test_case_data,
+ 'file_paths': self.file_paths
+ }
result = get_class.evaluate(**kwargs)
self.assertTrue(result.get('success'))
- self.assertEqual(result.get('error'), "Correct answer")
+ self.assertEqual(result.get('error'), "Correct answer\n")
class CppStdioEvaluationTestCases(unittest.TestCase):
-
def setUp(self):
- self.test_case_data = [{'expected_output': '11', 'expected_input': '5\n6'}]
- self.in_dir = os.getcwd()
+ self.test_case_data = [{'expected_output': '11',
+ 'expected_input': '5\n6',
+ 'weight': 0.0
+ }]
+ self.in_dir = tempfile.mkdtemp()
self.timeout_msg = ("Code took more than {0} seconds to run. "
"You probably have an infinite loop in"
" your code.").format(SERVER_TIMEOUT)
@@ -114,15 +125,18 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
}""")
get_class = CppStdioEvaluator()
kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data
- }
+ 'partial_grading': False,
+ 'test_case_data': self.test_case_data
+ }
result = get_class.evaluate(**kwargs)
- self.assertEqual(result.get('error'), "Correct answer")
+ self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_array_input(self):
self.test_case_data = [{'expected_output': '561',
- 'expected_input': '5\n6\n1'}]
+ 'expected_input': '5\n6\n1',
+ 'weight': 0.0
+ }]
user_answer = dedent("""
#include<stdio.h>
int main(void){
@@ -134,15 +148,18 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
}""")
get_class = CppStdioEvaluator()
kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data
- }
+ 'partial_grading': False,
+ 'test_case_data': self.test_case_data
+ }
result = get_class.evaluate(**kwargs)
- self.assertEqual(result.get('error'), "Correct answer")
+ self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_string_input(self):
self.test_case_data = [{'expected_output': 'abc',
- 'expected_input': 'abc'}]
+ 'expected_input': 'abc',
+ 'weight': 0.0
+ }]
user_answer = dedent("""
#include<stdio.h>
int main(void){
@@ -152,10 +169,11 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
}""")
get_class = CppStdioEvaluator()
kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data
- }
+ 'partial_grading': False,
+ 'test_case_data': self.test_case_data
+ }
result = get_class.evaluate(**kwargs)
- self.assertEqual(result.get('error'), "Correct answer")
+ self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_incorrect_answer(self):
@@ -167,8 +185,9 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
}""")
get_class = CppStdioEvaluator()
kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data
- }
+ 'partial_grading': False,
+ 'test_case_data': self.test_case_data
+ }
result = get_class.evaluate(**kwargs)
lines_of_error = len(result.get('error').splitlines())
self.assertFalse(result.get('success'))
@@ -184,8 +203,9 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
}""")
get_class = CppStdioEvaluator()
kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data
- }
+ 'partial_grading': False,
+ 'test_case_data': self.test_case_data
+ }
result = get_class.evaluate(**kwargs)
self.assertFalse(result.get("success"))
self.assertTrue("Compilation Error" in result.get("error"))
@@ -199,15 +219,18 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
}""")
get_class = CppStdioEvaluator()
kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data
- }
+ 'partial_grading': False,
+ 'test_case_data': self.test_case_data
+ }
result = get_class.evaluate(**kwargs)
self.assertFalse(result.get("success"))
self.assertEqual(result.get("error"), self.timeout_msg)
def test_only_stdout(self):
self.test_case_data = [{'expected_output': '11',
- 'expected_input': ''}]
+ 'expected_input': '',
+ 'weight': 0.0
+ }]
user_answer = dedent("""
#include<stdio.h>
int main(void){
@@ -216,10 +239,11 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
}""")
get_class = CppStdioEvaluator()
kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data
- }
+ 'partial_grading': False,
+ 'test_case_data': self.test_case_data
+ }
result = get_class.evaluate(**kwargs)
- self.assertEqual(result.get('error'), "Correct answer")
+ self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_cpp_correct_answer(self):
@@ -233,15 +257,18 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
}""")
get_class = CppStdioEvaluator()
kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data
- }
+ 'partial_grading': False,
+ 'test_case_data': self.test_case_data
+ }
result = get_class.evaluate(**kwargs)
- self.assertEqual(result.get('error'), "Correct answer")
+ self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_cpp_array_input(self):
self.test_case_data = [{'expected_output': '561',
- 'expected_input': '5\n6\n1'}]
+ 'expected_input': '5\n6\n1',
+ 'weight': 0.0
+ }]
user_answer = dedent("""
#include<iostream>
using namespace std;
@@ -254,15 +281,18 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
}""")
get_class = CppStdioEvaluator()
kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data
- }
+ 'partial_grading': False,
+ 'test_case_data': self.test_case_data
+ }
result = get_class.evaluate(**kwargs)
- self.assertEqual(result.get('error'), "Correct answer")
+ self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_cpp_string_input(self):
self.test_case_data = [{'expected_output': 'abc',
- 'expected_input': 'abc'}]
+ 'expected_input': 'abc',
+ 'weight': 0.0
+ }]
user_answer = dedent("""
#include<iostream>
using namespace std;
@@ -273,10 +303,11 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
}""")
get_class = CppStdioEvaluator()
kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data
- }
+ 'partial_grading': False,
+ 'test_case_data': self.test_case_data
+ }
result = get_class.evaluate(**kwargs)
- self.assertEqual(result.get('error'), "Correct answer")
+ self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_cpp_incorrect_answer(self):
@@ -289,8 +320,9 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
}""")
get_class = CppStdioEvaluator()
kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data
- }
+ 'partial_grading': False,
+ 'test_case_data': self.test_case_data
+ }
result = get_class.evaluate(**kwargs)
lines_of_error = len(result.get('error').splitlines())
self.assertFalse(result.get('success'))
@@ -307,8 +339,9 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
}""")
get_class = CppStdioEvaluator()
kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data
- }
+ 'partial_grading': False,
+ 'test_case_data': self.test_case_data
+ }
result = get_class.evaluate(**kwargs)
self.assertFalse(result.get("success"))
self.assertTrue("Compilation Error" in result.get("error"))
@@ -323,15 +356,18 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
}""")
get_class = CppStdioEvaluator()
kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data
- }
+ 'partial_grading': False,
+ 'test_case_data': self.test_case_data
+ }
result = get_class.evaluate(**kwargs)
self.assertFalse(result.get("success"))
self.assertEqual(result.get("error"), self.timeout_msg)
def test_cpp_only_stdout(self):
self.test_case_data = [{'expected_output': '11',
- 'expected_input': ''}]
+ 'expected_input': '',
+ 'weight': 0.0
+ }]
user_answer = dedent("""
#include<iostream>
using namespace std;
@@ -341,10 +377,11 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
}""")
get_class = CppStdioEvaluator()
kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data
- }
+ 'partial_grading': False,
+ 'test_case_data': self.test_case_data
+ }
result = get_class.evaluate(**kwargs)
- self.assertEqual(result.get('error'), "Correct answer")
+ self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
if __name__ == '__main__':
diff --git a/yaksh/evaluator_tests/test_java_evaluation.py b/yaksh/evaluator_tests/test_java_evaluation.py
index e375bdb..f7ecd97 100644
--- a/yaksh/evaluator_tests/test_java_evaluation.py
+++ b/yaksh/evaluator_tests/test_java_evaluation.py
@@ -16,7 +16,9 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase):
f.write('2'.encode('ascii'))
tmp_in_dir_path = tempfile.mkdtemp()
self.test_case_data = [
- {"test_case": "java_files/main_square.java"}
+ {"test_case": "java_files/main_square.java",
+ "weight": 0.0
+ }
]
self.in_dir = tmp_in_dir_path
evaluator.SERVER_TIMEOUT = 9
@@ -32,18 +34,20 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase):
def test_correct_answer(self):
user_answer = "class Test {\n\tint square_num(int a) {\n\treturn a*a;\n\t}\n}"
get_class = JavaCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
+ kwargs = {'user_answer': user_answer,
+ 'partial_grading': True,
'test_case_data': self.test_case_data,
'file_paths': self.file_paths
}
result = get_class.evaluate(**kwargs)
- self.assertEqual(result.get('error'), "Correct answer")
+ self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_incorrect_answer(self):
user_answer = "class Test {\n\tint square_num(int a) {\n\treturn a;\n\t}\n}"
get_class = JavaCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
+ kwargs = {'user_answer': user_answer,
+ 'partial_grading': True,
'test_case_data': self.test_case_data,
'file_paths': self.file_paths
}
@@ -57,7 +61,8 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase):
def test_error(self):
user_answer = "class Test {\n\tint square_num(int a) {\n\treturn a*a"
get_class = JavaCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
+ kwargs = {'user_answer': user_answer,
+ 'partial_grading': True,
'test_case_data': self.test_case_data,
'file_paths': self.file_paths
}
@@ -68,7 +73,8 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase):
def test_infinite_loop(self):
user_answer = "class Test {\n\tint square_num(int a) {\n\t\twhile(0==0){\n\t\t}\n\t}\n}"
get_class = JavaCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
+ kwargs = {'user_answer': user_answer,
+ 'partial_grading': True,
'test_case_data': self.test_case_data,
'file_paths': self.file_paths
}
@@ -79,7 +85,9 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase):
def test_file_based_assert(self):
self.file_paths = [("/tmp/test.txt", False)]
self.test_case_data = [
- {"test_case": "java_files/read_file.java"}
+ {"test_case": "java_files/read_file.java",
+ "weight": 0.0
+ }
]
user_answer = dedent("""
import java.io.BufferedReader;
@@ -101,12 +109,13 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase):
""")
get_class = JavaCodeEvaluator(self.in_dir)
kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
- }
+ 'partial_grading': True,
+ 'test_case_data': self.test_case_data,
+ 'file_paths': self.file_paths
+ }
result = get_class.evaluate(**kwargs)
self.assertTrue(result.get("success"))
- self.assertEqual(result.get("error"), "Correct answer")
+ self.assertEqual(result.get("error"), "Correct answer\n")
class JavaStdioEvaluationTestCases(unittest.TestCase):
@@ -116,7 +125,9 @@ class JavaStdioEvaluationTestCases(unittest.TestCase):
tmp_in_dir_path = tempfile.mkdtemp()
self.in_dir = tmp_in_dir_path
self.test_case_data = [{'expected_output': '11',
- 'expected_input': '5\n6'}]
+ 'expected_input': '5\n6',
+ 'weight': 0.0
+ }]
evaluator.SERVER_TIMEOUT = 4
self.timeout_msg = ("Code took more than {0} seconds to run. "
"You probably have an infinite loop in"
@@ -139,16 +150,19 @@ class JavaStdioEvaluationTestCases(unittest.TestCase):
}}""")
get_class = JavaStdioEvaluator(self.in_dir)
kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data
- }
+ 'partial_grading': True,
+ 'test_case_data': self.test_case_data
+ }
result = get_class.evaluate(**kwargs)
- self.assertEqual(result.get('error'), "Correct answer")
+ self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_array_input(self):
self.test_case_data = [{'expected_output': '561',
- 'expected_input': '5\n6\n1'}]
+ 'expected_input': '5\n6\n1',
+ 'weight': 0.0
+ }]
user_answer = dedent("""
import java.util.Scanner;
class Test
@@ -161,10 +175,11 @@ class JavaStdioEvaluationTestCases(unittest.TestCase):
}}""")
get_class = JavaStdioEvaluator(self.in_dir)
kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data
- }
+ 'partial_grading': True,
+ 'test_case_data': self.test_case_data
+ }
result = get_class.evaluate(**kwargs)
- self.assertEqual(result.get('error'), "Correct answer")
+ self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_incorrect_answer(self):
@@ -180,8 +195,9 @@ class JavaStdioEvaluationTestCases(unittest.TestCase):
}}""")
get_class = JavaStdioEvaluator(self.in_dir)
kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data
- }
+ 'partial_grading': True,
+ 'test_case_data': self.test_case_data
+ }
result = get_class.evaluate(**kwargs)
lines_of_error = len(result.get('error').splitlines())
self.assertFalse(result.get('success'))
@@ -197,8 +213,9 @@ class JavaStdioEvaluationTestCases(unittest.TestCase):
}""")
get_class = JavaStdioEvaluator(self.in_dir)
kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data
- }
+ 'partial_grading': True,
+ 'test_case_data': self.test_case_data
+ }
result = get_class.evaluate(**kwargs)
self.assertFalse(result.get("success"))
self.assertTrue("Compilation Error" in result.get("error"))
@@ -214,15 +231,18 @@ class JavaStdioEvaluationTestCases(unittest.TestCase):
}}""")
get_class = JavaStdioEvaluator(self.in_dir)
kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data
- }
+ 'partial_grading': True,
+ 'test_case_data': self.test_case_data
+ }
result = get_class.evaluate(**kwargs)
self.assertFalse(result.get("success"))
self.assertEqual(result.get("error"), self.timeout_msg)
def test_only_stdout(self):
self.test_case_data = [{'expected_output': '11',
- 'expected_input': ''}]
+ 'expected_input': '',
+ 'weight': 0.0
+ }]
user_answer = dedent("""
class Test
{public static void main(String[] args){
@@ -232,15 +252,18 @@ class JavaStdioEvaluationTestCases(unittest.TestCase):
}}""")
get_class = JavaStdioEvaluator(self.in_dir)
kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data
- }
+ 'partial_grading': True,
+ 'test_case_data': self.test_case_data
+ }
result = get_class.evaluate(**kwargs)
- self.assertEqual(result.get('error'), "Correct answer")
+ self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_string_input(self):
self.test_case_data = [{'expected_output': 'HelloWorld',
- 'expected_input': 'Hello\nWorld'}]
+ 'expected_input': 'Hello\nWorld',
+ 'weight': 0.0
+ }]
user_answer = dedent("""
import java.util.Scanner;
class Test
@@ -252,16 +275,19 @@ class JavaStdioEvaluationTestCases(unittest.TestCase):
}}""")
get_class = JavaStdioEvaluator(self.in_dir)
kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data
- }
+ 'partial_grading': True,
+ 'test_case_data': self.test_case_data
+ }
result = get_class.evaluate(**kwargs)
- self.assertEqual(result.get('error'), "Correct answer")
+ self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_file_based_stdout(self):
self.file_paths = [("/tmp/test.txt", False)]
self.test_case_data = [{'expected_output': '2',
- 'expected_input': ''}]
+ 'expected_input': '',
+ 'weight': 0.0
+ }]
user_answer = dedent("""
import java.io.BufferedReader;
import java.io.FileReader;
@@ -282,12 +308,13 @@ class JavaStdioEvaluationTestCases(unittest.TestCase):
""")
get_class = JavaStdioEvaluator(self.in_dir)
kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
- }
+ 'partial_grading': True,
+ 'test_case_data': self.test_case_data,
+ 'file_paths': self.file_paths
+ }
result = get_class.evaluate(**kwargs)
self.assertTrue(result.get("success"))
- self.assertEqual(result.get("error"), "Correct answer")
+ self.assertEqual(result.get("error"), "Correct answer\n")
if __name__ == '__main__':
diff --git a/yaksh/evaluator_tests/test_python_evaluation.py b/yaksh/evaluator_tests/test_python_evaluation.py
index 45cc40d..9796fa2 100644
--- a/yaksh/evaluator_tests/test_python_evaluation.py
+++ b/yaksh/evaluator_tests/test_python_evaluation.py
@@ -17,9 +17,9 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
f.write('2'.encode('ascii'))
tmp_in_dir_path = tempfile.mkdtemp()
self.in_dir = tmp_in_dir_path
- self.test_case_data = [{"test_case": 'assert(add(1,2)==3)'},
- {"test_case": 'assert(add(-1,2)==1)'},
- {"test_case": 'assert(add(-1,-2)==-3)'},
+ self.test_case_data = [{"test_case": 'assert(add(1,2)==3)', 'weight': 0.0},
+ {"test_case": 'assert(add(-1,2)==1)', 'weight': 0.0},
+ {"test_case": 'assert(add(-1,-2)==-3)', 'weight': 0.0},
]
self.timeout_msg = ("Code took more than {0} seconds to run. "
"You probably have an infinite loop in"
@@ -35,7 +35,8 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
user_answer = "def add(a,b):\n\treturn a + b"
kwargs = {'user_answer': user_answer,
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
+ 'file_paths': self.file_paths,
+ 'partial_grading': False
}
# When
@@ -44,14 +45,15 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
# Then
self.assertTrue(result.get('success'))
- self.assertEqual(result.get('error'), "Correct answer")
+ self.assertIn("Correct answer", result.get('error'))
def test_incorrect_answer(self):
# Given
user_answer = "def add(a,b):\n\treturn a - b"
kwargs = {'user_answer': user_answer,
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
+ 'file_paths': self.file_paths,
+ 'partial_grading': False
}
# When
@@ -60,16 +62,50 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
# Then
self.assertFalse(result.get('success'))
- self.assertEqual(result.get('error'),
- "AssertionError in: assert(add(1,2)==3)"
- )
+ self.assertIn('AssertionError in: assert(add(1,2)==3)',
+ result.get('error')
+ )
+ self.assertIn('AssertionError in: assert(add(-1,2)==1)',
+ result.get('error')
+ )
+ self.assertIn('AssertionError in: assert(add(-1,-2)==-3)',
+ result.get('error')
+ )
+
+ def test_partial_incorrect_answer(self):
+ # Given
+ user_answer = "def add(a,b):\n\treturn abs(a) + abs(b)"
+ test_case_data = [{"test_case": 'assert(add(-1,2)==1)', 'weight': 1.0},
+ {"test_case": 'assert(add(-1,-2)==-3)', 'weight': 1.0},
+ {"test_case": 'assert(add(1,2)==3)', 'weight': 2.0}
+ ]
+ kwargs = {'user_answer': user_answer,
+ 'test_case_data': test_case_data,
+ 'file_paths': self.file_paths,
+ 'partial_grading': True
+ }
+
+ # When
+ evaluator = PythonAssertionEvaluator()
+ result = evaluator.evaluate(**kwargs)
+
+ # Then
+ self.assertFalse(result.get('success'))
+ self.assertEqual(result.get('weight'), 2.0)
+ self.assertIn('AssertionError in: assert(add(-1,2)==1)',
+ result.get('error')
+ )
+ self.assertIn('AssertionError in: assert(add(-1,-2)==-3)',
+ result.get('error')
+ )
def test_infinite_loop(self):
# Given
user_answer = "def add(a, b):\n\twhile True:\n\t\tpass"
kwargs = {'user_answer': user_answer,
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
+ 'file_paths': self.file_paths,
+ 'partial_grading': False
}
# When
@@ -96,7 +132,8 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
]
kwargs = {'user_answer': user_answer,
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
+ 'file_paths': self.file_paths,
+ 'partial_grading': False
}
# When
@@ -125,7 +162,9 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
]
kwargs = {'user_answer': user_answer,
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
+ 'file_paths': self.file_paths,
+ 'partial_grading': False
+
}
# When
@@ -150,7 +189,8 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
]
kwargs = {'user_answer': user_answer,
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
+ 'file_paths': self.file_paths,
+ 'partial_grading': False
}
# When
@@ -176,7 +216,8 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
]
kwargs = {'user_answer': user_answer,
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
+ 'file_paths': self.file_paths,
+ 'partial_grading': False
}
# When
@@ -202,7 +243,8 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
]
kwargs = {'user_answer': user_answer,
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
+ 'file_paths': self.file_paths,
+ 'partial_grading': False
}
# When
@@ -231,7 +273,8 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
]
kwargs = {'user_answer': user_answer,
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
+ 'file_paths': self.file_paths,
+ 'partial_grading': False
}
# When
@@ -247,7 +290,7 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
def test_file_based_assert(self):
# Given
- self.test_case_data = [{"test_case": "assert(ans()=='2')"}]
+ self.test_case_data = [{"test_case": "assert(ans()=='2')", "weight": 0.0}]
self.file_paths = [('/tmp/test.txt', False)]
user_answer = dedent("""
def ans():
@@ -256,7 +299,8 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
""")
kwargs = {'user_answer': user_answer,
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
+ 'file_paths': self.file_paths,
+ 'partial_grading': False
}
# When
@@ -264,7 +308,7 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
result = evaluator.evaluate(**kwargs)
# Then
- self.assertEqual(result.get('error'), "Correct answer")
+ self.assertIn("Correct answer", result.get('error'))
self.assertTrue(result.get('success'))
def test_single_testcase_error(self):
@@ -272,7 +316,9 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
""" Tests the user answer with just an incorrect test case """
user_answer = "def palindrome(a):\n\treturn a == a[::-1]"
- test_case_data = [{"test_case": 's="abbb"\nasert palindrome(s)==False'}
+ test_case_data = [{"test_case": 's="abbb"\nasert palindrome(s)==False',
+ "weight": 0.0
+ }
]
syntax_error_msg = ["Traceback",
"call",
@@ -284,7 +330,8 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
]
kwargs = {'user_answer': user_answer,
'test_case_data': test_case_data,
- 'file_paths': self.file_paths
+ 'file_paths': self.file_paths,
+ 'partial_grading': False
}
# When
@@ -304,8 +351,12 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
first and then with an incorrect test case """
# Given
user_answer = "def palindrome(a):\n\treturn a == a[::-1]"
- test_case_data = [{"test_case": 'assert(palindrome("abba")==True)'},
- {"test_case": 's="abbb"\nassert palindrome(S)==False'}
+ test_case_data = [{"test_case": 'assert(palindrome("abba")==True)',
+ "weight": 0.0
+ },
+ {"test_case": 's="abbb"\nassert palindrome(S)==False',
+ "weight": 0.0
+ }
]
name_error_msg = ["Traceback",
"call",
@@ -317,7 +368,8 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
]
kwargs = {'user_answer': user_answer,
'test_case_data': test_case_data,
- 'file_paths': self.file_paths
+ 'file_paths': self.file_paths,
+ 'partial_grading': False
}
# When
@@ -341,7 +393,8 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase):
def test_correct_answer_integer(self):
# Given
self.test_case_data = [{"expected_input": "1\n2",
- "expected_output": "3"
+ "expected_output": "3",
+ "weight": 0.0
}]
user_answer = dedent("""
a = int(input())
@@ -350,7 +403,8 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase):
"""
)
kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data
+ 'test_case_data': self.test_case_data,
+ 'partial_grading': False
}
# When
@@ -364,7 +418,8 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase):
def test_correct_answer_list(self):
# Given
self.test_case_data = [{"expected_input": "1,2,3\n5,6,7",
- "expected_output": "[1, 2, 3, 5, 6, 7]"
+ "expected_output": "[1, 2, 3, 5, 6, 7]",
+ "weight": 0.0
}]
user_answer = dedent("""
from six.moves import input
@@ -376,7 +431,8 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase):
"""
)
kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data
+ 'test_case_data': self.test_case_data,
+ 'partial_grading': False
}
# When
@@ -390,7 +446,8 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase):
def test_correct_answer_string(self):
# Given
self.test_case_data = [{"expected_input": ("the quick brown fox jumps over the lazy dog\nthe"),
- "expected_output": "2"
+ "expected_output": "2",
+ "weight": 0.0
}]
user_answer = dedent("""
from six.moves import input
@@ -400,7 +457,8 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase):
"""
)
kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data
+ 'test_case_data': self.test_case_data,
+ 'partial_grading': False
}
# When
@@ -414,7 +472,8 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase):
def test_incorrect_answer_integer(self):
# Given
self.test_case_data = [{"expected_input": "1\n2",
- "expected_output": "3"
+ "expected_output": "3",
+ "weight": 0.0
}]
user_answer = dedent("""
a = int(input())
@@ -424,6 +483,7 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase):
)
kwargs = {'user_answer': user_answer,
'test_case_data': self.test_case_data,
+ 'partial_grading': False
}
# When
@@ -436,7 +496,10 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase):
def test_file_based_answer(self):
# Given
- self.test_case_data = [{"expected_input": "", "expected_output": "2"}]
+ self.test_case_data = [{"expected_input": "",
+ "expected_output": "2",
+ "weight": 0.0
+ }]
self.file_paths = [('/tmp/test.txt', False)]
user_answer = dedent("""
@@ -447,7 +510,8 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase):
)
kwargs = {'user_answer': user_answer,
'test_case_data': self.test_case_data,
- 'file_paths': self.file_paths
+ 'file_paths': self.file_paths,
+ 'partial_grading': False
}
# When
@@ -455,20 +519,22 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase):
result = evaluator.evaluate(**kwargs)
# Then
- self.assertEqual(result.get('error'), "Correct answer")
+ self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_infinite_loop(self):
# Given
test_case_data = [{"expected_input": "1\n2",
- "expected_output": "3"
- }]
+ "expected_output": "3",
+ "weight": 0.0
+ }]
timeout_msg = ("Code took more than {0} seconds to run. "
"You probably have an infinite loop in"
" your code.").format(SERVER_TIMEOUT)
user_answer = "while True:\n\tpass"
kwargs = {'user_answer': user_answer,
- 'test_case_data': test_case_data
+ 'test_case_data': test_case_data,
+ 'partial_grading': False
}
# When
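
The new test_partial_incorrect_answer above exercises the weight accumulation end to end: with the abs-based addition only the third assertion holds, so only its weight of 2.0 is earned while overall success stays False. A standalone restatement of that arithmetic (illustrative, outside the evaluator):

    # Worked example mirroring test_partial_incorrect_answer.
    def add(a, b):                      # the student answer under test
        return abs(a) + abs(b)

    cases = [(add(-1, 2) == 1, 1.0),    # fails: abs gives 3
             (add(-1, -2) == -3, 1.0),  # fails: abs gives 3
             (add(1, 2) == 3, 2.0)]     # passes
    weight = sum(w for ok, w in cases if ok)
    assert weight == 2.0 and not all(ok for ok, _ in cases)
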
diff --git a/yaksh/evaluator_tests/test_scilab_evaluation.py b/yaksh/evaluator_tests/test_scilab_evaluation.py
index b366480..c30f652 100644
--- a/yaksh/evaluator_tests/test_scilab_evaluation.py
+++ b/yaksh/evaluator_tests/test_scilab_evaluation.py
@@ -11,7 +11,9 @@ from yaksh.settings import SERVER_TIMEOUT
class ScilabEvaluationTestCases(unittest.TestCase):
def setUp(self):
tmp_in_dir_path = tempfile.mkdtemp()
- self.test_case_data = [{"test_case": "scilab_files/test_add.sce"}]
+ self.test_case_data = [{"test_case": "scilab_files/test_add.sce",
+ "weight": 0.0
+ }]
self.in_dir = tmp_in_dir_path
self.timeout_msg = ("Code took more than {0} seconds to run. "
"You probably have an infinite loop"
@@ -25,19 +27,21 @@ class ScilabEvaluationTestCases(unittest.TestCase):
user_answer = ("funcprot(0)\nfunction[c]=add(a,b)"
"\n\tc=a+b;\nendfunction")
get_class = ScilabCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
+ kwargs = {'user_answer': user_answer,
+ 'partial_grading': True,
'test_case_data': self.test_case_data,
'file_paths': self.file_paths
}
result = get_class.evaluate(**kwargs)
- self.assertEqual(result.get('error'), "Correct answer")
+ self.assertEqual(result.get('error'), "Correct answer\n")
self.assertTrue(result.get('success'))
def test_error(self):
user_answer = ("funcprot(0)\nfunction[c]=add(a,b)"
"\n\tc=a+b;\ndis(\tendfunction")
get_class = ScilabCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
+ kwargs = {'user_answer': user_answer,
+ 'partial_grading': True,
'test_case_data': self.test_case_data,
'file_paths': self.file_paths
}
@@ -50,7 +54,8 @@ class ScilabEvaluationTestCases(unittest.TestCase):
user_answer = ("funcprot(0)\nfunction[c]=add(a,b)"
"\n\tc=a-b;\nendfunction")
get_class = ScilabCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
+ kwargs = {'user_answer': user_answer,
+ 'partial_grading': True,
'test_case_data': self.test_case_data,
'file_paths': self.file_paths
}
@@ -64,7 +69,8 @@ class ScilabEvaluationTestCases(unittest.TestCase):
user_answer = ("funcprot(0)\nfunction[c]=add(a,b)"
"\n\tc=a;\nwhile(1==1)\nend\nendfunction")
get_class = ScilabCodeEvaluator(self.in_dir)
- kwargs = {'user_answer': user_answer,
+ kwargs = {'user_answer': user_answer,
+ 'partial_grading': True,
'test_case_data': self.test_case_data,
'file_paths': self.file_paths
}
diff --git a/yaksh/java_code_evaluator.py b/yaksh/java_code_evaluator.py
index 1ce1c0e..d87e6e3 100644
--- a/yaksh/java_code_evaluator.py
+++ b/yaksh/java_code_evaluator.py
@@ -16,6 +16,7 @@ class JavaCodeEvaluator(CodeEvaluator):
"""Tests the Java code obtained from Code Server"""
def setup(self):
super(JavaCodeEvaluator, self).setup()
+ self.files = []
self.submit_code_path = self.create_submit_code_file('Test.java')
self.compiled_user_answer = None
self.compiled_test_code = None
@@ -46,8 +47,7 @@ class JavaCodeEvaluator(CodeEvaluator):
output_path = "{0}{1}.class".format(directory, file_name)
return output_path
- def compile_code(self, user_answer, file_paths, test_case):
- self.files = []
+ def compile_code(self, user_answer, file_paths, test_case, weight):
if self.compiled_user_answer and self.compiled_test_code:
return None
else:
@@ -96,7 +96,7 @@ class JavaCodeEvaluator(CodeEvaluator):
return self.compiled_user_answer, self.compiled_test_code
- def check_code(self, user_answer, file_paths, test_case):
+ def check_code(self, user_answer, file_paths, partial_grading, test_case, weight):
""" Function validates student code using instructor code as
reference.The first argument ref_code_path, is the path to
instructor code, it is assumed to have executable permission.
@@ -117,6 +117,8 @@ class JavaCodeEvaluator(CodeEvaluator):
"""
success = False
+ test_case_weight = 0.0
+
proc, stdnt_out, stdnt_stderr = self.compiled_user_answer
stdnt_stderr = self._remove_null_substitute_char(stdnt_stderr)
@@ -134,6 +136,7 @@ class JavaCodeEvaluator(CodeEvaluator):
proc, stdout, stderr = ret
if proc.returncode == 0:
success, err = True, "Correct answer"
+ test_case_weight = float(weight) if partial_grading else 0.0
else:
err = stdout + "\n" + stderr
else:
@@ -158,4 +161,4 @@ class JavaCodeEvaluator(CodeEvaluator):
err = err + "\n" + e
except:
err = err + "\n" + stdnt_stderr
- return success, err
+ return success, err, test_case_weight
diff --git a/yaksh/java_stdio_evaluator.py b/yaksh/java_stdio_evaluator.py
index bc9cf80..88d4c88 100644
--- a/yaksh/java_stdio_evaluator.py
+++ b/yaksh/java_stdio_evaluator.py
@@ -14,6 +14,7 @@ class JavaStdioEvaluator(StdIOEvaluator):
def setup(self):
super(JavaStdioEvaluator, self).setup()
+ self.files = []
self.submit_code_path = self.create_submit_code_file('Test.java')
def teardown(self):
@@ -30,8 +31,7 @@ class JavaStdioEvaluator(StdIOEvaluator):
compile_command = 'javac {0}'.format(self.submit_code_path)
return compile_command
- def compile_code(self, user_answer, file_paths, expected_input, expected_output):
- self.files = []
+ def compile_code(self, user_answer, file_paths, expected_input, expected_output, weight):
if not isfile(self.submit_code_path):
msg = "No file at %s or Incorrect path" % self.submit_code_path
return False, msg
@@ -50,8 +50,10 @@ class JavaStdioEvaluator(StdIOEvaluator):
)
return self.compiled_user_answer
- def check_code(self, user_answer, file_paths, expected_input, expected_output):
+ def check_code(self, user_answer, file_paths, partial_grading,
+ expected_input, expected_output, weight):
success = False
+ test_case_weight = 0.0
proc, stdnt_out, stdnt_stderr = self.compiled_user_answer
stdnt_stderr = self._remove_null_substitute_char(stdnt_stderr)
if stdnt_stderr == '' or "error" not in stdnt_stderr:
@@ -77,4 +79,5 @@ class JavaStdioEvaluator(StdIOEvaluator):
err = err + "\n" + e
except:
err = err + "\n" + stdnt_stderr
- return success, err
+ test_case_weight = float(weight) if partial_grading and success else 0.0
+ return success, err, test_case_weight
diff --git a/yaksh/models.py b/yaksh/models.py
index 7f9eead..8907df0 100644
--- a/yaksh/models.py
+++ b/yaksh/models.py
@@ -245,6 +245,9 @@ class Question(models.Model):
# user for particular question
user = models.ForeignKey(User, related_name="user")
+ # Does this question allow partial grading
+ partial_grading = models.BooleanField(default=False)
+
def consolidate_answer_data(self, user_answer):
question_data = {}
test_case_data = []
@@ -257,6 +260,7 @@ class Question(models.Model):
question_data['test_case_data'] = test_case_data
question_data['user_answer'] = user_answer
+ question_data['partial_grading'] = self.partial_grading
files = FileUpload.objects.filter(question=self)
if files:
question_data['file_paths'] = [(file.file.path, file.extract)
@@ -321,6 +325,13 @@ class Question(models.Model):
return test_case
+ def get_maximum_test_case_weight(self, **kwargs):
+ max_weight = 0.0
+ for test_case in self.get_test_cases():
+ max_weight += test_case.weight
+
+ return max_weight
+
def _add_and_get_files(self, zip_file):
files = FileUpload.objects.filter(question=self)
files_list = []
@@ -937,11 +948,13 @@ class AnswerPaper(models.Model):
def _update_marks_obtained(self):
"""Updates the total marks earned by student for this paper."""
- marks = sum([x.marks for x in self.answers.filter(marks__gt=0.0)])
- if not marks:
- self.marks_obtained = 0
- else:
- self.marks_obtained = marks
+ marks = 0
+ for question in self.questions.all():
+ marks_list = [a.marks for a in self.answers.filter(question=question)]
+ max_marks = max(marks_list) if marks_list else 0.0
+ marks += max_marks
+ self.marks_obtained = marks
+
def _update_percent(self):
"""Updates the percent gained by the student for this paper."""
@@ -1023,7 +1036,7 @@ class AnswerPaper(models.Model):
For code questions success is True only if the answer is correct.
"""
- result = {'success': True, 'error': 'Incorrect answer'}
+ result = {'success': True, 'error': 'Incorrect answer', 'weight': 0.0}
correct = False
if user_answer is not None:
if question.type == 'mcq':
@@ -1071,11 +1084,18 @@ class AnswerPaper(models.Model):
json_data = question.consolidate_answer_data(answer) \
if question.type == 'code' else None
correct, result = self.validate_answer(answer, question, json_data)
- user_answer.marks = question.points if correct else 0.0
user_answer.correct = correct
user_answer.error = result.get('error')
+ if correct:
+ user_answer.marks = (question.points * result['weight'] /
+ question.get_maximum_test_case_weight()) \
+ if question.partial_grading and question.type == 'code' else question.points
+ else:
+ user_answer.marks = (question.points * result['weight'] /
+ question.get_maximum_test_case_weight()) \
+ if question.partial_grading and question.type == 'code' else 0
user_answer.save()
- self.update_marks('complete')
+ self.update_marks('completed')
return True, msg
def __str__(self):
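How the partial mark for a code question is derived from the returned weight, with hypothetical numbers; non-code questions and questions without partial grading keep the old all-or-nothing points:

    points = 10.0            # question.points
    earned_weight = 3.0      # result['weight'] returned by the code server
    max_weight = 4.0         # question.get_maximum_test_case_weight()
    partial_grading, question_type, correct = True, 'code', False

    if partial_grading and question_type == 'code':
        marks = points * earned_weight / max_weight   # 7.5 even though not every test passed
    else:
        marks = points if correct else 0.0
    print(marks)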
@@ -1098,9 +1118,11 @@ class TestCase(models.Model):
class StandardTestCase(TestCase):
test_case = models.TextField(blank=True)
+ weight = models.FloatField(default=0.0)
def get_field_value(self):
- return {"test_case": self.test_case}
+ return {"test_case": self.test_case,
+ "weight": self.weight}
def __str__(self):
return u'Question: {0} | Test Case: {1}'.format(self.question,
@@ -1111,10 +1133,12 @@ class StandardTestCase(TestCase):
class StdioBasedTestCase(TestCase):
expected_input = models.TextField(blank=True)
expected_output = models.TextField()
+ weight = models.IntegerField(default=0.0)
def get_field_value(self):
return {"expected_output": self.expected_output,
- "expected_input": self.expected_input}
+ "expected_input": self.expected_input,
+ "weight": self.weight}
def __str__(self):
return u'Question: {0} | Exp. Output: {1} | Exp. Input: {2}'.format(self.question,
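With weight added to get_field_value(), the per-test-case dictionaries shipped to the code server gain a weight key. A hypothetical payload is sketched below; a real question uses a single test case type, but both shapes are shown for comparison:

    question_data = {
        "user_answer": "def add(a, b): return a + b",
        "partial_grading": True,
        "test_case_data": [
            # shape produced by StandardTestCase.get_field_value()
            {"test_case": "assert add(1, 2) == 3", "weight": 1.0},
            # shape produced by StdioBasedTestCase.get_field_value()
            {"expected_input": "1 2", "expected_output": "3", "weight": 2.0},
        ],
    }
    print(question_data["test_case_data"][0]["weight"])   # 1.0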
diff --git a/yaksh/python_assertion_evaluator.py b/yaksh/python_assertion_evaluator.py
index dd1c041..275244a 100644
--- a/yaksh/python_assertion_evaluator.py
+++ b/yaksh/python_assertion_evaluator.py
@@ -17,6 +17,7 @@ class PythonAssertionEvaluator(CodeEvaluator):
def setup(self):
super(PythonAssertionEvaluator, self).setup()
self.exec_scope = None
+ self.files = []
def teardown(self):
# Delete the created file.
@@ -24,8 +25,7 @@ class PythonAssertionEvaluator(CodeEvaluator):
delete_files(self.files)
super(PythonAssertionEvaluator, self).teardown()
- def compile_code(self, user_answer, file_paths, test_case):
- self.files = []
+ def compile_code(self, user_answer, file_paths, test_case, weight):
if file_paths:
self.files = copy_files(file_paths)
if self.exec_scope:
@@ -36,8 +36,29 @@ class PythonAssertionEvaluator(CodeEvaluator):
exec(submitted, self.exec_scope)
return self.exec_scope
- def check_code(self, user_answer, file_paths, test_case):
+ def check_code(self, user_answer, file_paths, partial_grading, test_case, weight):
+ """ Function validates user answer by running an assertion based test case
+ against it
+
+ Returns
+ --------
+ Returns a tuple (success, error, test_case_weight)
+
+ success - Boolean, indicating if code was executed successfully, correctly
+ weight - Float, indicating total weight of all successful test cases
+ error - String, error message if success is false
+
+ returns (True, "Correct answer", 1.0) : If the student script passes all
+ test cases/have same output, when compared to the instructor script
+
+ returns (False, error_msg, 0.0): If the student script fails a single
+ test/have dissimilar output, when compared to the instructor script.
+
+ Returns (False, error_msg, 0.0): If mandatory arguments are not files or if
+ the required permissions are not given to the file(s).
+ """
success = False
+ test_case_weight = 0.0
try:
tb = None
_tests = compile(test_case, '<string>', mode='exec')
@@ -47,11 +68,13 @@ class PythonAssertionEvaluator(CodeEvaluator):
info = traceback.extract_tb(tb)
fname, lineno, func, text = info[-1]
text = str(test_case).splitlines()[lineno-1]
- err = "{0} {1} in: {2}".format(type.__name__, str(value), text)
+ err = ("-----\nExpected Test Case:\n{0}\n"
+ "Error - {1} {2} in: {3}\n-----").format(test_case, type.__name__, str(value), text)
except Exception:
raise # Exception will be caught in CodeEvaluator.
else:
success = True
- err = 'Correct answer'
+ err = '-----\nCorrect answer\nTest Case: {0}\n-----'.format(test_case)
+ test_case_weight = float(weight) if partial_grading else 0.0
del tb
- return success, err
+ return success, err, test_case_weight
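The assertion evaluator's core is unchanged: the user code is exec'd into a scope, then each test case string is compiled and exec'd against that same scope. A stripped-down sketch of that check, with no sandboxing or server round trip:

    user_answer = (
        "def fact(n):\n"
        "    return 1 if n <= 1 else n * fact(n - 1)\n"
    )
    test_case = "assert fact(3) == 6"

    scope = {}
    exec(compile(user_answer, '<string>', 'exec'), scope)    # define fact() in scope
    try:
        exec(compile(test_case, '<string>', 'exec'), scope)  # run the assertion
        success = True
    except AssertionError:
        success = False
    print(success)   # True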
diff --git a/yaksh/python_stdio_evaluator.py b/yaksh/python_stdio_evaluator.py
index cbbbfd6..1506685 100644
--- a/yaksh/python_stdio_evaluator.py
+++ b/yaksh/python_stdio_evaluator.py
@@ -31,6 +31,10 @@ def redirect_stdout():
class PythonStdioEvaluator(CodeEvaluator):
"""Tests the Python code obtained from Code Server"""
+ def setup(self):
+ super(PythonStdioEvaluator, self).setup()
+ self.files = []
+
def teardown(self):
# Delete the created file.
if self.files:
@@ -38,8 +42,7 @@ class PythonStdioEvaluator(CodeEvaluator):
super(PythonStdioEvaluator, self).teardown()
- def compile_code(self, user_answer, file_paths, expected_input, expected_output):
- self.files = []
+ def compile_code(self, user_answer, file_paths, expected_input, expected_output, weight):
if file_paths:
self.files = copy_files(file_paths)
submitted = compile(user_answer, '<string>', mode='exec')
@@ -54,13 +57,16 @@ class PythonStdioEvaluator(CodeEvaluator):
self.output_value = output_buffer.getvalue().rstrip("\n")
return self.output_value
- def check_code(self, user_answer, file_paths, expected_input, expected_output):
+ def check_code(self, user_answer, file_paths, partial_grading, expected_input,
+ expected_output, weight):
success = False
+ test_case_weight = 0.0
tb = None
if self.output_value == expected_output:
success = True
err = "Correct answer"
+ test_case_weight = weight
else:
success = False
err = dedent("""
@@ -68,10 +74,10 @@ class PythonStdioEvaluator(CodeEvaluator):
Given input - {0}
Expected output - {1}
Your output - {2}
- """
- .format(expected_input,
- expected_output, self.output_value
- )
- )
+ """.format(expected_input,
+ expected_output,
+ self.output_value
+ )
+ )
del tb
- return success, err
+ return success, err, test_case_weight
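The stdio evaluator feeds expected_input on stdin, captures whatever the submission prints, and compares it with expected_output after stripping the trailing newline. A simplified, self-contained sketch of that comparison:

    import io
    import sys

    user_answer = "a, b = map(int, input().split())\nprint(a + b)"
    expected_input, expected_output = "1 2", "3"

    sys.stdin = io.StringIO(expected_input)       # pretend the expected input was typed in
    buffer = io.StringIO()
    old_stdout, sys.stdout = sys.stdout, buffer   # capture everything the submission prints
    try:
        exec(compile(user_answer, '<string>', 'exec'), {})
    finally:
        sys.stdout, sys.stdin = old_stdout, sys.__stdin__

    success = buffer.getvalue().rstrip("\n") == expected_output
    print(success)   # True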
diff --git a/yaksh/scilab_code_evaluator.py b/yaksh/scilab_code_evaluator.py
index 915491c..3c2d44c 100644
--- a/yaksh/scilab_code_evaluator.py
+++ b/yaksh/scilab_code_evaluator.py
@@ -16,6 +16,7 @@ class ScilabCodeEvaluator(CodeEvaluator):
"""Tests the Scilab code obtained from Code Server"""
def setup(self):
super(ScilabCodeEvaluator, self).setup()
+ self.files = []
self.submit_code_path = \
self.create_submit_code_file('function.sci')
@@ -26,8 +27,7 @@ class ScilabCodeEvaluator(CodeEvaluator):
delete_files(self.files)
super(ScilabCodeEvaluator, self).teardown()
- def check_code(self, user_answer, file_paths, test_case):
- self.files = []
+ def check_code(self, user_answer, file_paths, partial_grading, test_case, weight):
if file_paths:
self.files = copy_files(file_paths)
ref_code_path = test_case
@@ -37,6 +37,8 @@ class ScilabCodeEvaluator(CodeEvaluator):
self._remove_scilab_exit(user_answer.lstrip())
success = False
+ test_case_weight = 0.0
+
self.write_to_submit_code_file(self.submit_code_path, user_answer)
# Throw message if there are commands that terminate scilab
add_err = ""
@@ -63,11 +65,12 @@ class ScilabCodeEvaluator(CodeEvaluator):
stdout = self._strip_output(stdout)
if proc.returncode == 5:
success, err = True, "Correct answer"
+ test_case_weight = float(weight) if partial_grading else 0.0
else:
err = add_err + stdout
else:
err = add_err + stderr
- return success, err
+ return success, err, test_case_weight
def _remove_scilab_exit(self, string):
"""
diff --git a/yaksh/templates/yaksh/add_question.html b/yaksh/templates/yaksh/add_question.html
index c0d53f8..9822333 100644
--- a/yaksh/templates/yaksh/add_question.html
+++ b/yaksh/templates/yaksh/add_question.html
@@ -24,6 +24,7 @@
<tr><td>Description: <td>{{ form.description}} {{form.description.errors}}
<tr><td>Tags: <td>{{ form.tags }}
<tr><td>Snippet: <td>{{ form.snippet }}
+ <tr><td>Partial Grading: <td>{{ form.partial_grading }}
<tr><td> Test Case Type: <td> {{ form.test_case_type }}{{ form.test_case_type.errors }}
<tr><td> File: <td> {{ upload_form.file_field }}{{ upload_form.file_field.errors }}
{% if uploaded_files %}<br><b>Uploaded files:</b><br>Check the box to delete or extract files<br>
diff --git a/yaksh/templates/yaksh/grade_user.html b/yaksh/templates/yaksh/grade_user.html
index ced3ca2..38f31ca 100644
--- a/yaksh/templates/yaksh/grade_user.html
+++ b/yaksh/templates/yaksh/grade_user.html
@@ -157,7 +157,7 @@ Status : <b style="color: green;"> Passed </b><br/>
<h5>Student answer: </h5>
{% for answer in answers %}
{% if not answer.skipped %}
- {% if "Correct answer" in answer.error %}
+ {% if answer.correct %}
<div class="panel panel-success">
{% else %}
<div class="panel panel-danger">
diff --git a/yaksh/templates/yaksh/question.html b/yaksh/templates/yaksh/question.html
index 11484fe..bfb235b 100644
--- a/yaksh/templates/yaksh/question.html
+++ b/yaksh/templates/yaksh/question.html
@@ -211,7 +211,7 @@ function call_skip(url)
{% if error_message %}
<p> Output Message</p>
<div class="alert alert-danger" role="alert">
- {{ error_message }}
+ <textarea style="width:100%" class="error" readonly="yes">{{ error_message }}</textarea>
</div>
{% endif %}
</div>
diff --git a/yaksh/templates/yaksh/user_data.html b/yaksh/templates/yaksh/user_data.html
index 0a7e4aa..378e7fd 100644
--- a/yaksh/templates/yaksh/user_data.html
+++ b/yaksh/templates/yaksh/user_data.html
@@ -94,7 +94,7 @@ User IP address: {{ paper.user_ip }}
<h5>Student answer: </h5>
{% for answer in answers %}
{% if not answer.skipped %}
- {% if "Correct answer" in answer.error %}
+ {% if answer.correct %}
<div class="panel panel-success">
{% else %}
<div class="panel panel-danger">
diff --git a/yaksh/test_models.py b/yaksh/test_models.py
index 019a339..e7f3016 100644
--- a/yaksh/test_models.py
+++ b/yaksh/test_models.py
@@ -141,7 +141,9 @@ class QuestionTestCases(unittest.TestCase):
)
self.upload_test_case.save()
self.user_answer = "demo_answer"
- self.test_case_upload_data = [{"test_case": "assert fact(3)==6"}]
+ self.test_case_upload_data = [{"test_case": "assert fact(3)==6",
+ "weight": 0.0
+ }]
questions_data = [{"snippet": "def fact()", "active": True,
"points": 1.0,
"description": "factorial of a no",
@@ -877,7 +879,9 @@ class TestCaseTestCases(unittest.TestCase):
self.stdout_based_testcase.save()
answer_data = {"user_answer": "demo_answer",
"test_case_data": [
- {"test_case": "assert myfunc(12, 13) == 15"}
+ {"test_case": "assert myfunc(12, 13) == 15",
+ "weight": 0.0
+ }
]
}
self.answer_data_json = json.dumps(answer_data)
diff --git a/yaksh/tests/test_code_server.py b/yaksh/tests/test_code_server.py
index 8835110..7efd20b 100644
--- a/yaksh/tests/test_code_server.py
+++ b/yaksh/tests/test_code_server.py
@@ -35,10 +35,14 @@ class TestCodeServer(unittest.TestCase):
def setUp(self):
self.code_server = CodeServerProxy()
- def test_inifinite_loop(self):
+ def test_infinite_loop(self):
# Given
testdata = {'user_answer': 'while True: pass',
- 'test_case_data': [{'test_case':'assert 1==2'}]}
+ 'partial_grading': False,
+ 'test_case_data': [{'test_case':'assert 1==2',
+ 'weight': 0.0
+ }
+ ]}
# When
result = self.code_server.run_code(
@@ -53,7 +57,11 @@ class TestCodeServer(unittest.TestCase):
def test_correct_answer(self):
# Given
testdata = {'user_answer': 'def f(): return 1',
- 'test_case_data': [{'test_case':'assert f() == 1'}]}
+ 'partial_grading': False,
+ 'test_case_data': [{'test_case':'assert f() == 1',
+ 'weight': 0.0
+ }
+ ]}
# When
result = self.code_server.run_code(
@@ -63,12 +71,16 @@ class TestCodeServer(unittest.TestCase):
# Then
data = json.loads(result)
self.assertTrue(data['success'])
- self.assertEqual(data['error'], 'Correct answer')
+ self.assertIn('Correct answer', data['error'])
def test_wrong_answer(self):
# Given
testdata = {'user_answer': 'def f(): return 1',
- 'test_case_data': [{'test_case':'assert f() == 2'}]}
+ 'partial_grading': False,
+ 'test_case_data': [{'test_case':'assert f() == 2',
+ 'weight': 0.0
+ }
+ ]}
# When
result = self.code_server.run_code(
@@ -87,7 +99,11 @@ class TestCodeServer(unittest.TestCase):
def run_code():
"""Run an infinite loop."""
testdata = {'user_answer': 'while True: pass',
- 'test_case_data': [{'test_case':'assert 1==2'}]}
+ 'partial_grading': False,
+ 'test_case_data': [{'test_case':'assert 1==2',
+ 'weight': 0.0
+ }
+ ]}
result = self.code_server.run_code(
'python', 'standardtestcase', json.dumps(testdata), ''
)
diff --git a/yaksh/views.py b/yaksh/views.py
index 1afcef7..c3d743b 100644
--- a/yaksh/views.py
+++ b/yaksh/views.py
@@ -517,11 +517,16 @@ def check(request, q_id, attempt_num=None, questionpaper_id=None):
if question.type == 'code' else None
correct, result = paper.validate_answer(user_answer, question, json_data)
if correct:
+ new_answer.marks = (question.points * result['weight'] /
+ question.get_maximum_test_case_weight()) \
+ if question.partial_grading and question.type == 'code' else question.points
new_answer.correct = correct
- new_answer.marks = question.points
new_answer.error = result.get('error')
else:
new_answer.error = result.get('error')
+ new_answer.marks = (question.points * result['weight'] /
+ question.get_maximum_test_case_weight()) \
+ if question.partial_grading and question.type == 'code' else 0
new_answer.save()
paper.update_marks('inprogress')
paper.set_end_time(timezone.now())
diff --git a/yaksh/xmlrpc_clients.py b/yaksh/xmlrpc_clients.py
index 4da70dd..bb8260d 100644
--- a/yaksh/xmlrpc_clients.py
+++ b/yaksh/xmlrpc_clients.py
@@ -55,14 +55,21 @@ class CodeServerProxy(object):
Returns
-------
- A json string of a dict: {success: success, err: error message}.
+ A json string of a dict containing:
+ {"success": success, "weight": weight, "error": error message}
+
+ success - Boolean, indicating whether the code executed successfully and correctly
+ weight - Float, the total weight of all test cases that passed
+ error - String, the error message if success is False
"""
try:
server = self._get_server()
result = server.check_code(language, test_case_type, json_data, user_dir)
except ConnectionError:
- result = json.dumps({'success': False, 'error': 'Unable to connect to any code servers!'})
+ result = json.dumps({'success': False,
+ 'weight': 0.0,
+ 'error': 'Unable to connect to any code servers!'})
return result
def _get_server(self):
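Callers of run_code always receive a JSON string whose dictionary now carries success, weight and error, including the connection-failure fallback above. A short illustration of consuming it, with hypothetical values:

    import json

    result = json.dumps({"success": True,
                         "weight": 3.0,
                         "error": "-----\nCorrect answer\nTest Case: assert f() == 1\n-----"})

    data = json.loads(result)
    if data["success"]:
        print("weight earned:", data["weight"])   # weight earned: 3.0
    else:
        print(data["error"])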