-rw-r--r--  yaksh/bash_code_evaluator.py                    | 20
-rw-r--r--  yaksh/bash_stdio_evaluator.py                   | 10
-rw-r--r--  yaksh/code_evaluator.py                         | 10
-rw-r--r--  yaksh/cpp_code_evaluator.py                     | 10
-rw-r--r--  yaksh/cpp_stdio_evaluator.py                    | 10
-rw-r--r--  yaksh/evaluator_tests/test_bash_evaluation.py   | 12
-rw-r--r--  yaksh/evaluator_tests/test_c_cpp_evaluation.py  | 18
-rw-r--r--  yaksh/evaluator_tests/test_java_evaluation.py   | 14
-rw-r--r--  yaksh/evaluator_tests/test_python_evaluation.py | 26
-rw-r--r--  yaksh/evaluator_tests/test_scilab_evaluation.py |  2
-rw-r--r--  yaksh/java_code_evaluator.py                    | 10
-rw-r--r--  yaksh/java_stdio_evaluator.py                   | 10
-rw-r--r--  yaksh/models.py                                 | 27
-rw-r--r--  yaksh/python_assertion_evaluator.py             | 10
-rw-r--r--  yaksh/python_stdio_evaluator.py                 | 21
-rw-r--r--  yaksh/scilab_code_evaluator.py                  |  8
-rw-r--r--  yaksh/test_models.py                            |  4
-rw-r--r--  yaksh/tests/test_code_server.py                 | 18
-rw-r--r--  yaksh/views.py                                  | 10
-rw-r--r--  yaksh/xmlrpc_clients.py                         |  2
20 files changed, 134 insertions, 118 deletions
diff --git a/yaksh/bash_code_evaluator.py b/yaksh/bash_code_evaluator.py
index 4ca5445..978586f 100644
--- a/yaksh/bash_code_evaluator.py
+++ b/yaksh/bash_code_evaluator.py
@@ -28,7 +28,7 @@ class BashCodeEvaluator(CodeEvaluator):
         delete_files(self.files)
         super(BashCodeEvaluator, self).teardown()
 
-    def check_code(self, user_answer, file_paths, partial_grading, test_case, marks):
+    def check_code(self, user_answer, file_paths, partial_grading, test_case, weightage):
         """ Function validates student script using instructor script as
         reference. Test cases can optionally be provided. The first argument
         ref_path, is the path to instructor script, it is assumed to
@@ -74,7 +74,7 @@ class BashCodeEvaluator(CodeEvaluator):
             return False, msg
 
         success = False
-        test_case_marks = 0.0
+        test_case_weightage = 0.0
         user_answer = user_answer.replace("\r", "")
         self.write_to_submit_code_file(self.submit_code_path, user_answer)
 
@@ -93,20 +93,20 @@ class BashCodeEvaluator(CodeEvaluator):
                 )
             proc, stdnt_stdout, stdnt_stderr = ret
             if inst_stdout == stdnt_stdout:
-                test_case_marks = float(marks) if partial_grading else 0.0
-                return True, "Correct answer", test_case_marks
+                test_case_weightage = float(weightage) if partial_grading else 0.0
+                return True, "Correct answer", test_case_weightage
             else:
                 err = "Error: expected %s, got %s" % (inst_stderr,
                                                       stdnt_stderr
                                                       )
-                return False, err, test_case_marks
+                return False, err, test_case_weightage
         else:
             if not isfile(clean_test_case_path):
                 msg = "No test case at %s" % clean_test_case_path
-                return False, msg, test_case_marks
+                return False, msg, test_case_weightage
             if not os.access(clean_ref_code_path, os.R_OK):
                 msg = "Test script %s, not readable" % clean_test_case_path
-                return False, msg, test_case_marks
+                return False, msg, test_case_weightage
             # valid_answer is True, so that we can stop once a test case fails
             valid_answer = True
             # loop_count has to be greater than or equal to one.
@@ -136,11 +136,11 @@ class BashCodeEvaluator(CodeEvaluator):
                 proc, stdnt_stdout, stdnt_stderr = ret
                 valid_answer = inst_stdout == stdnt_stdout
             if valid_answer and (num_lines == loop_count):
-                test_case_marks = float(marks) if partial_grading else 0.0
-                return True, "Correct answer", test_case_marks
+                test_case_weightage = float(weightage) if partial_grading else 0.0
+                return True, "Correct answer", test_case_weightage
             else:
                 err = ("Error:expected"
                        " {0}, got {1}").format(inst_stdout+inst_stderr,
                                                stdnt_stdout+stdnt_stderr
                                                )
-                return False, err, test_case_marks
+                return False, err, test_case_weightage
diff --git a/yaksh/bash_stdio_evaluator.py b/yaksh/bash_stdio_evaluator.py
index 2826a6b..fab19bf 100644
--- a/yaksh/bash_stdio_evaluator.py
+++ b/yaksh/bash_stdio_evaluator.py
@@ -23,7 +23,7 @@ class BashStdioEvaluator(StdIOEvaluator):
         delete_files(self.files)
         super(BashStdioEvaluator, self).teardown()
 
-    def compile_code(self, user_answer, file_paths, expected_input, expected_output, marks):
+    def compile_code(self, user_answer, file_paths, expected_input, expected_output, weightage):
         if file_paths:
             self.files = copy_files(file_paths)
         if not isfile(self.submit_code_path):
@@ -34,9 +34,9 @@ class BashStdioEvaluator(StdIOEvaluator):
         self.write_to_submit_code_file(self.submit_code_path, user_answer)
 
     def check_code(self, user_answer, file_paths, partial_grading,
-                   expected_input, expected_output, marks):
+                   expected_input, expected_output, weightage):
         success = False
-        test_case_marks = 0.0
+        test_case_weightage = 0.0
         expected_input = str(expected_input).replace('\r', '')
 
         proc = subprocess.Popen("bash ./Test.sh",
@@ -49,5 +49,5 @@ class BashStdioEvaluator(StdIOEvaluator):
                                          expected_input,
                                          expected_output
                                          )
-        test_case_marks = float(marks) if partial_grading and success else 0.0
-        return success, err, test_case_marks
+        test_case_weightage = float(weightage) if partial_grading and success else 0.0
+        return success, err, test_case_weightage
diff --git a/yaksh/code_evaluator.py b/yaksh/code_evaluator.py
index 1672efa..fda0a8d 100644
--- a/yaksh/code_evaluator.py
+++ b/yaksh/code_evaluator.py
@@ -82,7 +82,7 @@ class CodeEvaluator(object):
 
         Returns
         -------
-        A tuple: (success, error message, marks).
+        A tuple: (success, error message, weightage).
         """
         self.setup()
@@ -109,20 +109,20 @@ class CodeEvaluator(object):
         prev_handler = create_signal_handler()
         success = False
         error = ""
-        marks = 0.0
+        weightage = 0
 
         # Do whatever testing needed.
         try:
             for test_case in test_case_data:
                 success = False
                 self.compile_code(user_answer, file_paths, **test_case)
-                success, err, test_case_marks = self.check_code(user_answer,
+                success, err, test_case_weightage = self.check_code(user_answer,
                     file_paths,
                     partial_grading,
                     **test_case
                 )
                 if success:
-                    marks += test_case_marks
+                    weightage += test_case_weightage
                     error = err
                 else:
                     error += err + "\n"
@@ -142,7 +142,7 @@ class CodeEvaluator(object):
             # Set back any original signal handler.
             set_original_signal_handler(prev_handler)
 
-        return success, error, marks
+        return success, error, weightage
 
     def teardown(self):
         # Cancel the signal
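Note: after this hunk, the generic evaluate loop in CodeEvaluator sums per-test-case weightage instead of marks (and the accumulator is now initialised to the integer 0 rather than 0.0). A minimal sketch of the accumulation, where `cases` and `check` are hypothetical stand-ins rather than yaksh names:

    # Sketch only: mirrors the loop in CodeEvaluator.evaluate() above.
    # `cases` and `check` are placeholders, not actual yaksh APIs.
    def total_weightage(cases, check):
        earned = 0.0
        for case in cases:
            success, err, case_weightage = check(**case)
            if success:
                earned += case_weightage   # only passing cases contribute
        return earned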
diff --git a/yaksh/cpp_code_evaluator.py b/yaksh/cpp_code_evaluator.py
index d6c72d6..f069b03 100644
--- a/yaksh/cpp_code_evaluator.py
+++ b/yaksh/cpp_code_evaluator.py
@@ -50,7 +50,7 @@ class CppCodeEvaluator(CodeEvaluator):
                                         ref_output_path)
         return compile_command, compile_main
 
-    def compile_code(self, user_answer, file_paths, test_case, marks):
+    def compile_code(self, user_answer, file_paths, test_case, weightage):
         if self.compiled_user_answer and self.compiled_test_code:
             return None
         else:
@@ -89,7 +89,7 @@ class CppCodeEvaluator(CodeEvaluator):
 
         return self.compiled_user_answer, self.compiled_test_code
 
-    def check_code(self, user_answer, file_paths, partial_grading, test_case, marks):
+    def check_code(self, user_answer, file_paths, partial_grading, test_case, weightage):
         """ Function validates student code using instructor code as
         reference.The first argument ref_code_path, is the path to
         instructor code, it is assumed to have executable permission.
@@ -109,7 +109,7 @@ class CppCodeEvaluator(CodeEvaluator):
             if the required permissions are not given to the file(s).
         """
         success = False
-        test_case_marks = 0.0
+        test_case_weightage = 0.0
         proc, stdnt_out, stdnt_stderr = self.compiled_user_answer
         stdnt_stderr = self._remove_null_substitute_char(stdnt_stderr)
@@ -129,7 +129,7 @@ class CppCodeEvaluator(CodeEvaluator):
                 proc, stdout, stderr = ret
                 if proc.returncode == 0:
                     success, err = True, "Correct answer"
-                    test_case_marks = float(marks) if partial_grading else 0.0
+                    test_case_weightage = float(weightage) if partial_grading else 0.0
                 else:
                     err = "{0} \n {1}".format(stdout, stderr)
             else:
@@ -155,4 +155,4 @@ class CppCodeEvaluator(CodeEvaluator):
                 except:
                     err = "{0} \n {1}".format(err, stdnt_stderr)
-        return success, err, test_case_marks
+        return success, err, test_case_weightage
diff --git a/yaksh/cpp_stdio_evaluator.py b/yaksh/cpp_stdio_evaluator.py
index 9e2fd2e..050cec8 100644
--- a/yaksh/cpp_stdio_evaluator.py
+++ b/yaksh/cpp_stdio_evaluator.py
@@ -35,7 +35,7 @@ class CppStdioEvaluator(StdIOEvaluator):
                                         ref_output_path)
         return compile_command, compile_main
 
-    def compile_code(self, user_answer, file_paths, expected_input, expected_output, marks):
+    def compile_code(self, user_answer, file_paths, expected_input, expected_output, weightage):
         if file_paths:
             self.files = copy_files(file_paths)
         if not isfile(self.submit_code_path):
@@ -62,9 +62,9 @@ class CppStdioEvaluator(StdIOEvaluator):
         return self.compiled_user_answer, self.compiled_test_code
 
     def check_code(self, user_answer, file_paths, partial_grading,
-                   expected_input, expected_output, marks):
+                   expected_input, expected_output, weightage):
         success = False
-        test_case_marks = 0.0
+        test_case_weightage = 0.0
         proc, stdnt_out, stdnt_stderr = self.compiled_user_answer
         stdnt_stderr = self._remove_null_substitute_char(stdnt_stderr)
@@ -106,5 +106,5 @@ class CppStdioEvaluator(StdIOEvaluator):
                     err = err + "\n" + e
                 except:
                     err = err + "\n" + stdnt_stderr
-        test_case_marks = float(marks) if partial_grading and success else 0.0
-        return success, err, test_case_marks
+        test_case_weightage = float(weightage) if partial_grading and success else 0.0
+        return success, err, test_case_weightage
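Note: compile_code() in the compiled-language evaluators now accepts a weightage argument it never reads. This is because evaluate() splats each test-case dict into both phases, so every key in the dict must appear in both signatures. A sketch of that calling convention, with placeholder values and a placeholder `evaluator` object:

    # Placeholder values; the keys mirror StdioBasedTestCase.get_field_value().
    case = {'expected_input': '5\n6', 'expected_output': '11', 'weightage': 0.0}
    evaluator.compile_code(user_answer, file_paths, **case)   # weightage unused here
    success, err, w = evaluator.check_code(user_answer, file_paths,
                                           partial_grading, **case)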
diff --git a/yaksh/evaluator_tests/test_bash_evaluation.py b/yaksh/evaluator_tests/test_bash_evaluation.py
index 5f4be4b..6c90d3c 100644
--- a/yaksh/evaluator_tests/test_bash_evaluation.py
+++ b/yaksh/evaluator_tests/test_bash_evaluation.py
@@ -15,7 +15,7 @@ class BashAssertionEvaluationTestCases(unittest.TestCase):
             f.write('2'.encode('ascii'))
         self.test_case_data = [
             {"test_case": "bash_files/sample.sh,bash_files/sample.args",
-             "marks": 0.0
+             "weightage": 0.0
             }
         ]
         self.in_dir = tempfile.mkdtemp()
@@ -72,7 +72,7 @@ class BashAssertionEvaluationTestCases(unittest.TestCase):
         self.file_paths = [('/tmp/test.txt', False)]
         self.test_case_data = [
             {"test_case": "bash_files/sample1.sh,bash_files/sample1.args",
-             "marks": 0.0
+             "weightage": 0.0
            }
         ]
         user_answer = ("#!/bin/bash\ncat $1")
@@ -102,7 +102,7 @@ class BashStdioEvaluationTestCases(unittest.TestCase):
                               )
         test_case_data = [{'expected_output': '11',
                            'expected_input': '5\n6',
-                           'marks': 0.0
+                           'weightage': 0.0
                            }]
         get_class = BashStdioEvaluator()
         kwargs = {"user_answer": user_answer,
@@ -124,7 +124,7 @@ class BashStdioEvaluationTestCases(unittest.TestCase):
                               )
         test_case_data = [{'expected_output': '1 2 3\n4 5 6\n7 8 9\n',
                            'expected_input': '1,2,3\n4,5,6\n7,8,9',
-                           'marks': 0.0
+                           'weightage': 0.0
                            }]
         get_class = BashStdioEvaluator()
         kwargs = {"user_answer": user_answer,
@@ -144,7 +144,7 @@ class BashStdioEvaluationTestCases(unittest.TestCase):
                               )
         test_case_data = [{'expected_output': '11',
                            'expected_input': '5\n6',
-                           'marks': 0.0
+                           'weightage': 0.0
                            }]
         get_class = BashStdioEvaluator()
         kwargs = {"user_answer": user_answer,
@@ -164,7 +164,7 @@ class BashStdioEvaluationTestCases(unittest.TestCase):
                               )
         test_case_data = [{'expected_output': '10',
                            'expected_input': '',
-                           'marks': 0.0
+                           'weightage': 0.0
                            }]
         get_class = BashStdioEvaluator()
         kwargs = {"user_answer": user_answer,
diff --git a/yaksh/evaluator_tests/test_c_cpp_evaluation.py b/yaksh/evaluator_tests/test_c_cpp_evaluation.py
index 79326d4..f58833a 100644
--- a/yaksh/evaluator_tests/test_c_cpp_evaluation.py
+++ b/yaksh/evaluator_tests/test_c_cpp_evaluation.py
@@ -15,7 +15,7 @@ class CAssertionEvaluationTestCases(unittest.TestCase):
             f.write('2'.encode('ascii'))
         tmp_in_dir_path = tempfile.mkdtemp()
         self.test_case_data = [{"test_case": "c_cpp_files/main.cpp",
-                                "marks": 0.0
+                                "weightage": 0.0
                                 }]
         self.in_dir = tmp_in_dir_path
         self.timeout_msg = ("Code took more than {0} seconds to run. "
@@ -80,7 +80,7 @@ class CAssertionEvaluationTestCases(unittest.TestCase):
     def test_file_based_assert(self):
         self.file_paths = [('/tmp/test.txt', False)]
         self.test_case_data = [{"test_case": "c_cpp_files/file_data.c",
-                                "marks": 0.0
+                                "weightage": 0.0
                                 }]
         user_answer = dedent("""
                              #include<stdio.h>
@@ -108,7 +108,7 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
     def setUp(self):
         self.test_case_data = [{'expected_output': '11',
                                 'expected_input': '5\n6',
-                                'marks': 0.0
+                                'weightage': 0.0
                                 }]
         self.in_dir = tempfile.mkdtemp()
         self.timeout_msg = ("Code took more than {0} seconds to run. "
@@ -135,7 +135,7 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
     def test_array_input(self):
         self.test_case_data = [{'expected_output': '561',
                                 'expected_input': '5\n6\n1',
-                                'marks': 0.0
+                                'weightage': 0.0
                                 }]
         user_answer = dedent("""
                              #include<stdio.h>
@@ -158,7 +158,7 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
     def test_string_input(self):
         self.test_case_data = [{'expected_output': 'abc',
                                 'expected_input': 'abc',
-                                'marks': 0.0
+                                'weightage': 0.0
                                 }]
         user_answer = dedent("""
                              #include<stdio.h>
@@ -229,7 +229,7 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
     def test_only_stdout(self):
         self.test_case_data = [{'expected_output': '11',
                                 'expected_input': '',
-                                'marks': 0.0
+                                'weightage': 0.0
                                 }]
         user_answer = dedent("""
                              #include<stdio.h>
@@ -267,7 +267,7 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
     def test_cpp_array_input(self):
         self.test_case_data = [{'expected_output': '561',
                                 'expected_input': '5\n6\n1',
-                                'marks': 0.0
+                                'weightage': 0.0
                                 }]
         user_answer = dedent("""
                              #include<iostream>
@@ -291,7 +291,7 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
     def test_cpp_string_input(self):
         self.test_case_data = [{'expected_output': 'abc',
                                 'expected_input': 'abc',
-                                'marks': 0.0
+                                'weightage': 0.0
                                 }]
         user_answer = dedent("""
                              #include<iostream>
@@ -366,7 +366,7 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
     def test_cpp_only_stdout(self):
         self.test_case_data = [{'expected_output': '11',
                                 'expected_input': '',
-                                'marks': 0.0
+                                'weightage': 0.0
                                 }]
         user_answer = dedent("""
                              #include<iostream>
diff --git a/yaksh/evaluator_tests/test_java_evaluation.py b/yaksh/evaluator_tests/test_java_evaluation.py
index 33e0e35..142f0bf 100644
--- a/yaksh/evaluator_tests/test_java_evaluation.py
+++ b/yaksh/evaluator_tests/test_java_evaluation.py
@@ -17,7 +17,7 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase):
         tmp_in_dir_path = tempfile.mkdtemp()
         self.test_case_data = [
             {"test_case": "java_files/main_square.java",
-             "marks": 0.0
+             "weightage": 0.0
             }
         ]
         self.in_dir = tmp_in_dir_path
@@ -86,7 +86,7 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase):
         self.file_paths = [("/tmp/test.txt", False)]
         self.test_case_data = [
             {"test_case": "java_files/read_file.java",
-             "marks": 0.0
+             "weightage": 0.0
             }
         ]
         user_answer = dedent("""
@@ -126,7 +126,7 @@ class JavaStdioEvaluationTestCases(unittest.TestCase):
         self.in_dir = tmp_in_dir_path
         self.test_case_data = [{'expected_output': '11',
                                 'expected_input': '5\n6',
-                                'marks': 0.0
+                                'weightage': 0.0
                                 }]
         evaluator.SERVER_TIMEOUT = 4
         self.timeout_msg = ("Code took more than {0} seconds to run. "
@@ -161,7 +161,7 @@ class JavaStdioEvaluationTestCases(unittest.TestCase):
 
         self.test_case_data = [{'expected_output': '561',
                                 'expected_input': '5\n6\n1',
-                                'marks': 0.0
+                                'weightage': 0.0
                                 }]
         user_answer = dedent("""
                              import java.util.Scanner;
@@ -241,7 +241,7 @@ class JavaStdioEvaluationTestCases(unittest.TestCase):
     def test_only_stdout(self):
         self.test_case_data = [{'expected_output': '11',
                                 'expected_input': '',
-                                'marks': 0.0
+                                'weightage': 0.0
                                 }]
         user_answer = dedent("""
                              class Test
@@ -262,7 +262,7 @@ class JavaStdioEvaluationTestCases(unittest.TestCase):
     def test_string_input(self):
         self.test_case_data = [{'expected_output': 'HelloWorld',
                                 'expected_input': 'Hello\nWorld',
-                                'marks': 0.0
+                                'weightage': 0.0
                                 }]
         user_answer = dedent("""
                              import java.util.Scanner;
@@ -286,7 +286,7 @@ class JavaStdioEvaluationTestCases(unittest.TestCase):
         self.file_paths = [("/tmp/test.txt", False)]
         self.test_case_data = [{'expected_output': '2',
                                 'expected_input': '',
-                                'marks': 0.0
+                                'weightage': 0.0
                                 }]
         user_answer = dedent("""
                              import java.io.BufferedReader;
diff --git a/yaksh/evaluator_tests/test_python_evaluation.py b/yaksh/evaluator_tests/test_python_evaluation.py
index fdc1c35..690f474 100644
--- a/yaksh/evaluator_tests/test_python_evaluation.py
+++ b/yaksh/evaluator_tests/test_python_evaluation.py
@@ -17,9 +17,9 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
             f.write('2'.encode('ascii'))
         tmp_in_dir_path = tempfile.mkdtemp()
         self.in_dir = tmp_in_dir_path
-        self.test_case_data = [{"test_case": 'assert(add(1,2)==3)', 'marks': 0.0},
-                               {"test_case": 'assert(add(-1,2)==1)', 'marks': 0.0},
-                               {"test_case": 'assert(add(-1,-2)==-3)', 'marks': 0.0},
+        self.test_case_data = [{"test_case": 'assert(add(1,2)==3)', 'weightage': 0.0},
+                               {"test_case": 'assert(add(-1,2)==1)', 'weightage': 0.0},
+                               {"test_case": 'assert(add(-1,-2)==-3)', 'weightage': 0.0},
                                ]
         self.timeout_msg = ("Code took more than {0} seconds to run. "
                             "You probably have an infinite loop in"
@@ -260,7 +260,7 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
     def test_file_based_assert(self):
         # Given
-        self.test_case_data = [{"test_case": "assert(ans()=='2')", "marks": 0.0}]
+        self.test_case_data = [{"test_case": "assert(ans()=='2')", "weightage": 0.0}]
         self.file_paths = [('/tmp/test.txt', False)]
         user_answer = dedent("""
                              def ans():
@@ -287,7 +287,7 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
         user_answer = "def palindrome(a):\n\treturn a == a[::-1]"
         test_case_data = [{"test_case": 's="abbb"\nasert palindrome(s)==False',
-                           "marks": 0.0
+                           "weightage": 0.0
                            }
                           ]
         syntax_error_msg = ["Traceback",
@@ -322,10 +322,10 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
         # Given
         user_answer = "def palindrome(a):\n\treturn a == a[::-1]"
         test_case_data = [{"test_case": 'assert(palindrome("abba")==True)',
-                           "marks": 0.0
+                           "weightage": 0.0
                            },
                           {"test_case": 's="abbb"\nassert palindrome(S)==False',
-                           "marks": 0.0
+                           "weightage": 0.0
                            }
                           ]
         name_error_msg = ["Traceback",
@@ -364,7 +364,7 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase):
         # Given
         self.test_case_data = [{"expected_input": "1\n2",
                                 "expected_output": "3",
-                                "marks": 0.0
+                                "weightage": 0.0
                                 }]
         user_answer = dedent("""
                              a = int(input())
@@ -389,7 +389,7 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase):
         # Given
         self.test_case_data = [{"expected_input": "1,2,3\n5,6,7",
                                 "expected_output": "[1, 2, 3, 5, 6, 7]",
-                                "marks": 0.0
+                                "weightage": 0.0
                                 }]
         user_answer = dedent("""
                              from six.moves import input
@@ -417,7 +417,7 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase):
         # Given
         self.test_case_data = [{"expected_input": ("the quick brown fox jumps over the lazy dog\nthe"),
                                 "expected_output": "2",
-                                "marks": 0.0
+                                "weightage": 0.0
                                 }]
         user_answer = dedent("""
                              from six.moves import input
@@ -443,7 +443,7 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase):
         # Given
         self.test_case_data = [{"expected_input": "1\n2",
                                 "expected_output": "3",
-                                "marks": 0.0
+                                "weightage": 0.0
                                 }]
         user_answer = dedent("""
                              a = int(input())
@@ -468,7 +468,7 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase):
         # Given
         self.test_case_data = [{"expected_input": "",
                                 "expected_output": "2",
-                                "marks": 0.0
+                                "weightage": 0.0
                                 }]
         self.file_paths = [('/tmp/test.txt', False)]
@@ -496,7 +496,7 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase):
         # Given
         test_case_data = [{"expected_input": "1\n2",
                            "expected_output": "3",
-                           "marks": 0.0
+                           "weightage": 0.0
                            }]
         timeout_msg = ("Code took more than {0} seconds to run. "
                        "You probably have an infinite loop in"
diff --git a/yaksh/evaluator_tests/test_scilab_evaluation.py b/yaksh/evaluator_tests/test_scilab_evaluation.py
index b46a65b..bc03e04 100644
--- a/yaksh/evaluator_tests/test_scilab_evaluation.py
+++ b/yaksh/evaluator_tests/test_scilab_evaluation.py
@@ -12,7 +12,7 @@ class ScilabEvaluationTestCases(unittest.TestCase):
     def setUp(self):
         tmp_in_dir_path = tempfile.mkdtemp()
         self.test_case_data = [{"test_case": "scilab_files/test_add.sce",
-                                "marks": 0.0
+                                "weightage": 0.0
                                 }]
         self.in_dir = tmp_in_dir_path
         self.timeout_msg = ("Code took more than {0} seconds to run. "
" diff --git a/yaksh/java_code_evaluator.py b/yaksh/java_code_evaluator.py index c945a38..05e6405 100644 --- a/yaksh/java_code_evaluator.py +++ b/yaksh/java_code_evaluator.py @@ -47,7 +47,7 @@ class JavaCodeEvaluator(CodeEvaluator): output_path = "{0}{1}.class".format(directory, file_name) return output_path - def compile_code(self, user_answer, file_paths, test_case, marks): + def compile_code(self, user_answer, file_paths, test_case, weightage): if self.compiled_user_answer and self.compiled_test_code: return None else: @@ -96,7 +96,7 @@ class JavaCodeEvaluator(CodeEvaluator): return self.compiled_user_answer, self.compiled_test_code - def check_code(self, user_answer, file_paths, partial_grading, test_case, marks): + def check_code(self, user_answer, file_paths, partial_grading, test_case, weightage): """ Function validates student code using instructor code as reference.The first argument ref_code_path, is the path to instructor code, it is assumed to have executable permission. @@ -117,7 +117,7 @@ class JavaCodeEvaluator(CodeEvaluator): """ success = False - test_case_marks = 0.0 + test_case_weightage = 0.0 proc, stdnt_out, stdnt_stderr = self.compiled_user_answer stdnt_stderr = self._remove_null_substitute_char(stdnt_stderr) @@ -136,7 +136,7 @@ class JavaCodeEvaluator(CodeEvaluator): proc, stdout, stderr = ret if proc.returncode == 0: success, err = True, "Correct answer" - test_case_marks = float(marks) if partial_grading else 0.0 + test_case_weightage = float(weightage) if partial_grading else 0.0 else: err = stdout + "\n" + stderr else: @@ -161,4 +161,4 @@ class JavaCodeEvaluator(CodeEvaluator): err = err + "\n" + e except: err = err + "\n" + stdnt_stderr - return success, err, test_case_marks + return success, err, test_case_weightage diff --git a/yaksh/java_stdio_evaluator.py b/yaksh/java_stdio_evaluator.py index b074e1c..bc50744 100644 --- a/yaksh/java_stdio_evaluator.py +++ b/yaksh/java_stdio_evaluator.py @@ -31,7 +31,7 @@ class JavaStdioEvaluator(StdIOEvaluator): compile_command = 'javac {0}'.format(self.submit_code_path) return compile_command - def compile_code(self, user_answer, file_paths, expected_input, expected_output, marks): + def compile_code(self, user_answer, file_paths, expected_input, expected_output, weightage): if not isfile(self.submit_code_path): msg = "No file at %s or Incorrect path" % self.submit_code_path return False, msg @@ -51,9 +51,9 @@ class JavaStdioEvaluator(StdIOEvaluator): return self.compiled_user_answer def check_code(self, user_answer, file_paths, partial_grading, - expected_input, expected_output, marks): + expected_input, expected_output, weightage): success = False - test_case_marks = 0.0 + test_case_weightage = 0.0 proc, stdnt_out, stdnt_stderr = self.compiled_user_answer stdnt_stderr = self._remove_null_substitute_char(stdnt_stderr) if stdnt_stderr == '' or "error" not in stdnt_stderr: @@ -79,5 +79,5 @@ class JavaStdioEvaluator(StdIOEvaluator): err = err + "\n" + e except: err = err + "\n" + stdnt_stderr - test_case_marks = float(marks) if partial_grading and success else 0.0 - return success, err, test_case_marks + test_case_weightage = float(weightage) if partial_grading and success else 0.0 + return success, err, test_case_weightage diff --git a/yaksh/models.py b/yaksh/models.py index 33c3b42..05528c7 100644 --- a/yaksh/models.py +++ b/yaksh/models.py @@ -325,6 +325,13 @@ class Question(models.Model): return test_case + def get_maximum_test_case_weightage(self, **kwargs): + max_weightage = 0.0 + for test_case in self.get_test_cases(): 
+ max_weightage += test_case.weightage + + return max_weightage + def _add_and_get_files(self, zip_file): files = FileUpload.objects.filter(question=self) files_list = [] @@ -1029,7 +1036,7 @@ class AnswerPaper(models.Model): For code questions success is True only if the answer is correct. """ - result = {'success': True, 'error': 'Incorrect answer', 'marks': 0.0} + result = {'success': True, 'error': 'Incorrect answer', 'weightage': 0.0} correct = False if user_answer is not None: if question.type == 'mcq': @@ -1080,11 +1087,17 @@ class AnswerPaper(models.Model): user_answer.correct = correct user_answer.error = result.get('error') if correct: - user_answer.marks = question.points * result['marks'] \ + user_answer.marks = (question.points * result['weightage'] / + question.get_maximum_test_case_weightage()) \ if question.partial_grading and question.type == 'code' else question.points + # user_answer.marks = question.points * result['weightage'] \ + # if question.partial_grading and question.type == 'code' else question.points else: - user_answer.marks = question.points * result['marks'] \ + user_answer.marks = (question.points * result['weightage'] / + question.get_maximum_test_case_weightage()) \ if question.partial_grading and question.type == 'code' else 0 + # user_answer.marks = question.points * result['weightage'] \ + # if question.partial_grading and question.type == 'code' else 0 user_answer.save() self.update_marks('completed') return True, msg @@ -1109,11 +1122,11 @@ class TestCase(models.Model): class StandardTestCase(TestCase): test_case = models.TextField(blank=True) - marks = models.FloatField(default=0.0) + weightage = models.FloatField(default=0.0) def get_field_value(self): return {"test_case": self.test_case, - "marks": self.marks} + "weightage": self.weightage} def __str__(self): return u'Question: {0} | Test Case: {1}'.format(self.question, @@ -1124,12 +1137,12 @@ class StandardTestCase(TestCase): class StdioBasedTestCase(TestCase): expected_input = models.TextField(blank=True) expected_output = models.TextField() - marks = models.FloatField(default=0.0) + weightage = models.IntegerField(default=0.0) def get_field_value(self): return {"expected_output": self.expected_output, "expected_input": self.expected_input, - "marks": self.marks} + "weightage": self.weightage} def __str__(self): return u'Question: {0} | Exp. Output: {1} | Exp. 
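Note: this models.py hunk is the heart of the commit. Partial marks are now normalized against the question's total weightage rather than read directly from the result. Two details stand out for review: StdioBasedTestCase declares weightage as an IntegerField with a float default (0.0), unlike the FloatField on StandardTestCase, and the division is unguarded, so a question whose test cases all keep the default weightage would divide by zero. A worked sketch of the new arithmetic, under assumed numbers:

    # Hypothetical numbers: a 10-point question with weightages 1.0, 2.0, 3.0,
    # of which the first two test cases pass.
    points = 10.0
    earned_weightage = 1.0 + 2.0           # summed by CodeEvaluator.evaluate()
    max_weightage = 1.0 + 2.0 + 3.0        # get_maximum_test_case_weightage()
    marks = points * earned_weightage / max_weightage   # -> 5.0
    # If every test case keeps the default weightage of 0.0, max_weightage
    # is 0.0 and this division raises ZeroDivisionError; a guard may be needed.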
diff --git a/yaksh/python_assertion_evaluator.py b/yaksh/python_assertion_evaluator.py
index 350bc38..6503566 100644
--- a/yaksh/python_assertion_evaluator.py
+++ b/yaksh/python_assertion_evaluator.py
@@ -25,7 +25,7 @@ class PythonAssertionEvaluator(CodeEvaluator):
         delete_files(self.files)
         super(PythonAssertionEvaluator, self).teardown()
 
-    def compile_code(self, user_answer, file_paths, test_case, marks):
+    def compile_code(self, user_answer, file_paths, test_case, weightage):
         if file_paths:
             self.files = copy_files(file_paths)
         if self.exec_scope:
@@ -36,9 +36,9 @@ class PythonAssertionEvaluator(CodeEvaluator):
         exec(submitted, self.exec_scope)
         return self.exec_scope
 
-    def check_code(self, user_answer, file_paths, partial_grading, test_case, marks):
+    def check_code(self, user_answer, file_paths, partial_grading, test_case, weightage):
         success = False
-        test_case_marks = 0.0
+        test_case_weightage = 0.0
         try:
             tb = None
             _tests = compile(test_case, '<string>', mode='exec')
@@ -54,6 +54,6 @@ class PythonAssertionEvaluator(CodeEvaluator):
         else:
             success = True
             err = 'Correct answer'
-            test_case_marks = float(marks) if partial_grading else 0.0
+            test_case_weightage = float(weightage) if partial_grading else 0.0
         del tb
-        return success, err, test_case_marks
+        return success, err, test_case_weightage
diff --git a/yaksh/python_stdio_evaluator.py b/yaksh/python_stdio_evaluator.py
index 7df0ba1..cd8c52a 100644
--- a/yaksh/python_stdio_evaluator.py
+++ b/yaksh/python_stdio_evaluator.py
@@ -42,7 +42,7 @@ class PythonStdioEvaluator(CodeEvaluator):
 
         super(PythonStdioEvaluator, self).teardown()
 
-    def compile_code(self, user_answer, file_paths, expected_input, expected_output, marks):
+    def compile_code(self, user_answer, file_paths, expected_input, expected_output, weightage):
         if file_paths:
             self.files = copy_files(file_paths)
         submitted = compile(user_answer, '<string>', mode='exec')
@@ -57,15 +57,16 @@ class PythonStdioEvaluator(CodeEvaluator):
         self.output_value = output_buffer.getvalue().rstrip("\n")
         return self.output_value
 
-    def check_code(self, user_answer, file_paths, partial_grading, expected_input, expected_output, marks):
+    def check_code(self, user_answer, file_paths, partial_grading, expected_input,
+                   expected_output, weightage):
         success = False
-        test_case_marks = 0.0
+        test_case_weightage = 0.0
         tb = None
 
         if self.output_value == expected_output:
             success = True
             err = "Correct answer"
-            test_case_marks = marks
+            test_case_weightage = weightage
         else:
             success = False
             err = dedent("""
                 Incorrect answer:
                 Given input - {0}
                 Expected output - {1}
                 Your output - {2}
-                """
-                .format(expected_input,
-                        expected_output, self.output_value
-                        )
-                )
+                """.format(expected_input,
+                           expected_output,
+                           self.output_value
+                           )
+                         )
         del tb
-        return success, err, test_case_marks
+        return success, err, test_case_weightage
diff --git a/yaksh/scilab_code_evaluator.py b/yaksh/scilab_code_evaluator.py
index 1aca309..927b84d 100644
--- a/yaksh/scilab_code_evaluator.py
+++ b/yaksh/scilab_code_evaluator.py
@@ -27,7 +27,7 @@ class ScilabCodeEvaluator(CodeEvaluator):
         delete_files(self.files)
         super(ScilabCodeEvaluator, self).teardown()
 
-    def check_code(self, user_answer, file_paths, partial_grading, test_case, marks):
+    def check_code(self, user_answer, file_paths, partial_grading, test_case, weightage):
         if file_paths:
             self.files = copy_files(file_paths)
         ref_code_path = test_case
@@ -37,7 +37,7 @@ class ScilabCodeEvaluator(CodeEvaluator):
             self._remove_scilab_exit(user_answer.lstrip())
 
         success = False
-        test_case_marks = 0.0
+        test_case_weightage = 0.0
         self.write_to_submit_code_file(self.submit_code_path, user_answer)
 
         # Throw message if there are commmands that terminates scilab
@@ -65,12 +65,12 @@ class ScilabCodeEvaluator(CodeEvaluator):
             stdout = self._strip_output(stdout)
             if proc.returncode == 5:
                 success, err = True, "Correct answer"
-                test_case_marks = float(marks) if partial_grading else 0.0
+                test_case_weightage = float(weightage) if partial_grading else 0.0
             else:
                 err = add_err + stdout
         else:
             err = add_err + stderr
 
-        return success, err, test_case_marks
+        return success, err, test_case_weightage
 
     def _remove_scilab_exit(self, string):
         """
diff --git a/yaksh/test_models.py b/yaksh/test_models.py
index 50d39c1..d05fac3 100644
--- a/yaksh/test_models.py
+++ b/yaksh/test_models.py
@@ -142,7 +142,7 @@ class QuestionTestCases(unittest.TestCase):
         self.upload_test_case.save()
         self.user_answer = "demo_answer"
         self.test_case_upload_data = [{"test_case": "assert fact(3)==6",
-                                       "marks": 0.0
+                                       "weightage": 0.0
                                        }]
         questions_data = [{"snippet": "def fact()", "active": True,
                            "points": 1.0,
@@ -880,7 +880,7 @@ class TestCaseTestCases(unittest.TestCase):
         answer_data = {"user_answer": "demo_answer",
                        "test_case_data": [
                            {"test_case": "assert myfunc(12, 13) == 15",
-                            "marks": 0.0
+                            "weightage": 0.0
                            }
                        ]
                        }
diff --git a/yaksh/tests/test_code_server.py b/yaksh/tests/test_code_server.py
index d446444..19560e4 100644
--- a/yaksh/tests/test_code_server.py
+++ b/yaksh/tests/test_code_server.py
@@ -35,12 +35,12 @@ class TestCodeServer(unittest.TestCase):
     def setUp(self):
         self.code_server = CodeServerProxy()
 
-    def test_inifinite_loop(self):
+    def test_infinite_loop(self):
         # Given
         testdata = {'user_answer': 'while True: pass',
-                    'partial_grading': True,
+                    'partial_grading': False,
                     'test_case_data': [{'test_case':'assert 1==2',
-                                        'marks': 0.0
+                                        'weightage': 0.0
                                         }
                                        ]}
 
@@ -57,9 +57,9 @@ class TestCodeServer(unittest.TestCase):
     def test_correct_answer(self):
         # Given
         testdata = {'user_answer': 'def f(): return 1',
-                    'partial_grading': True,
+                    'partial_grading': False,
                     'test_case_data': [{'test_case':'assert f() == 1',
-                                        'marks': 0.0
+                                        'weightage': 0.0
                                         }
                                        ]}
 
@@ -76,9 +76,9 @@ class TestCodeServer(unittest.TestCase):
     def test_wrong_answer(self):
         # Given
         testdata = {'user_answer': 'def f(): return 1',
-                    'partial_grading': True,
+                    'partial_grading': False,
                     'test_case_data': [{'test_case':'assert f() == 2',
-                                        'marks': 0.0
+                                        'weightage': 0.0
                                         }
                                        ]}
 
@@ -99,9 +99,9 @@ class TestCodeServer(unittest.TestCase):
         def run_code():
             """Run an infinite loop."""
             testdata = {'user_answer': 'while True: pass',
-                        'partial_grading': True,
+                        'partial_grading': False,
                         'test_case_data': [{'test_case':'assert 1==2',
-                                            'marks': 0.0
+                                            'weightage': 0.0
                                             }
                                            ]}
             result = self.code_server.run_code(
diff --git a/yaksh/views.py b/yaksh/views.py
index 2478544..aca89ef 100644
--- a/yaksh/views.py
+++ b/yaksh/views.py
@@ -517,14 +517,16 @@ def check(request, q_id, attempt_num=None, questionpaper_id=None):
         if question.type == 'code' else None
     correct, result = paper.validate_answer(user_answer, question, json_data)
     if correct:
-        new_answer.marks = question.points * result['marks'] if question.partial_grading \
-            and question.type == 'code' else question.points
+        new_answer.marks = (question.points * result['weightage'] /
+            question.get_maximum_test_case_weightage()) \
+            if question.partial_grading and question.type == 'code' else question.points
         new_answer.correct = correct
         new_answer.error = result.get('error')
     else:
         new_answer.error = result.get('error')
-        new_answer.marks = question.points * result['marks'] if question.partial_grading \
-            and question.type == 'code' else question.points
+        new_answer.marks = (question.points * result['weightage'] /
+            question.get_maximum_test_case_weightage()) \
+            if question.partial_grading and question.type == 'code' else 0
     new_answer.save()
     paper.update_marks('inprogress')
     paper.set_end_time(timezone.now())
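Note: the views.py hunk also fixes the old else-branch bug that awarded full question.points for an incorrect answer, but it duplicates the normalization expression already written in AnswerPaper.validate_answer in models.py. A hypothetical shared helper (not part of this commit) that both call sites could use, so the formula lives in one place:

    # Hypothetical refactor sketch, not part of this commit.
    def compute_marks(question, earned_weightage, correct):
        if question.partial_grading and question.type == 'code':
            max_weightage = question.get_maximum_test_case_weightage()
            return question.points * earned_weightage / max_weightage
        return question.points if correct else 0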
diff --git a/yaksh/xmlrpc_clients.py b/yaksh/xmlrpc_clients.py
index ff0a2a7..53b8c38 100644
--- a/yaksh/xmlrpc_clients.py
+++ b/yaksh/xmlrpc_clients.py
@@ -63,7 +63,7 @@ class CodeServerProxy(object):
             result = server.check_code(language, test_case_type, json_data,
                                        user_dir)
         except ConnectionError:
             result = json.dumps({'success': False,
-                                 'marks': 0.0,
+                                 'weightage': 0.0,
                                  'error': 'Unable to connect to any code servers!'})
         return result
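Note: the connection-error fallback payload changes key here too, so anything that unpacks the code-server JSON must read 'weightage' instead of 'marks'. A minimal consumer-side sketch, where `raw_result` is a placeholder for the string returned by run_code():

    import json

    result = json.loads(raw_result)          # raw_result: placeholder for run_code() output
    earned = result.get('weightage', 0.0)    # was: result.get('marks', 0.0)
    if not result['success']:
        print(result['error'])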