From 053e27000540396b84c26d5a5f593d4389e0787a Mon Sep 17 00:00:00 2001
From: ankitjavalkar
Date: Wed, 2 Nov 2016 16:02:03 +0530
Subject: Add basic partial marking feature per test case

---
 yaksh/code_evaluator.py                 | 30 +++++++++++++++++-----------
 yaksh/models.py                         | 35 ++++++++++++++++++++++---------
 yaksh/python_assertion_evaluator.py     | 10 ++++++----
 yaksh/templates/yaksh/add_question.html |  1 +
 yaksh/views.py                          |  5 ++++-
 yaksh/xmlrpc_clients.py                 |  4 +++-
 6 files changed, 59 insertions(+), 26 deletions(-)

(limited to 'yaksh')

diff --git a/yaksh/code_evaluator.py b/yaksh/code_evaluator.py
index 79f616d..b404d57 100644
--- a/yaksh/code_evaluator.py
+++ b/yaksh/code_evaluator.py
@@ -82,14 +82,14 @@ class CodeEvaluator(object):
 
         Returns
         -------
 
-        A tuple: (success, error message).
+        A tuple: (success, error message, marks).
         """
         self.setup()
-        success, err = self.safe_evaluate(**kwargs)
+        success, error, marks = self.safe_evaluate(**kwargs)
         self.teardown()
 
-        result = {'success': success, 'error': err}
+        result = {'success': success, 'error': error, 'marks': marks}
         return result
 
     # Private Protocol ##########
@@ -99,7 +99,7 @@ class CodeEvaluator(object):
             os.makedirs(self.in_dir)
         self._change_dir(self.in_dir)
 
-    def safe_evaluate(self, user_answer, test_case_data, file_paths=None):
+    def safe_evaluate(self, user_answer, partial_grading, test_case_data, file_paths=None):
         """ Handles code evaluation along with compilation, signal handling
         and Exception handling
         """
@@ -108,32 +108,40 @@ class CodeEvaluator(object):
         # Add a new signal handler for the execution of this code.
         prev_handler = create_signal_handler()
         success = False
+        error = ""
+        marks = 0.0
 
         # Do whatever testing needed.
         try:
             for test_case in test_case_data:
                 success = False
                 self.compile_code(user_answer, file_paths, **test_case)
-                success, err = self.check_code(user_answer, file_paths, **test_case)
-                if not success:
-                    break
+                success, err, test_case_marks = self.check_code(user_answer,
+                    file_paths,
+                    partial_grading,
+                    **test_case
+                )
+                if success:
+                    marks += test_case_marks
+                else:
+                    error += err + "\n"
 
         except TimeoutException:
-            err = self.timeout_msg
+            error = self.timeout_msg
         except OSError:
             msg = traceback.format_exc(limit=0)
-            err = "Error: {0}".format(msg)
+            error = "Error: {0}".format(msg)
         except Exception:
             exc_type, exc_value, exc_tb = sys.exc_info()
             tb_list = traceback.format_exception(exc_type, exc_value, exc_tb)
             if len(tb_list) > 2:
                 del tb_list[1:3]
-            err = "Error: {0}".format("".join(tb_list))
+            error = "Error: {0}".format("".join(tb_list))
         finally:
             # Set back any original signal handler.
             set_original_signal_handler(prev_handler)
 
-        return success, err
+        return success, error, marks
 
     def teardown(self):
         # Cancel the signal
diff --git a/yaksh/models.py b/yaksh/models.py
index 7f9eead..e7a96c9 100644
--- a/yaksh/models.py
+++ b/yaksh/models.py
@@ -245,6 +245,9 @@ class Question(models.Model):
     # user for particular question
     user = models.ForeignKey(User, related_name="user")
 
+    # Does this question allow partial grading
+    partial_grading = models.BooleanField(default=False)
+
     def consolidate_answer_data(self, user_answer):
         question_data = {}
         test_case_data = []
@@ -257,6 +260,7 @@ class Question(models.Model):
 
         question_data['test_case_data'] = test_case_data
         question_data['user_answer'] = user_answer
+        question_data['partial_grading'] = self.partial_grading
         files = FileUpload.objects.filter(question=self)
         if files:
             question_data['file_paths'] = [(file.file.path, file.extract)
@@ -937,11 +941,17 @@ class AnswerPaper(models.Model):
 
     def _update_marks_obtained(self):
         """Updates the total marks earned by student for this paper."""
-        marks = sum([x.marks for x in self.answers.filter(marks__gt=0.0)])
-        if not marks:
-            self.marks_obtained = 0
-        else:
-            self.marks_obtained = marks
+        # marks = sum([x.marks for x in self.answers.filter(marks__gt=0.0)])
+        # if not marks:
+        #     self.marks_obtained = 0
+        # else:
+        #     self.marks_obtained = marks
+        marks = 0
+        for question in self.questions.all():
+            max_marks = max([a.marks for a in self.answers.filter(question=question)])
+            marks += max_marks
+        self.marks_obtained = marks
+
 
     def _update_percent(self):
         """Updates the percent gained by the student for this paper."""
@@ -1023,7 +1033,7 @@ class AnswerPaper(models.Model):
         For code questions success is True only if the answer is correct.
         """
-        result = {'success': True, 'error': 'Incorrect answer'}
+        result = {'success': True, 'error': 'Incorrect answer', 'marks': 0.0}
         correct = False
         if user_answer is not None:
             if question.type == 'mcq':
@@ -1071,11 +1081,16 @@ class AnswerPaper(models.Model):
             json_data = question.consolidate_answer_data(answer) \
                 if question.type == 'code' else None
             correct, result = self.validate_answer(answer, question, json_data)
-            user_answer.marks = question.points if correct else 0.0
             user_answer.correct = correct
             user_answer.error = result.get('error')
+            if correct:
+                user_answer.marks = question.points * result['marks'] \
+                    if question.partial_grading and question.type == 'code' else question.points
+            else:
+                user_answer.marks = question.points * result['marks'] \
+                    if question.partial_grading and question.type == 'code' else 0
             user_answer.save()
-            self.update_marks('complete')
+            self.update_marks('completed')
         return True, msg
 
     def __str__(self):
@@ -1098,9 +1113,11 @@ class TestCase(models.Model):
 
 class StandardTestCase(TestCase):
     test_case = models.TextField(blank=True)
+    marks = models.FloatField(default=0.0)
 
     def get_field_value(self):
-        return {"test_case": self.test_case}
+        return {"test_case": self.test_case,
+                "marks": self.marks}
 
     def __str__(self):
         return u'Question: {0} | Test Case: {1}'.format(self.question,
diff --git a/yaksh/python_assertion_evaluator.py b/yaksh/python_assertion_evaluator.py
index dd1c041..350bc38 100644
--- a/yaksh/python_assertion_evaluator.py
+++ b/yaksh/python_assertion_evaluator.py
@@ -17,6 +17,7 @@ class PythonAssertionEvaluator(CodeEvaluator):
     def setup(self):
         super(PythonAssertionEvaluator, self).setup()
         self.exec_scope = None
+        self.files = []
 
     def teardown(self):
         # Delete the created file.
@@ -24,8 +25,7 @@ class PythonAssertionEvaluator(CodeEvaluator):
             delete_files(self.files)
         super(PythonAssertionEvaluator, self).teardown()
 
-    def compile_code(self, user_answer, file_paths, test_case):
-        self.files = []
+    def compile_code(self, user_answer, file_paths, test_case, marks):
        if file_paths:
            self.files = copy_files(file_paths)
        if self.exec_scope:
@@ -36,8 +36,9 @@ class PythonAssertionEvaluator(CodeEvaluator):
         exec(submitted, self.exec_scope)
         return self.exec_scope
 
-    def check_code(self, user_answer, file_paths, test_case):
+    def check_code(self, user_answer, file_paths, partial_grading, test_case, marks):
         success = False
+        test_case_marks = 0.0
         try:
             tb = None
             _tests = compile(test_case, '<string>', mode='exec')
@@ -53,5 +54,6 @@ class PythonAssertionEvaluator(CodeEvaluator):
         else:
             success = True
             err = 'Correct answer'
+            test_case_marks = float(marks) if partial_grading else 0.0
         del tb
-        return success, err
+        return success, err, test_case_marks
diff --git a/yaksh/templates/yaksh/add_question.html b/yaksh/templates/yaksh/add_question.html
index c0d53f8..9822333 100644
--- a/yaksh/templates/yaksh/add_question.html
+++ b/yaksh/templates/yaksh/add_question.html
@@ -24,6 +24,7 @@
     Description: {{ form.description}} {{form.description.errors}}
     Tags: {{ form.tags }}
     Snippet: {{ form.snippet }}
+    Partial Grading: {{ form.partial_grading }}
     Test Case Type: {{ form.test_case_type }}{{ form.test_case_type.errors }}
     File: {{ upload_form.file_field }}{{ upload_form.file_field.errors }}
     {% if uploaded_files %}
         Uploaded files:
         Check the box to delete or extract files
diff --git a/yaksh/views.py b/yaksh/views.py
index 1afcef7..2478544 100644
--- a/yaksh/views.py
+++ b/yaksh/views.py
@@ -517,11 +517,14 @@ def check(request, q_id, attempt_num=None, questionpaper_id=None):
             if question.type == 'code' else None
         correct, result = paper.validate_answer(user_answer, question, json_data)
         if correct:
+            new_answer.marks = question.points * result['marks'] if question.partial_grading \
+                and question.type == 'code' else question.points
             new_answer.correct = correct
-            new_answer.marks = question.points
             new_answer.error = result.get('error')
         else:
             new_answer.error = result.get('error')
+            new_answer.marks = question.points * result['marks'] if question.partial_grading \
+                and question.type == 'code' else question.points
         new_answer.save()
         paper.update_marks('inprogress')
         paper.set_end_time(timezone.now())
diff --git a/yaksh/xmlrpc_clients.py b/yaksh/xmlrpc_clients.py
index 4da70dd..ff0a2a7 100644
--- a/yaksh/xmlrpc_clients.py
+++ b/yaksh/xmlrpc_clients.py
@@ -62,7 +62,9 @@ class CodeServerProxy(object):
             server = self._get_server()
             result = server.check_code(language, test_case_type, json_data, user_dir)
         except ConnectionError:
-            result = json.dumps({'success': False, 'error': 'Unable to connect to any code servers!'})
+            result = json.dumps({'success': False,
+                                 'marks': 0.0,
+                                 'error': 'Unable to connect to any code servers!'})
         return result
 
     def _get_server(self):
--
cgit
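Net effect of the commit above: a failing test case no longer aborts the evaluation
loop. Every case runs; a passing case adds its marks weight to a running total that
is later multiplied by question.points, while a failing case appends its message to
the accumulated error string. Below is a minimal, self-contained sketch of that
accumulation with simplified stand-in names (not the actual yaksh classes):

    # Sketch only: `check` stands in for an evaluator's check_code().
    def safe_evaluate(check, test_case_data, partial_grading):
        success, error, marks = False, "", 0.0
        for test_case in test_case_data:
            success, err, test_case_marks = check(partial_grading, **test_case)
            if success:
                marks += test_case_marks   # weight of each passing case
            else:
                error += err + "\n"        # record the failure, keep going

        return success, error, marks

    def check(partial_grading, test_case, marks):
        passed = test_case != "fail"
        weight = float(marks) if partial_grading and passed else 0.0
        return passed, "Correct answer" if passed else "AssertionError", weight

    # Two of three weighted cases pass -> a 0.75 fraction of the points.
    print(safe_evaluate(check,
                        [{"test_case": "t1", "marks": 0.25},
                         {"test_case": "fail", "marks": 0.25},
                         {"test_case": "t2", "marks": 0.5}],
                        partial_grading=True))   # (True, 'AssertionError\n', 0.75)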
From 0bfa58d8705fa08b45a208a4cec98dd267799f8a Mon Sep 17 00:00:00 2001
From: ankitjavalkar
Date: Thu, 3 Nov 2016 11:47:50 +0530
Subject: Add partial grading to multiple evaluators

---
 yaksh/bash_code_evaluator.py   | 20 ++++++++++++--------
 yaksh/bash_stdio_evaluator.py  | 10 +++++++---
 yaksh/cpp_code_evaluator.py    | 11 +++++++----
 yaksh/cpp_stdio_evaluator.py   | 13 ++++++++-----
 yaksh/java_code_evaluator.py   | 11 +++++++----
 yaksh/java_stdio_evaluator.py  | 11 +++++++----
 yaksh/scilab_code_evaluator.py |  9 ++++++---
 7 files changed, 54 insertions(+), 31 deletions(-)

(limited to 'yaksh')

diff --git a/yaksh/bash_code_evaluator.py b/yaksh/bash_code_evaluator.py
index e4b961c..7dc0f0f 100644
--- a/yaksh/bash_code_evaluator.py
+++ b/yaksh/bash_code_evaluator.py
@@ -17,6 +17,7 @@ class BashCodeEvaluator(CodeEvaluator):
     # Private Protocol ##########
     def setup(self):
         super(BashCodeEvaluator, self).setup()
+        self.files = []
         self.submit_code_path = self.create_submit_code_file('submit.sh')
         self._set_file_as_executable(self.submit_code_path)
 
@@ -27,7 +28,7 @@ class BashCodeEvaluator(CodeEvaluator):
             delete_files(self.files)
         super(BashCodeEvaluator, self).teardown()
 
-    def check_code(self, user_answer, file_paths, test_case):
+    def check_code(self, file_paths, partial_grading, test_case, marks):
         """ Function validates student script using instructor script as
         reference. Test cases can optionally be provided.  The first argument
         ref_path, is the path to instructor script, it is assumed to
@@ -57,7 +58,6 @@
 
         clean_ref_code_path, clean_test_case_path = \
             self._set_test_code_file_path(get_ref_path, get_test_case_path)
-        self.files = []
         if file_paths:
             self.files = copy_files(file_paths)
         if not isfile(clean_ref_code_path):
@@ -74,6 +74,8 @@
             return False, msg
 
         success = False
+        test_case_marks = 0.0
+
         user_answer = user_answer.replace("\r", "")
         self.write_to_submit_code_file(self.submit_code_path, user_answer)
 
@@ -91,19 +93,20 @@
             )
             proc, stdnt_stdout, stdnt_stderr = ret
             if inst_stdout == stdnt_stdout:
-                return True, "Correct answer"
+                test_case_marks = float(marks) if partial_grading else 0.0
+                return True, "Correct answer", test_case_marks
             else:
                 err = "Error: expected %s, got %s" % (inst_stderr,
                                                       stdnt_stderr
                                                       )
-                return False, err
+                return False, err, test_case_marks
         else:
             if not isfile(clean_test_case_path):
                 msg = "No test case at %s" % clean_test_case_path
-                return False, msg
+                return False, msg, test_case_marks
             if not os.access(clean_ref_code_path, os.R_OK):
                 msg = "Test script %s, not readable" % clean_test_case_path
-                return False, msg
+                return False, msg, test_case_marks
             # valid_answer is True, so that we can stop once a test case fails
             valid_answer = True
             # loop_count has to be greater than or equal to one.
@@ -133,10 +136,11 @@
                 proc, stdnt_stdout, stdnt_stderr = ret
                 valid_answer = inst_stdout == stdnt_stdout
             if valid_answer and (num_lines == loop_count):
-                return True, "Correct answer"
+                test_case_marks = float(marks) if partial_grading else 0.0
+                return True, "Correct answer", test_case_marks
             else:
                 err = ("Error:expected"
                        " {0}, got {1}").format(inst_stdout+inst_stderr,
                                                stdnt_stdout+stdnt_stderr
                                                )
-                return False, err
+                return False, err, test_case_marks
diff --git a/yaksh/bash_stdio_evaluator.py b/yaksh/bash_stdio_evaluator.py
index a7ea1a4..0bab0f0 100644
--- a/yaksh/bash_stdio_evaluator.py
+++ b/yaksh/bash_stdio_evaluator.py
@@ -14,6 +14,7 @@ class BashStdioEvaluator(StdIOEvaluator):
 
     def setup(self):
         super(BashStdioEvaluator, self).setup()
+        self.files = []
         self.submit_code_path = self.create_submit_code_file('Test.sh')
 
     def teardown(self):
@@ -22,8 +23,7 @@ class BashStdioEvaluator(StdIOEvaluator):
             delete_files(self.files)
         super(BashStdioEvaluator, self).teardown()
 
-    def compile_code(self, user_answer, file_paths, expected_input, expected_output):
-        self.files = []
+    def compile_code(self, user_answer, file_paths, expected_input, expected_output, marks):
         if file_paths:
             self.files = copy_files(file_paths)
         if not isfile(self.submit_code_path):
@@ -33,8 +33,11 @@
         user_answer = user_answer.replace("\r", "")
         self.write_to_submit_code_file(self.submit_code_path, user_answer)
 
-    def check_code(self, user_answer, file_paths, expected_input, expected_output):
+    def check_code(self, user_answer, file_paths, partial_grading,
+                   expected_input, expected_output, marks):
         success = False
+        test_case_marks = 0.0
+
         expected_input = str(expected_input).replace('\r', '')
         proc = subprocess.Popen("bash ./Test.sh",
                                 shell=True,
@@ -46,4 +49,5 @@
                                          expected_input,
                                          expected_output
                                          )
+        test_case_marks = float(marks) if partial_grading and success else 0.0
         return success, err
diff --git a/yaksh/cpp_code_evaluator.py b/yaksh/cpp_code_evaluator.py
index 5380dea..d6c72d6 100644
--- a/yaksh/cpp_code_evaluator.py
+++ b/yaksh/cpp_code_evaluator.py
@@ -16,6 +16,7 @@ class CppCodeEvaluator(CodeEvaluator):
     """Tests the C code obtained from Code Server"""
     def setup(self):
         super(CppCodeEvaluator, self).setup()
+        self.files = []
         self.submit_code_path = self.create_submit_code_file('submit.c')
         self.compiled_user_answer = None
         self.compiled_test_code = None
@@ -49,8 +50,7 @@ class CppCodeEvaluator(CodeEvaluator):
                                            ref_output_path)
         return compile_command, compile_main
 
-    def compile_code(self, user_answer, file_paths, test_case):
-        self.files = []
+    def compile_code(self, user_answer, file_paths, test_case, marks):
         if self.compiled_user_answer and self.compiled_test_code:
             return None
         else:
@@ -89,7 +89,7 @@
 
         return self.compiled_user_answer, self.compiled_test_code
 
-    def check_code(self, user_answer, file_paths, test_case):
+    def check_code(self, user_answer, file_paths, partial_grading, test_case, marks):
         """ Function validates student code using instructor code as
         reference.The first argument ref_code_path, is the path to
         instructor code, it is assumed to have executable permission.
@@ -109,6 +109,8 @@
         if the required permissions are not given to the file(s).
         """
         success = False
+        test_case_marks = 0.0
+
         proc, stdnt_out, stdnt_stderr = self.compiled_user_answer
         stdnt_stderr = self._remove_null_substitute_char(stdnt_stderr)
@@ -127,6 +129,7 @@
                 proc, stdout, stderr = ret
                 if proc.returncode == 0:
                     success, err = True, "Correct answer"
+                    test_case_marks = float(marks) if partial_grading else 0.0
                 else:
                     err = "{0} \n {1}".format(stdout, stderr)
             else:
@@ -152,4 +155,4 @@
                 except:
                     err = "{0} \n {1}".format(err, stdnt_stderr)
 
-        return success, err
+        return success, err, test_case_marks
diff --git a/yaksh/cpp_stdio_evaluator.py b/yaksh/cpp_stdio_evaluator.py
index 9d2b969..9e2fd2e 100644
--- a/yaksh/cpp_stdio_evaluator.py
+++ b/yaksh/cpp_stdio_evaluator.py
@@ -14,6 +14,7 @@ class CppStdioEvaluator(StdIOEvaluator):
 
     def setup(self):
         super(CppStdioEvaluator, self).setup()
+        self.files = []
         self.submit_code_path = self.create_submit_code_file('main.c')
 
     def teardown(self):
@@ -34,9 +35,7 @@
                                            ref_output_path)
         return compile_command, compile_main
 
-    def compile_code(self, user_answer, file_paths, expected_input, expected_output):
-
-        self.files = []
+    def compile_code(self, user_answer, file_paths, expected_input, expected_output, marks):
         if file_paths:
             self.files = copy_files(file_paths)
         if not isfile(self.submit_code_path):
@@ -62,8 +61,11 @@
                                           )
         return self.compiled_user_answer, self.compiled_test_code
 
-    def check_code(self, user_answer, file_paths, expected_input, expected_output):
+    def check_code(self, user_answer, file_paths, partial_grading,
+                   expected_input, expected_output, marks):
         success = False
+        test_case_marks = 0.0
+
         proc, stdnt_out, stdnt_stderr = self.compiled_user_answer
         stdnt_stderr = self._remove_null_substitute_char(stdnt_stderr)
         if stdnt_stderr == '':
@@ -104,4 +106,5 @@
                     err = err + "\n" + e
                 except:
                     err = err + "\n" + stdnt_stderr
-        return success, err
+        test_case_marks = float(marks) if partial_grading and success else 0.0
+        return success, err, test_case_marks
diff --git a/yaksh/java_code_evaluator.py b/yaksh/java_code_evaluator.py
index 1ce1c0e..c945a38 100644
--- a/yaksh/java_code_evaluator.py
+++ b/yaksh/java_code_evaluator.py
@@ -16,6 +16,7 @@ class JavaCodeEvaluator(CodeEvaluator):
     """Tests the Java code obtained from Code Server"""
     def setup(self):
         super(JavaCodeEvaluator, self).setup()
+        self.files = []
         self.submit_code_path = self.create_submit_code_file('Test.java')
         self.compiled_user_answer = None
         self.compiled_test_code = None
@@ -46,8 +47,7 @@
         output_path = "{0}{1}.class".format(directory, file_name)
         return output_path
 
-    def compile_code(self, user_answer, file_paths, test_case):
-        self.files = []
+    def compile_code(self, user_answer, file_paths, test_case, marks):
         if self.compiled_user_answer and self.compiled_test_code:
             return None
         else:
@@ -96,7 +96,7 @@
 
         return self.compiled_user_answer, self.compiled_test_code
 
-    def check_code(self, user_answer, file_paths, test_case):
+    def check_code(self, user_answer, file_paths, partial_grading, test_case, marks):
         """ Function validates student code using instructor code as
         reference.The first argument ref_code_path, is the path to
         instructor code, it is assumed to have executable permission.
@@ -117,6 +117,8 @@
         """
         success = False
+        test_case_marks = 0.0
+
         proc, stdnt_out, stdnt_stderr = self.compiled_user_answer
         stdnt_stderr = self._remove_null_substitute_char(stdnt_stderr)
@@ -134,6 +136,7 @@
                 proc, stdout, stderr = ret
                 if proc.returncode == 0:
                     success, err = True, "Correct answer"
+                    test_case_marks = float(marks) if partial_grading else 0.0
                 else:
                     err = stdout + "\n" + stderr
             else:
@@ -158,4 +161,4 @@
                 err = err + "\n" + e
             except:
                 err = err + "\n" + stdnt_stderr
-        return success, err
+        return success, err, test_case_marks
diff --git a/yaksh/java_stdio_evaluator.py b/yaksh/java_stdio_evaluator.py
index bc9cf80..b074e1c 100644
--- a/yaksh/java_stdio_evaluator.py
+++ b/yaksh/java_stdio_evaluator.py
@@ -14,6 +14,7 @@ class JavaStdioEvaluator(StdIOEvaluator):
 
     def setup(self):
         super(JavaStdioEvaluator, self).setup()
+        self.files = []
         self.submit_code_path = self.create_submit_code_file('Test.java')
 
     def teardown(self):
@@ -30,8 +31,7 @@
         compile_command = 'javac {0}'.format(self.submit_code_path)
         return compile_command
 
-    def compile_code(self, user_answer, file_paths, expected_input, expected_output):
-        self.files = []
+    def compile_code(self, user_answer, file_paths, expected_input, expected_output, marks):
         if not isfile(self.submit_code_path):
             msg = "No file at %s or Incorrect path" % self.submit_code_path
             return False, msg
@@ -50,8 +50,10 @@
                                           )
         return self.compiled_user_answer
 
-    def check_code(self, user_answer, file_paths, expected_input, expected_output):
+    def check_code(self, user_answer, file_paths, partial_grading,
+                   expected_input, expected_output, marks):
         success = False
+        test_case_marks = 0.0
         proc, stdnt_out, stdnt_stderr = self.compiled_user_answer
         stdnt_stderr = self._remove_null_substitute_char(stdnt_stderr)
         if stdnt_stderr == '' or "error" not in stdnt_stderr:
@@ -77,4 +79,5 @@
                 err = err + "\n" + e
             except:
                 err = err + "\n" + stdnt_stderr
-        return success, err
+        test_case_marks = float(marks) if partial_grading and success else 0.0
+        return success, err, test_case_marks
diff --git a/yaksh/scilab_code_evaluator.py b/yaksh/scilab_code_evaluator.py
index 915491c..1aca309 100644
--- a/yaksh/scilab_code_evaluator.py
+++ b/yaksh/scilab_code_evaluator.py
@@ -16,6 +16,7 @@ class ScilabCodeEvaluator(CodeEvaluator):
     """Tests the Scilab code obtained from Code Server"""
     def setup(self):
         super(ScilabCodeEvaluator, self).setup()
+        self.files = []
         self.submit_code_path = \
             self.create_submit_code_file('function.sci')
 
@@ -26,8 +27,7 @@ class ScilabCodeEvaluator(CodeEvaluator):
             delete_files(self.files)
         super(ScilabCodeEvaluator, self).teardown()
 
-    def check_code(self, user_answer, file_paths, test_case):
-        self.files = []
+    def check_code(self, user_answer, file_paths, partial_grading, test_case, marks):
         if file_paths:
             self.files = copy_files(file_paths)
         ref_code_path = test_case
@@ -37,6 +37,8 @@
             self._remove_scilab_exit(user_answer.lstrip())
 
         success = False
+        test_case_marks = 0.0
+
         self.write_to_submit_code_file(self.submit_code_path, user_answer)
         # Throw message if there are commmands that terminates scilab
         add_err = ""
@@ -63,11 +65,12 @@
             stdout = self._strip_output(stdout)
             if proc.returncode == 5:
                 success, err = True, "Correct answer"
+                test_case_marks = float(marks) if partial_grading else 0.0
             else:
                 err = add_err + stdout
         else:
             err = add_err + stderr
 
-        return success, err
+        return success, err, test_case_marks
 
     def _remove_scilab_exit(self, string):
         """
--
cgit
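Every evaluator touched above now follows one contract: check_code() returns a
(success, error, test_case_marks) triple instead of the old pair, and a case's
weight counts only when partial grading is enabled and the case actually passed.
A hedged, generic sketch of that shared pattern (run_one_case is an invented
stand-in for the per-language compile-and-run step):

    def run_one_case(user_answer, test_case):
        # Stand-in for compiling/executing the submission against one case.
        return user_answer == test_case

    def check_code(user_answer, file_paths, partial_grading, test_case, marks):
        success = run_one_case(user_answer, test_case)
        err = "Correct answer" if success else "Error: expected {0}".format(test_case)
        test_case_marks = float(marks) if partial_grading and success else 0.0
        return success, err, test_case_marks

    print(check_code("42", None, True, "42", 0.5))    # (True, 'Correct answer', 0.5)
    print(check_code("42", None, False, "42", 0.5))   # (True, 'Correct answer', 0.0)

Note that at this point two regressions remain in the series: BashCodeEvaluator.check_code
dropped its user_answer parameter, and BashStdioEvaluator.check_code still returns the
old two-tuple; both are repaired in the follow-up commit below.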
From 576c92b3b7d8989bf6bb99316d096f48f5b4cc24 Mon Sep 17 00:00:00 2001
From: ankitjavalkar
Date: Fri, 4 Nov 2016 18:07:35 +0530
Subject: Fix test cases for partial grading feature

---
 yaksh/bash_code_evaluator.py                    |   2 +-
 yaksh/bash_stdio_evaluator.py                   |   2 +-
 yaksh/code_evaluator.py                         |   1 +
 yaksh/evaluator_tests/test_bash_evaluation.py   |  59 +++++++----
 yaksh/evaluator_tests/test_c_cpp_evaluation.py  | 125 +++++++++++++---------
 yaksh/evaluator_tests/test_java_evaluation.py   |  89 +++++++++------
 yaksh/evaluator_tests/test_python_evaluation.py | 103 ++++++++++-------
 yaksh/evaluator_tests/test_scilab_evaluation.py |  16 ++-
 yaksh/python_stdio_evaluator.py                 |  13 ++-
 yaksh/tests/test_code_server.py                 |  24 ++++-
 10 files changed, 291 insertions(+), 143 deletions(-)

(limited to 'yaksh')

diff --git a/yaksh/bash_code_evaluator.py b/yaksh/bash_code_evaluator.py
index 7dc0f0f..4ca5445 100644
--- a/yaksh/bash_code_evaluator.py
+++ b/yaksh/bash_code_evaluator.py
@@ -28,7 +28,7 @@ class BashCodeEvaluator(CodeEvaluator):
             delete_files(self.files)
         super(BashCodeEvaluator, self).teardown()
 
-    def check_code(self, file_paths, partial_grading, test_case, marks):
+    def check_code(self, user_answer, file_paths, partial_grading, test_case, marks):
         """ Function validates student script using instructor script as
         reference. Test cases can optionally be provided.  The first argument
         ref_path, is the path to instructor script, it is assumed to
diff --git a/yaksh/bash_stdio_evaluator.py b/yaksh/bash_stdio_evaluator.py
index 0bab0f0..2826a6b 100644
--- a/yaksh/bash_stdio_evaluator.py
+++ b/yaksh/bash_stdio_evaluator.py
@@ -50,4 +50,4 @@ class BashStdioEvaluator(StdIOEvaluator):
                                          expected_output
                                          )
         test_case_marks = float(marks) if partial_grading and success else 0.0
-        return success, err
+        return success, err, test_case_marks
diff --git a/yaksh/code_evaluator.py b/yaksh/code_evaluator.py
index b404d57..1672efa 100644
--- a/yaksh/code_evaluator.py
+++ b/yaksh/code_evaluator.py
@@ -123,6 +123,7 @@ class CodeEvaluator(object):
                 )
                 if success:
                     marks += test_case_marks
+                    error = err
                 else:
                     error += err + "\n"
 
diff --git a/yaksh/evaluator_tests/test_bash_evaluation.py b/yaksh/evaluator_tests/test_bash_evaluation.py
index 66ade19..5f4be4b 100644
--- a/yaksh/evaluator_tests/test_bash_evaluation.py
+++ b/yaksh/evaluator_tests/test_bash_evaluation.py
@@ -13,12 +13,12 @@ class BashAssertionEvaluationTestCases(unittest.TestCase):
     def setUp(self):
         with open('/tmp/test.txt', 'wb') as f:
             f.write('2'.encode('ascii'))
-        tmp_in_dir_path = tempfile.mkdtemp()
         self.test_case_data = [
-            {"test_case": "bash_files/sample.sh,bash_files/sample.args"}
+            {"test_case": "bash_files/sample.sh,bash_files/sample.args",
+             "marks": 0.0
+            }
         ]
-        tmp_in_dir_path = tempfile.mkdtemp()
-        self.in_dir = tmp_in_dir_path
+        self.in_dir = tempfile.mkdtemp()
         self.timeout_msg = ("Code took more than {0} seconds to run. "
" "You probably have an infinite loop in your" " code.").format(SERVER_TIMEOUT) @@ -93,11 +100,15 @@ class BashStdioEvaluationTestCases(unittest.TestCase): echo -n `expr $A + $B` """ ) - test_case_data = [{'expected_output': '11', 'expected_input': '5\n6'}] + test_case_data = [{'expected_output': '11', + 'expected_input': '5\n6', + 'marks': 0.0 + }] get_class = BashStdioEvaluator() kwargs = {"user_answer": user_answer, - "test_case_data": test_case_data - } + "partial_grading": True, + "test_case_data": test_case_data + } result = get_class.evaluate(**kwargs) self.assertEqual(result.get('error'), "Correct answer") self.assertTrue(result.get('success')) @@ -112,12 +123,14 @@ class BashStdioEvaluationTestCases(unittest.TestCase): """ ) test_case_data = [{'expected_output': '1 2 3\n4 5 6\n7 8 9\n', - 'expected_input': '1,2,3\n4,5,6\n7,8,9' + 'expected_input': '1,2,3\n4,5,6\n7,8,9', + 'marks': 0.0 }] get_class = BashStdioEvaluator() kwargs = {"user_answer": user_answer, - "test_case_data": test_case_data - } + "partial_grading": True, + "test_case_data": test_case_data + } result = get_class.evaluate(**kwargs) self.assertEqual(result.get('error'), "Correct answer") self.assertTrue(result.get('success')) @@ -129,11 +142,15 @@ class BashStdioEvaluationTestCases(unittest.TestCase): echo -n `expr $A - $B` """ ) - test_case_data = [{'expected_output': '11', 'expected_input': '5\n6'}] + test_case_data = [{'expected_output': '11', + 'expected_input': '5\n6', + 'marks': 0.0 + }] get_class = BashStdioEvaluator() kwargs = {"user_answer": user_answer, - "test_case_data": test_case_data - } + "partial_grading": True, + "test_case_data": test_case_data + } result = get_class.evaluate(**kwargs) self.assertIn("Incorrect", result.get('error')) self.assertFalse(result.get('success')) @@ -146,12 +163,14 @@ class BashStdioEvaluationTestCases(unittest.TestCase): """ ) test_case_data = [{'expected_output': '10', - 'expected_input': '' + 'expected_input': '', + 'marks': 0.0 }] get_class = BashStdioEvaluator() kwargs = {"user_answer": user_answer, - "test_case_data": test_case_data - } + "partial_grading": True, + "test_case_data": test_case_data + } result = get_class.evaluate(**kwargs) self.assertEqual(result.get('error'), "Correct answer") self.assertTrue(result.get('success')) diff --git a/yaksh/evaluator_tests/test_c_cpp_evaluation.py b/yaksh/evaluator_tests/test_c_cpp_evaluation.py index c990436..79326d4 100644 --- a/yaksh/evaluator_tests/test_c_cpp_evaluation.py +++ b/yaksh/evaluator_tests/test_c_cpp_evaluation.py @@ -14,7 +14,9 @@ class CAssertionEvaluationTestCases(unittest.TestCase): with open('/tmp/test.txt', 'wb') as f: f.write('2'.encode('ascii')) tmp_in_dir_path = tempfile.mkdtemp() - self.test_case_data = [{"test_case": "c_cpp_files/main.cpp"}] + self.test_case_data = [{"test_case": "c_cpp_files/main.cpp", + "marks": 0.0 + }] self.in_dir = tmp_in_dir_path self.timeout_msg = ("Code took more than {0} seconds to run. 
" "You probably have an infinite loop in your" @@ -29,6 +31,7 @@ class CAssertionEvaluationTestCases(unittest.TestCase): user_answer = "int add(int a, int b)\n{return a+b;}" get_class = CppCodeEvaluator(self.in_dir) kwargs = {'user_answer': user_answer, + 'partial_grading': False, 'test_case_data': self.test_case_data, 'file_paths': self.file_paths } @@ -40,6 +43,7 @@ class CAssertionEvaluationTestCases(unittest.TestCase): user_answer = "int add(int a, int b)\n{return a-b;}" get_class = CppCodeEvaluator(self.in_dir) kwargs = {'user_answer': user_answer, + 'partial_grading': False, 'test_case_data': self.test_case_data, 'file_paths': self.file_paths } @@ -52,7 +56,8 @@ class CAssertionEvaluationTestCases(unittest.TestCase): def test_compilation_error(self): user_answer = "int add(int a, int b)\n{return a+b}" get_class = CppCodeEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, + kwargs = {'user_answer': user_answer, + 'partial_grading': False, 'test_case_data': self.test_case_data, 'file_paths': self.file_paths } @@ -63,7 +68,8 @@ class CAssertionEvaluationTestCases(unittest.TestCase): def test_infinite_loop(self): user_answer = "int add(int a, int b)\n{while(1>0){}}" get_class = CppCodeEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, + kwargs = {'user_answer': user_answer, + 'partial_grading': False, 'test_case_data': self.test_case_data, 'file_paths': self.file_paths } @@ -73,7 +79,9 @@ class CAssertionEvaluationTestCases(unittest.TestCase): def test_file_based_assert(self): self.file_paths = [('/tmp/test.txt', False)] - self.test_case_data = [{"test_case": "c_cpp_files/file_data.c"}] + self.test_case_data = [{"test_case": "c_cpp_files/file_data.c", + "marks": 0.0 + }] user_answer = dedent(""" #include char ans() @@ -88,18 +96,21 @@ class CAssertionEvaluationTestCases(unittest.TestCase): """) get_class = CppCodeEvaluator(self.in_dir) kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths - } + 'partial_grading': False, + 'test_case_data': self.test_case_data, + 'file_paths': self.file_paths + } result = get_class.evaluate(**kwargs) self.assertTrue(result.get('success')) self.assertEqual(result.get('error'), "Correct answer") class CppStdioEvaluationTestCases(unittest.TestCase): - def setUp(self): - self.test_case_data = [{'expected_output': '11', 'expected_input': '5\n6'}] - self.in_dir = os.getcwd() + self.test_case_data = [{'expected_output': '11', + 'expected_input': '5\n6', + 'marks': 0.0 + }] + self.in_dir = tempfile.mkdtemp() self.timeout_msg = ("Code took more than {0} seconds to run. 
" "You probably have an infinite loop in" " your code.").format(SERVER_TIMEOUT) @@ -114,15 +125,18 @@ class CppStdioEvaluationTestCases(unittest.TestCase): }""") get_class = CppStdioEvaluator() kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data - } + 'partial_grading': False, + 'test_case_data': self.test_case_data + } result = get_class.evaluate(**kwargs) self.assertEqual(result.get('error'), "Correct answer") self.assertTrue(result.get('success')) def test_array_input(self): self.test_case_data = [{'expected_output': '561', - 'expected_input': '5\n6\n1'}] + 'expected_input': '5\n6\n1', + 'marks': 0.0 + }] user_answer = dedent(""" #include int main(void){ @@ -134,15 +148,18 @@ class CppStdioEvaluationTestCases(unittest.TestCase): }""") get_class = CppStdioEvaluator() kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data - } + 'partial_grading': False, + 'test_case_data': self.test_case_data + } result = get_class.evaluate(**kwargs) self.assertEqual(result.get('error'), "Correct answer") self.assertTrue(result.get('success')) def test_string_input(self): self.test_case_data = [{'expected_output': 'abc', - 'expected_input': 'abc'}] + 'expected_input': 'abc', + 'marks': 0.0 + }] user_answer = dedent(""" #include int main(void){ @@ -152,8 +169,9 @@ class CppStdioEvaluationTestCases(unittest.TestCase): }""") get_class = CppStdioEvaluator() kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data - } + 'partial_grading': False, + 'test_case_data': self.test_case_data + } result = get_class.evaluate(**kwargs) self.assertEqual(result.get('error'), "Correct answer") self.assertTrue(result.get('success')) @@ -167,8 +185,9 @@ class CppStdioEvaluationTestCases(unittest.TestCase): }""") get_class = CppStdioEvaluator() kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data - } + 'partial_grading': False, + 'test_case_data': self.test_case_data + } result = get_class.evaluate(**kwargs) lines_of_error = len(result.get('error').splitlines()) self.assertFalse(result.get('success')) @@ -184,8 +203,9 @@ class CppStdioEvaluationTestCases(unittest.TestCase): }""") get_class = CppStdioEvaluator() kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data - } + 'partial_grading': False, + 'test_case_data': self.test_case_data + } result = get_class.evaluate(**kwargs) self.assertFalse(result.get("success")) self.assertTrue("Compilation Error" in result.get("error")) @@ -199,15 +219,18 @@ class CppStdioEvaluationTestCases(unittest.TestCase): }""") get_class = CppStdioEvaluator() kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data - } + 'partial_grading': False, + 'test_case_data': self.test_case_data + } result = get_class.evaluate(**kwargs) self.assertFalse(result.get("success")) self.assertEqual(result.get("error"), self.timeout_msg) def test_only_stdout(self): self.test_case_data = [{'expected_output': '11', - 'expected_input': ''}] + 'expected_input': '', + 'marks': 0.0 + }] user_answer = dedent(""" #include int main(void){ @@ -216,8 +239,9 @@ class CppStdioEvaluationTestCases(unittest.TestCase): }""") get_class = CppStdioEvaluator() kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data - } + 'partial_grading': False, + 'test_case_data': self.test_case_data + } result = get_class.evaluate(**kwargs) self.assertEqual(result.get('error'), "Correct answer") self.assertTrue(result.get('success')) @@ -233,15 +257,18 @@ class 
CppStdioEvaluationTestCases(unittest.TestCase): }""") get_class = CppStdioEvaluator() kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data - } + 'partial_grading': False, + 'test_case_data': self.test_case_data + } result = get_class.evaluate(**kwargs) self.assertEqual(result.get('error'), "Correct answer") self.assertTrue(result.get('success')) def test_cpp_array_input(self): self.test_case_data = [{'expected_output': '561', - 'expected_input': '5\n6\n1'}] + 'expected_input': '5\n6\n1', + 'marks': 0.0 + }] user_answer = dedent(""" #include using namespace std; @@ -254,15 +281,18 @@ class CppStdioEvaluationTestCases(unittest.TestCase): }""") get_class = CppStdioEvaluator() kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data - } + 'partial_grading': False, + 'test_case_data': self.test_case_data + } result = get_class.evaluate(**kwargs) self.assertEqual(result.get('error'), "Correct answer") self.assertTrue(result.get('success')) def test_cpp_string_input(self): self.test_case_data = [{'expected_output': 'abc', - 'expected_input': 'abc'}] + 'expected_input': 'abc', + 'marks': 0.0 + }] user_answer = dedent(""" #include using namespace std; @@ -273,8 +303,9 @@ class CppStdioEvaluationTestCases(unittest.TestCase): }""") get_class = CppStdioEvaluator() kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data - } + 'partial_grading': False, + 'test_case_data': self.test_case_data + } result = get_class.evaluate(**kwargs) self.assertEqual(result.get('error'), "Correct answer") self.assertTrue(result.get('success')) @@ -289,8 +320,9 @@ class CppStdioEvaluationTestCases(unittest.TestCase): }""") get_class = CppStdioEvaluator() kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data - } + 'partial_grading': False, + 'test_case_data': self.test_case_data + } result = get_class.evaluate(**kwargs) lines_of_error = len(result.get('error').splitlines()) self.assertFalse(result.get('success')) @@ -307,8 +339,9 @@ class CppStdioEvaluationTestCases(unittest.TestCase): }""") get_class = CppStdioEvaluator() kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data - } + 'partial_grading': False, + 'test_case_data': self.test_case_data + } result = get_class.evaluate(**kwargs) self.assertFalse(result.get("success")) self.assertTrue("Compilation Error" in result.get("error")) @@ -323,15 +356,18 @@ class CppStdioEvaluationTestCases(unittest.TestCase): }""") get_class = CppStdioEvaluator() kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data - } + 'partial_grading': False, + 'test_case_data': self.test_case_data + } result = get_class.evaluate(**kwargs) self.assertFalse(result.get("success")) self.assertEqual(result.get("error"), self.timeout_msg) def test_cpp_only_stdout(self): self.test_case_data = [{'expected_output': '11', - 'expected_input': ''}] + 'expected_input': '', + 'marks': 0.0 + }] user_answer = dedent(""" #include using namespace std; @@ -341,8 +377,9 @@ class CppStdioEvaluationTestCases(unittest.TestCase): }""") get_class = CppStdioEvaluator() kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data - } + 'partial_grading': False, + 'test_case_data': self.test_case_data + } result = get_class.evaluate(**kwargs) self.assertEqual(result.get('error'), "Correct answer") self.assertTrue(result.get('success')) diff --git a/yaksh/evaluator_tests/test_java_evaluation.py b/yaksh/evaluator_tests/test_java_evaluation.py index e375bdb..33e0e35 100644 
--- a/yaksh/evaluator_tests/test_java_evaluation.py +++ b/yaksh/evaluator_tests/test_java_evaluation.py @@ -16,7 +16,9 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase): f.write('2'.encode('ascii')) tmp_in_dir_path = tempfile.mkdtemp() self.test_case_data = [ - {"test_case": "java_files/main_square.java"} + {"test_case": "java_files/main_square.java", + "marks": 0.0 + } ] self.in_dir = tmp_in_dir_path evaluator.SERVER_TIMEOUT = 9 @@ -32,7 +34,8 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase): def test_correct_answer(self): user_answer = "class Test {\n\tint square_num(int a) {\n\treturn a*a;\n\t}\n}" get_class = JavaCodeEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, + kwargs = {'user_answer': user_answer, + 'partial_grading': True, 'test_case_data': self.test_case_data, 'file_paths': self.file_paths } @@ -43,7 +46,8 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase): def test_incorrect_answer(self): user_answer = "class Test {\n\tint square_num(int a) {\n\treturn a;\n\t}\n}" get_class = JavaCodeEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, + kwargs = {'user_answer': user_answer, + 'partial_grading': True, 'test_case_data': self.test_case_data, 'file_paths': self.file_paths } @@ -57,7 +61,8 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase): def test_error(self): user_answer = "class Test {\n\tint square_num(int a) {\n\treturn a*a" get_class = JavaCodeEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, + kwargs = {'user_answer': user_answer, + 'partial_grading': True, 'test_case_data': self.test_case_data, 'file_paths': self.file_paths } @@ -68,7 +73,8 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase): def test_infinite_loop(self): user_answer = "class Test {\n\tint square_num(int a) {\n\t\twhile(0==0){\n\t\t}\n\t}\n}" get_class = JavaCodeEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, + kwargs = {'user_answer': user_answer, + 'partial_grading': True, 'test_case_data': self.test_case_data, 'file_paths': self.file_paths } @@ -79,7 +85,9 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase): def test_file_based_assert(self): self.file_paths = [("/tmp/test.txt", False)] self.test_case_data = [ - {"test_case": "java_files/read_file.java"} + {"test_case": "java_files/read_file.java", + "marks": 0.0 + } ] user_answer = dedent(""" import java.io.BufferedReader; @@ -101,9 +109,10 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase): """) get_class = JavaCodeEvaluator(self.in_dir) kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths - } + 'partial_grading': True, + 'test_case_data': self.test_case_data, + 'file_paths': self.file_paths + } result = get_class.evaluate(**kwargs) self.assertTrue(result.get("success")) self.assertEqual(result.get("error"), "Correct answer") @@ -116,7 +125,9 @@ class JavaStdioEvaluationTestCases(unittest.TestCase): tmp_in_dir_path = tempfile.mkdtemp() self.in_dir = tmp_in_dir_path self.test_case_data = [{'expected_output': '11', - 'expected_input': '5\n6'}] + 'expected_input': '5\n6', + 'marks': 0.0 + }] evaluator.SERVER_TIMEOUT = 4 self.timeout_msg = ("Code took more than {0} seconds to run. 
" "You probably have an infinite loop in" @@ -139,8 +150,9 @@ class JavaStdioEvaluationTestCases(unittest.TestCase): }}""") get_class = JavaStdioEvaluator(self.in_dir) kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data - } + 'partial_grading': True, + 'test_case_data': self.test_case_data + } result = get_class.evaluate(**kwargs) self.assertEqual(result.get('error'), "Correct answer") self.assertTrue(result.get('success')) @@ -148,7 +160,9 @@ class JavaStdioEvaluationTestCases(unittest.TestCase): def test_array_input(self): self.test_case_data = [{'expected_output': '561', - 'expected_input': '5\n6\n1'}] + 'expected_input': '5\n6\n1', + 'marks': 0.0 + }] user_answer = dedent(""" import java.util.Scanner; class Test @@ -161,8 +175,9 @@ class JavaStdioEvaluationTestCases(unittest.TestCase): }}""") get_class = JavaStdioEvaluator(self.in_dir) kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data - } + 'partial_grading': True, + 'test_case_data': self.test_case_data + } result = get_class.evaluate(**kwargs) self.assertEqual(result.get('error'), "Correct answer") self.assertTrue(result.get('success')) @@ -180,8 +195,9 @@ class JavaStdioEvaluationTestCases(unittest.TestCase): }}""") get_class = JavaStdioEvaluator(self.in_dir) kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data - } + 'partial_grading': True, + 'test_case_data': self.test_case_data + } result = get_class.evaluate(**kwargs) lines_of_error = len(result.get('error').splitlines()) self.assertFalse(result.get('success')) @@ -197,8 +213,9 @@ class JavaStdioEvaluationTestCases(unittest.TestCase): }""") get_class = JavaStdioEvaluator(self.in_dir) kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data - } + 'partial_grading': True, + 'test_case_data': self.test_case_data + } result = get_class.evaluate(**kwargs) self.assertFalse(result.get("success")) self.assertTrue("Compilation Error" in result.get("error")) @@ -214,15 +231,18 @@ class JavaStdioEvaluationTestCases(unittest.TestCase): }}""") get_class = JavaStdioEvaluator(self.in_dir) kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data - } + 'partial_grading': True, + 'test_case_data': self.test_case_data + } result = get_class.evaluate(**kwargs) self.assertFalse(result.get("success")) self.assertEqual(result.get("error"), self.timeout_msg) def test_only_stdout(self): self.test_case_data = [{'expected_output': '11', - 'expected_input': ''}] + 'expected_input': '', + 'marks': 0.0 + }] user_answer = dedent(""" class Test {public static void main(String[] args){ @@ -232,15 +252,18 @@ class JavaStdioEvaluationTestCases(unittest.TestCase): }}""") get_class = JavaStdioEvaluator(self.in_dir) kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data - } + 'partial_grading': True, + 'test_case_data': self.test_case_data + } result = get_class.evaluate(**kwargs) self.assertEqual(result.get('error'), "Correct answer") self.assertTrue(result.get('success')) def test_string_input(self): self.test_case_data = [{'expected_output': 'HelloWorld', - 'expected_input': 'Hello\nWorld'}] + 'expected_input': 'Hello\nWorld', + 'marks': 0.0 + }] user_answer = dedent(""" import java.util.Scanner; class Test @@ -252,8 +275,9 @@ class JavaStdioEvaluationTestCases(unittest.TestCase): }}""") get_class = JavaStdioEvaluator(self.in_dir) kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data - } + 'partial_grading': True, + 'test_case_data': 
self.test_case_data + } result = get_class.evaluate(**kwargs) self.assertEqual(result.get('error'), "Correct answer") self.assertTrue(result.get('success')) @@ -261,7 +285,9 @@ class JavaStdioEvaluationTestCases(unittest.TestCase): def test_file_based_stdout(self): self.file_paths = [("/tmp/test.txt", False)] self.test_case_data = [{'expected_output': '2', - 'expected_input': ''}] + 'expected_input': '', + 'marks': 0.0 + }] user_answer = dedent(""" import java.io.BufferedReader; import java.io.FileReader; @@ -282,9 +308,10 @@ class JavaStdioEvaluationTestCases(unittest.TestCase): """) get_class = JavaStdioEvaluator(self.in_dir) kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths - } + 'partial_grading': True, + 'test_case_data': self.test_case_data, + 'file_paths': self.file_paths + } result = get_class.evaluate(**kwargs) self.assertTrue(result.get("success")) self.assertEqual(result.get("error"), "Correct answer") diff --git a/yaksh/evaluator_tests/test_python_evaluation.py b/yaksh/evaluator_tests/test_python_evaluation.py index 45cc40d..da64dc4 100644 --- a/yaksh/evaluator_tests/test_python_evaluation.py +++ b/yaksh/evaluator_tests/test_python_evaluation.py @@ -17,9 +17,9 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): f.write('2'.encode('ascii')) tmp_in_dir_path = tempfile.mkdtemp() self.in_dir = tmp_in_dir_path - self.test_case_data = [{"test_case": 'assert(add(1,2)==3)'}, - {"test_case": 'assert(add(-1,2)==1)'}, - {"test_case": 'assert(add(-1,-2)==-3)'}, + self.test_case_data = [{"test_case": 'assert(add(1,2)==3)', 'marks': 0.0}, + {"test_case": 'assert(add(-1,2)==1)', 'marks': 0.0}, + {"test_case": 'assert(add(-1,-2)==-3)', 'marks': 0.0}, ] self.timeout_msg = ("Code took more than {0} seconds to run. 
" "You probably have an infinite loop in" @@ -35,7 +35,8 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): user_answer = "def add(a,b):\n\treturn a + b" kwargs = {'user_answer': user_answer, 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths + 'file_paths': self.file_paths, + 'partial_grading': False } # When @@ -51,7 +52,8 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): user_answer = "def add(a,b):\n\treturn a - b" kwargs = {'user_answer': user_answer, 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths + 'file_paths': self.file_paths, + 'partial_grading': False } # When @@ -59,17 +61,22 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): result = evaluator.evaluate(**kwargs) # Then + print repr(result.get('error')) self.assertFalse(result.get('success')) self.assertEqual(result.get('error'), - "AssertionError in: assert(add(1,2)==3)" - ) + ('AssertionError in: assert(add(1,2)==3)\n' + 'AssertionError in: assert(add(-1,2)==1)\n' + 'AssertionError in: assert(add(-1,-2)==-3)\n' + ) + ) def test_infinite_loop(self): # Given user_answer = "def add(a, b):\n\twhile True:\n\t\tpass" kwargs = {'user_answer': user_answer, 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths + 'file_paths': self.file_paths, + 'partial_grading': False } # When @@ -96,7 +103,8 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): ] kwargs = {'user_answer': user_answer, 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths + 'file_paths': self.file_paths, + 'partial_grading': False } # When @@ -125,7 +133,9 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): ] kwargs = {'user_answer': user_answer, 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths + 'file_paths': self.file_paths, + 'partial_grading': False + } # When @@ -150,7 +160,8 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): ] kwargs = {'user_answer': user_answer, 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths + 'file_paths': self.file_paths, + 'partial_grading': False } # When @@ -176,7 +187,8 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): ] kwargs = {'user_answer': user_answer, 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths + 'file_paths': self.file_paths, + 'partial_grading': False } # When @@ -202,7 +214,8 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): ] kwargs = {'user_answer': user_answer, 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths + 'file_paths': self.file_paths, + 'partial_grading': False } # When @@ -231,7 +244,8 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): ] kwargs = {'user_answer': user_answer, 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths + 'file_paths': self.file_paths, + 'partial_grading': False } # When @@ -247,7 +261,7 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): def test_file_based_assert(self): # Given - self.test_case_data = [{"test_case": "assert(ans()=='2')"}] + self.test_case_data = [{"test_case": "assert(ans()=='2')", "marks": 0.0}] self.file_paths = [('/tmp/test.txt', False)] user_answer = dedent(""" def ans(): @@ -256,7 +270,8 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): """) kwargs = {'user_answer': user_answer, 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths + 'file_paths': self.file_paths, + 'partial_grading': False } # When @@ -272,7 +287,9 @@ class 
PythonAssertionEvaluationTestCases(unittest.TestCase): """ Tests the user answer with just an incorrect test case """ user_answer = "def palindrome(a):\n\treturn a == a[::-1]" - test_case_data = [{"test_case": 's="abbb"\nasert palindrome(s)==False'} + test_case_data = [{"test_case": 's="abbb"\nasert palindrome(s)==False', + "marks": 0.0 + } ] syntax_error_msg = ["Traceback", "call", @@ -284,7 +301,8 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): ] kwargs = {'user_answer': user_answer, 'test_case_data': test_case_data, - 'file_paths': self.file_paths + 'file_paths': self.file_paths, + 'partial_grading': False } # When @@ -304,8 +322,12 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): first and then with an incorrect test case """ # Given user_answer = "def palindrome(a):\n\treturn a == a[::-1]" - test_case_data = [{"test_case": 'assert(palindrome("abba")==True)'}, - {"test_case": 's="abbb"\nassert palindrome(S)==False'} + test_case_data = [{"test_case": 'assert(palindrome("abba")==True)', + "marks": 0.0 + }, + {"test_case": 's="abbb"\nassert palindrome(S)==False', + "marks": 0.0 + } ] name_error_msg = ["Traceback", "call", @@ -317,7 +339,8 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): ] kwargs = {'user_answer': user_answer, 'test_case_data': test_case_data, - 'file_paths': self.file_paths + 'file_paths': self.file_paths, + 'partial_grading': False } # When @@ -341,7 +364,8 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase): def test_correct_answer_integer(self): # Given self.test_case_data = [{"expected_input": "1\n2", - "expected_output": "3" + "expected_output": "3", + "marks": 0.0 }] user_answer = dedent(""" a = int(input()) @@ -350,7 +374,8 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase): """ ) kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data + 'test_case_data': self.test_case_data, + 'partial_grading': False } # When @@ -364,7 +389,8 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase): def test_correct_answer_list(self): # Given self.test_case_data = [{"expected_input": "1,2,3\n5,6,7", - "expected_output": "[1, 2, 3, 5, 6, 7]" + "expected_output": "[1, 2, 3, 5, 6, 7]", + "marks": 0.0 }] user_answer = dedent(""" from six.moves import input @@ -376,7 +402,8 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase): """ ) kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data + 'test_case_data': self.test_case_data, + 'partial_grading': False } # When @@ -390,7 +417,8 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase): def test_correct_answer_string(self): # Given self.test_case_data = [{"expected_input": ("the quick brown fox jumps over the lazy dog\nthe"), - "expected_output": "2" + "expected_output": "2", + "marks": 0.0 }] user_answer = dedent(""" from six.moves import input @@ -400,7 +428,8 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase): """ ) kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data + 'test_case_data': self.test_case_data, + 'partial_grading': False } # When @@ -414,7 +443,8 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase): def test_incorrect_answer_integer(self): # Given self.test_case_data = [{"expected_input": "1\n2", - "expected_output": "3" + "expected_output": "3", + "marks": 0.0 }] user_answer = dedent(""" a = int(input()) @@ -424,6 +454,7 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase): ) kwargs = {'user_answer': user_answer, 'test_case_data': self.test_case_data, + 
'partial_grading': False } # When @@ -436,7 +467,10 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase): def test_file_based_answer(self): # Given - self.test_case_data = [{"expected_input": "", "expected_output": "2"}] + self.test_case_data = [{"expected_input": "", + "expected_output": "2", + "marks": 0.0 + }] self.file_paths = [('/tmp/test.txt', False)] user_answer = dedent(""" @@ -447,7 +481,8 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase): ) kwargs = {'user_answer': user_answer, 'test_case_data': self.test_case_data, - 'file_paths': self.file_paths + 'file_paths': self.file_paths, + 'partial_grading': False } # When @@ -461,14 +496,16 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase): def test_infinite_loop(self): # Given test_case_data = [{"expected_input": "1\n2", - "expected_output": "3" - }] + "expected_output": "3", + "marks": 0.0 + }] timeout_msg = ("Code took more than {0} seconds to run. " "You probably have an infinite loop in" " your code.").format(SERVER_TIMEOUT) user_answer = "while True:\n\tpass" kwargs = {'user_answer': user_answer, - 'test_case_data': test_case_data + 'test_case_data': test_case_data, + 'partial_grading': False } # When diff --git a/yaksh/evaluator_tests/test_scilab_evaluation.py b/yaksh/evaluator_tests/test_scilab_evaluation.py index b366480..b46a65b 100644 --- a/yaksh/evaluator_tests/test_scilab_evaluation.py +++ b/yaksh/evaluator_tests/test_scilab_evaluation.py @@ -11,7 +11,9 @@ from yaksh.settings import SERVER_TIMEOUT class ScilabEvaluationTestCases(unittest.TestCase): def setUp(self): tmp_in_dir_path = tempfile.mkdtemp() - self.test_case_data = [{"test_case": "scilab_files/test_add.sce"}] + self.test_case_data = [{"test_case": "scilab_files/test_add.sce", + "marks": 0.0 + }] self.in_dir = tmp_in_dir_path self.timeout_msg = ("Code took more than {0} seconds to run. 
" "You probably have an infinite loop" @@ -25,7 +27,8 @@ class ScilabEvaluationTestCases(unittest.TestCase): user_answer = ("funcprot(0)\nfunction[c]=add(a,b)" "\n\tc=a+b;\nendfunction") get_class = ScilabCodeEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, + kwargs = {'user_answer': user_answer, + 'partial_grading': True, 'test_case_data': self.test_case_data, 'file_paths': self.file_paths } @@ -37,7 +40,8 @@ class ScilabEvaluationTestCases(unittest.TestCase): user_answer = ("funcprot(0)\nfunction[c]=add(a,b)" "\n\tc=a+b;\ndis(\tendfunction") get_class = ScilabCodeEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, + kwargs = {'user_answer': user_answer, + 'partial_grading': True, 'test_case_data': self.test_case_data, 'file_paths': self.file_paths } @@ -50,7 +54,8 @@ class ScilabEvaluationTestCases(unittest.TestCase): user_answer = ("funcprot(0)\nfunction[c]=add(a,b)" "\n\tc=a-b;\nendfunction") get_class = ScilabCodeEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, + kwargs = {'user_answer': user_answer, + 'partial_grading': True, 'test_case_data': self.test_case_data, 'file_paths': self.file_paths } @@ -64,7 +69,8 @@ class ScilabEvaluationTestCases(unittest.TestCase): user_answer = ("funcprot(0)\nfunction[c]=add(a,b)" "\n\tc=a;\nwhile(1==1)\nend\nendfunction") get_class = ScilabCodeEvaluator(self.in_dir) - kwargs = {'user_answer': user_answer, + kwargs = {'user_answer': user_answer, + 'partial_grading': True, 'test_case_data': self.test_case_data, 'file_paths': self.file_paths } diff --git a/yaksh/python_stdio_evaluator.py b/yaksh/python_stdio_evaluator.py index cbbbfd6..7df0ba1 100644 --- a/yaksh/python_stdio_evaluator.py +++ b/yaksh/python_stdio_evaluator.py @@ -31,6 +31,10 @@ def redirect_stdout(): class PythonStdioEvaluator(CodeEvaluator): """Tests the Python code obtained from Code Server""" + def setup(self): + super(PythonStdioEvaluator, self).setup() + self.files = [] + def teardown(self): # Delete the created file. 
if self.files: @@ -38,8 +42,7 @@ class PythonStdioEvaluator(CodeEvaluator): super(PythonStdioEvaluator, self).teardown() - def compile_code(self, user_answer, file_paths, expected_input, expected_output): - self.files = [] + def compile_code(self, user_answer, file_paths, expected_input, expected_output, marks): if file_paths: self.files = copy_files(file_paths) submitted = compile(user_answer, '', mode='exec') @@ -54,13 +57,15 @@ class PythonStdioEvaluator(CodeEvaluator): self.output_value = output_buffer.getvalue().rstrip("\n") return self.output_value - def check_code(self, user_answer, file_paths, expected_input, expected_output): + def check_code(self, user_answer, file_paths, partial_grading, expected_input, expected_output, marks): success = False + test_case_marks = 0.0 tb = None if self.output_value == expected_output: success = True err = "Correct answer" + test_case_marks = marks else: success = False err = dedent(""" @@ -74,4 +79,4 @@ class PythonStdioEvaluator(CodeEvaluator): ) ) del tb - return success, err + return success, err, test_case_marks diff --git a/yaksh/tests/test_code_server.py b/yaksh/tests/test_code_server.py index 8835110..d446444 100644 --- a/yaksh/tests/test_code_server.py +++ b/yaksh/tests/test_code_server.py @@ -38,7 +38,11 @@ class TestCodeServer(unittest.TestCase): def test_inifinite_loop(self): # Given testdata = {'user_answer': 'while True: pass', - 'test_case_data': [{'test_case':'assert 1==2'}]} + 'partial_grading': True, + 'test_case_data': [{'test_case':'assert 1==2', + 'marks': 0.0 + } + ]} # When result = self.code_server.run_code( @@ -53,7 +57,11 @@ class TestCodeServer(unittest.TestCase): def test_correct_answer(self): # Given testdata = {'user_answer': 'def f(): return 1', - 'test_case_data': [{'test_case':'assert f() == 1'}]} + 'partial_grading': True, + 'test_case_data': [{'test_case':'assert f() == 1', + 'marks': 0.0 + } + ]} # When result = self.code_server.run_code( @@ -68,7 +76,11 @@ class TestCodeServer(unittest.TestCase): def test_wrong_answer(self): # Given testdata = {'user_answer': 'def f(): return 1', - 'test_case_data': [{'test_case':'assert f() == 2'}]} + 'partial_grading': True, + 'test_case_data': [{'test_case':'assert f() == 2', + 'marks': 0.0 + } + ]} # When result = self.code_server.run_code( @@ -87,7 +99,11 @@ class TestCodeServer(unittest.TestCase): def run_code(): """Run an infinite loop.""" testdata = {'user_answer': 'while True: pass', - 'test_case_data': [{'test_case':'assert 1==2'}]} + 'partial_grading': True, + 'test_case_data': [{'test_case':'assert 1==2', + 'marks': 0.0 + } + ]} result = self.code_server.run_code( 'python', 'standardtestcase', json.dumps(testdata), '' ) -- cgit From 4904a8305e7e83a00cef718a42bbbf8e7d5f8740 Mon Sep 17 00:00:00 2001 From: ankitjavalkar Date: Fri, 4 Nov 2016 19:20:20 +0530 Subject: Add partial grade mark for stdio test case and fix model test cases --- yaksh/evaluator_tests/test_python_evaluation.py | 1 - yaksh/models.py | 12 +++++------- yaksh/test_models.py | 8 ++++++-- 3 files changed, 11 insertions(+), 10 deletions(-) (limited to 'yaksh') diff --git a/yaksh/evaluator_tests/test_python_evaluation.py b/yaksh/evaluator_tests/test_python_evaluation.py index da64dc4..fdc1c35 100644 --- a/yaksh/evaluator_tests/test_python_evaluation.py +++ b/yaksh/evaluator_tests/test_python_evaluation.py @@ -61,7 +61,6 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): result = evaluator.evaluate(**kwargs) # Then - print repr(result.get('error')) self.assertFalse(result.get('success')) 
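For reference, the request and response shapes implied by the diffs above are roughly the following. This is a minimal sketch assembled from the field names in these patches; the concrete values are illustrative, and at this point in the series the per-test-case key is still named "marks":

    # Payload sent to the code server: a question-level partial_grading
    # flag plus one dict per test case carrying that case's marks.
    testdata = {'user_answer': 'def f(): return 1',
                'partial_grading': True,
                'test_case_data': [{'test_case': 'assert f() == 1',
                                    'marks': 1.0}]}

    # Shape of the evaluator's reply: overall success, the accumulated
    # error text, and the marks earned over the passing test cases.
    result = {'success': True, 'error': 'Correct answer', 'marks': 1.0}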
self.assertEqual(result.get('error'), ('AssertionError in: assert(add(1,2)==3)\n' diff --git a/yaksh/models.py b/yaksh/models.py index e7a96c9..33c3b42 100644 --- a/yaksh/models.py +++ b/yaksh/models.py @@ -941,14 +941,10 @@ class AnswerPaper(models.Model): def _update_marks_obtained(self): """Updates the total marks earned by student for this paper.""" - # marks = sum([x.marks for x in self.answers.filter(marks__gt=0.0)]) - # if not marks: - # self.marks_obtained = 0 - # else: - # self.marks_obtained = marks marks = 0 for question in self.questions.all(): - max_marks = max([a.marks for a in self.answers.filter(question=question)]) + marks_list = [a.marks for a in self.answers.filter(question=question)] + max_marks = max(marks_list) if marks_list else 0.0 marks += max_marks self.marks_obtained = marks @@ -1128,10 +1124,12 @@ class StandardTestCase(TestCase): class StdioBasedTestCase(TestCase): expected_input = models.TextField(blank=True) expected_output = models.TextField() + marks = models.FloatField(default=0.0) def get_field_value(self): return {"expected_output": self.expected_output, - "expected_input": self.expected_input} + "expected_input": self.expected_input, + "marks": self.marks} def __str__(self): return u'Question: {0} | Exp. Output: {1} | Exp. Input: {2}'.format(self.question, diff --git a/yaksh/test_models.py b/yaksh/test_models.py index 019a339..50d39c1 100644 --- a/yaksh/test_models.py +++ b/yaksh/test_models.py @@ -141,7 +141,9 @@ class QuestionTestCases(unittest.TestCase): ) self.upload_test_case.save() self.user_answer = "demo_answer" - self.test_case_upload_data = [{"test_case": "assert fact(3)==6"}] + self.test_case_upload_data = [{"test_case": "assert fact(3)==6", + "marks": 0.0 + }] questions_data = [{"snippet": "def fact()", "active": True, "points": 1.0, "description": "factorial of a no", @@ -877,7 +879,9 @@ class TestCaseTestCases(unittest.TestCase): self.stdout_based_testcase.save() answer_data = {"user_answer": "demo_answer", "test_case_data": [ - {"test_case": "assert myfunc(12, 13) == 15"} + {"test_case": "assert myfunc(12, 13) == 15", + "marks": 0.0 + } ] } self.answer_data_json = json.dumps(answer_data) -- cgit From a09df642d4f3623ee517aaed2eac1372ebacc0e0 Mon Sep 17 00:00:00 2001 From: ankitjavalkar Date: Thu, 10 Nov 2016 12:36:13 +0530 Subject: Add point based weightage for partial grading instead of percentage based partial grading --- yaksh/bash_code_evaluator.py | 20 +++++++++--------- yaksh/bash_stdio_evaluator.py | 10 ++++----- yaksh/code_evaluator.py | 10 ++++----- yaksh/cpp_code_evaluator.py | 10 ++++----- yaksh/cpp_stdio_evaluator.py | 10 ++++----- yaksh/evaluator_tests/test_bash_evaluation.py | 12 +++++------ yaksh/evaluator_tests/test_c_cpp_evaluation.py | 18 ++++++++--------- yaksh/evaluator_tests/test_java_evaluation.py | 14 ++++++------- yaksh/evaluator_tests/test_python_evaluation.py | 26 ++++++++++++------------ yaksh/evaluator_tests/test_scilab_evaluation.py | 2 +- yaksh/java_code_evaluator.py | 10 ++++----- yaksh/java_stdio_evaluator.py | 10 ++++----- yaksh/models.py | 27 ++++++++++++++++++------- yaksh/python_assertion_evaluator.py | 10 ++++----- yaksh/python_stdio_evaluator.py | 21 ++++++++++--------- yaksh/scilab_code_evaluator.py | 8 ++++---- yaksh/test_models.py | 4 ++-- yaksh/tests/test_code_server.py | 18 ++++++++--------- yaksh/views.py | 10 +++++---- yaksh/xmlrpc_clients.py | 2 +- 20 files changed, 134 insertions(+), 118 deletions(-) (limited to 'yaksh') diff --git a/yaksh/bash_code_evaluator.py b/yaksh/bash_code_evaluator.py 
index 4ca5445..978586f 100644 --- a/yaksh/bash_code_evaluator.py +++ b/yaksh/bash_code_evaluator.py @@ -28,7 +28,7 @@ class BashCodeEvaluator(CodeEvaluator): delete_files(self.files) super(BashCodeEvaluator, self).teardown() - def check_code(self, user_answer, file_paths, partial_grading, test_case, marks): + def check_code(self, user_answer, file_paths, partial_grading, test_case, weightage): """ Function validates student script using instructor script as reference. Test cases can optionally be provided. The first argument ref_path, is the path to instructor script, it is assumed to @@ -74,7 +74,7 @@ class BashCodeEvaluator(CodeEvaluator): return False, msg success = False - test_case_marks = 0.0 + test_case_weightage = 0.0 user_answer = user_answer.replace("\r", "") self.write_to_submit_code_file(self.submit_code_path, user_answer) @@ -93,20 +93,20 @@ class BashCodeEvaluator(CodeEvaluator): ) proc, stdnt_stdout, stdnt_stderr = ret if inst_stdout == stdnt_stdout: - test_case_marks = float(marks) if partial_grading else 0.0 - return True, "Correct answer", test_case_marks + test_case_weightage = float(weightage) if partial_grading else 0.0 + return True, "Correct answer", test_case_weightage else: err = "Error: expected %s, got %s" % (inst_stderr, stdnt_stderr ) - return False, err, test_case_marks + return False, err, test_case_weightage else: if not isfile(clean_test_case_path): msg = "No test case at %s" % clean_test_case_path - return False, msg, test_case_marks + return False, msg, test_case_weightage if not os.access(clean_ref_code_path, os.R_OK): msg = "Test script %s, not readable" % clean_test_case_path - return False, msg, test_case_marks + return False, msg, test_case_weightage # valid_answer is True, so that we can stop once a test case fails valid_answer = True # loop_count has to be greater than or equal to one. 
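Each evaluator touched by this patch repeats the same per-test-case rule. A standalone sketch of that rule (the helper name is hypothetical; the values are illustrative):

    def award_weightage(weightage, passed, partial_grading):
        # A test case contributes its weightage only when it passes and
        # the question has partial grading enabled.
        return float(weightage) if (passed and partial_grading) else 0.0

    assert award_weightage(2.0, True, True) == 2.0   # passing case counts
    assert award_weightage(2.0, True, False) == 0.0  # partial grading off
    assert award_weightage(2.0, False, True) == 0.0  # failed case earns nothing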
@@ -136,11 +136,11 @@ class BashCodeEvaluator(CodeEvaluator): proc, stdnt_stdout, stdnt_stderr = ret valid_answer = inst_stdout == stdnt_stdout if valid_answer and (num_lines == loop_count): - test_case_marks = float(marks) if partial_grading else 0.0 - return True, "Correct answer", test_case_marks + test_case_weightage = float(weightage) if partial_grading else 0.0 + return True, "Correct answer", test_case_weightage else: err = ("Error:expected" " {0}, got {1}").format(inst_stdout+inst_stderr, stdnt_stdout+stdnt_stderr ) - return False, err, test_case_marks + return False, err, test_case_weightage diff --git a/yaksh/bash_stdio_evaluator.py b/yaksh/bash_stdio_evaluator.py index 2826a6b..fab19bf 100644 --- a/yaksh/bash_stdio_evaluator.py +++ b/yaksh/bash_stdio_evaluator.py @@ -23,7 +23,7 @@ class BashStdioEvaluator(StdIOEvaluator): delete_files(self.files) super(BashStdioEvaluator, self).teardown() - def compile_code(self, user_answer, file_paths, expected_input, expected_output, marks): + def compile_code(self, user_answer, file_paths, expected_input, expected_output, weightage): if file_paths: self.files = copy_files(file_paths) if not isfile(self.submit_code_path): @@ -34,9 +34,9 @@ class BashStdioEvaluator(StdIOEvaluator): self.write_to_submit_code_file(self.submit_code_path, user_answer) def check_code(self, user_answer, file_paths, partial_grading, - expected_input, expected_output, marks): + expected_input, expected_output, weightage): success = False - test_case_marks = 0.0 + test_case_weightage = 0.0 expected_input = str(expected_input).replace('\r', '') proc = subprocess.Popen("bash ./Test.sh", @@ -49,5 +49,5 @@ class BashStdioEvaluator(StdIOEvaluator): expected_input, expected_output ) - test_case_marks = float(marks) if partial_grading and success else 0.0 - return success, err, test_case_marks + test_case_weightage = float(weightage) if partial_grading and success else 0.0 + return success, err, test_case_weightage diff --git a/yaksh/code_evaluator.py b/yaksh/code_evaluator.py index 1672efa..fda0a8d 100644 --- a/yaksh/code_evaluator.py +++ b/yaksh/code_evaluator.py @@ -82,7 +82,7 @@ class CodeEvaluator(object): Returns ------- - A tuple: (success, error message, marks). + A tuple: (success, error message, weightage). """ self.setup() @@ -109,20 +109,20 @@ class CodeEvaluator(object): prev_handler = create_signal_handler() success = False error = "" - marks = 0.0 + weightage = 0 # Do whatever testing needed. try: for test_case in test_case_data: success = False self.compile_code(user_answer, file_paths, **test_case) - success, err, test_case_marks = self.check_code(user_answer, + success, err, test_case_weightage = self.check_code(user_answer, file_paths, partial_grading, **test_case ) if success: - marks += test_case_marks + weightage += test_case_weightage error = err else: error += err + "\n" @@ -142,7 +142,7 @@ class CodeEvaluator(object): # Set back any original signal handler. 
set_original_signal_handler(prev_handler) - return success, error, marks + return success, error, weightage def teardown(self): # Cancel the signal diff --git a/yaksh/cpp_code_evaluator.py b/yaksh/cpp_code_evaluator.py index d6c72d6..f069b03 100644 --- a/yaksh/cpp_code_evaluator.py +++ b/yaksh/cpp_code_evaluator.py @@ -50,7 +50,7 @@ class CppCodeEvaluator(CodeEvaluator): ref_output_path) return compile_command, compile_main - def compile_code(self, user_answer, file_paths, test_case, marks): + def compile_code(self, user_answer, file_paths, test_case, weightage): if self.compiled_user_answer and self.compiled_test_code: return None else: @@ -89,7 +89,7 @@ class CppCodeEvaluator(CodeEvaluator): return self.compiled_user_answer, self.compiled_test_code - def check_code(self, user_answer, file_paths, partial_grading, test_case, marks): + def check_code(self, user_answer, file_paths, partial_grading, test_case, weightage): """ Function validates student code using instructor code as reference.The first argument ref_code_path, is the path to instructor code, it is assumed to have executable permission. @@ -109,7 +109,7 @@ class CppCodeEvaluator(CodeEvaluator): if the required permissions are not given to the file(s). """ success = False - test_case_marks = 0.0 + test_case_weightage = 0.0 proc, stdnt_out, stdnt_stderr = self.compiled_user_answer stdnt_stderr = self._remove_null_substitute_char(stdnt_stderr) @@ -129,7 +129,7 @@ class CppCodeEvaluator(CodeEvaluator): proc, stdout, stderr = ret if proc.returncode == 0: success, err = True, "Correct answer" - test_case_marks = float(marks) if partial_grading else 0.0 + test_case_weightage = float(weightage) if partial_grading else 0.0 else: err = "{0} \n {1}".format(stdout, stderr) else: @@ -155,4 +155,4 @@ class CppCodeEvaluator(CodeEvaluator): except: err = "{0} \n {1}".format(err, stdnt_stderr) - return success, err, test_case_marks + return success, err, test_case_weightage diff --git a/yaksh/cpp_stdio_evaluator.py b/yaksh/cpp_stdio_evaluator.py index 9e2fd2e..050cec8 100644 --- a/yaksh/cpp_stdio_evaluator.py +++ b/yaksh/cpp_stdio_evaluator.py @@ -35,7 +35,7 @@ class CppStdioEvaluator(StdIOEvaluator): ref_output_path) return compile_command, compile_main - def compile_code(self, user_answer, file_paths, expected_input, expected_output, marks): + def compile_code(self, user_answer, file_paths, expected_input, expected_output, weightage): if file_paths: self.files = copy_files(file_paths) if not isfile(self.submit_code_path): @@ -62,9 +62,9 @@ class CppStdioEvaluator(StdIOEvaluator): return self.compiled_user_answer, self.compiled_test_code def check_code(self, user_answer, file_paths, partial_grading, - expected_input, expected_output, marks): + expected_input, expected_output, weightage): success = False - test_case_marks = 0.0 + test_case_weightage = 0.0 proc, stdnt_out, stdnt_stderr = self.compiled_user_answer stdnt_stderr = self._remove_null_substitute_char(stdnt_stderr) @@ -106,5 +106,5 @@ class CppStdioEvaluator(StdIOEvaluator): err = err + "\n" + e except: err = err + "\n" + stdnt_stderr - test_case_marks = float(marks) if partial_grading and success else 0.0 - return success, err, test_case_marks + test_case_weightage = float(weightage) if partial_grading and success else 0.0 + return success, err, test_case_weightage diff --git a/yaksh/evaluator_tests/test_bash_evaluation.py b/yaksh/evaluator_tests/test_bash_evaluation.py index 5f4be4b..6c90d3c 100644 --- a/yaksh/evaluator_tests/test_bash_evaluation.py +++ 
b/yaksh/evaluator_tests/test_bash_evaluation.py @@ -15,7 +15,7 @@ class BashAssertionEvaluationTestCases(unittest.TestCase): f.write('2'.encode('ascii')) self.test_case_data = [ {"test_case": "bash_files/sample.sh,bash_files/sample.args", - "marks": 0.0 + "weightage": 0.0 } ] self.in_dir = tempfile.mkdtemp() @@ -72,7 +72,7 @@ class BashAssertionEvaluationTestCases(unittest.TestCase): self.file_paths = [('/tmp/test.txt', False)] self.test_case_data = [ {"test_case": "bash_files/sample1.sh,bash_files/sample1.args", - "marks": 0.0 + "weightage": 0.0 } ] user_answer = ("#!/bin/bash\ncat $1") @@ -102,7 +102,7 @@ class BashStdioEvaluationTestCases(unittest.TestCase): ) test_case_data = [{'expected_output': '11', 'expected_input': '5\n6', - 'marks': 0.0 + 'weightage': 0.0 }] get_class = BashStdioEvaluator() kwargs = {"user_answer": user_answer, @@ -124,7 +124,7 @@ class BashStdioEvaluationTestCases(unittest.TestCase): ) test_case_data = [{'expected_output': '1 2 3\n4 5 6\n7 8 9\n', 'expected_input': '1,2,3\n4,5,6\n7,8,9', - 'marks': 0.0 + 'weightage': 0.0 }] get_class = BashStdioEvaluator() kwargs = {"user_answer": user_answer, @@ -144,7 +144,7 @@ class BashStdioEvaluationTestCases(unittest.TestCase): ) test_case_data = [{'expected_output': '11', 'expected_input': '5\n6', - 'marks': 0.0 + 'weightage': 0.0 }] get_class = BashStdioEvaluator() kwargs = {"user_answer": user_answer, @@ -164,7 +164,7 @@ class BashStdioEvaluationTestCases(unittest.TestCase): ) test_case_data = [{'expected_output': '10', 'expected_input': '', - 'marks': 0.0 + 'weightage': 0.0 }] get_class = BashStdioEvaluator() kwargs = {"user_answer": user_answer, diff --git a/yaksh/evaluator_tests/test_c_cpp_evaluation.py b/yaksh/evaluator_tests/test_c_cpp_evaluation.py index 79326d4..f58833a 100644 --- a/yaksh/evaluator_tests/test_c_cpp_evaluation.py +++ b/yaksh/evaluator_tests/test_c_cpp_evaluation.py @@ -15,7 +15,7 @@ class CAssertionEvaluationTestCases(unittest.TestCase): f.write('2'.encode('ascii')) tmp_in_dir_path = tempfile.mkdtemp() self.test_case_data = [{"test_case": "c_cpp_files/main.cpp", - "marks": 0.0 + "weightage": 0.0 }] self.in_dir = tmp_in_dir_path self.timeout_msg = ("Code took more than {0} seconds to run. " @@ -80,7 +80,7 @@ class CAssertionEvaluationTestCases(unittest.TestCase): def test_file_based_assert(self): self.file_paths = [('/tmp/test.txt', False)] self.test_case_data = [{"test_case": "c_cpp_files/file_data.c", - "marks": 0.0 + "weightage": 0.0 }] user_answer = dedent(""" #include @@ -108,7 +108,7 @@ class CppStdioEvaluationTestCases(unittest.TestCase): def setUp(self): self.test_case_data = [{'expected_output': '11', 'expected_input': '5\n6', - 'marks': 0.0 + 'weightage': 0.0 }] self.in_dir = tempfile.mkdtemp() self.timeout_msg = ("Code took more than {0} seconds to run. 
" @@ -135,7 +135,7 @@ class CppStdioEvaluationTestCases(unittest.TestCase): def test_array_input(self): self.test_case_data = [{'expected_output': '561', 'expected_input': '5\n6\n1', - 'marks': 0.0 + 'weightage': 0.0 }] user_answer = dedent(""" #include @@ -158,7 +158,7 @@ class CppStdioEvaluationTestCases(unittest.TestCase): def test_string_input(self): self.test_case_data = [{'expected_output': 'abc', 'expected_input': 'abc', - 'marks': 0.0 + 'weightage': 0.0 }] user_answer = dedent(""" #include @@ -229,7 +229,7 @@ class CppStdioEvaluationTestCases(unittest.TestCase): def test_only_stdout(self): self.test_case_data = [{'expected_output': '11', 'expected_input': '', - 'marks': 0.0 + 'weightage': 0.0 }] user_answer = dedent(""" #include @@ -267,7 +267,7 @@ class CppStdioEvaluationTestCases(unittest.TestCase): def test_cpp_array_input(self): self.test_case_data = [{'expected_output': '561', 'expected_input': '5\n6\n1', - 'marks': 0.0 + 'weightage': 0.0 }] user_answer = dedent(""" #include @@ -291,7 +291,7 @@ class CppStdioEvaluationTestCases(unittest.TestCase): def test_cpp_string_input(self): self.test_case_data = [{'expected_output': 'abc', 'expected_input': 'abc', - 'marks': 0.0 + 'weightage': 0.0 }] user_answer = dedent(""" #include @@ -366,7 +366,7 @@ class CppStdioEvaluationTestCases(unittest.TestCase): def test_cpp_only_stdout(self): self.test_case_data = [{'expected_output': '11', 'expected_input': '', - 'marks': 0.0 + 'weightage': 0.0 }] user_answer = dedent(""" #include diff --git a/yaksh/evaluator_tests/test_java_evaluation.py b/yaksh/evaluator_tests/test_java_evaluation.py index 33e0e35..142f0bf 100644 --- a/yaksh/evaluator_tests/test_java_evaluation.py +++ b/yaksh/evaluator_tests/test_java_evaluation.py @@ -17,7 +17,7 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase): tmp_in_dir_path = tempfile.mkdtemp() self.test_case_data = [ {"test_case": "java_files/main_square.java", - "marks": 0.0 + "weightage": 0.0 } ] self.in_dir = tmp_in_dir_path @@ -86,7 +86,7 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase): self.file_paths = [("/tmp/test.txt", False)] self.test_case_data = [ {"test_case": "java_files/read_file.java", - "marks": 0.0 + "weightage": 0.0 } ] user_answer = dedent(""" @@ -126,7 +126,7 @@ class JavaStdioEvaluationTestCases(unittest.TestCase): self.in_dir = tmp_in_dir_path self.test_case_data = [{'expected_output': '11', 'expected_input': '5\n6', - 'marks': 0.0 + 'weightage': 0.0 }] evaluator.SERVER_TIMEOUT = 4 self.timeout_msg = ("Code took more than {0} seconds to run. 
" @@ -161,7 +161,7 @@ class JavaStdioEvaluationTestCases(unittest.TestCase): self.test_case_data = [{'expected_output': '561', 'expected_input': '5\n6\n1', - 'marks': 0.0 + 'weightage': 0.0 }] user_answer = dedent(""" import java.util.Scanner; @@ -241,7 +241,7 @@ class JavaStdioEvaluationTestCases(unittest.TestCase): def test_only_stdout(self): self.test_case_data = [{'expected_output': '11', 'expected_input': '', - 'marks': 0.0 + 'weightage': 0.0 }] user_answer = dedent(""" class Test @@ -262,7 +262,7 @@ class JavaStdioEvaluationTestCases(unittest.TestCase): def test_string_input(self): self.test_case_data = [{'expected_output': 'HelloWorld', 'expected_input': 'Hello\nWorld', - 'marks': 0.0 + 'weightage': 0.0 }] user_answer = dedent(""" import java.util.Scanner; @@ -286,7 +286,7 @@ class JavaStdioEvaluationTestCases(unittest.TestCase): self.file_paths = [("/tmp/test.txt", False)] self.test_case_data = [{'expected_output': '2', 'expected_input': '', - 'marks': 0.0 + 'weightage': 0.0 }] user_answer = dedent(""" import java.io.BufferedReader; diff --git a/yaksh/evaluator_tests/test_python_evaluation.py b/yaksh/evaluator_tests/test_python_evaluation.py index fdc1c35..690f474 100644 --- a/yaksh/evaluator_tests/test_python_evaluation.py +++ b/yaksh/evaluator_tests/test_python_evaluation.py @@ -17,9 +17,9 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): f.write('2'.encode('ascii')) tmp_in_dir_path = tempfile.mkdtemp() self.in_dir = tmp_in_dir_path - self.test_case_data = [{"test_case": 'assert(add(1,2)==3)', 'marks': 0.0}, - {"test_case": 'assert(add(-1,2)==1)', 'marks': 0.0}, - {"test_case": 'assert(add(-1,-2)==-3)', 'marks': 0.0}, + self.test_case_data = [{"test_case": 'assert(add(1,2)==3)', 'weightage': 0.0}, + {"test_case": 'assert(add(-1,2)==1)', 'weightage': 0.0}, + {"test_case": 'assert(add(-1,-2)==-3)', 'weightage': 0.0}, ] self.timeout_msg = ("Code took more than {0} seconds to run. 
" "You probably have an infinite loop in" @@ -260,7 +260,7 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): def test_file_based_assert(self): # Given - self.test_case_data = [{"test_case": "assert(ans()=='2')", "marks": 0.0}] + self.test_case_data = [{"test_case": "assert(ans()=='2')", "weightage": 0.0}] self.file_paths = [('/tmp/test.txt', False)] user_answer = dedent(""" def ans(): @@ -287,7 +287,7 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): user_answer = "def palindrome(a):\n\treturn a == a[::-1]" test_case_data = [{"test_case": 's="abbb"\nasert palindrome(s)==False', - "marks": 0.0 + "weightage": 0.0 } ] syntax_error_msg = ["Traceback", @@ -322,10 +322,10 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): # Given user_answer = "def palindrome(a):\n\treturn a == a[::-1]" test_case_data = [{"test_case": 'assert(palindrome("abba")==True)', - "marks": 0.0 + "weightage": 0.0 }, {"test_case": 's="abbb"\nassert palindrome(S)==False', - "marks": 0.0 + "weightage": 0.0 } ] name_error_msg = ["Traceback", @@ -364,7 +364,7 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase): # Given self.test_case_data = [{"expected_input": "1\n2", "expected_output": "3", - "marks": 0.0 + "weightage": 0.0 }] user_answer = dedent(""" a = int(input()) @@ -389,7 +389,7 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase): # Given self.test_case_data = [{"expected_input": "1,2,3\n5,6,7", "expected_output": "[1, 2, 3, 5, 6, 7]", - "marks": 0.0 + "weightage": 0.0 }] user_answer = dedent(""" from six.moves import input @@ -417,7 +417,7 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase): # Given self.test_case_data = [{"expected_input": ("the quick brown fox jumps over the lazy dog\nthe"), "expected_output": "2", - "marks": 0.0 + "weightage": 0.0 }] user_answer = dedent(""" from six.moves import input @@ -443,7 +443,7 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase): # Given self.test_case_data = [{"expected_input": "1\n2", "expected_output": "3", - "marks": 0.0 + "weightage": 0.0 }] user_answer = dedent(""" a = int(input()) @@ -468,7 +468,7 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase): # Given self.test_case_data = [{"expected_input": "", "expected_output": "2", - "marks": 0.0 + "weightage": 0.0 }] self.file_paths = [('/tmp/test.txt', False)] @@ -496,7 +496,7 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase): # Given test_case_data = [{"expected_input": "1\n2", "expected_output": "3", - "marks": 0.0 + "weightage": 0.0 }] timeout_msg = ("Code took more than {0} seconds to run. " "You probably have an infinite loop in" diff --git a/yaksh/evaluator_tests/test_scilab_evaluation.py b/yaksh/evaluator_tests/test_scilab_evaluation.py index b46a65b..bc03e04 100644 --- a/yaksh/evaluator_tests/test_scilab_evaluation.py +++ b/yaksh/evaluator_tests/test_scilab_evaluation.py @@ -12,7 +12,7 @@ class ScilabEvaluationTestCases(unittest.TestCase): def setUp(self): tmp_in_dir_path = tempfile.mkdtemp() self.test_case_data = [{"test_case": "scilab_files/test_add.sce", - "marks": 0.0 + "weightage": 0.0 }] self.in_dir = tmp_in_dir_path self.timeout_msg = ("Code took more than {0} seconds to run. 
" diff --git a/yaksh/java_code_evaluator.py b/yaksh/java_code_evaluator.py index c945a38..05e6405 100644 --- a/yaksh/java_code_evaluator.py +++ b/yaksh/java_code_evaluator.py @@ -47,7 +47,7 @@ class JavaCodeEvaluator(CodeEvaluator): output_path = "{0}{1}.class".format(directory, file_name) return output_path - def compile_code(self, user_answer, file_paths, test_case, marks): + def compile_code(self, user_answer, file_paths, test_case, weightage): if self.compiled_user_answer and self.compiled_test_code: return None else: @@ -96,7 +96,7 @@ class JavaCodeEvaluator(CodeEvaluator): return self.compiled_user_answer, self.compiled_test_code - def check_code(self, user_answer, file_paths, partial_grading, test_case, marks): + def check_code(self, user_answer, file_paths, partial_grading, test_case, weightage): """ Function validates student code using instructor code as reference.The first argument ref_code_path, is the path to instructor code, it is assumed to have executable permission. @@ -117,7 +117,7 @@ class JavaCodeEvaluator(CodeEvaluator): """ success = False - test_case_marks = 0.0 + test_case_weightage = 0.0 proc, stdnt_out, stdnt_stderr = self.compiled_user_answer stdnt_stderr = self._remove_null_substitute_char(stdnt_stderr) @@ -136,7 +136,7 @@ class JavaCodeEvaluator(CodeEvaluator): proc, stdout, stderr = ret if proc.returncode == 0: success, err = True, "Correct answer" - test_case_marks = float(marks) if partial_grading else 0.0 + test_case_weightage = float(weightage) if partial_grading else 0.0 else: err = stdout + "\n" + stderr else: @@ -161,4 +161,4 @@ class JavaCodeEvaluator(CodeEvaluator): err = err + "\n" + e except: err = err + "\n" + stdnt_stderr - return success, err, test_case_marks + return success, err, test_case_weightage diff --git a/yaksh/java_stdio_evaluator.py b/yaksh/java_stdio_evaluator.py index b074e1c..bc50744 100644 --- a/yaksh/java_stdio_evaluator.py +++ b/yaksh/java_stdio_evaluator.py @@ -31,7 +31,7 @@ class JavaStdioEvaluator(StdIOEvaluator): compile_command = 'javac {0}'.format(self.submit_code_path) return compile_command - def compile_code(self, user_answer, file_paths, expected_input, expected_output, marks): + def compile_code(self, user_answer, file_paths, expected_input, expected_output, weightage): if not isfile(self.submit_code_path): msg = "No file at %s or Incorrect path" % self.submit_code_path return False, msg @@ -51,9 +51,9 @@ class JavaStdioEvaluator(StdIOEvaluator): return self.compiled_user_answer def check_code(self, user_answer, file_paths, partial_grading, - expected_input, expected_output, marks): + expected_input, expected_output, weightage): success = False - test_case_marks = 0.0 + test_case_weightage = 0.0 proc, stdnt_out, stdnt_stderr = self.compiled_user_answer stdnt_stderr = self._remove_null_substitute_char(stdnt_stderr) if stdnt_stderr == '' or "error" not in stdnt_stderr: @@ -79,5 +79,5 @@ class JavaStdioEvaluator(StdIOEvaluator): err = err + "\n" + e except: err = err + "\n" + stdnt_stderr - test_case_marks = float(marks) if partial_grading and success else 0.0 - return success, err, test_case_marks + test_case_weightage = float(weightage) if partial_grading and success else 0.0 + return success, err, test_case_weightage diff --git a/yaksh/models.py b/yaksh/models.py index 33c3b42..05528c7 100644 --- a/yaksh/models.py +++ b/yaksh/models.py @@ -325,6 +325,13 @@ class Question(models.Model): return test_case + def get_maximum_test_case_weightage(self, **kwargs): + max_weightage = 0.0 + for test_case in self.get_test_cases(): 
+ max_weightage += test_case.weightage + + return max_weightage + def _add_and_get_files(self, zip_file): files = FileUpload.objects.filter(question=self) files_list = [] @@ -1029,7 +1036,7 @@ class AnswerPaper(models.Model): For code questions success is True only if the answer is correct. """ - result = {'success': True, 'error': 'Incorrect answer', 'marks': 0.0} + result = {'success': True, 'error': 'Incorrect answer', 'weightage': 0.0} correct = False if user_answer is not None: if question.type == 'mcq': @@ -1080,11 +1087,17 @@ class AnswerPaper(models.Model): user_answer.correct = correct user_answer.error = result.get('error') if correct: - user_answer.marks = question.points * result['weightage'] \ + user_answer.marks = (question.points * result['weightage'] / + question.get_maximum_test_case_weightage()) \ if question.partial_grading and question.type == 'code' else question.points + # user_answer.marks = question.points * result['weightage'] \ + # if question.partial_grading and question.type == 'code' else question.points else: - user_answer.marks = question.points * result['weightage'] \ + user_answer.marks = (question.points * result['weightage'] / + question.get_maximum_test_case_weightage()) \ if question.partial_grading and question.type == 'code' else 0 + # user_answer.marks = question.points * result['weightage'] \ + # if question.partial_grading and question.type == 'code' else 0 user_answer.save() self.update_marks('completed') return True, msg @@ -1109,11 +1122,11 @@ class TestCase(models.Model): class StandardTestCase(TestCase): test_case = models.TextField(blank=True) - marks = models.FloatField(default=0.0) + weightage = models.FloatField(default=0.0) def get_field_value(self): return {"test_case": self.test_case, - "marks": self.marks} + "weightage": self.weightage} def __str__(self): return u'Question: {0} | Test Case: {1}'.format(self.question, @@ -1124,12 +1137,12 @@ class StdioBasedTestCase(TestCase): expected_input = models.TextField(blank=True) expected_output = models.TextField() - marks = models.FloatField(default=0.0) + weightage = models.FloatField(default=0.0) def get_field_value(self): return {"expected_output": self.expected_output, "expected_input": self.expected_input, - "marks": self.marks} + "weightage": self.weightage} def __str__(self): return u'Question: {0} | Exp. Output: {1} | Exp.
Input: {2}'.format(self.question, diff --git a/yaksh/python_assertion_evaluator.py b/yaksh/python_assertion_evaluator.py index 350bc38..6503566 100644 --- a/yaksh/python_assertion_evaluator.py +++ b/yaksh/python_assertion_evaluator.py @@ -25,7 +25,7 @@ class PythonAssertionEvaluator(CodeEvaluator): delete_files(self.files) super(PythonAssertionEvaluator, self).teardown() - def compile_code(self, user_answer, file_paths, test_case, marks): + def compile_code(self, user_answer, file_paths, test_case, weightage): if file_paths: self.files = copy_files(file_paths) if self.exec_scope: @@ -36,9 +36,9 @@ class PythonAssertionEvaluator(CodeEvaluator): exec(submitted, self.exec_scope) return self.exec_scope - def check_code(self, user_answer, file_paths, partial_grading, test_case, marks): + def check_code(self, user_answer, file_paths, partial_grading, test_case, weightage): success = False - test_case_marks = 0.0 + test_case_weightage = 0.0 try: tb = None _tests = compile(test_case, '', mode='exec') @@ -54,6 +54,6 @@ class PythonAssertionEvaluator(CodeEvaluator): else: success = True err = 'Correct answer' - test_case_marks = float(marks) if partial_grading else 0.0 + test_case_weightage = float(weightage) if partial_grading else 0.0 del tb - return success, err, test_case_marks + return success, err, test_case_weightage diff --git a/yaksh/python_stdio_evaluator.py b/yaksh/python_stdio_evaluator.py index 7df0ba1..cd8c52a 100644 --- a/yaksh/python_stdio_evaluator.py +++ b/yaksh/python_stdio_evaluator.py @@ -42,7 +42,7 @@ class PythonStdioEvaluator(CodeEvaluator): super(PythonStdioEvaluator, self).teardown() - def compile_code(self, user_answer, file_paths, expected_input, expected_output, marks): + def compile_code(self, user_answer, file_paths, expected_input, expected_output, weightage): if file_paths: self.files = copy_files(file_paths) submitted = compile(user_answer, '', mode='exec') @@ -57,15 +57,16 @@ class PythonStdioEvaluator(CodeEvaluator): self.output_value = output_buffer.getvalue().rstrip("\n") return self.output_value - def check_code(self, user_answer, file_paths, partial_grading, expected_input, expected_output, marks): + def check_code(self, user_answer, file_paths, partial_grading, expected_input, + expected_output, weightage): success = False - test_case_marks = 0.0 + test_case_weightage = 0.0 tb = None if self.output_value == expected_output: success = True err = "Correct answer" - test_case_marks = marks + test_case_weightage = weightage else: success = False err = dedent(""" @@ -73,10 +74,10 @@ class PythonStdioEvaluator(CodeEvaluator): Given input - {0} Expected output - {1} Your output - {2} - """ - .format(expected_input, - expected_output, self.output_value - ) - ) + """.format(expected_input, + expected_output, + self.output_value + ) + ) del tb - return success, err, test_case_marks + return success, err, test_case_weightage diff --git a/yaksh/scilab_code_evaluator.py b/yaksh/scilab_code_evaluator.py index 1aca309..927b84d 100644 --- a/yaksh/scilab_code_evaluator.py +++ b/yaksh/scilab_code_evaluator.py @@ -27,7 +27,7 @@ class ScilabCodeEvaluator(CodeEvaluator): delete_files(self.files) super(ScilabCodeEvaluator, self).teardown() - def check_code(self, user_answer, file_paths, partial_grading, test_case, marks): + def check_code(self, user_answer, file_paths, partial_grading, test_case, weightage): if file_paths: self.files = copy_files(file_paths) ref_code_path = test_case @@ -37,7 +37,7 @@ class ScilabCodeEvaluator(CodeEvaluator): 
self._remove_scilab_exit(user_answer.lstrip()) success = False - test_case_marks = 0.0 + test_case_weightage = 0.0 self.write_to_submit_code_file(self.submit_code_path, user_answer) # Throw message if there are commmands that terminates scilab @@ -65,12 +65,12 @@ class ScilabCodeEvaluator(CodeEvaluator): stdout = self._strip_output(stdout) if proc.returncode == 5: success, err = True, "Correct answer" - test_case_marks = float(marks) if partial_grading else 0.0 + test_case_weightage = float(weightage) if partial_grading else 0.0 else: err = add_err + stdout else: err = add_err + stderr - return success, err, test_case_marks + return success, err, test_case_weightage def _remove_scilab_exit(self, string): """ diff --git a/yaksh/test_models.py b/yaksh/test_models.py index 50d39c1..d05fac3 100644 --- a/yaksh/test_models.py +++ b/yaksh/test_models.py @@ -142,7 +142,7 @@ class QuestionTestCases(unittest.TestCase): self.upload_test_case.save() self.user_answer = "demo_answer" self.test_case_upload_data = [{"test_case": "assert fact(3)==6", - "marks": 0.0 + "weightage": 0.0 }] questions_data = [{"snippet": "def fact()", "active": True, "points": 1.0, @@ -880,7 +880,7 @@ class TestCaseTestCases(unittest.TestCase): answer_data = {"user_answer": "demo_answer", "test_case_data": [ {"test_case": "assert myfunc(12, 13) == 15", - "marks": 0.0 + "weightage": 0.0 } ] } diff --git a/yaksh/tests/test_code_server.py b/yaksh/tests/test_code_server.py index d446444..19560e4 100644 --- a/yaksh/tests/test_code_server.py +++ b/yaksh/tests/test_code_server.py @@ -35,12 +35,12 @@ class TestCodeServer(unittest.TestCase): def setUp(self): self.code_server = CodeServerProxy() - def test_inifinite_loop(self): + def test_infinite_loop(self): # Given testdata = {'user_answer': 'while True: pass', - 'partial_grading': True, + 'partial_grading': False, 'test_case_data': [{'test_case':'assert 1==2', - 'marks': 0.0 + 'weightage': 0.0 } ]} @@ -57,9 +57,9 @@ class TestCodeServer(unittest.TestCase): def test_correct_answer(self): # Given testdata = {'user_answer': 'def f(): return 1', - 'partial_grading': True, + 'partial_grading': False, 'test_case_data': [{'test_case':'assert f() == 1', - 'marks': 0.0 + 'weightage': 0.0 } ]} @@ -76,9 +76,9 @@ class TestCodeServer(unittest.TestCase): def test_wrong_answer(self): # Given testdata = {'user_answer': 'def f(): return 1', - 'partial_grading': True, + 'partial_grading': False, 'test_case_data': [{'test_case':'assert f() == 2', - 'marks': 0.0 + 'weightage': 0.0 } ]} @@ -99,9 +99,9 @@ class TestCodeServer(unittest.TestCase): def run_code(): """Run an infinite loop.""" testdata = {'user_answer': 'while True: pass', - 'partial_grading': True, + 'partial_grading': False, 'test_case_data': [{'test_case':'assert 1==2', - 'marks': 0.0 + 'weightage': 0.0 } ]} result = self.code_server.run_code( diff --git a/yaksh/views.py b/yaksh/views.py index 2478544..aca89ef 100644 --- a/yaksh/views.py +++ b/yaksh/views.py @@ -517,14 +517,16 @@ def check(request, q_id, attempt_num=None, questionpaper_id=None): if question.type == 'code' else None correct, result = paper.validate_answer(user_answer, question, json_data) if correct: - new_answer.marks = question.points * result['marks'] if question.partial_grading \ - and question.type == 'code' else question.points + new_answer.marks = (question.points * result['weightage'] / + question.get_maximum_test_case_weightage()) \ + if question.partial_grading and question.type == 'code' else question.points new_answer.correct = correct new_answer.error = 
result.get('error') else: new_answer.error = result.get('error') - new_answer.marks = question.points * result['marks'] if question.partial_grading \ - and question.type == 'code' else question.points + new_answer.marks = (question.points * result['weightage'] / + question.get_maximum_test_case_weightage()) \ + if question.partial_grading and question.type == 'code' else 0 new_answer.save() paper.update_marks('inprogress') paper.set_end_time(timezone.now()) diff --git a/yaksh/xmlrpc_clients.py b/yaksh/xmlrpc_clients.py index ff0a2a7..53b8c38 100644 --- a/yaksh/xmlrpc_clients.py +++ b/yaksh/xmlrpc_clients.py @@ -63,7 +63,7 @@ class CodeServerProxy(object): result = server.check_code(language, test_case_type, json_data, user_dir) except ConnectionError: result = json.dumps({'success': False, - 'marks': 0.0, + 'weightage': 0.0, 'error': 'Unable to connect to any code servers!'}) return result -- cgit From 2100ef108b7119370051f8117c3bb58315fad270 Mon Sep 17 00:00:00 2001 From: ankitjavalkar Date: Thu, 10 Nov 2016 17:49:24 +0530 Subject: Fix minor errors, fix template rendering in grade user --- yaksh/code_evaluator.py | 4 ++-- yaksh/models.py | 4 ---- yaksh/templates/yaksh/user_data.html | 2 +- 3 files changed, 3 insertions(+), 7 deletions(-) (limited to 'yaksh') diff --git a/yaksh/code_evaluator.py b/yaksh/code_evaluator.py index fda0a8d..b39a1d7 100644 --- a/yaksh/code_evaluator.py +++ b/yaksh/code_evaluator.py @@ -86,10 +86,10 @@ class CodeEvaluator(object): """ self.setup() - success, error, marks = self.safe_evaluate(**kwargs) + success, error, weightage = self.safe_evaluate(**kwargs) self.teardown() - result = {'success': success, 'error': error, 'marks': marks} + result = {'success': success, 'error': error, 'weightage': weightage} return result # Private Protocol ########## diff --git a/yaksh/models.py b/yaksh/models.py index 05528c7..bdcc43e 100644 --- a/yaksh/models.py +++ b/yaksh/models.py @@ -1090,14 +1090,10 @@ class AnswerPaper(models.Model): user_answer.marks = (question.points * result['weightage'] / question.get_maximum_test_case_weightage()) \ if question.partial_grading and question.type == 'code' else question.points - # user_answer.marks = question.points * result['weightage'] \ - # if question.partial_grading and question.type == 'code' else question.points else: user_answer.marks = (question.points * result['weightage'] / question.get_maximum_test_case_weightage()) \ if question.partial_grading and question.type == 'code' else 0 - # user_answer.marks = question.points * result['weightage'] \ - # if question.partial_grading and question.type == 'code' else 0 user_answer.save() self.update_marks('completed') return True, msg diff --git a/yaksh/templates/yaksh/user_data.html b/yaksh/templates/yaksh/user_data.html index 0a7e4aa..378e7fd 100644 --- a/yaksh/templates/yaksh/user_data.html +++ b/yaksh/templates/yaksh/user_data.html @@ -94,7 +94,7 @@ User IP address: {{ paper.user_ip }}
Student answer:
{% for answer in answers %} {% if not answer.skipped %} - {% if "Correct answer" in answer.error %} + {% if answer.correct %}
{% else %}
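With the point-based scheme above, an answer's marks are the question's points scaled by the weightage earned against the total weightage available. A worked sketch of that computation (the standalone function is hypothetical but mirrors the AnswerPaper logic; it assumes at least one test case carries non-zero weightage, since the real code divides by the total):

    def compute_marks(points, earned_weightage, total_weightage,
                      partial_grading, correct):
        if not partial_grading:
            # Without partial grading the question is all or nothing.
            return points if correct else 0.0
        # Assumes total_weightage > 0; test cases that all carry zero
        # weightage would make this divide by zero.
        return points * earned_weightage / total_weightage

    # A 10-point question with test cases weighted 1.0, 1.0 and 2.0:
    # passing only the first two earns 10 * (1.0 + 1.0) / 4.0 = 5.0 marks.
    assert compute_marks(10.0, 2.0, 4.0, True, False) == 5.0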
-- cgit From 232c4480d47b75fcdb523e5ebb601959c56e40dc Mon Sep 17 00:00:00 2001 From: ankitjavalkar Date: Wed, 16 Nov 2016 10:58:25 +0530 Subject: Fix docstrings, Fix return values of bash evaluator --- yaksh/bash_code_evaluator.py | 23 +++++++++++------------ yaksh/xmlrpc_clients.py | 7 ++++++- 2 files changed, 17 insertions(+), 13 deletions(-) (limited to 'yaksh') diff --git a/yaksh/bash_code_evaluator.py b/yaksh/bash_code_evaluator.py index 978586f..dd4445c 100644 --- a/yaksh/bash_code_evaluator.py +++ b/yaksh/bash_code_evaluator.py @@ -49,9 +49,11 @@ class BashCodeEvaluator(CodeEvaluator): Returns (False, error_msg): If mandatory arguments are not files or if the required permissions are not given to the file(s). - """ ref_code_path = test_case + success = False + test_case_weightage = 0.0 + get_ref_path, get_test_case_path = ref_code_path.strip().split(',') get_ref_path = get_ref_path.strip() get_test_case_path = get_test_case_path.strip() @@ -62,19 +64,16 @@ class BashCodeEvaluator(CodeEvaluator): self.files = copy_files(file_paths) if not isfile(clean_ref_code_path): msg = "No file at %s or Incorrect path" % clean_ref_code_path - return False, msg + return False, msg, 0.0 if not isfile(self.submit_code_path): msg = "No file at %s or Incorrect path" % self.submit_code_path - return False, msg + return False, msg, 0.0 if not os.access(clean_ref_code_path, os.X_OK): msg = "Script %s is not executable" % clean_ref_code_path - return False, msg + return False, msg, 0.0 if not os.access(self.submit_code_path, os.X_OK): msg = "Script %s is not executable" % self.submit_code_path - return False, msg - - success = False - test_case_weightage = 0.0 + return False, msg, 0.0 user_answer = user_answer.replace("\r", "") self.write_to_submit_code_file(self.submit_code_path, user_answer) @@ -99,14 +98,14 @@ class BashCodeEvaluator(CodeEvaluator): err = "Error: expected %s, got %s" % (inst_stderr, stdnt_stderr ) - return False, err, test_case_weightage + return False, err, 0.0 else: if not isfile(clean_test_case_path): msg = "No test case at %s" % clean_test_case_path - return False, msg, test_case_weightage + return False, msg, 0.0 if not os.access(clean_ref_code_path, os.R_OK): msg = "Test script %s, not readable" % clean_test_case_path - return False, msg, test_case_weightage + return False, msg, 0.0 # valid_answer is True, so that we can stop once a test case fails valid_answer = True # loop_count has to be greater than or equal to one. @@ -143,4 +142,4 @@ class BashCodeEvaluator(CodeEvaluator): " {0}, got {1}").format(inst_stdout+inst_stderr, stdnt_stdout+stdnt_stderr ) - return False, err, test_case_weightage + return False, err, 0.0 diff --git a/yaksh/xmlrpc_clients.py b/yaksh/xmlrpc_clients.py index 53b8c38..437dbcb 100644 --- a/yaksh/xmlrpc_clients.py +++ b/yaksh/xmlrpc_clients.py @@ -55,7 +55,12 @@ class CodeServerProxy(object): Returns ------- - A json string of a dict: {success: success, err: error message}. 
+ A json string of a dict containing: + {"success": success, "weightage": weightage, "error": error message} + + success - Boolean, indicating whether the code executed successfully and correctly + weightage - Float, indicating total weightage of all successful test cases + error - String, error message if success is false """ try: -- cgit From 232c4480d47b75fcdb523e5ebb601959c56e40dc Mon Sep 17 00:00:00 2001 From: ankitjavalkar Date: Wed, 16 Nov 2016 11:25:30 +0530 Subject: Change test_case weightage field name to weight --- yaksh/bash_code_evaluator.py | 12 ++++++------ yaksh/bash_stdio_evaluator.py | 10 +++++----- yaksh/code_evaluator.py | 14 ++++++------- yaksh/cpp_code_evaluator.py | 10 +++++----- yaksh/cpp_stdio_evaluator.py | 10 +++++----- yaksh/evaluator_tests/test_bash_evaluation.py | 12 ++++++------ yaksh/evaluator_tests/test_c_cpp_evaluation.py | 18 ++++++++--------- yaksh/evaluator_tests/test_java_evaluation.py | 14 ++++++------- yaksh/evaluator_tests/test_python_evaluation.py | 26 ++++++++++++------------- yaksh/evaluator_tests/test_scilab_evaluation.py | 2 +- yaksh/java_code_evaluator.py | 10 +++++----- yaksh/java_stdio_evaluator.py | 10 +++++----- yaksh/models.py | 26 ++++++++++++------------- yaksh/python_assertion_evaluator.py | 10 +++++----- yaksh/python_stdio_evaluator.py | 10 +++++----- yaksh/scilab_code_evaluator.py | 8 ++++---- yaksh/test_models.py | 4 ++-- yaksh/tests/test_code_server.py | 8 ++++---- yaksh/views.py | 8 ++++---- yaksh/xmlrpc_clients.py | 6 +++--- 20 files changed, 114 insertions(+), 114 deletions(-) (limited to 'yaksh') diff --git a/yaksh/bash_code_evaluator.py b/yaksh/bash_code_evaluator.py index dd4445c..7575725 100644 --- a/yaksh/bash_code_evaluator.py +++ b/yaksh/bash_code_evaluator.py @@ -28,7 +28,7 @@ class BashCodeEvaluator(CodeEvaluator): delete_files(self.files) super(BashCodeEvaluator, self).teardown() - def check_code(self, user_answer, file_paths, partial_grading, test_case, weightage): + def check_code(self, user_answer, file_paths, partial_grading, test_case, weight): """ Function validates student script using instructor script as reference. Test cases can optionally be provided. The first argument ref_path, is the path to instructor script, it is assumed to
The first argument ref_path, is the path to instructor script, it is assumed to @@ -52,7 +52,7 @@ class BashCodeEvaluator(CodeEvaluator): """ ref_code_path = test_case success = False - test_case_weightage = 0.0 + test_case_weight = 0.0 get_ref_path, get_test_case_path = ref_code_path.strip().split(',') get_ref_path = get_ref_path.strip() @@ -92,8 +92,8 @@ class BashCodeEvaluator(CodeEvaluator): ) proc, stdnt_stdout, stdnt_stderr = ret if inst_stdout == stdnt_stdout: - test_case_weightage = float(weightage) if partial_grading else 0.0 - return True, "Correct answer", test_case_weightage + test_case_weight = float(weight) if partial_grading else 0.0 + return True, "Correct answer", test_case_weight else: err = "Error: expected %s, got %s" % (inst_stderr, stdnt_stderr @@ -135,8 +135,8 @@ class BashCodeEvaluator(CodeEvaluator): proc, stdnt_stdout, stdnt_stderr = ret valid_answer = inst_stdout == stdnt_stdout if valid_answer and (num_lines == loop_count): - test_case_weightage = float(weightage) if partial_grading else 0.0 - return True, "Correct answer", test_case_weightage + test_case_weight = float(weight) if partial_grading else 0.0 + return True, "Correct answer", test_case_weight else: err = ("Error:expected" " {0}, got {1}").format(inst_stdout+inst_stderr, diff --git a/yaksh/bash_stdio_evaluator.py b/yaksh/bash_stdio_evaluator.py index fab19bf..1dd9fd5 100644 --- a/yaksh/bash_stdio_evaluator.py +++ b/yaksh/bash_stdio_evaluator.py @@ -23,7 +23,7 @@ class BashStdioEvaluator(StdIOEvaluator): delete_files(self.files) super(BashStdioEvaluator, self).teardown() - def compile_code(self, user_answer, file_paths, expected_input, expected_output, weightage): + def compile_code(self, user_answer, file_paths, expected_input, expected_output, weight): if file_paths: self.files = copy_files(file_paths) if not isfile(self.submit_code_path): @@ -34,9 +34,9 @@ class BashStdioEvaluator(StdIOEvaluator): self.write_to_submit_code_file(self.submit_code_path, user_answer) def check_code(self, user_answer, file_paths, partial_grading, - expected_input, expected_output, weightage): + expected_input, expected_output, weight): success = False - test_case_weightage = 0.0 + test_case_weight = 0.0 expected_input = str(expected_input).replace('\r', '') proc = subprocess.Popen("bash ./Test.sh", @@ -49,5 +49,5 @@ class BashStdioEvaluator(StdIOEvaluator): expected_input, expected_output ) - test_case_weightage = float(weightage) if partial_grading and success else 0.0 - return success, err, test_case_weightage + test_case_weight = float(weight) if partial_grading and success else 0.0 + return success, err, test_case_weight diff --git a/yaksh/code_evaluator.py b/yaksh/code_evaluator.py index b39a1d7..50fc546 100644 --- a/yaksh/code_evaluator.py +++ b/yaksh/code_evaluator.py @@ -82,14 +82,14 @@ class CodeEvaluator(object): Returns ------- - A tuple: (success, error message, weightage). + A tuple: (success, error message, weight). """ self.setup() - success, error, weightage = self.safe_evaluate(**kwargs) + success, error, weight = self.safe_evaluate(**kwargs) self.teardown() - result = {'success': success, 'error': error, 'weightage': weightage} + result = {'success': success, 'error': error, 'weight': weight} return result # Private Protocol ########## @@ -109,20 +109,20 @@ class CodeEvaluator(object): prev_handler = create_signal_handler() success = False error = "" - weightage = 0 + weight = 0 # Do whatever testing needed. 
try: for test_case in test_case_data: success = False self.compile_code(user_answer, file_paths, **test_case) - success, err, test_case_weightage = self.check_code(user_answer, + success, err, test_case_weight = self.check_code(user_answer, file_paths, partial_grading, **test_case ) if success: - weightage += test_case_weightage + weight += test_case_weight error = err else: error += err + "\n" @@ -142,7 +142,7 @@ class CodeEvaluator(object): # Set back any original signal handler. set_original_signal_handler(prev_handler) - return success, error, weightage + return success, error, weight def teardown(self): # Cancel the signal diff --git a/yaksh/cpp_code_evaluator.py b/yaksh/cpp_code_evaluator.py index f069b03..716a522 100644 --- a/yaksh/cpp_code_evaluator.py +++ b/yaksh/cpp_code_evaluator.py @@ -50,7 +50,7 @@ class CppCodeEvaluator(CodeEvaluator): ref_output_path) return compile_command, compile_main - def compile_code(self, user_answer, file_paths, test_case, weightage): + def compile_code(self, user_answer, file_paths, test_case, weight): if self.compiled_user_answer and self.compiled_test_code: return None else: @@ -89,7 +89,7 @@ class CppCodeEvaluator(CodeEvaluator): return self.compiled_user_answer, self.compiled_test_code - def check_code(self, user_answer, file_paths, partial_grading, test_case, weightage): + def check_code(self, user_answer, file_paths, partial_grading, test_case, weight): """ Function validates student code using instructor code as reference.The first argument ref_code_path, is the path to instructor code, it is assumed to have executable permission. @@ -109,7 +109,7 @@ class CppCodeEvaluator(CodeEvaluator): if the required permissions are not given to the file(s). """ success = False - test_case_weightage = 0.0 + test_case_weight = 0.0 proc, stdnt_out, stdnt_stderr = self.compiled_user_answer stdnt_stderr = self._remove_null_substitute_char(stdnt_stderr) @@ -129,7 +129,7 @@ class CppCodeEvaluator(CodeEvaluator): proc, stdout, stderr = ret if proc.returncode == 0: success, err = True, "Correct answer" - test_case_weightage = float(weightage) if partial_grading else 0.0 + test_case_weight = float(weight) if partial_grading else 0.0 else: err = "{0} \n {1}".format(stdout, stderr) else: @@ -155,4 +155,4 @@ class CppCodeEvaluator(CodeEvaluator): except: err = "{0} \n {1}".format(err, stdnt_stderr) - return success, err, test_case_weightage + return success, err, test_case_weight diff --git a/yaksh/cpp_stdio_evaluator.py b/yaksh/cpp_stdio_evaluator.py index 050cec8..00fad92 100644 --- a/yaksh/cpp_stdio_evaluator.py +++ b/yaksh/cpp_stdio_evaluator.py @@ -35,7 +35,7 @@ class CppStdioEvaluator(StdIOEvaluator): ref_output_path) return compile_command, compile_main - def compile_code(self, user_answer, file_paths, expected_input, expected_output, weightage): + def compile_code(self, user_answer, file_paths, expected_input, expected_output, weight): if file_paths: self.files = copy_files(file_paths) if not isfile(self.submit_code_path): @@ -62,9 +62,9 @@ class CppStdioEvaluator(StdIOEvaluator): return self.compiled_user_answer, self.compiled_test_code def check_code(self, user_answer, file_paths, partial_grading, - expected_input, expected_output, weightage): + expected_input, expected_output, weight): success = False - test_case_weightage = 0.0 + test_case_weight = 0.0 proc, stdnt_out, stdnt_stderr = self.compiled_user_answer stdnt_stderr = self._remove_null_substitute_char(stdnt_stderr) @@ -106,5 +106,5 @@ class CppStdioEvaluator(StdIOEvaluator): err = err + "\n" + e 
except: err = err + "\n" + stdnt_stderr - test_case_weightage = float(weightage) if partial_grading and success else 0.0 - return success, err, test_case_weightage + test_case_weight = float(weight) if partial_grading and success else 0.0 + return success, err, test_case_weight diff --git a/yaksh/evaluator_tests/test_bash_evaluation.py b/yaksh/evaluator_tests/test_bash_evaluation.py index 6c90d3c..df24d4d 100644 --- a/yaksh/evaluator_tests/test_bash_evaluation.py +++ b/yaksh/evaluator_tests/test_bash_evaluation.py @@ -15,7 +15,7 @@ class BashAssertionEvaluationTestCases(unittest.TestCase): f.write('2'.encode('ascii')) self.test_case_data = [ {"test_case": "bash_files/sample.sh,bash_files/sample.args", - "weightage": 0.0 + "weight": 0.0 } ] self.in_dir = tempfile.mkdtemp() @@ -72,7 +72,7 @@ class BashAssertionEvaluationTestCases(unittest.TestCase): self.file_paths = [('/tmp/test.txt', False)] self.test_case_data = [ {"test_case": "bash_files/sample1.sh,bash_files/sample1.args", - "weightage": 0.0 + "weight": 0.0 } ] user_answer = ("#!/bin/bash\ncat $1") @@ -102,7 +102,7 @@ class BashStdioEvaluationTestCases(unittest.TestCase): ) test_case_data = [{'expected_output': '11', 'expected_input': '5\n6', - 'weightage': 0.0 + 'weight': 0.0 }] get_class = BashStdioEvaluator() kwargs = {"user_answer": user_answer, @@ -124,7 +124,7 @@ class BashStdioEvaluationTestCases(unittest.TestCase): ) test_case_data = [{'expected_output': '1 2 3\n4 5 6\n7 8 9\n', 'expected_input': '1,2,3\n4,5,6\n7,8,9', - 'weightage': 0.0 + 'weight': 0.0 }] get_class = BashStdioEvaluator() kwargs = {"user_answer": user_answer, @@ -144,7 +144,7 @@ class BashStdioEvaluationTestCases(unittest.TestCase): ) test_case_data = [{'expected_output': '11', 'expected_input': '5\n6', - 'weightage': 0.0 + 'weight': 0.0 }] get_class = BashStdioEvaluator() kwargs = {"user_answer": user_answer, @@ -164,7 +164,7 @@ class BashStdioEvaluationTestCases(unittest.TestCase): ) test_case_data = [{'expected_output': '10', 'expected_input': '', - 'weightage': 0.0 + 'weight': 0.0 }] get_class = BashStdioEvaluator() kwargs = {"user_answer": user_answer, diff --git a/yaksh/evaluator_tests/test_c_cpp_evaluation.py b/yaksh/evaluator_tests/test_c_cpp_evaluation.py index f58833a..87b2653 100644 --- a/yaksh/evaluator_tests/test_c_cpp_evaluation.py +++ b/yaksh/evaluator_tests/test_c_cpp_evaluation.py @@ -15,7 +15,7 @@ class CAssertionEvaluationTestCases(unittest.TestCase): f.write('2'.encode('ascii')) tmp_in_dir_path = tempfile.mkdtemp() self.test_case_data = [{"test_case": "c_cpp_files/main.cpp", - "weightage": 0.0 + "weight": 0.0 }] self.in_dir = tmp_in_dir_path self.timeout_msg = ("Code took more than {0} seconds to run. " @@ -80,7 +80,7 @@ class CAssertionEvaluationTestCases(unittest.TestCase): def test_file_based_assert(self): self.file_paths = [('/tmp/test.txt', False)] self.test_case_data = [{"test_case": "c_cpp_files/file_data.c", - "weightage": 0.0 + "weight": 0.0 }] user_answer = dedent(""" #include @@ -108,7 +108,7 @@ class CppStdioEvaluationTestCases(unittest.TestCase): def setUp(self): self.test_case_data = [{'expected_output': '11', 'expected_input': '5\n6', - 'weightage': 0.0 + 'weight': 0.0 }] self.in_dir = tempfile.mkdtemp() self.timeout_msg = ("Code took more than {0} seconds to run. 
" @@ -135,7 +135,7 @@ class CppStdioEvaluationTestCases(unittest.TestCase): def test_array_input(self): self.test_case_data = [{'expected_output': '561', 'expected_input': '5\n6\n1', - 'weightage': 0.0 + 'weight': 0.0 }] user_answer = dedent(""" #include @@ -158,7 +158,7 @@ class CppStdioEvaluationTestCases(unittest.TestCase): def test_string_input(self): self.test_case_data = [{'expected_output': 'abc', 'expected_input': 'abc', - 'weightage': 0.0 + 'weight': 0.0 }] user_answer = dedent(""" #include @@ -229,7 +229,7 @@ class CppStdioEvaluationTestCases(unittest.TestCase): def test_only_stdout(self): self.test_case_data = [{'expected_output': '11', 'expected_input': '', - 'weightage': 0.0 + 'weight': 0.0 }] user_answer = dedent(""" #include @@ -267,7 +267,7 @@ class CppStdioEvaluationTestCases(unittest.TestCase): def test_cpp_array_input(self): self.test_case_data = [{'expected_output': '561', 'expected_input': '5\n6\n1', - 'weightage': 0.0 + 'weight': 0.0 }] user_answer = dedent(""" #include @@ -291,7 +291,7 @@ class CppStdioEvaluationTestCases(unittest.TestCase): def test_cpp_string_input(self): self.test_case_data = [{'expected_output': 'abc', 'expected_input': 'abc', - 'weightage': 0.0 + 'weight': 0.0 }] user_answer = dedent(""" #include @@ -366,7 +366,7 @@ class CppStdioEvaluationTestCases(unittest.TestCase): def test_cpp_only_stdout(self): self.test_case_data = [{'expected_output': '11', 'expected_input': '', - 'weightage': 0.0 + 'weight': 0.0 }] user_answer = dedent(""" #include diff --git a/yaksh/evaluator_tests/test_java_evaluation.py b/yaksh/evaluator_tests/test_java_evaluation.py index 142f0bf..d410052 100644 --- a/yaksh/evaluator_tests/test_java_evaluation.py +++ b/yaksh/evaluator_tests/test_java_evaluation.py @@ -17,7 +17,7 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase): tmp_in_dir_path = tempfile.mkdtemp() self.test_case_data = [ {"test_case": "java_files/main_square.java", - "weightage": 0.0 + "weight": 0.0 } ] self.in_dir = tmp_in_dir_path @@ -86,7 +86,7 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase): self.file_paths = [("/tmp/test.txt", False)] self.test_case_data = [ {"test_case": "java_files/read_file.java", - "weightage": 0.0 + "weight": 0.0 } ] user_answer = dedent(""" @@ -126,7 +126,7 @@ class JavaStdioEvaluationTestCases(unittest.TestCase): self.in_dir = tmp_in_dir_path self.test_case_data = [{'expected_output': '11', 'expected_input': '5\n6', - 'weightage': 0.0 + 'weight': 0.0 }] evaluator.SERVER_TIMEOUT = 4 self.timeout_msg = ("Code took more than {0} seconds to run. 
" @@ -161,7 +161,7 @@ class JavaStdioEvaluationTestCases(unittest.TestCase): self.test_case_data = [{'expected_output': '561', 'expected_input': '5\n6\n1', - 'weightage': 0.0 + 'weight': 0.0 }] user_answer = dedent(""" import java.util.Scanner; @@ -241,7 +241,7 @@ class JavaStdioEvaluationTestCases(unittest.TestCase): def test_only_stdout(self): self.test_case_data = [{'expected_output': '11', 'expected_input': '', - 'weightage': 0.0 + 'weight': 0.0 }] user_answer = dedent(""" class Test @@ -262,7 +262,7 @@ class JavaStdioEvaluationTestCases(unittest.TestCase): def test_string_input(self): self.test_case_data = [{'expected_output': 'HelloWorld', 'expected_input': 'Hello\nWorld', - 'weightage': 0.0 + 'weight': 0.0 }] user_answer = dedent(""" import java.util.Scanner; @@ -286,7 +286,7 @@ class JavaStdioEvaluationTestCases(unittest.TestCase): self.file_paths = [("/tmp/test.txt", False)] self.test_case_data = [{'expected_output': '2', 'expected_input': '', - 'weightage': 0.0 + 'weight': 0.0 }] user_answer = dedent(""" import java.io.BufferedReader; diff --git a/yaksh/evaluator_tests/test_python_evaluation.py b/yaksh/evaluator_tests/test_python_evaluation.py index 690f474..a0e3713 100644 --- a/yaksh/evaluator_tests/test_python_evaluation.py +++ b/yaksh/evaluator_tests/test_python_evaluation.py @@ -17,9 +17,9 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): f.write('2'.encode('ascii')) tmp_in_dir_path = tempfile.mkdtemp() self.in_dir = tmp_in_dir_path - self.test_case_data = [{"test_case": 'assert(add(1,2)==3)', 'weightage': 0.0}, - {"test_case": 'assert(add(-1,2)==1)', 'weightage': 0.0}, - {"test_case": 'assert(add(-1,-2)==-3)', 'weightage': 0.0}, + self.test_case_data = [{"test_case": 'assert(add(1,2)==3)', 'weight': 0.0}, + {"test_case": 'assert(add(-1,2)==1)', 'weight': 0.0}, + {"test_case": 'assert(add(-1,-2)==-3)', 'weight': 0.0}, ] self.timeout_msg = ("Code took more than {0} seconds to run. 
" "You probably have an infinite loop in" @@ -260,7 +260,7 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): def test_file_based_assert(self): # Given - self.test_case_data = [{"test_case": "assert(ans()=='2')", "weightage": 0.0}] + self.test_case_data = [{"test_case": "assert(ans()=='2')", "weight": 0.0}] self.file_paths = [('/tmp/test.txt', False)] user_answer = dedent(""" def ans(): @@ -287,7 +287,7 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): user_answer = "def palindrome(a):\n\treturn a == a[::-1]" test_case_data = [{"test_case": 's="abbb"\nasert palindrome(s)==False', - "weightage": 0.0 + "weight": 0.0 } ] syntax_error_msg = ["Traceback", @@ -322,10 +322,10 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): # Given user_answer = "def palindrome(a):\n\treturn a == a[::-1]" test_case_data = [{"test_case": 'assert(palindrome("abba")==True)', - "weightage": 0.0 + "weight": 0.0 }, {"test_case": 's="abbb"\nassert palindrome(S)==False', - "weightage": 0.0 + "weight": 0.0 } ] name_error_msg = ["Traceback", @@ -364,7 +364,7 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase): # Given self.test_case_data = [{"expected_input": "1\n2", "expected_output": "3", - "weightage": 0.0 + "weight": 0.0 }] user_answer = dedent(""" a = int(input()) @@ -389,7 +389,7 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase): # Given self.test_case_data = [{"expected_input": "1,2,3\n5,6,7", "expected_output": "[1, 2, 3, 5, 6, 7]", - "weightage": 0.0 + "weight": 0.0 }] user_answer = dedent(""" from six.moves import input @@ -417,7 +417,7 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase): # Given self.test_case_data = [{"expected_input": ("the quick brown fox jumps over the lazy dog\nthe"), "expected_output": "2", - "weightage": 0.0 + "weight": 0.0 }] user_answer = dedent(""" from six.moves import input @@ -443,7 +443,7 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase): # Given self.test_case_data = [{"expected_input": "1\n2", "expected_output": "3", - "weightage": 0.0 + "weight": 0.0 }] user_answer = dedent(""" a = int(input()) @@ -468,7 +468,7 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase): # Given self.test_case_data = [{"expected_input": "", "expected_output": "2", - "weightage": 0.0 + "weight": 0.0 }] self.file_paths = [('/tmp/test.txt', False)] @@ -496,7 +496,7 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase): # Given test_case_data = [{"expected_input": "1\n2", "expected_output": "3", - "weightage": 0.0 + "weight": 0.0 }] timeout_msg = ("Code took more than {0} seconds to run. " "You probably have an infinite loop in" diff --git a/yaksh/evaluator_tests/test_scilab_evaluation.py b/yaksh/evaluator_tests/test_scilab_evaluation.py index bc03e04..5f02601 100644 --- a/yaksh/evaluator_tests/test_scilab_evaluation.py +++ b/yaksh/evaluator_tests/test_scilab_evaluation.py @@ -12,7 +12,7 @@ class ScilabEvaluationTestCases(unittest.TestCase): def setUp(self): tmp_in_dir_path = tempfile.mkdtemp() self.test_case_data = [{"test_case": "scilab_files/test_add.sce", - "weightage": 0.0 + "weight": 0.0 }] self.in_dir = tmp_in_dir_path self.timeout_msg = ("Code took more than {0} seconds to run. 
" diff --git a/yaksh/java_code_evaluator.py b/yaksh/java_code_evaluator.py index 05e6405..d87e6e3 100644 --- a/yaksh/java_code_evaluator.py +++ b/yaksh/java_code_evaluator.py @@ -47,7 +47,7 @@ class JavaCodeEvaluator(CodeEvaluator): output_path = "{0}{1}.class".format(directory, file_name) return output_path - def compile_code(self, user_answer, file_paths, test_case, weightage): + def compile_code(self, user_answer, file_paths, test_case, weight): if self.compiled_user_answer and self.compiled_test_code: return None else: @@ -96,7 +96,7 @@ class JavaCodeEvaluator(CodeEvaluator): return self.compiled_user_answer, self.compiled_test_code - def check_code(self, user_answer, file_paths, partial_grading, test_case, weightage): + def check_code(self, user_answer, file_paths, partial_grading, test_case, weight): """ Function validates student code using instructor code as reference.The first argument ref_code_path, is the path to instructor code, it is assumed to have executable permission. @@ -117,7 +117,7 @@ class JavaCodeEvaluator(CodeEvaluator): """ success = False - test_case_weightage = 0.0 + test_case_weight = 0.0 proc, stdnt_out, stdnt_stderr = self.compiled_user_answer stdnt_stderr = self._remove_null_substitute_char(stdnt_stderr) @@ -136,7 +136,7 @@ class JavaCodeEvaluator(CodeEvaluator): proc, stdout, stderr = ret if proc.returncode == 0: success, err = True, "Correct answer" - test_case_weightage = float(weightage) if partial_grading else 0.0 + test_case_weight = float(weight) if partial_grading else 0.0 else: err = stdout + "\n" + stderr else: @@ -161,4 +161,4 @@ class JavaCodeEvaluator(CodeEvaluator): err = err + "\n" + e except: err = err + "\n" + stdnt_stderr - return success, err, test_case_weightage + return success, err, test_case_weight diff --git a/yaksh/java_stdio_evaluator.py b/yaksh/java_stdio_evaluator.py index bc50744..88d4c88 100644 --- a/yaksh/java_stdio_evaluator.py +++ b/yaksh/java_stdio_evaluator.py @@ -31,7 +31,7 @@ class JavaStdioEvaluator(StdIOEvaluator): compile_command = 'javac {0}'.format(self.submit_code_path) return compile_command - def compile_code(self, user_answer, file_paths, expected_input, expected_output, weightage): + def compile_code(self, user_answer, file_paths, expected_input, expected_output, weight): if not isfile(self.submit_code_path): msg = "No file at %s or Incorrect path" % self.submit_code_path return False, msg @@ -51,9 +51,9 @@ class JavaStdioEvaluator(StdIOEvaluator): return self.compiled_user_answer def check_code(self, user_answer, file_paths, partial_grading, - expected_input, expected_output, weightage): + expected_input, expected_output, weight): success = False - test_case_weightage = 0.0 + test_case_weight = 0.0 proc, stdnt_out, stdnt_stderr = self.compiled_user_answer stdnt_stderr = self._remove_null_substitute_char(stdnt_stderr) if stdnt_stderr == '' or "error" not in stdnt_stderr: @@ -79,5 +79,5 @@ class JavaStdioEvaluator(StdIOEvaluator): err = err + "\n" + e except: err = err + "\n" + stdnt_stderr - test_case_weightage = float(weightage) if partial_grading and success else 0.0 - return success, err, test_case_weightage + test_case_weight = float(weight) if partial_grading and success else 0.0 + return success, err, test_case_weight diff --git a/yaksh/models.py b/yaksh/models.py index bdcc43e..8907df0 100644 --- a/yaksh/models.py +++ b/yaksh/models.py @@ -325,12 +325,12 @@ class Question(models.Model): return test_case - def get_maximum_test_case_weightage(self, **kwargs): - max_weightage = 0.0 + def 
get_maximum_test_case_weight(self, **kwargs): + max_weight = 0.0 for test_case in self.get_test_cases(): - max_weightage += test_case.weightage + max_weight += test_case.weight - return max_weightage + return max_weight def _add_and_get_files(self, zip_file): files = FileUpload.objects.filter(question=self) @@ -1036,7 +1036,7 @@ class AnswerPaper(models.Model): For code questions success is True only if the answer is correct. """ - result = {'success': True, 'error': 'Incorrect answer', 'weightage': 0.0} + result = {'success': True, 'error': 'Incorrect answer', 'weight': 0.0} correct = False if user_answer is not None: if question.type == 'mcq': @@ -1087,12 +1087,12 @@ class AnswerPaper(models.Model): user_answer.correct = correct user_answer.error = result.get('error') if correct: - user_answer.marks = (question.points * result['weightage'] / - question.get_maximum_test_case_weightage()) \ + user_answer.marks = (question.points * result['weight'] / + question.get_maximum_test_case_weight()) \ if question.partial_grading and question.type == 'code' else question.points else: - user_answer.marks = (question.points * result['weightage'] / - question.get_maximum_test_case_weightage()) \ + user_answer.marks = (question.points * result['weight'] / + question.get_maximum_test_case_weight()) \ if question.partial_grading and question.type == 'code' else 0 user_answer.save() self.update_marks('completed') @@ -1118,11 +1118,11 @@ class TestCase(models.Model): class StandardTestCase(TestCase): test_case = models.TextField(blank=True) - weightage = models.FloatField(default=0.0) + weight = models.FloatField(default=0.0) def get_field_value(self): return {"test_case": self.test_case, - "weightage": self.weightage} + "weight": self.weight} def __str__(self): return u'Question: {0} | Test Case: {1}'.format(self.question, @@ -1133,12 +1133,12 @@ class StandardTestCase(TestCase): class StdioBasedTestCase(TestCase): expected_input = models.TextField(blank=True) expected_output = models.TextField() - weightage = models.IntegerField(default=0.0) + weight = models.IntegerField(default=0.0) def get_field_value(self): return {"expected_output": self.expected_output, "expected_input": self.expected_input, - "weightage": self.weightage} + "weight": self.weight} def __str__(self): return u'Question: {0} | Exp. Output: {1} | Exp. 
Input: {2}'.format(self.question, diff --git a/yaksh/python_assertion_evaluator.py b/yaksh/python_assertion_evaluator.py index 6503566..8924643 100644 --- a/yaksh/python_assertion_evaluator.py +++ b/yaksh/python_assertion_evaluator.py @@ -25,7 +25,7 @@ class PythonAssertionEvaluator(CodeEvaluator): delete_files(self.files) super(PythonAssertionEvaluator, self).teardown() - def compile_code(self, user_answer, file_paths, test_case, weightage): + def compile_code(self, user_answer, file_paths, test_case, weight): if file_paths: self.files = copy_files(file_paths) if self.exec_scope: @@ -36,9 +36,9 @@ class PythonAssertionEvaluator(CodeEvaluator): exec(submitted, self.exec_scope) return self.exec_scope - def check_code(self, user_answer, file_paths, partial_grading, test_case, weightage): + def check_code(self, user_answer, file_paths, partial_grading, test_case, weight): success = False - test_case_weightage = 0.0 + test_case_weight = 0.0 try: tb = None _tests = compile(test_case, '', mode='exec') @@ -54,6 +54,6 @@ class PythonAssertionEvaluator(CodeEvaluator): else: success = True err = 'Correct answer' - test_case_weightage = float(weightage) if partial_grading else 0.0 + test_case_weight = float(weight) if partial_grading else 0.0 del tb - return success, err, test_case_weightage + return success, err, test_case_weight diff --git a/yaksh/python_stdio_evaluator.py b/yaksh/python_stdio_evaluator.py index cd8c52a..1506685 100644 --- a/yaksh/python_stdio_evaluator.py +++ b/yaksh/python_stdio_evaluator.py @@ -42,7 +42,7 @@ class PythonStdioEvaluator(CodeEvaluator): super(PythonStdioEvaluator, self).teardown() - def compile_code(self, user_answer, file_paths, expected_input, expected_output, weightage): + def compile_code(self, user_answer, file_paths, expected_input, expected_output, weight): if file_paths: self.files = copy_files(file_paths) submitted = compile(user_answer, '', mode='exec') @@ -58,15 +58,15 @@ class PythonStdioEvaluator(CodeEvaluator): return self.output_value def check_code(self, user_answer, file_paths, partial_grading, expected_input, - expected_output, weightage): + expected_output, weight): success = False - test_case_weightage = 0.0 + test_case_weight = 0.0 tb = None if self.output_value == expected_output: success = True err = "Correct answer" - test_case_weightage = weightage + test_case_weight = weight else: success = False err = dedent(""" @@ -80,4 +80,4 @@ class PythonStdioEvaluator(CodeEvaluator): ) ) del tb - return success, err, test_case_weightage + return success, err, test_case_weight diff --git a/yaksh/scilab_code_evaluator.py b/yaksh/scilab_code_evaluator.py index 927b84d..3c2d44c 100644 --- a/yaksh/scilab_code_evaluator.py +++ b/yaksh/scilab_code_evaluator.py @@ -27,7 +27,7 @@ class ScilabCodeEvaluator(CodeEvaluator): delete_files(self.files) super(ScilabCodeEvaluator, self).teardown() - def check_code(self, user_answer, file_paths, partial_grading, test_case, weightage): + def check_code(self, user_answer, file_paths, partial_grading, test_case, weight): if file_paths: self.files = copy_files(file_paths) ref_code_path = test_case @@ -37,7 +37,7 @@ class ScilabCodeEvaluator(CodeEvaluator): self._remove_scilab_exit(user_answer.lstrip()) success = False - test_case_weightage = 0.0 + test_case_weight = 0.0 self.write_to_submit_code_file(self.submit_code_path, user_answer) # Throw message if there are commmands that terminates scilab @@ -65,12 +65,12 @@ class ScilabCodeEvaluator(CodeEvaluator): stdout = self._strip_output(stdout) if proc.returncode == 5: 
success, err = True, "Correct answer" - test_case_weightage = float(weightage) if partial_grading else 0.0 + test_case_weight = float(weight) if partial_grading else 0.0 else: err = add_err + stdout else: err = add_err + stderr - return success, err, test_case_weightage + return success, err, test_case_weight def _remove_scilab_exit(self, string): """ diff --git a/yaksh/test_models.py b/yaksh/test_models.py index d05fac3..e7f3016 100644 --- a/yaksh/test_models.py +++ b/yaksh/test_models.py @@ -142,7 +142,7 @@ class QuestionTestCases(unittest.TestCase): self.upload_test_case.save() self.user_answer = "demo_answer" self.test_case_upload_data = [{"test_case": "assert fact(3)==6", - "weightage": 0.0 + "weight": 0.0 }] questions_data = [{"snippet": "def fact()", "active": True, "points": 1.0, @@ -880,7 +880,7 @@ class TestCaseTestCases(unittest.TestCase): answer_data = {"user_answer": "demo_answer", "test_case_data": [ {"test_case": "assert myfunc(12, 13) == 15", - "weightage": 0.0 + "weight": 0.0 } ] } diff --git a/yaksh/tests/test_code_server.py b/yaksh/tests/test_code_server.py index 19560e4..1984c6c 100644 --- a/yaksh/tests/test_code_server.py +++ b/yaksh/tests/test_code_server.py @@ -40,7 +40,7 @@ class TestCodeServer(unittest.TestCase): testdata = {'user_answer': 'while True: pass', 'partial_grading': False, 'test_case_data': [{'test_case':'assert 1==2', - 'weightage': 0.0 + 'weight': 0.0 } ]} @@ -59,7 +59,7 @@ class TestCodeServer(unittest.TestCase): testdata = {'user_answer': 'def f(): return 1', 'partial_grading': False, 'test_case_data': [{'test_case':'assert f() == 1', - 'weightage': 0.0 + 'weight': 0.0 } ]} @@ -78,7 +78,7 @@ class TestCodeServer(unittest.TestCase): testdata = {'user_answer': 'def f(): return 1', 'partial_grading': False, 'test_case_data': [{'test_case':'assert f() == 2', - 'weightage': 0.0 + 'weight': 0.0 } ]} @@ -101,7 +101,7 @@ class TestCodeServer(unittest.TestCase): testdata = {'user_answer': 'while True: pass', 'partial_grading': False, 'test_case_data': [{'test_case':'assert 1==2', - 'weightage': 0.0 + 'weight': 0.0 } ]} result = self.code_server.run_code( diff --git a/yaksh/views.py b/yaksh/views.py index aca89ef..c3d743b 100644 --- a/yaksh/views.py +++ b/yaksh/views.py @@ -517,15 +517,15 @@ def check(request, q_id, attempt_num=None, questionpaper_id=None): if question.type == 'code' else None correct, result = paper.validate_answer(user_answer, question, json_data) if correct: - new_answer.marks = (question.points * result['weightage'] / - question.get_maximum_test_case_weightage()) \ + new_answer.marks = (question.points * result['weight'] / + question.get_maximum_test_case_weight()) \ if question.partial_grading and question.type == 'code' else question.points new_answer.correct = correct new_answer.error = result.get('error') else: new_answer.error = result.get('error') - new_answer.marks = (question.points * result['weightage'] / - question.get_maximum_test_case_weightage()) \ + new_answer.marks = (question.points * result['weight'] / + question.get_maximum_test_case_weight()) \ if question.partial_grading and question.type == 'code' else 0 new_answer.save() paper.update_marks('inprogress') diff --git a/yaksh/xmlrpc_clients.py b/yaksh/xmlrpc_clients.py index 437dbcb..bb8260d 100644 --- a/yaksh/xmlrpc_clients.py +++ b/yaksh/xmlrpc_clients.py @@ -56,10 +56,10 @@ class CodeServerProxy(object): Returns ------- A json string of a dict containing: - {"success": success, "weightage": weightage, "error": error message} + {"success": success, "weight": weight, 
"error": error message} success - Boolean, indicating if code was executed successfully, correctly - weightage - Float, indicating total weightage of all successful test cases + weight - Float, indicating total weight of all successful test cases error - String, error message if success is false """ @@ -68,7 +68,7 @@ class CodeServerProxy(object): result = server.check_code(language, test_case_type, json_data, user_dir) except ConnectionError: result = json.dumps({'success': False, - 'weightage': 0.0, + 'weight': 0.0, 'error': 'Unable to connect to any code servers!'}) return result -- cgit From 31a15a666e69d96b5062596c79641645a64fcdb2 Mon Sep 17 00:00:00 2001 From: ankitjavalkar Date: Wed, 16 Nov 2016 11:47:26 +0530 Subject: Modify docstrings of evaluators --- yaksh/bash_code_evaluator.py | 9 ++++++--- yaksh/python_assertion_evaluator.py | 20 ++++++++++++++++++++ 2 files changed, 26 insertions(+), 3 deletions(-) (limited to 'yaksh') diff --git a/yaksh/bash_code_evaluator.py b/yaksh/bash_code_evaluator.py index 7575725..b5974d2 100644 --- a/yaksh/bash_code_evaluator.py +++ b/yaksh/bash_code_evaluator.py @@ -40,14 +40,17 @@ class BashCodeEvaluator(CodeEvaluator): Returns -------- + success - Boolean, indicating if code was executed successfully, correctly + weight - Float, indicating total weight of all successful test cases + error - String, error message if success is false - returns (True, "Correct answer") : If the student script passes all + returns (True, "Correct answer", 1.0) : If the student script passes all test cases/have same output, when compared to the instructor script - returns (False, error_msg): If the student script fails a single + returns (False, error_msg, 0.0): If the student script fails a single test/have dissimilar output, when compared to the instructor script. - Returns (False, error_msg): If mandatory arguments are not files or if + Returns (False, error_msg, 0.0): If mandatory arguments are not files or if the required permissions are not given to the file(s). """ ref_code_path = test_case diff --git a/yaksh/python_assertion_evaluator.py b/yaksh/python_assertion_evaluator.py index 8924643..1cc4fa4 100644 --- a/yaksh/python_assertion_evaluator.py +++ b/yaksh/python_assertion_evaluator.py @@ -37,6 +37,26 @@ class PythonAssertionEvaluator(CodeEvaluator): return self.exec_scope def check_code(self, user_answer, file_paths, partial_grading, test_case, weight): + """ Function validates user answer by running an assertion based test case + against it + + Returns + -------- + Returns a tuple (success, error, test_case_weight) + + success - Boolean, indicating if code was executed successfully, correctly + weight - Float, indicating total weight of all successful test cases + error - String, error message if success is false + + returns (True, "Correct answer", 1.0) : If the student script passes all + test cases/have same output, when compared to the instructor script + + returns (False, error_msg, 0.0): If the student script fails a single + test/have dissimilar output, when compared to the instructor script. + + Returns (False, error_msg, 0.0): If mandatory arguments are not files or if + the required permissions are not given to the file(s). 
+ """ success = False test_case_weight = 0.0 try: -- cgit From 0b151541d9414572965b597815f98c4b057430e1 Mon Sep 17 00:00:00 2001 From: ankitjavalkar Date: Wed, 16 Nov 2016 17:42:47 +0530 Subject: Fix error where answer is correct if last test case is correct --- yaksh/code_evaluator.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) (limited to 'yaksh') diff --git a/yaksh/code_evaluator.py b/yaksh/code_evaluator.py index 50fc546..8cea94e 100644 --- a/yaksh/code_evaluator.py +++ b/yaksh/code_evaluator.py @@ -108,25 +108,30 @@ class CodeEvaluator(object): # Add a new signal handler for the execution of this code. prev_handler = create_signal_handler() success = False + test_case_success_status = [False] * len(test_case_data) error = "" - weight = 0 + weight = 0.0 # Do whatever testing needed. try: - for test_case in test_case_data: - success = False + for idx, test_case in enumerate(test_case_data): + test_case_success = False self.compile_code(user_answer, file_paths, **test_case) - success, err, test_case_weight = self.check_code(user_answer, + test_case_success, err, test_case_weight = self.check_code(user_answer, file_paths, partial_grading, **test_case ) - if success: + if test_case_success: weight += test_case_weight error = err else: error += err + "\n" + test_case_success_status[idx] = test_case_success + + success = all(test_case_success_status) + except TimeoutException: error = self.timeout_msg except OSError: -- cgit From cb9a541a4653b2ed13db3a6171b50de575b0f05d Mon Sep 17 00:00:00 2001 From: ankitjavalkar Date: Wed, 16 Nov 2016 20:32:21 +0530 Subject: Fix Output display of Code Question --- yaksh/code_evaluator.py | 4 +-- yaksh/evaluator_tests/test_python_evaluation.py | 44 +++++++++++++++++++++---- yaksh/python_assertion_evaluator.py | 5 +-- yaksh/templates/yaksh/grade_user.html | 2 +- yaksh/templates/yaksh/question.html | 4 ++- 5 files changed, 45 insertions(+), 14 deletions(-) (limited to 'yaksh') diff --git a/yaksh/code_evaluator.py b/yaksh/code_evaluator.py index 8cea94e..afe18c3 100644 --- a/yaksh/code_evaluator.py +++ b/yaksh/code_evaluator.py @@ -124,10 +124,8 @@ class CodeEvaluator(object): ) if test_case_success: weight += test_case_weight - error = err - else: - error += err + "\n" + error += err + "\n" test_case_success_status[idx] = test_case_success success = all(test_case_success_status) diff --git a/yaksh/evaluator_tests/test_python_evaluation.py b/yaksh/evaluator_tests/test_python_evaluation.py index a0e3713..a3a0be8 100644 --- a/yaksh/evaluator_tests/test_python_evaluation.py +++ b/yaksh/evaluator_tests/test_python_evaluation.py @@ -45,7 +45,7 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): # Then self.assertTrue(result.get('success')) - self.assertEqual(result.get('error'), "Correct answer") + self.assertIn("Correct answer", result.get('error')) def test_incorrect_answer(self): # Given @@ -62,12 +62,42 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): # Then self.assertFalse(result.get('success')) - self.assertEqual(result.get('error'), - ('AssertionError in: assert(add(1,2)==3)\n' - 'AssertionError in: assert(add(-1,2)==1)\n' - 'AssertionError in: assert(add(-1,-2)==-3)\n' - ) - ) + self.assertIn('AssertionError in: assert(add(1,2)==3)', + result.get('error') + ) + self.assertIn('AssertionError in: assert(add(-1,2)==1)', + result.get('error') + ) + self.assertIn('AssertionError in: assert(add(-1,-2)==-3)', + result.get('error') + ) + + def test_partial_incorrect_answer(self): + # Given + user_answer = "def 
add(a,b):\n\treturn abs(a) + abs(b)" + test_case_data = [{"test_case": 'assert(add(-1,2)==1)', 'weight': 1.0}, + {"test_case": 'assert(add(-1,-2)==-3)', 'weight': 1.0}, + {"test_case": 'assert(add(1,2)==3)', 'weight': 2.0} + ] + kwargs = {'user_answer': user_answer, + 'test_case_data': test_case_data, + 'file_paths': self.file_paths, + 'partial_grading': True + } + + # When + evaluator = PythonAssertionEvaluator() + result = evaluator.evaluate(**kwargs) + + # Then + self.assertFalse(result.get('success')) + self.assertEqual(result.get('weight'), 2.0) + self.assertIn('AssertionError in: assert(add(-1,2)==1)', + result.get('error') + ) + self.assertIn('AssertionError in: assert(add(-1,-2)==-3)', + result.get('error') + ) def test_infinite_loop(self): # Given diff --git a/yaksh/python_assertion_evaluator.py b/yaksh/python_assertion_evaluator.py index 1cc4fa4..275244a 100644 --- a/yaksh/python_assertion_evaluator.py +++ b/yaksh/python_assertion_evaluator.py @@ -68,12 +68,13 @@ class PythonAssertionEvaluator(CodeEvaluator): info = traceback.extract_tb(tb) fname, lineno, func, text = info[-1] text = str(test_case).splitlines()[lineno-1] - err = "{0} {1} in: {2}".format(type.__name__, str(value), text) + err = ("-----\nExpected Test Case:\n{0}\n" + "Error - {1} {2} in: {3}\n-----").format(test_case, type.__name__, str(value), text) except Exception: raise # Exception will be caught in CodeEvaluator. else: success = True - err = 'Correct answer' + err = '-----\nCorrect answer\nTest Case: {0}\n-----'.format(test_case) test_case_weight = float(weight) if partial_grading else 0.0 del tb return success, err, test_case_weight diff --git a/yaksh/templates/yaksh/grade_user.html b/yaksh/templates/yaksh/grade_user.html index ced3ca2..38f31ca 100644 --- a/yaksh/templates/yaksh/grade_user.html +++ b/yaksh/templates/yaksh/grade_user.html @@ -157,7 +157,7 @@ Status : Passed
Student answer:
{% for answer in answers %}
{% if not answer.skipped %}
- {% if "Correct answer" in answer.error %}
+ {% if answer.correct %}

{% else %}
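
To keep the behaviour of the last two commits in view, the following is a condensed, illustrative sketch (not the verbatim method) of how CodeEvaluator.safe_evaluate aggregates per-test-case results after these changes; signal setup and exception handling are omitted:

    # Illustrative sketch only: overall success requires every test case to
    # pass, weights accumulate only for passing cases, and each test case's
    # message is appended to the combined error output.
    def aggregate(test_case_data, check_code):
        statuses, error, weight = [], "", 0.0
        for test_case in test_case_data:
            ok, err, test_case_weight = check_code(**test_case)
            if ok:
                weight += test_case_weight
            error += err + "\n"
            statuses.append(ok)
        return all(statuses), error, weight
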
diff --git a/yaksh/templates/yaksh/question.html b/yaksh/templates/yaksh/question.html
index a8de448..18b6388 100644
--- a/yaksh/templates/yaksh/question.html
+++ b/yaksh/templates/yaksh/question.html
@@ -163,9 +163,11 @@ function call_skip(url)
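
On the Django side, the weight returned by the evaluator is converted into marks using the expressions shown in views.py and AnswerPaper.validate_answer above. A minimal sketch, with hypothetical point and weight values (not taken from the repository):

    # Sketch of the weight-to-marks conversion used in views.py/models.py
    # above; the numbers below are hypothetical examples.
    def compute_marks(points, partial_grading, is_code, correct, weight, max_weight):
        if partial_grading and is_code:
            # weight is the summed weight of passing test cases;
            # max_weight is Question.get_maximum_test_case_weight().
            return points * weight / max_weight
        return points if correct else 0

    # A 10-point question with test case weights [1.0, 1.0, 2.0]: a submission
    # passing only the first two cases earns 10 * (1.0 + 1.0) / 4.0 == 5.0 marks.
    print(compute_marks(10, True, True, False, 2.0, 4.0))  # 5.0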