path: root/yaksh/models.py
author    maheshgudi  2017-01-13 01:24:27 +0530
committer maheshgudi  2017-01-13 01:24:27 +0530
commit    57a5d99c7ed5aed498943686b2c951bdaba3852a (patch)
tree      8969f5db3aeb70350e089e97fb5e37bd0e4db1ab /yaksh/models.py
parent    7fb288ffd992c912a8e2288aa97d6c6ceeedf1a1 (diff)
Fixed a bug which would mark all MCQ and MCC questions' answers as correct
1. Fixed an unchecked bug, introduced with the new evaluator, that marked all MCQ and MCC attempts as correct even when a wrong answer was submitted.
2. Added test cases that check incorrectly answered MCQ and MCC questions.
3. question.html also rendered the error message for MCQ and MCC questions; this has been fixed as well.
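Note on the root cause: before this patch, validate_answer initialized the result dict with 'success': True and tracked actual correctness only in a separate local `correct` flag, so any consumer reading result['success'] saw wrong MCQ/MCC answers reported as successful. A minimal standalone sketch of the failure mode (simplified, illustrative names, not the project's actual code):

# Simplified sketch of the pre-patch behaviour; names are illustrative.
def validate_answer_old(user_answer, expected_answer):
    # Bug: 'success' starts out True and is never reset for wrong answers.
    result = {'success': True, 'error': ['Incorrect answer']}
    correct = False
    if user_answer.strip() == expected_answer.strip():
        correct = True
        result['error'] = ['Correct answer']
    return correct, result

correct, result = validate_answer_old("option B", "option A")
assert correct is False            # the local flag is right...
assert result['success'] is True   # ...but the dict still claims success

The patch below makes result['success'] the single source of truth: it starts as False, is flipped only on a correct match, and the method returns just the dict.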
Diffstat (limited to 'yaksh/models.py')
-rw-r--r--  yaksh/models.py  17
1 file changed, 7 insertions(+), 10 deletions(-)
diff --git a/yaksh/models.py b/yaksh/models.py
index d65970b..271ed6d 100644
--- a/yaksh/models.py
+++ b/yaksh/models.py
@@ -1059,28 +1059,25 @@ class AnswerPaper(models.Model):
         For code questions success is True only if the answer is correct.
         """
-        result = {'success': True, 'error': ['Incorrect answer'], 'weight': 0.0}
-        correct = False
+        result = {'success': False, 'error': ['Incorrect answer'], 'weight': 0.0}
         if user_answer is not None:
             if question.type == 'mcq':
                 expected_answer = question.get_test_case(correct=True).options
                 if user_answer.strip() == expected_answer.strip():
-                    correct = True
+                    result['success'] = True
                     result['error'] = ['Correct answer']
             elif question.type == 'mcc':
                 expected_answers = []
                 for opt in question.get_test_cases(correct=True):
                     expected_answers.append(opt.options)
                 if set(user_answer) == set(expected_answers):
+                    result['success'] = True
                     result['error'] = ['Correct answer']
-                    correct = True
             elif question.type == 'code':
                 user_dir = self.user.profile.get_user_dir()
                 json_result = code_server.run_code(question.language, json_data, user_dir)
                 result = json.loads(json_result)
-                if result.get('success'):
-                    correct = True
-        return correct, result
+        return result

     def regrade(self, question_id):
         try:
@@ -1105,10 +1102,10 @@ class AnswerPaper(models.Model):
         answer = user_answer.answer
         json_data = question.consolidate_answer_data(answer) \
             if question.type == 'code' else None
-        correct, result = self.validate_answer(answer, question, json_data)
-        user_answer.correct = correct
+        result = self.validate_answer(answer, question, json_data)
+        user_answer.correct = result.get('success')
         user_answer.error = result.get('error')
-        if correct:
+        if result.get('success'):
             user_answer.marks = (question.points * result['weight'] /
                                  question.get_maximum_test_case_weight()) \
                 if question.partial_grading and question.type == 'code' else question.points
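The test cases mentioned in point 2 of the commit message live outside this diff (it is limited to yaksh/models.py). A hedged sketch of what such a regression test could look like, assuming a Django TestCase whose setUp() creates an AnswerPaper and an MCQ question; the fixture and test names here are hypothetical, only the validate_answer call shape and result keys come from the patch above:

from django.test import TestCase

class AnswerPaperRegressionTest(TestCase):
    # setUp() is elided; self.answer_paper and self.mcq_question are
    # assumed to be fixtures created there (hypothetical names).
    def test_incorrect_mcq_answer_is_rejected(self):
        # MCQ answers skip the code server, so the json_data argument is None.
        result = self.answer_paper.validate_answer(
            "a wrong option", self.mcq_question, None)
        self.assertFalse(result['success'])
        self.assertEqual(result['error'], ['Incorrect answer'])

With the pre-patch initialization of 'success': True, the assertFalse above would fail for any wrong MCQ or MCC answer, which is exactly the regression this commit guards against.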