     Correct:
     {% else %}
     Error:
     {% endif %}
-    Autocheck: {{ answer.error }}
-    {{ answer.answer.strip }}
+    {% for err in answer.1 %}
+        {{ err }}
+    {% endfor %}
+    {{ answer.0.answer.strip }}
     {% endif %}
     {% endfor %}
@@ -171,7 +176,7 @@ Status :
Passed
{% with answers|last as answer %}
Marks:
- value="{{ answer.marks }}">
+ value="{{ answer.0.marks }}">
{% endwith %}
{% endfor %} {# for question, answers ... #}
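
A note on the template syntax above: Django templates have no bracket indexing, so answer.0 / answer.1 reach into the (answer_object, error_list) tuples that get_question_answers() builds in this series (see the test_models.py change below). A minimal Python sketch of the structure the template walks, with a plain string standing in for the model object:

    import json

    # One entry per submitted answer: (Answer object, decoded error list);
    # answer.0 in the template is entry[0] here, answer.1 is entry[1].
    entry = ("Demo answer", json.loads(json.dumps(['error1', 'error2'])))
    for err in entry[1]:      # what {% for err in answer.1 %} iterates
        print(err)
    print(entry[0])           # the Answer itself, as in {{ answer.0 }}
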
diff --git a/yaksh/test_models.py b/yaksh/test_models.py
index 317c832..dc69024 100644
--- a/yaksh/test_models.py
+++ b/yaksh/test_models.py
@@ -487,12 +487,14 @@ class AnswerPaperTestCases(unittest.TestCase):
# answers for the Answer Paper
self.answer_right = Answer(question=Question.objects.get(id=1),
answer="Demo answer",
- correct=True, marks=1
+ correct=True, marks=1,
+ error=json.dumps([])
)
self.answer_wrong = Answer(question=Question.objects.get(id=2),
answer="My answer",
correct=False,
- marks=0
+ marks=0,
+ error=json.dumps(['error1', 'error2'])
)
self.answer_right.save()
self.answer_wrong.save()
@@ -587,7 +589,6 @@ class AnswerPaperTestCases(unittest.TestCase):
# Then
self.assertTrue(correct)
self.assertTrue(result['success'])
- self.assertEqual(result['error'], 'Correct answer')
self.answer.correct = True
self.answer.marks = 1
@@ -729,8 +730,8 @@ class AnswerPaperTestCases(unittest.TestCase):
""" Test get_question_answer() method of Answer Paper"""
answered = self.answerpaper.get_question_answers()
first_answer = list(answered.values())[0][0]
- self.assertEqual(first_answer.answer, 'Demo answer')
- self.assertTrue(first_answer.correct)
+ self.assertEqual(first_answer[0].answer, 'Demo answer')
+ self.assertTrue(first_answer[0].correct)
self.assertEqual(len(answered), 2)
def test_is_answer_correct(self):
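
The fixture changes above capture the central contract of this series: Answer.error now holds a JSON-encoded list of error strings instead of a single string. A self-contained sketch of that round trip; the Answer class here is a hypothetical stand-in, not the real Django model:

    import json

    class Answer:  # stand-in only; the real Answer is a Django model
        def __init__(self, answer, correct, marks, error):
            self.answer, self.correct = answer, correct
            self.marks, self.error = marks, error

    wrong = Answer("My answer", correct=False, marks=0,
                   error=json.dumps(['error1', 'error2']))
    # Consumers decode once and iterate; a clean answer decodes to [].
    for err in json.loads(wrong.error):
        print(err)
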
diff --git a/yaksh/tests/test_code_server.py b/yaksh/tests/test_code_server.py
index d46c9dd..19698a5 100644
--- a/yaksh/tests/test_code_server.py
+++ b/yaksh/tests/test_code_server.py
@@ -55,7 +55,7 @@ class TestCodeServer(unittest.TestCase):
# Then
data = json.loads(result)
self.assertFalse(data['success'])
- self.assertTrue('infinite loop' in data['error'])
+ self.assertTrue('infinite loop' in data['error'][0])
def test_correct_answer(self):
# Given
@@ -77,7 +77,6 @@ class TestCodeServer(unittest.TestCase):
# Then
data = json.loads(result)
self.assertTrue(data['success'])
- self.assertIn('Correct answer', data['error'])
def test_wrong_answer(self):
# Given
@@ -99,7 +98,7 @@ class TestCodeServer(unittest.TestCase):
# Then
data = json.loads(result)
self.assertFalse(data['success'])
- self.assertTrue('AssertionError' in data['error'])
+ self.assertTrue('AssertionError' in data['error'][0])
def test_multiple_simultaneous_hits(self):
# Given
@@ -139,7 +138,7 @@ class TestCodeServer(unittest.TestCase):
for i in range(N):
data = results.get()
self.assertFalse(data['success'])
- self.assertTrue('infinite loop' in data['error'])
+ self.assertTrue('infinite loop' in data['error'][0])
def test_server_pool_status(self):
# Given
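
The assertions in this file change because the code server's JSON payload now carries 'error' as a list, so tests look inside data['error'][0]. A sketch with an illustrative (not verbatim) message:

    import json

    # Illustrative payload; the real message text comes from the grader.
    result = json.dumps({'success': False,
                         'error': ['Code took too long: possible infinite loop'],
                         'weight': 0.0})
    data = json.loads(result)
    assert not data['success']
    assert 'infinite loop' in data['error'][0]
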
diff --git a/yaksh/views.py b/yaksh/views.py
index b92b3fe..802d9d9 100644
--- a/yaksh/views.py
+++ b/yaksh/views.py
@@ -488,7 +488,7 @@ def check(request, q_id, attempt_num=None, questionpaper_id=None):
json_data = question.consolidate_answer_data(user_answer) \
if question.type == 'code' else None
correct, result = paper.validate_answer(user_answer, question, json_data)
- if correct:
+ if correct or result.get('success'):
new_answer.marks = (question.points * result['weight'] /
question.get_maximum_test_case_weight()) \
if question.partial_grading and question.type == 'code' else question.points
@@ -502,9 +502,7 @@ def check(request, q_id, attempt_num=None, questionpaper_id=None):
new_answer.save()
paper.update_marks('inprogress')
paper.set_end_time(timezone.now())
- if not result.get('success'): # Should only happen for non-mcq questions.
- new_answer.answer = user_code
- new_answer.save()
+ if question.type == 'code': # Should only happen for non-mcq questions.
return show_question(request, question, paper, result.get('error'))
else:
next_q = paper.completed_question(question.id)
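
The marks expression in the hunk above is the partial-grading formula: scale the question's points by the fraction of total test-case weight earned. A hedged distillation of the arithmetic (the helper name is mine, not the codebase's):

    def compute_marks(points, weight, max_weight, partial_grading, q_type):
        # Matches the conditional expression in check(): only code
        # questions with partial grading enabled are scaled down.
        if partial_grading and q_type == 'code':
            return points * weight / max_weight
        return points

    # A 10-point question passing test cases worth 3 of 5 total weight:
    assert compute_marks(10, 3.0, 5.0, True, 'code') == 6.0
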
--
From 42ed7c142e2c4ff4dccdaec2365e22d30bc276dd Mon Sep 17 00:00:00 2001
From: ankitjavalkar
Date: Thu, 22 Dec 2016 15:55:54 +0530
Subject: Refactor check method for cleaner code and fixing multiple issues
 - Question should skip to new question with new error rendering
 - Fix test cases
 - Fix change_dir context manager in grader
---
yaksh/c_cpp_files/main2.c | 2 +-
yaksh/cpp_code_evaluator.py | 3 +-
yaksh/grader.py | 6 ++--
yaksh/java_code_evaluator.py | 1 +
yaksh/models.py | 6 ++--
yaksh/templates/yaksh/grade_user.html | 1 -
yaksh/test_models.py | 2 +-
yaksh/views.py | 53 ++++++++++++++++++-----------------
8 files changed, 37 insertions(+), 37 deletions(-)
diff --git a/yaksh/c_cpp_files/main2.c b/yaksh/c_cpp_files/main2.c
index ccd1768..a62195f 100755
--- a/yaksh/c_cpp_files/main2.c
+++ b/yaksh/c_cpp_files/main2.c
@@ -13,7 +13,7 @@ void check(T expect,T result)
else
{
printf("\nIncorrect:\n Expected %d got %d \n",expect,result);
- exit (0);
+ exit (1);
}
}
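
The exit(0) → exit(1) fix matters because the evaluator judges a compiled C test by its process return code; exiting 0 on a wrong answer reads as a pass. A minimal sketch of that check, assuming a hypothetical './submit_test' binary:

    import subprocess

    try:
        proc = subprocess.run(['./submit_test'])  # hypothetical binary
        passed = (proc.returncode == 0)           # exit(1) now means fail
    except FileNotFoundError:
        passed = False
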
diff --git a/yaksh/cpp_code_evaluator.py b/yaksh/cpp_code_evaluator.py
index 418c655..91ba703 100644
--- a/yaksh/cpp_code_evaluator.py
+++ b/yaksh/cpp_code_evaluator.py
@@ -15,7 +15,7 @@ class CppCodeEvaluator(BaseEvaluator):
"""Tests the C code obtained from Code Server"""
def __init__(self, metadata, test_case_data):
self.files = []
- self.submit_code_path = self.create_submit_code_file('submit.c')
+ self.submit_code_path = ''
self.compiled_user_answer = None
self.compiled_test_code = None
self.user_output_path = ""
@@ -62,6 +62,7 @@ class CppCodeEvaluator(BaseEvaluator):
ref_code_path = self.test_case
clean_ref_code_path, clean_test_case_path = \
self._set_test_code_file_path(ref_code_path)
+ self.submit_code_path = self.create_submit_code_file('submit.c')
if self.file_paths:
self.files = copy_files(self.file_paths)
if not isfile(clean_ref_code_path):
diff --git a/yaksh/grader.py b/yaksh/grader.py
index 41b3ac1..0c057c2 100644
--- a/yaksh/grader.py
+++ b/yaksh/grader.py
@@ -24,7 +24,6 @@ from .language_registry import create_evaluator_instance
MY_DIR = abspath(dirname(__file__))
-
registry = None
# Raised when the code times-out.
@@ -34,7 +33,7 @@ class TimeoutException(Exception):
@contextlib.contextmanager
def change_dir(path):
- cur_dir = os.getcwd()
+ cur_dir = abspath(dirname(MY_DIR))
os.chdir(path)
try:
yield
@@ -77,7 +76,7 @@ class Grader(object):
self.in_dir = in_dir if in_dir else MY_DIR
- def evaluate(self, kwargs): #language, test_case_type,
+ def evaluate(self, kwargs):
"""Evaluates given code with the test cases based on
given arguments in test_case_data.
@@ -98,7 +97,6 @@ class Grader(object):
A tuple: (success, error, weight).
"""
-
self.setup()
test_case_instances = self.get_evaluator_objects(kwargs)
with change_dir(self.in_dir):
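
The change_dir fix swaps os.getcwd() for a path derived from the module's own location: if an earlier evaluation deleted or abandoned the working directory, os.getcwd() could raise or restore a stale path. The patched context manager, assembled from the hunks above:

    import contextlib
    import os
    from os.path import abspath, dirname

    MY_DIR = abspath(dirname(__file__))

    @contextlib.contextmanager
    def change_dir(path):
        cur_dir = abspath(dirname(MY_DIR))  # fixed anchor, not os.getcwd()
        os.chdir(path)
        try:
            yield
        finally:
            os.chdir(cur_dir)
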
diff --git a/yaksh/java_code_evaluator.py b/yaksh/java_code_evaluator.py
index ab8160c..91e5840 100644
--- a/yaksh/java_code_evaluator.py
+++ b/yaksh/java_code_evaluator.py
@@ -18,6 +18,7 @@ class JavaCodeEvaluator(BaseEvaluator):
self.files = []
self.compiled_user_answer = None
self.compiled_test_code = None
+ self.submit_code_path = ""
self.user_output_path = ""
self.ref_output_path = ""
diff --git a/yaksh/models.py b/yaksh/models.py
index 378d7a1..08feab6 100644
--- a/yaksh/models.py
+++ b/yaksh/models.py
@@ -1064,20 +1064,20 @@ class AnswerPaper(models.Model):
For code questions success is True only if the answer is correct.
"""
- result = {'success': True, 'error': 'Incorrect answer', 'weight': 0.0}
+ result = {'success': True, 'error': ['Incorrect answer'], 'weight': 0.0}
correct = False
if user_answer is not None:
if question.type == 'mcq':
expected_answer = question.get_test_case(correct=True).options
if user_answer.strip() == expected_answer.strip():
correct = True
- result['error'] = 'Correct answer'
+ result['error'] = ['Correct answer']
elif question.type == 'mcc':
expected_answers = []
for opt in question.get_test_cases(correct=True):
expected_answers.append(opt.options)
if set(user_answer) == set(expected_answers):
- result['error'] = 'Correct answer'
+ result['error'] = ['Correct answer']
correct = True
elif question.type == 'code':
user_dir = self.user.profile.get_user_dir()
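
After this hunk, validate_answer() always reports 'error' as a list, which is what lets templates and tests treat MCQ and code errors uniformly. A condensed, runnable sketch of the MCQ branch (simplified from AnswerPaper.validate_answer, not the literal method):

    def validate_mcq(user_answer, expected_answer):
        result = {'success': True, 'error': ['Incorrect answer'],
                  'weight': 0.0}
        correct = user_answer.strip() == expected_answer.strip()
        if correct:
            result['error'] = ['Correct answer']
        return correct, result

    assert validate_mcq(' A ', 'A')[0] is True
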
diff --git a/yaksh/templates/yaksh/grade_user.html b/yaksh/templates/yaksh/grade_user.html
index 4fc5026..516a6d0 100644
--- a/yaksh/templates/yaksh/grade_user.html
+++ b/yaksh/templates/yaksh/grade_user.html
@@ -156,7 +156,6 @@ Status :
Passed
{% else %}
Student answer:
{% for answer in answers %}
- {% if not answer.0.skipped %}
{% if answer.0.correct %}
Correct:
diff --git a/yaksh/test_models.py b/yaksh/test_models.py
index dc69024..e5bd104 100644
--- a/yaksh/test_models.py
+++ b/yaksh/test_models.py
@@ -552,7 +552,7 @@ class AnswerPaperTestCases(unittest.TestCase):
# Then
self.assertTrue(correct)
self.assertTrue(result['success'])
- self.assertEqual(result['error'], 'Correct answer')
+ self.assertEqual(result['error'], ['Correct answer'])
self.answer.correct = True
self.answer.marks = 1
diff --git a/yaksh/views.py b/yaksh/views.py
index 802d9d9..35121e7 100644
--- a/yaksh/views.py
+++ b/yaksh/views.py
@@ -448,67 +448,68 @@ def check(request, q_id, attempt_num=None, questionpaper_id=None):
user = request.user
paper = get_object_or_404(AnswerPaper, user=request.user, attempt_number=attempt_num,
question_paper=questionpaper_id)
- question = get_object_or_404(Question, pk=q_id)
- if question in paper.questions_answered.all():
+ current_question = get_object_or_404(Question, pk=q_id)
+ if current_question in paper.questions_answered.all():
next_q = paper.next_question(q_id)
return show_question(request, next_q, paper)
if request.method == 'POST':
snippet_code = request.POST.get('snippet')
# Add the answer submitted, regardless of it being correct or not.
- if question.type == 'mcq':
+ if current_question.type == 'mcq':
user_answer = request.POST.get('answer')
- elif question.type == 'mcc':
+ elif current_question.type == 'mcc':
user_answer = request.POST.getlist('answer')
- elif question.type == 'upload':
+ elif current_question.type == 'upload':
assign = AssignmentUpload()
assign.user = user.profile
- assign.assignmentQuestion = question
+ assign.assignmentQuestion = current_question
# if time-up at upload question then the form is submitted without
# validation
if 'assignment' in request.FILES:
assign.assignmentFile = request.FILES['assignment']
assign.save()
user_answer = 'ASSIGNMENT UPLOADED'
- next_q = paper.completed_question(question.id)
+ next_q = paper.completed_question(current_question.id)
return show_question(request, next_q, paper)
else:
user_code = request.POST.get('answer')
user_answer = snippet_code + "\n" + user_code if snippet_code else user_code
- new_answer = Answer(question=question, answer=user_answer,
+ if not user_answer:
+ msg = ["Please submit a valid option or code"]
+ return show_question(request, current_question, paper, msg)
+ new_answer = Answer(question=current_question, answer=user_answer,
correct=False)
new_answer.save()
paper.answers.add(new_answer)
- if not user_answer:
- msg = "Please submit a valid option or code"
- return show_question(request, question, paper, msg)
# If we were not skipped, we were asked to check. For any non-mcq
# questions, we obtain the results via XML-RPC with the code executed
# safely in a separate process (the code_server.py) running as nobody.
- json_data = question.consolidate_answer_data(user_answer) \
- if question.type == 'code' else None
- correct, result = paper.validate_answer(user_answer, question, json_data)
+ json_data = current_question.consolidate_answer_data(user_answer) \
+ if current_question.type == 'code' else None
+ correct, result = paper.validate_answer(user_answer, current_question, json_data)
if correct or result.get('success'):
- new_answer.marks = (question.points * result['weight'] /
- question.get_maximum_test_case_weight()) \
- if question.partial_grading and question.type == 'code' else question.points
+ new_answer.marks = (current_question.points * result['weight'] /
+ current_question.get_maximum_test_case_weight()) \
+ if current_question.partial_grading and current_question.type == 'code' else current_question.points
new_answer.correct = correct
+ error_message = None
new_answer.error = json.dumps(result.get('error'))
+ next_question = paper.completed_question(current_question.id)
else:
+ new_answer.marks = (current_question.points * result['weight'] /
+ current_question.get_maximum_test_case_weight()) \
+ if current_question.partial_grading and current_question.type == 'code' else 0
+ error_message = result.get('error')
new_answer.error = json.dumps(result.get('error'))
- new_answer.marks = (question.points * result['weight'] /
- question.get_maximum_test_case_weight()) \
- if question.partial_grading and question.type == 'code' else 0
+ next_question = current_question if current_question.type == 'code' \
+ else paper.completed_question(current_question.id)
new_answer.save()
paper.update_marks('inprogress')
paper.set_end_time(timezone.now())
- if question.type == 'code': # Should only happen for non-mcq questions.
- return show_question(request, question, paper, result.get('error'))
- else:
- next_q = paper.completed_question(question.id)
- return show_question(request, next_q, paper)
+ return show_question(request, next_question, paper, error_message)
else:
- return show_question(request, question, paper)
+ return show_question(request, current_question, paper)
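
The net effect of the rewrite is a single exit point whose next question depends on the outcome: success advances, a failed code answer re-shows the same question with its error list, and a failed mcq/mcc still advances. A hedged distillation of that routing (helper name and shape are mine, not the literal view code):

    def route_after_check(paper, question, correct, result):
        # Distilled from the rewritten check().
        if correct or result.get('success'):
            return paper.completed_question(question.id), None
        if question.type == 'code':
            return question, result.get('error')  # retry, errors shown
        return paper.completed_question(question.id), result.get('error')
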
--
From 0e56fc6a77ec21db05c9bafb42b1acc652354a32 Mon Sep 17 00:00:00 2001
From: ankitjavalkar
Date: Thu, 22 Dec 2016 20:16:00 +0530
Subject: - Fix grade user rendering issue when question is skipped
 - Modify grade user html template to create readable variables for answer and error
yaksh/models.py | 10 ++++++++--
yaksh/python_assertion_evaluator.py | 2 +-
yaksh/templates/yaksh/grade_user.html | 26 +++++---------------------
yaksh/templates/yaksh/question.html | 24 ++++++++++++++++++++----
yaksh/test_models.py | 5 +++--
yaksh/views.py | 5 +++--
6 files changed, 40 insertions(+), 32 deletions(-)
diff --git a/yaksh/models.py b/yaksh/models.py
index 08feab6..35999d3 100644
--- a/yaksh/models.py
+++ b/yaksh/models.py
@@ -1027,9 +1027,15 @@ class AnswerPaper(models.Model):
for answer in self.answers.all():
question = answer.question
if question in q_a:
- q_a[question].append((answer, [e for e in json.loads(answer.error)]))
+ q_a[question].append({'answer': answer,
+ 'error_list': [e for e in json.loads(answer.error)]
+ }
+ )
else:
- q_a[question] = [(answer, [e for e in json.loads(answer.error)])]
+ q_a[question] = [{'answer': answer,
+ 'error_list': [e for e in json.loads(answer.error)]
+ }
+ ]
return q_a
def get_questions(self):
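
This commit upgrades the (answer, error_list) tuples to dicts so templates can write ans.answer and ans.error_list. A runnable sketch of the grouping, with a stand-in for the Answer model:

    import json

    class FakeAnswer:  # stand-in for the Django Answer model
        def __init__(self, error):
            self.error = error

    def group_answers(rows):
        # Mirrors get_question_answers(): decode each answer's JSON
        # error field once, exposing it to templates as 'error_list'.
        q_a = {}
        for question, answer in rows:
            q_a.setdefault(question, []).append(
                {'answer': answer,
                 'error_list': [e for e in json.loads(answer.error)]})
        return q_a

    rows = [('Q1', FakeAnswer(json.dumps(['error1'])))]
    assert group_answers(rows)['Q1'][0]['error_list'] == ['error1']
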
diff --git a/yaksh/python_assertion_evaluator.py b/yaksh/python_assertion_evaluator.py
index d8cd07c..749a6ec 100644
--- a/yaksh/python_assertion_evaluator.py
+++ b/yaksh/python_assertion_evaluator.py
@@ -77,7 +77,7 @@ class PythonAssertionEvaluator(BaseEvaluator):
fname, lineno, func, text = info[-1]
text = str(self.test_case)
err = "Expected Test Case:\n{0}\n" \
- "Error - {1} {2} in: {3}\n-----".format(
+ "Error - {1} {2} in: {3}\n".format(
self.test_case,
type.__name__,
str(value),
diff --git a/yaksh/templates/yaksh/grade_user.html b/yaksh/templates/yaksh/grade_user.html
index 516a6d0..6fb8187 100644
--- a/yaksh/templates/yaksh/grade_user.html
+++ b/yaksh/templates/yaksh/grade_user.html
@@ -118,6 +118,7 @@ Status :
Passed
method="post">
{% csrf_token %}
{% for question, answers in paper.get_question_answers.items %}
+
Details: {{forloop.counter}}. {{ question.summary }}
@@ -139,39 +140,22 @@ Status : Passed
{%endif%}
-    {% if question.type == "mcq" or question.type == "mcc" %}
-        {% if "Correct answer" in answers.0.error %}
-        {% else %}
-        {% endif %}
-        Autocheck: {{ answers.0.error }}
-        Student answer:
-        {{forloop.counter}}. {{ answers.0 }}
-    {% else %}
     Student answer:
-    {% for answer in answers %}
-    {% if answer.0.correct %}
+    {% for ans in answers %}
+    {% if ans.answer.correct %}
     Correct:
     {% else %}
     Error:
     {% endif %}
-    {% for err in answer.1 %}
+    {% for err in ans.error_list %}
     {% endfor %}
-    {{ answer.0.answer.strip }}
+    {{ ans.answer.answer.strip }}
-    {% endif %}
     {% endfor %}
-    {% endif %}
{% with answers|last as answer %}
Marks:
diff --git a/yaksh/templates/yaksh/question.html b/yaksh/templates/yaksh/question.html
--- a/yaksh/templates/yaksh/question.html
+++ b/yaksh/templates/yaksh/question.html
{% if question.type == "mcq" %}
     {% if error_message %}
-        {{ error_message }}
-    {% endif %}
+        {% for err in error_message %}
+            {{ err }}
+        {% endfor %}
+    {% endif %}
{% for test_case in test_cases %}
{{ test_case.options }}
{% endfor %}
{% endif %}
{% if question.type == "mcc" %}
     {% if error_message %}
-        {{ error_message }}
-    {% endif %}
+        {% for err in error_message %}
+            {{ err }}
+        {% endfor %}
+    {% endif %}
{% for test_case in test_cases %}
{{ test_case.options }}
diff --git a/yaksh/test_models.py b/yaksh/test_models.py
index e5bd104..6764dd0 100644
--- a/yaksh/test_models.py
+++ b/yaksh/test_models.py
@@ -730,8 +730,9 @@ class AnswerPaperTestCases(unittest.TestCase):
""" Test get_question_answer() method of Answer Paper"""
answered = self.answerpaper.get_question_answers()
first_answer = list(answered.values())[0][0]
- self.assertEqual(first_answer[0].answer, 'Demo answer')
- self.assertTrue(first_answer[0].correct)
+ first_answer_obj = first_answer['answer']
+ self.assertEqual(first_answer_obj.answer, 'Demo answer')
+ self.assertTrue(first_answer_obj.correct)
self.assertEqual(len(answered), 2)
def test_is_answer_correct(self):
diff --git a/yaksh/views.py b/yaksh/views.py
index 35121e7..7ecf6aa 100644
--- a/yaksh/views.py
+++ b/yaksh/views.py
@@ -430,7 +430,8 @@ def skip(request, q_id, next_q=None, attempt_num=None, questionpaper_id=None):
if request.method == 'POST' and question.type == 'code':
user_code = request.POST.get('answer')
new_answer = Answer(question=question, answer=user_code,
- correct=False, skipped=True)
+ correct=False, skipped=True,
+ error=json.dumps([]))
new_answer.save()
paper.answers.add(new_answer)
if next_q is not None:
@@ -479,7 +480,7 @@ def check(request, q_id, attempt_num=None, questionpaper_id=None):
msg = ["Please submit a valid option or code"]
return show_question(request, current_question, paper, msg)
new_answer = Answer(question=current_question, answer=user_answer,
- correct=False)
+ correct=False, error=json.dumps([]))
new_answer.save()
paper.answers.add(new_answer)
# If we were not skipped, we were asked to check. For any non-mcq
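
Both hunks seed new Answer rows with error=json.dumps([]) because the template layer now decodes Answer.error unconditionally; a row saved without it would hold None, and json.loads(None) raises. A two-line demonstration:

    import json

    print(json.loads(json.dumps([])))  # -> []; safe default to iterate
    # json.loads(None)                 # TypeError: would break rendering
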
--