Diffstat (limited to 'yaksh')
-rw-r--r--  yaksh/bash_code_evaluator.py                      4
-rw-r--r--  yaksh/cpp_code_evaluator.py                       2
-rw-r--r--  yaksh/evaluator_tests/test_java_evaluation.py     6
-rw-r--r--  yaksh/evaluator_tests/test_python_evaluation.py   5
-rw-r--r--  yaksh/java_code_evaluator.py                      2
-rw-r--r--  yaksh/models.py                                  10
-rw-r--r--  yaksh/python_assertion_evaluator.py              11
-rw-r--r--  yaksh/python_stdio_evaluator.py                   2
-rw-r--r--  yaksh/scilab_code_evaluator.py                    2
-rw-r--r--  yaksh/stdio_evaluator.py                          2
-rw-r--r--  yaksh/templates/yaksh/grade_user.html            17
-rw-r--r--  yaksh/test_models.py                             11
-rw-r--r--  yaksh/tests/test_code_server.py                   7
-rw-r--r--  yaksh/views.py                                    6
14 files changed, 41 insertions(+), 46 deletions(-)
diff --git a/yaksh/bash_code_evaluator.py b/yaksh/bash_code_evaluator.py
index 1e6fc9c..975af82 100644
--- a/yaksh/bash_code_evaluator.py
+++ b/yaksh/bash_code_evaluator.py
@@ -104,7 +104,7 @@ class BashCodeEvaluator(BaseEvaluator):
proc, stdnt_stdout, stdnt_stderr = ret
if inst_stdout == stdnt_stdout:
mark_fraction = float(self.weight) if self.partial_grading else 0.0
- return True, "Correct answer", mark_fraction
+ return True, None, mark_fraction
else:
err = "Error: expected %s, got %s" % (inst_stderr,
stdnt_stderr
@@ -147,7 +147,7 @@ class BashCodeEvaluator(BaseEvaluator):
valid_answer = inst_stdout == stdnt_stdout
if valid_answer and (num_lines == loop_count):
mark_fraction = float(self.weight) if self.partial_grading else 0.0
- return True, "Correct answer", mark_fraction
+ return True, None, mark_fraction
else:
err = ("Error:expected"
" {0}, got {1}").format(inst_stdout+inst_stderr,
diff --git a/yaksh/cpp_code_evaluator.py b/yaksh/cpp_code_evaluator.py
index f0c2029..418c655 100644
--- a/yaksh/cpp_code_evaluator.py
+++ b/yaksh/cpp_code_evaluator.py
@@ -133,7 +133,7 @@ class CppCodeEvaluator(BaseEvaluator):
)
proc, stdout, stderr = ret
if proc.returncode == 0:
- success, err = True, "Correct answer"
+ success, err = True, None
mark_fraction = float(self.weight) if self.partial_grading else 0.0
else:
err = "{0} \n {1}".format(stdout, stderr)
diff --git a/yaksh/evaluator_tests/test_java_evaluation.py b/yaksh/evaluator_tests/test_java_evaluation.py
index 503372e..bfba38f 100644
--- a/yaksh/evaluator_tests/test_java_evaluation.py
+++ b/yaksh/evaluator_tests/test_java_evaluation.py
@@ -52,7 +52,6 @@ class JavaAssertionEvaluationTestCases(EvaluatorBaseTest):
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- self.assert_correct_output("Correct answer", result.get('error'))
self.assertTrue(result.get('success'))
def test_incorrect_answer(self):
@@ -152,7 +151,6 @@ class JavaAssertionEvaluationTestCases(EvaluatorBaseTest):
result = grader.evaluate(kwargs)
self.assertTrue(result.get("success"))
- self.assert_correct_output("Correct answer", result.get("error"))
class JavaStdIOEvaluationTestCases(EvaluatorBaseTest):
def setUp(self):
@@ -199,7 +197,6 @@ class JavaStdIOEvaluationTestCases(EvaluatorBaseTest):
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- self.assert_correct_output("Correct answer", result.get('error'))
self.assertTrue(result.get('success'))
def test_array_input(self):
@@ -333,7 +330,6 @@ class JavaStdIOEvaluationTestCases(EvaluatorBaseTest):
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- self.assert_correct_output("Correct answer", result.get('error'))
self.assertTrue(result.get('success'))
def test_string_input(self):
@@ -364,7 +360,6 @@ class JavaStdIOEvaluationTestCases(EvaluatorBaseTest):
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- self.assert_correct_output("Correct answer", result.get('error'))
self.assertTrue(result.get('success'))
def test_file_based_stdout(self):
@@ -406,7 +401,6 @@ class JavaStdIOEvaluationTestCases(EvaluatorBaseTest):
result = grader.evaluate(kwargs)
self.assertTrue(result.get("success"))
- self.assert_correct_output("Correct answer", result.get("error"))
if __name__ == '__main__':
diff --git a/yaksh/evaluator_tests/test_python_evaluation.py b/yaksh/evaluator_tests/test_python_evaluation.py
index 291e639..c58d7f1 100644
--- a/yaksh/evaluator_tests/test_python_evaluation.py
+++ b/yaksh/evaluator_tests/test_python_evaluation.py
@@ -56,7 +56,6 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
# Then
self.assertTrue(result.get('success'))
- self.assert_correct_output("Correct answer", result.get('error'))
def test_incorrect_answer(self):
# Given
@@ -368,7 +367,6 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
result = grader.evaluate(kwargs)
# Then
- self.assert_correct_output("Correct answer", result.get('error'))
self.assertTrue(result.get('success'))
def test_single_testcase_error(self):
@@ -449,7 +447,7 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
# Then
self.assertFalse(result.get("success"))
- self.assertEqual(5, len(err))
+ self.assertEqual(2, len(err))
for msg in name_error_msg:
self.assert_correct_output(msg, result.get("error"))
@@ -613,7 +611,6 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest):
result = grader.evaluate(kwargs)
# Then
- self.assert_correct_output("Correct answer", result.get('error'))
self.assertTrue(result.get('success'))
def test_infinite_loop(self):
diff --git a/yaksh/java_code_evaluator.py b/yaksh/java_code_evaluator.py
index 5d3fd28..ab8160c 100644
--- a/yaksh/java_code_evaluator.py
+++ b/yaksh/java_code_evaluator.py
@@ -142,7 +142,7 @@ class JavaCodeEvaluator(BaseEvaluator):
stderr=subprocess.PIPE)
proc, stdout, stderr = ret
if proc.returncode == 0:
- success, err = True, "Correct answer"
+ success, err = True, None
mark_fraction = float(self.weight) if self.partial_grading else 0.0
else:
err = stdout + "\n" + stderr
diff --git a/yaksh/models.py b/yaksh/models.py
index 4fd6967..378d7a1 100644
--- a/yaksh/models.py
+++ b/yaksh/models.py
@@ -1155,9 +1155,7 @@ class StandardTestCase(TestCase):
"weight": self.weight}
def __str__(self):
- return u'Question: {0} | Test Case: {1}'.format(self.question,
- self.test_case
- )
+ return u'Standard TestCase | Test Case: {0}'.format(self.test_case)
class StdIOBasedTestCase(TestCase):
@@ -1172,7 +1170,7 @@ class StdIOBasedTestCase(TestCase):
"weight": self.weight}
def __str__(self):
- return u'Question: {0} | Exp. Output: {1} | Exp. Input: {2}'.format(self.question,
+ return u'StdIO Based Testcase | Exp. Output: {0} | Exp. Input: {1}'.format(
self.expected_output, self.expected_input
)
@@ -1185,9 +1183,7 @@ class McqTestCase(TestCase):
return {"test_case_type": "mcqtestcase", "options": self.options, "correct": self.correct}
def __str__(self):
- return u'Question: {0} | Correct: {1}'.format(self.question,
- self.correct
- )
+ return u'MCQ Testcase | Correct: {0}'.format(self.correct)
class HookTestCase(TestCase):
diff --git a/yaksh/python_assertion_evaluator.py b/yaksh/python_assertion_evaluator.py
index 41ebeb6..d8cd07c 100644
--- a/yaksh/python_assertion_evaluator.py
+++ b/yaksh/python_assertion_evaluator.py
@@ -76,8 +76,13 @@ class PythonAssertionEvaluator(BaseEvaluator):
info = traceback.extract_tb(tb)
fname, lineno, func, text = info[-1]
text = str(self.test_case)
- err = ("-----\nExpected Test Case:\n{0}\n"
- "Error - {1} {2} in: {3}\n-----").format(self.test_case, type.__name__, str(value), text)
+ err = "Expected Test Case:\n{0}\n" \
+ "Error - {1} {2} in: {3}\n-----".format(
+ self.test_case,
+ type.__name__,
+ str(value),
+ text
+ )
except TimeoutException:
raise
except Exception:
@@ -85,7 +90,7 @@ class PythonAssertionEvaluator(BaseEvaluator):
err = "Error in Test case: {0}".format(msg)
else:
success = True
- err = '-----\nCorrect answer\nTest Case: {0}\n-----'.format(self.test_case)
+ err = None
mark_fraction = float(self.weight) if self.partial_grading else 0.0
del tb
return success, err, mark_fraction
diff --git a/yaksh/python_stdio_evaluator.py b/yaksh/python_stdio_evaluator.py
index da0c954..67f57a9 100644
--- a/yaksh/python_stdio_evaluator.py
+++ b/yaksh/python_stdio_evaluator.py
@@ -70,7 +70,7 @@ class PythonStdIOEvaluator(BaseEvaluator):
tb = None
if self.output_value == self.expected_output:
success = True
- err = "Correct answer"
+ err = None
mark_fraction = self.weight
else:
success = False
diff --git a/yaksh/scilab_code_evaluator.py b/yaksh/scilab_code_evaluator.py
index cc3c401..bf16c84 100644
--- a/yaksh/scilab_code_evaluator.py
+++ b/yaksh/scilab_code_evaluator.py
@@ -69,7 +69,7 @@ class ScilabCodeEvaluator(BaseEvaluator):
# Clean output
stdout = self._strip_output(stdout)
if proc.returncode == 5:
- success, err = True, "Correct answer"
+ success, err = True, None
test_case_weight = float(self.weight) if self.partial_grading else 0.0
else:
err = add_err + stdout
diff --git a/yaksh/stdio_evaluator.py b/yaksh/stdio_evaluator.py
index 106facd..fb9dfb3 100644
--- a/yaksh/stdio_evaluator.py
+++ b/yaksh/stdio_evaluator.py
@@ -21,7 +21,7 @@ class StdIOEvaluator(BaseEvaluator):
format(expected_input, repr(expected_output))
if output_err == '':
if user_output == expected_output:
- success, err = True, "Correct answer"
+ success, err = True, None
else:
err = " Incorrect answer\n" + error_msg +\
"\n Your output is {0}".format(repr(user_output))
diff --git a/yaksh/templates/yaksh/grade_user.html b/yaksh/templates/yaksh/grade_user.html
index 38f31ca..4fc5026 100644
--- a/yaksh/templates/yaksh/grade_user.html
+++ b/yaksh/templates/yaksh/grade_user.html
@@ -134,7 +134,7 @@ Status : <b style="color: green;"> Passed </b><br/>
{% else %}
<h5> <u>Test cases: </u></h5>
{% for testcase in question.get_test_cases %}
- <br/><strong>{{ forloop.counter }}. {{ testcase.test_case }}</strong>
+ <br/><strong>{{ forloop.counter }}. {{ testcase }}</strong>
{% endfor %}
{%endif%}
</div>
@@ -156,14 +156,19 @@ Status : <b style="color: green;"> Passed </b><br/>
{% else %}
<h5>Student answer: </h5>
{% for answer in answers %}
- {% if not answer.skipped %}
- {% if answer.correct %}
+ {% if not answer.0.skipped %}
+ {% if answer.0.correct %}
<div class="panel panel-success">
+ <div class="panel-heading">Correct:
{% else %}
<div class="panel panel-danger">
+ <div class="panel-heading">Error:
{% endif %}
- <div class="panel-heading">Autocheck: {{ answer.error }}</div>
- <div class="panel-body"><pre><code>{{ answer.answer.strip }}</code></pre></div>
+ {% for err in answer.1 %}
+ <div><pre>{{ err }}</pre></div>
+ {% endfor %}
+ </div>
+ <div class="panel-body"><pre><code>{{ answer.0.answer.strip }}</code></pre></div>
</div>
{% endif %}
{% endfor %}
@@ -171,7 +176,7 @@ Status : <b style="color: green;"> Passed </b><br/>
{% with answers|last as answer %}
Marks: <input id="q{{ question.id }}" type="text"
name="q{{ question.id }}_marks" size="4"
- value="{{ answer.marks }}"><br><br>
+ value="{{ answer.0.marks }}"><br><br>
{% endwith %}
<hr/>
{% endfor %} {# for question, answers ... #}
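
The template above now expects each entry in `answers` to be a pair: the `Answer` object at index 0 (`answer.0`) and a decoded list of error strings at index 1 (`answer.1`). A minimal sketch of how a view could build that structure, consistent with the `error=json.dumps([...])` usage in the test changes below; the helper name `annotate_answers` is an assumption, not part of this diff:

```python
import json

def annotate_answers(answers):
    # Hypothetical helper: pair each Answer with its decoded error list,
    # matching the template's answer.0 / answer.1 accesses. Answer.error
    # is assumed to hold a JSON-encoded list of error strings.
    return [(ans, json.loads(ans.error or "[]")) for ans in answers]
```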
diff --git a/yaksh/test_models.py b/yaksh/test_models.py
index 317c832..dc69024 100644
--- a/yaksh/test_models.py
+++ b/yaksh/test_models.py
@@ -487,12 +487,14 @@ class AnswerPaperTestCases(unittest.TestCase):
# answers for the Answer Paper
self.answer_right = Answer(question=Question.objects.get(id=1),
answer="Demo answer",
- correct=True, marks=1
+ correct=True, marks=1,
+ error=json.dumps([])
)
self.answer_wrong = Answer(question=Question.objects.get(id=2),
answer="My answer",
correct=False,
- marks=0
+ marks=0,
+ error=json.dumps(['error1', 'error2'])
)
self.answer_right.save()
self.answer_wrong.save()
@@ -587,7 +589,6 @@ class AnswerPaperTestCases(unittest.TestCase):
# Then
self.assertTrue(correct)
self.assertTrue(result['success'])
- self.assertEqual(result['error'], 'Correct answer')
self.answer.correct = True
self.answer.marks = 1
@@ -729,8 +730,8 @@ class AnswerPaperTestCases(unittest.TestCase):
""" Test get_question_answer() method of Answer Paper"""
answered = self.answerpaper.get_question_answers()
first_answer = list(answered.values())[0][0]
- self.assertEqual(first_answer.answer, 'Demo answer')
- self.assertTrue(first_answer.correct)
+ self.assertEqual(first_answer[0].answer, 'Demo answer')
+ self.assertTrue(first_answer[0].correct)
self.assertEqual(len(answered), 2)
def test_is_answer_correct(self):
diff --git a/yaksh/tests/test_code_server.py b/yaksh/tests/test_code_server.py
index d46c9dd..19698a5 100644
--- a/yaksh/tests/test_code_server.py
+++ b/yaksh/tests/test_code_server.py
@@ -55,7 +55,7 @@ class TestCodeServer(unittest.TestCase):
# Then
data = json.loads(result)
self.assertFalse(data['success'])
- self.assertTrue('infinite loop' in data['error'])
+ self.assertTrue('infinite loop' in data['error'][0])
def test_correct_answer(self):
# Given
@@ -77,7 +77,6 @@ class TestCodeServer(unittest.TestCase):
# Then
data = json.loads(result)
self.assertTrue(data['success'])
- self.assertIn('Correct answer', data['error'])
def test_wrong_answer(self):
# Given
@@ -99,7 +98,7 @@ class TestCodeServer(unittest.TestCase):
# Then
data = json.loads(result)
self.assertFalse(data['success'])
- self.assertTrue('AssertionError' in data['error'])
+ self.assertTrue('AssertionError' in data['error'][0])
def test_multiple_simultaneous_hits(self):
# Given
@@ -139,7 +138,7 @@ class TestCodeServer(unittest.TestCase):
for i in range(N):
data = results.get()
self.assertFalse(data['success'])
- self.assertTrue('infinite loop' in data['error'])
+ self.assertTrue('infinite loop' in data['error'][0])
def test_server_pool_status(self):
# Given
diff --git a/yaksh/views.py b/yaksh/views.py
index b92b3fe..802d9d9 100644
--- a/yaksh/views.py
+++ b/yaksh/views.py
@@ -488,7 +488,7 @@ def check(request, q_id, attempt_num=None, questionpaper_id=None):
json_data = question.consolidate_answer_data(user_answer) \
if question.type == 'code' else None
correct, result = paper.validate_answer(user_answer, question, json_data)
- if correct:
+ if correct or result.get('success'):
new_answer.marks = (question.points * result['weight'] /
question.get_maximum_test_case_weight()) \
if question.partial_grading and question.type == 'code' else question.points
@@ -502,9 +502,7 @@ def check(request, q_id, attempt_num=None, questionpaper_id=None):
new_answer.save()
paper.update_marks('inprogress')
paper.set_end_time(timezone.now())
- if not result.get('success'): # Should only happen for non-mcq questions.
- new_answer.answer = user_code
- new_answer.save()
+ if question.type == 'code': # Should only happen for non-mcq questions.
return show_question(request, question, paper, result.get('error'))
else:
next_q = paper.completed_question(question.id)
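
Taken together, the evaluator changes above converge on one convention: each check method returns `(success, err, mark_fraction)` with `err = None` on success, and the caller collects only genuine errors into a list — which is why the updated tests index `data['error'][0]`. A runnable sketch of that contract; the names `run_test_cases` and `DummyEvaluator` are assumptions for illustration and do not appear in this diff:

```python
def run_test_cases(evaluators):
    """Aggregate (success, err, mark_fraction) results from each evaluator.

    'error' is a list that stays empty when every test case passes,
    since passing evaluators now report err=None instead of a message.
    """
    errors = []
    total_weight = 0.0
    success = True
    for ev in evaluators:
        ok, err, weight = ev.check_code()
        success = success and ok
        if err is not None:
            errors.append(err)
        total_weight += weight
    return {"success": success, "error": errors, "weight": total_weight}


class DummyEvaluator:
    """Stand-in for BashCodeEvaluator, CppCodeEvaluator, etc."""
    def __init__(self, ok, err=None, weight=1.0):
        # On success: no error message, full weight; on failure: the error.
        self._result = (ok, None if ok else err, weight if ok else 0.0)

    def check_code(self):
        return self._result


result = run_test_cases([
    DummyEvaluator(True),
    DummyEvaluator(False, "AssertionError: assert f(1) == 2"),
])
assert not result["success"]
assert "AssertionError" in result["error"][0]  # mirrors the updated tests
```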