author     Prabhu Ramachandran    2018-05-09 19:20:26 +0530
committer  GitHub                 2018-05-09 19:20:26 +0530
commit     28618863e1487627e24f24476afd0f7b12149bcb (patch)
tree       fd4a52c5f42aaad1170f6ae9af136abe520f9e49
parent     9248e11d935a0b80433bcc80f66fb4ad40f3adb3 (diff)
parent     393a9d2a8ec116f6530512dfbe6e8769442667e3 (diff)
Merge pull request #471 from adityacp/fix_error_messages
Python Assertion Evaluation additions and changes
-rw-r--r--  requirements/requirements-codeserver.txt         |   1
-rw-r--r--  yaksh/error_messages.py                          |  38
-rw-r--r--  yaksh/evaluator_tests/test_python_evaluation.py  | 554
-rw-r--r--  yaksh/grader.py                                  |  40
-rw-r--r--  yaksh/python_assertion_evaluator.py              |  20
-rw-r--r--  yaksh/static/yaksh/js/requesthandler.js          |  21
-rw-r--r--  yaksh/templates/yaksh/error_template.html        |   2
-rw-r--r--  yaksh/templates/yaksh/question.html              |   8
8 files changed, 360 insertions, 324 deletions
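
Reviewer note: this merge adds nose to the code-server requirements, threads the failing line number through the error-reporting helpers, and lets assertion test cases use nose.tools helpers. For orientation (not part of the diff), a minimal sketch of how the updated prettify_exceptions() can be called once the merge lands, assuming the yaksh package is importable; values are illustrative:

    # Minimal sketch of the updated helper's signature
    # (see the yaksh/error_messages.py hunk below).
    from yaksh.error_messages import prettify_exceptions

    err = prettify_exceptions(
        "AssertionError",                       # exception name
        "-1 != 3",                              # message is kept when non-empty
        traceback=None,
        testcase="assert_equal(add(1, 2), 3)",
        line_no=2,                              # new in this merge
    )
    print(err["message"], err["line_no"])       # -> -1 != 3 2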
diff --git a/requirements/requirements-codeserver.txt b/requirements/requirements-codeserver.txt
index 004e45b..11bc0a2 100644
--- a/requirements/requirements-codeserver.txt
+++ b/requirements/requirements-codeserver.txt
@@ -4,3 +4,4 @@ six
requests
tornado==4.5.3
psutil
+nose==1.3.7
diff --git a/yaksh/error_messages.py b/yaksh/error_messages.py
index 7ea8618..7a18c22 100644
--- a/yaksh/error_messages.py
+++ b/yaksh/error_messages.py
@@ -3,7 +3,9 @@ try:
except ImportError:
from itertools import izip_longest as zip_longest
-def prettify_exceptions(exception, message, traceback=None, testcase=None):
+
+def prettify_exceptions(exception, message, traceback=None,
+ testcase=None, line_no=None):
err = {"type": "assertion",
"exception": exception,
"traceback": traceback,
@@ -13,23 +15,28 @@ def prettify_exceptions(exception, message, traceback=None, testcase=None):
err["traceback"] = None
if exception == 'AssertionError':
- value = ("Expected answer from the"
- + " test case did not match the output")
- err["message"] = value
+ value = ("Expected answer from the" +
+ " test case did not match the output")
+ if message:
+ err["message"] = message
+ else:
+ err["message"] = value
err["traceback"] = None
- if testcase:
- err["test_case"] = testcase
+ err["test_case"] = testcase
+ err["line_no"] = line_no
return err
+
def _get_incorrect_user_lines(exp_lines, user_lines):
err_line_numbers = []
for line_no, (expected_line, user_line) in \
- enumerate(zip_longest(exp_lines, user_lines)):
- if not user_line or not expected_line or \
- user_line.strip() != expected_line.strip():
+ enumerate(zip_longest(exp_lines, user_lines)):
+ if (not user_line or not expected_line or
+ user_line.strip() != expected_line.strip()):
err_line_numbers.append(line_no)
return err_line_numbers
-
+
+
def compare_outputs(expected_output, user_output, given_input=None):
given_lines = user_output.splitlines()
exp_lines = expected_output.splitlines()
@@ -44,18 +51,17 @@ def compare_outputs(expected_output, user_output, given_input=None):
msg["error_line_numbers"] = err_line_numbers
if ng != ne:
msg["error_msg"] = ("Incorrect Answer: "
- + "We had expected {} number of lines. "\
- .format(ne)
+ + "We had expected {} number of lines. ".format(ne)
+ "We got {} number of lines.".format(ng)
)
return False, msg
else:
if err_line_numbers:
msg["error_msg"] = ("Incorrect Answer: "
- + "Line number(s) {0} did not match."
- .format(", ".join(map(
- str,[x+1 for x in err_line_numbers]
- ))))
+ + "Line number(s) {0} did not match."
+ .format(", ".join(
+ map(str, [x+1 for x in err_line_numbers])
+ )))
return False, msg
else:
msg["error_msg"] = "Correct Answer"
diff --git a/yaksh/evaluator_tests/test_python_evaluation.py b/yaksh/evaluator_tests/test_python_evaluation.py
index 71d7732..1933d17 100644
--- a/yaksh/evaluator_tests/test_python_evaluation.py
+++ b/yaksh/evaluator_tests/test_python_evaluation.py
@@ -1,7 +1,6 @@
from __future__ import unicode_literals
import unittest
import os
-import sys
import tempfile
import shutil
from textwrap import dedent
@@ -26,13 +25,13 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
self.in_dir = tmp_in_dir_path
self.test_case_data = [{"test_case_type": "standardtestcase",
"test_case": 'assert(add(1,2)==3)',
- 'weight': 0.0},
+ 'weight': 0.0},
{"test_case_type": "standardtestcase",
"test_case": 'assert(add(-1,2)==1)',
- 'weight': 0.0},
+ 'weight': 0.0},
{"test_case_type": "standardtestcase",
"test_case": 'assert(add(-1,-2)==-3)',
- 'weight': 0.0},
+ 'weight': 0.0},
]
self.timeout_msg = ("Code took more than {0} seconds to run. "
"You probably have an infinite loop in"
@@ -46,14 +45,12 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
def test_correct_answer(self):
# Given
user_answer = "def add(a,b):\n\treturn a + b"
- kwargs = {
- 'metadata': {
- 'user_answer': user_answer,
- 'file_paths': self.file_paths,
- 'partial_grading': False,
- 'language': 'python'
- },
- 'test_case_data': self.test_case_data,
+ kwargs = {'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'python'},
+ 'test_case_data': self.test_case_data,
}
# When
@@ -66,14 +63,12 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
def test_incorrect_answer(self):
# Given
user_answer = "def add(a,b):\n\treturn a - b"
- kwargs = {
- 'metadata': {
- 'user_answer': user_answer,
- 'file_paths': self.file_paths,
- 'partial_grading': False,
- 'language': 'python'
- },
- 'test_case_data': self.test_case_data,
+ kwargs = {'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'python'},
+ 'test_case_data': self.test_case_data,
}
# When
@@ -85,13 +80,13 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
given_test_case_list = [tc["test_case"] for tc in self.test_case_data]
for error in result.get("error"):
self.assertEqual(error['exception'], 'AssertionError')
- self.assertEqual(error['message'],
- "Expected answer from the test case did not match the output"
- )
+ self.assertEqual(
+ error['message'],
+ "Expected answer from the test case did not match the output"
+ )
error_testcase_list = [tc['test_case'] for tc in result.get('error')]
self.assertEqual(error_testcase_list, given_test_case_list)
-
def test_partial_incorrect_answer(self):
# Given
user_answer = "def add(a,b):\n\treturn abs(a) + abs(b)"
@@ -100,19 +95,17 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
'weight': 1.0},
{"test_case_type": "standardtestcase",
"test_case": 'assert(add(-1,-2)==-3)',
- 'weight': 1.0},
+ 'weight': 1.0},
{"test_case_type": "standardtestcase",
"test_case": 'assert(add(1,2)==3)',
'weight': 2.0}
]
- kwargs = {
- 'metadata': {
- 'user_answer': user_answer,
- 'file_paths': self.file_paths,
- 'partial_grading': True,
- 'language': 'python'
- },
- 'test_case_data': test_case_data,
+ kwargs = {'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': True,
+ 'language': 'python'},
+ 'test_case_data': test_case_data,
}
# When
@@ -126,22 +119,22 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
given_test_case_list.remove('assert(add(1,2)==3)')
for error in result.get("error"):
self.assertEqual(error['exception'], 'AssertionError')
- self.assertEqual(error['message'],
- "Expected answer from the test case did not match the output"
- )
+ self.assertEqual(
+ error['message'],
+ "Expected answer from the test case did not match the output"
+ )
error_testcase_list = [tc['test_case'] for tc in result.get('error')]
self.assertEqual(error_testcase_list, given_test_case_list)
+
def test_infinite_loop(self):
# Given
user_answer = "def add(a, b):\n\twhile True:\n\t\tpass"
- kwargs = {
- 'metadata': {
- 'user_answer': user_answer,
- 'file_paths': self.file_paths,
- 'partial_grading': False,
- 'language': 'python'
- },
- 'test_case_data': self.test_case_data,
+ kwargs = {'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'python'},
+ 'test_case_data': self.test_case_data,
}
# When
@@ -168,14 +161,12 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
"SyntaxError",
"invalid syntax"
]
- kwargs = {
- 'metadata': {
- 'user_answer': user_answer,
- 'file_paths': self.file_paths,
- 'partial_grading': False,
- 'language': 'python'
- },
- 'test_case_data': self.test_case_data,
+ kwargs = {'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'python'},
+ 'test_case_data': self.test_case_data,
}
# When
@@ -201,14 +192,12 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
"IndentationError",
"indented block"
]
- kwargs = {
- 'metadata': {
- 'user_answer': user_answer,
- 'file_paths': self.file_paths,
- 'partial_grading': False,
- 'language': 'python'
- },
- 'test_case_data': self.test_case_data,
+ kwargs = {'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'python'},
+ 'test_case_data': self.test_case_data,
}
# When
@@ -220,9 +209,9 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
self.assertFalse(result.get("success"))
self.assertEqual(5, len(err))
for msg in indent_error_msg:
- self.assert_correct_output(msg,
- result.get("error")[0]['traceback']
- )
+ self.assert_correct_output(
+ msg, result.get("error")[0]['traceback']
+ )
def test_name_error(self):
# Given
@@ -234,15 +223,13 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
"defined"
]
- kwargs = {
- 'metadata': {
- 'user_answer': user_answer,
- 'file_paths': self.file_paths,
- 'partial_grading': False,
- 'language': 'python'
- },
- 'test_case_data': self.test_case_data,
- }
+ kwargs = {'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'python'},
+ 'test_case_data': self.test_case_data,
+ }
# When
grader = Grader(self.in_dir)
@@ -258,15 +245,13 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
return add(3, 3)
""")
recursion_error_msg = "maximum recursion depth exceeded"
- kwargs = {
- 'metadata': {
- 'user_answer': user_answer,
- 'file_paths': self.file_paths,
- 'partial_grading': False,
- 'language': 'python'
- },
- 'test_case_data': self.test_case_data,
- }
+ kwargs = {'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'python'},
+ 'test_case_data': self.test_case_data,
+ }
# When
grader = Grader(self.in_dir)
@@ -289,15 +274,13 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
"argument"
]
- kwargs = {
- 'metadata': {
- 'user_answer': user_answer,
- 'file_paths': self.file_paths,
- 'partial_grading': False,
- 'language': 'python'
- },
- 'test_case_data': self.test_case_data,
- }
+ kwargs = {'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'python'},
+ 'test_case_data': self.test_case_data,
+ }
# When
grader = Grader(self.in_dir)
@@ -323,25 +306,26 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
"base"
]
- kwargs = {
- 'metadata': {
- 'user_answer': user_answer,
- 'file_paths': self.file_paths,
- 'partial_grading': False,
- 'language': 'python'
- },
- 'test_case_data': self.test_case_data,
- }
+ kwargs = {'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'python'},
+ 'test_case_data': self.test_case_data,
+ }
# When
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- err = result.get("error")[0]['traceback']
+ errors = result.get("error")
# Then
self.assertFalse(result.get("success"))
for msg in value_error_msg:
- self.assert_correct_output(msg, err)
+ self.assert_correct_output(msg, errors[0]['traceback'])
+ for index, error in enumerate(errors):
+ self.assertEqual(error['test_case'],
+ self.test_case_data[index]['test_case'])
def test_file_based_assert(self):
# Given
@@ -356,15 +340,13 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
return f.read()[0]
""")
- kwargs = {
- 'metadata': {
- 'user_answer': user_answer,
- 'file_paths': self.file_paths,
- 'partial_grading': False,
- 'language': 'python'
- },
- 'test_case_data': self.test_case_data,
- }
+ kwargs = {'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'python'},
+ 'test_case_data': self.test_case_data,
+ }
# When
grader = Grader(self.in_dir)
@@ -390,25 +372,23 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
]
kwargs = {'metadata': {
- 'user_answer': user_answer,
- 'file_paths': self.file_paths,
- 'partial_grading': False,
- 'language': 'python'
- },
- 'test_case_data': test_case_data,
- }
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'python'},
+ 'test_case_data': test_case_data,
+ }
# When
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
- err = result.get("error")[0]['traceback']
+ err = result.get("error")[0]['traceback']
# Then
self.assertFalse(result.get("success"))
for msg in syntax_error_msg:
self.assert_correct_output(msg, err)
-
def test_multiple_testcase_error(self):
""" Tests the user answer with an correct test case
first and then with an incorrect test case """
@@ -418,7 +398,8 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
"test_case": 'assert(palindrome("abba")==True)',
"weight": 0.0},
{"test_case_type": "standardtestcase",
- "test_case": 's="abbb"\nassert palindrome(S)==False',
+ "test_case": 's="abbb"\n'
+ 'assert palindrome(S)==False',
"weight": 0.0}
]
name_error_msg = ["Traceback",
@@ -426,15 +407,13 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
"NameError",
"name 'S' is not defined"
]
- kwargs = {
- 'metadata': {
- 'user_answer': user_answer,
- 'file_paths': self.file_paths,
- 'partial_grading': False,
- 'language': 'python'
- },
- 'test_case_data': test_case_data,
- }
+ kwargs = {'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'python'},
+ 'test_case_data': test_case_data,
+ }
# When
grader = Grader(self.in_dir)
@@ -454,18 +433,15 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
return type(a)
""")
test_case_data = [{"test_case_type": "standardtestcase",
- "test_case": 'assert(strchar("hello")==str)',
- "weight": 0.0
- },]
- kwargs = {
- 'metadata': {
- 'user_answer': user_answer,
- 'file_paths': self.file_paths,
- 'partial_grading': False,
- 'language': 'python'
- },
- 'test_case_data': test_case_data,
- }
+ "test_case": 'assert(strchar("hello")==str)',
+ "weight": 0.0}]
+ kwargs = {'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'python'},
+ 'test_case_data': test_case_data,
+ }
# When
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
@@ -473,6 +449,31 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
# Then
self.assertTrue(result.get("success"))
+ def test_incorrect_answer_with_nose_assert(self):
+ user_answer = dedent("""\
+ def add(a, b):
+ return a - b
+ """)
+ test_case_data = [{"test_case_type": "standardtestcase",
+ "test_case": 'assert_equal(add(1, 2), 3)',
+ "weight": 0.0}]
+ kwargs = {'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'python'},
+ 'test_case_data': test_case_data,
+ }
+ # When
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
+ # Then
+ self.assertFalse(result.get("success"))
+ error = result.get("error")[0]
+ self.assertEqual(error['exception'], 'AssertionError')
+ self.assertEqual(error['message'], '-1 != 3')
+
class PythonStdIOEvaluationTestCases(EvaluatorBaseTest):
def setUp(self):
@@ -501,13 +502,12 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest):
"""
)
kwargs = {'metadata': {
- 'user_answer': user_answer,
- 'file_paths': self.file_paths,
- 'partial_grading': False,
- 'language': 'python'
- },
- 'test_case_data': self.test_case_data
- }
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'python'},
+ 'test_case_data': self.test_case_data
+ }
# When
grader = Grader(self.in_dir)
@@ -534,13 +534,12 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest):
)
kwargs = {'metadata': {
- 'user_answer': user_answer,
- 'file_paths': self.file_paths,
- 'partial_grading': False,
- 'language': 'python'
- },
- 'test_case_data': self.test_case_data
- }
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'python'},
+ 'test_case_data': self.test_case_data
+ }
# When
grader = Grader(self.in_dir)
@@ -551,11 +550,13 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest):
def test_correct_answer_string(self):
# Given
- self.test_case_data = [{"test_case_type": "stdiobasedtestcase",
- "expected_input": ("the quick brown fox jumps over the lazy dog\nthe"),
- "expected_output": "2",
- "weight": 0.0
- }]
+ self.test_case_data = [{
+ "test_case_type": "stdiobasedtestcase",
+ "expected_input": ("the quick brown fox jumps over "
+ "the lazy dog\nthe"),
+ "expected_output": "2",
+ "weight": 0.0
+ }]
user_answer = dedent("""
from six.moves import input
a = str(input())
@@ -565,13 +566,12 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest):
)
kwargs = {'metadata': {
- 'user_answer': user_answer,
- 'file_paths': self.file_paths,
- 'partial_grading': False,
- 'language': 'python'
- },
- 'test_case_data': self.test_case_data
- }
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'python'},
+ 'test_case_data': self.test_case_data
+ }
# When
grader = Grader(self.in_dir)
@@ -594,13 +594,12 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest):
"""
)
kwargs = {'metadata': {
- 'user_answer': user_answer,
- 'file_paths': self.file_paths,
- 'partial_grading': False,
- 'language': 'python'
- },
- 'test_case_data': self.test_case_data
- }
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'python'},
+ 'test_case_data': self.test_case_data
+ }
# When
grader = Grader(self.in_dir)
@@ -629,13 +628,12 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest):
"""
)
kwargs = {'metadata': {
- 'user_answer': user_answer,
- 'file_paths': self.file_paths,
- 'partial_grading': False,
- 'language': 'python'
- },
- 'test_case_data': self.test_case_data
- }
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'python'},
+ 'test_case_data': self.test_case_data
+ }
# When
grader = Grader(self.in_dir)
@@ -646,24 +644,24 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest):
def test_infinite_loop(self):
# Given
- self.test_case_data = [{"test_case_type": "stdiobasedtestcase",
- "expected_input": "1\n2",
- "expected_output": "3",
- "weight": 0.0
- }]
+ self.test_case_data = [{
+ "test_case_type": "stdiobasedtestcase",
+ "expected_input": "1\n2",
+ "expected_output": "3",
+ "weight": 0.0
+ }]
timeout_msg = ("Code took more than {0} seconds to run. "
- "You probably have an infinite loop in"
- " your code.").format(SERVER_TIMEOUT)
+ "You probably have an infinite loop in"
+ " your code.").format(SERVER_TIMEOUT)
user_answer = "while True:\n\tpass"
kwargs = {'metadata': {
- 'user_answer': user_answer,
- 'file_paths': self.file_paths,
- 'partial_grading': False,
- 'language': 'python'
- },
- 'test_case_data': self.test_case_data
- }
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'python'},
+ 'test_case_data': self.test_case_data
+ }
# When
grader = Grader(self.in_dir)
@@ -675,7 +673,6 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest):
)
self.assertFalse(result.get('success'))
-
def test_unicode_literal_bug(self):
# Given
user_answer = dedent("""\
@@ -687,21 +684,44 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest):
"expected_output": "str",
"weight": 0.0
}]
- kwargs = {
- 'metadata': {
- 'user_answer': user_answer,
- 'file_paths': self.file_paths,
- 'partial_grading': False,
- 'language': 'python'
- },
- 'test_case_data': test_case_data,
- }
+ kwargs = {'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'python'},
+ 'test_case_data': test_case_data,
+ }
# When
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
# Then
self.assertTrue(result.get("success"))
+ def test_get_error_lineno(self):
+ user_answer = dedent("""\
+ print(1/0)
+ """)
+ test_case_data = [{"test_case_type": "stdiobasedtestcase",
+ "expected_input": "",
+ "expected_output": "1",
+ "weight": 0.0
+ }]
+ kwargs = {'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'python'},
+ 'test_case_data': test_case_data,
+ }
+ # When
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+ # Then
+ self.assertFalse(result.get("success"))
+ error = result.get("error")[0]
+ self.assertEqual(error['line_no'], 1)
+ self.assertEqual(error['exception'], "ZeroDivisionError")
+
class PythonHookEvaluationTestCases(EvaluatorBaseTest):
@@ -733,19 +753,17 @@ class PythonHookEvaluationTestCases(EvaluatorBaseTest):
success, err, mark_fraction = True, "", 1.0
return success, err, mark_fraction
"""
- )
+ )
test_case_data = [{"test_case_type": "hooktestcase",
- "hook_code": hook_code,"weight": 1.0
- }]
- kwargs = {
- 'metadata': {
- 'user_answer': user_answer,
- 'file_paths': self.file_paths,
- 'partial_grading': True,
- 'language': 'python'
- },
- 'test_case_data': test_case_data,
+ "hook_code": hook_code, "weight": 1.0
+ }]
+ kwargs = {'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': True,
+ 'language': 'python'},
+ 'test_case_data': test_case_data,
}
# When
@@ -768,20 +786,18 @@ class PythonHookEvaluationTestCases(EvaluatorBaseTest):
success, err, mark_fraction = True, "", 1.0
return success, err, mark_fraction
"""
- )
+ )
test_case_data = [{"test_case_type": "hooktestcase",
- "hook_code": hook_code,"weight": 1.0
- }]
-
- kwargs = {
- 'metadata': {
- 'user_answer': user_answer,
- 'file_paths': self.file_paths,
- 'partial_grading': False,
- 'language': 'python'
- },
- 'test_case_data': test_case_data,
+ "hook_code": hook_code, "weight": 1.0
+ }]
+
+ kwargs = {'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'python'},
+ 'test_case_data': test_case_data,
}
# When
@@ -805,21 +821,19 @@ class PythonHookEvaluationTestCases(EvaluatorBaseTest):
success, err, mark_fraction = True, "", 1.0
return success, err, mark_fraction
"""
- )
+ )
test_case_data = [{"test_case_type": "standardtestcase",
"test_case": assert_test_case, 'weight': 1.0},
{"test_case_type": "hooktestcase",
"hook_code": hook_code, 'weight': 1.0},
]
- kwargs = {
- 'metadata': {
- 'user_answer': user_answer,
- 'file_paths': self.file_paths,
- 'partial_grading': True,
- 'language': 'python'
- },
- 'test_case_data': test_case_data,
+ kwargs = {'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': True,
+ 'language': 'python'},
+ 'test_case_data': test_case_data,
}
# When
@@ -842,7 +856,7 @@ class PythonHookEvaluationTestCases(EvaluatorBaseTest):
success, err, mark_fraction = True, "", 0.5
return success, err, mark_fraction
"""
- )
+ )
hook_code_2 = dedent("""\
def check_answer(user_answer):
success = False
@@ -853,22 +867,19 @@ class PythonHookEvaluationTestCases(EvaluatorBaseTest):
success, err, mark_fraction = True, "", 1.0
return success, err, mark_fraction
"""
- )
-
+ )
test_case_data = [{"test_case_type": "hooktestcase",
"hook_code": hook_code_1, 'weight': 1.0},
{"test_case_type": "hooktestcase",
"hook_code": hook_code_2, 'weight': 1.0},
]
- kwargs = {
- 'metadata': {
- 'user_answer': user_answer,
- 'file_paths': self.file_paths,
- 'partial_grading': True,
- 'language': 'python'
- },
- 'test_case_data': test_case_data,
+ kwargs = {'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': True,
+ 'language': 'python'},
+ 'test_case_data': test_case_data,
}
# When
@@ -892,19 +903,18 @@ class PythonHookEvaluationTestCases(EvaluatorBaseTest):
success, err, mark_fraction = True, "", 1.0
return success, err, mark_fraction
"""
- )
+ )
+
test_case_data = [{"test_case_type": "hooktestcase",
- "hook_code": hook_code,"weight": 1.0
- }]
-
- kwargs = {
- 'metadata': {
- 'user_answer': user_answer,
- 'file_paths': self.file_paths,
- 'partial_grading': False,
- 'language': 'python'
- },
- 'test_case_data': test_case_data,
+ "hook_code": hook_code, "weight": 1.0
+ }]
+
+ kwargs = {'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'python'},
+ 'test_case_data': test_case_data,
}
# When
@@ -931,19 +941,18 @@ class PythonHookEvaluationTestCases(EvaluatorBaseTest):
success, err, mark_fraction = True, "", 1.0
return success, err, mark_fraction
"""
- )
+ )
+
test_case_data = [{"test_case_type": "hooktestcase",
- "hook_code": hook_code,"weight": 1.0
- }]
- kwargs = {
- 'metadata': {
- 'user_answer': user_answer,
- 'file_paths': self.file_paths,
- 'assign_files': [(self.tmp_file, False)],
- 'partial_grading': False,
- 'language': 'python'
- },
- 'test_case_data': test_case_data,
+ "hook_code": hook_code, "weight": 1.0
+ }]
+ kwargs = {'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'assign_files': [(self.tmp_file, False)],
+ 'partial_grading': False,
+ 'language': 'python'},
+ 'test_case_data': test_case_data,
}
# When
@@ -953,5 +962,6 @@ class PythonHookEvaluationTestCases(EvaluatorBaseTest):
# Then
self.assertTrue(result.get('success'))
+
if __name__ == '__main__':
unittest.main()
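
Reviewer note: every test above builds the same payload shape for Grader.evaluate(). For reference, a minimal sketch of that shape; the answer and test case here are illustrative, not taken from a real question:

    # Minimal sketch of the evaluate() payload used throughout these tests.
    kwargs = {
        'metadata': {
            'user_answer': "def add(a, b):\n    return a + b",
            'file_paths': [],
            'partial_grading': False,
            'language': 'python',
        },
        'test_case_data': [
            {"test_case_type": "standardtestcase",
             "test_case": "assert add(1, 2) == 3",
             "weight": 0.0},
        ],
    }
    # grader = Grader(in_dir)
    # result = grader.evaluate(kwargs)
    # result carries at least "success" and "error" (a list of prettified
    # error dicts), which is what the assertions above inspect.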
diff --git a/yaksh/grader.py b/yaksh/grader.py
index c9dc8a2..320e7e7 100644
--- a/yaksh/grader.py
+++ b/yaksh/grader.py
@@ -1,22 +1,12 @@
#!/usr/bin/env python
from __future__ import unicode_literals
import sys
-import pwd
import os
-import stat
import contextlib
-from os.path import isdir, dirname, abspath, join, isfile, exists
+from os.path import dirname, abspath
import signal
import traceback
-from multiprocessing import Process, Queue
-import subprocess
-import re
-try:
- from SimpleXMLRPCServer import SimpleXMLRPCServer
-except ImportError:
- # The above import will not work on Python-3.x.
- from xmlrpc.server import SimpleXMLRPCServer
# Local imports
from .settings import SERVER_TIMEOUT
@@ -26,11 +16,13 @@ from .error_messages import prettify_exceptions
MY_DIR = abspath(dirname(__file__))
registry = None
+
# Raised when the code times-out.
# c.f. http://pguides.net/python/timeout-a-function
class TimeoutException(Exception):
pass
+
@contextlib.contextmanager
def change_dir(path):
cur_dir = abspath(dirname(MY_DIR))
@@ -75,7 +67,6 @@ class Grader(object):
self.timeout_msg = msg
self.in_dir = in_dir if in_dir else MY_DIR
-
def evaluate(self, kwargs):
"""Evaluates given code with the test cases based on
given arguments in test_case_data.
@@ -122,7 +113,6 @@ class Grader(object):
test_case_instances.append(test_case_instance)
return test_case_instances
-
def safe_evaluate(self, test_case_instances):
"""
Handles code evaluation along with compilation, signal handling
@@ -157,20 +147,24 @@ class Grader(object):
test_case_instance.teardown()
except TimeoutException:
- error.append(prettify_exceptions("TimeoutException",
- self.timeout_msg
- )
- )
- except Exception:
+ error.append(
+ prettify_exceptions("TimeoutException", self.timeout_msg)
+ )
+ except Exception as e:
exc_type, exc_value, exc_tb = sys.exc_info()
tb_list = traceback.format_exception(exc_type, exc_value, exc_tb)
+ try:
+ line_no = e.lineno
+ except AttributeError:
+ line_no = traceback.extract_tb(exc_tb)[-1][1]
if len(tb_list) > 2:
del tb_list[1:3]
- error.append(prettify_exceptions(exc_type.__name__,
- str(exc_value),
- "".join(tb_list),
- )
- )
+ error.append(
+ prettify_exceptions(
+ exc_type.__name__, str(exc_value), "".join(tb_list),
+ line_no=line_no
+ )
+ )
finally:
# Set back any original signal handler.
set_original_signal_handler(prev_handler)
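
Reviewer note: the grader now records which line raised the error. Syntax-style exceptions carry a .lineno attribute; everything else falls back to the line of the last traceback frame, as in the hunk above. A standalone sketch of that fallback:

    # Standalone sketch of the line-number fallback added to safe_evaluate().
    import sys
    import traceback

    def failing_line_number():
        exc_type, exc_value, exc_tb = sys.exc_info()
        try:
            return exc_value.lineno                     # e.g. SyntaxError
        except AttributeError:
            return traceback.extract_tb(exc_tb)[-1][1]  # last frame's line number

    try:
        exec("x = 1\ny = 1 / 0\n")
    except Exception:
        print(failing_line_number())                    # -> 2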
diff --git a/yaksh/python_assertion_evaluator.py b/yaksh/python_assertion_evaluator.py
index 440f422..4b016a1 100644
--- a/yaksh/python_assertion_evaluator.py
+++ b/yaksh/python_assertion_evaluator.py
@@ -1,10 +1,6 @@
#!/usr/bin/env python
import sys
import traceback
-import os
-import re
-from os.path import join
-import importlib
# Local imports
from .file_utils import copy_files, delete_files
@@ -53,22 +49,24 @@ class PythonAssertionEvaluator(BaseEvaluator):
--------
Returns a tuple (success, error, test_case_weight)
- success - Boolean, indicating if code was executed successfully, correctly
+ success - Boolean, indicating if code was executed successfully,
+ correctly
weight - Float, indicating total weight of all successful test cases
error - String, error message if success is false
- returns (True, "Correct answer", 1.0) : If the student script passes all
- test cases/have same output, when compared to the instructor script
+ returns (True, "Correct answer", 1.0) : If the student script passes
+ all test cases/have same output, when compared to the instructor script
returns (False, error_msg, 0.0): If the student script fails a single
test/have dissimilar output, when compared to the instructor script.
- Returns (False, error_msg, 0.0): If mandatory arguments are not files or if
- the required permissions are not given to the file(s).
+ Returns (False, error_msg, 0.0): If mandatory arguments are not files
+ or if the required permissions are not given to the file(s).
"""
success = False
mark_fraction = 0.0
try:
+ exec("from nose.tools import *", self.exec_scope)
_tests = compile(self.test_case, '<string>', mode='exec')
exec(_tests, self.exec_scope)
except TimeoutException:
@@ -76,12 +74,14 @@ class PythonAssertionEvaluator(BaseEvaluator):
except Exception:
exc_type, exc_value, exc_tb = sys.exc_info()
tb_list = traceback.format_exception(exc_type, exc_value, exc_tb)
+ line_no = traceback.extract_tb(exc_tb)[-1][1]
if len(tb_list) > 2:
del tb_list[1:3]
err = prettify_exceptions(exc_type.__name__,
str(exc_value),
"".join(tb_list),
- self.test_case
+ self.test_case,
+ line_no
)
else:
success = True
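
Reviewer note: because the evaluator now runs "from nose.tools import *" in the test-case scope, assertion test cases can call helpers such as assert_equal, and their descriptive failure message (e.g. "-1 != 3") reaches the student instead of the generic text. A standalone sketch of where that message comes from, assuming nose==1.3.7 is installed (it is added to requirements-codeserver.txt in this merge):

    # Standalone sketch: nose.tools assertions raise AssertionError with a
    # descriptive message, which prettify_exceptions() now keeps when non-empty.
    from nose.tools import assert_equal

    def add(a, b):
        return a - b    # the buggy answer from test_incorrect_answer_with_nose_assert

    try:
        assert_equal(add(1, 2), 3)
    except AssertionError as e:
        print(type(e).__name__, str(e))   # -> AssertionError -1 != 3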
diff --git a/yaksh/static/yaksh/js/requesthandler.js b/yaksh/static/yaksh/js/requesthandler.js
index a215ce4..952de3a 100644
--- a/yaksh/static/yaksh/js/requesthandler.js
+++ b/yaksh/static/yaksh/js/requesthandler.js
@@ -75,6 +75,24 @@ function response_handler(method_type, content_type, data, uid){
var error_output = document.getElementById("error_panel");
error_output.innerHTML = res.error;
focus_on_error(error_output);
+ if(global_editor.editor){
+ err_lineno = $("#err_lineno").val();
+ if(marker){
+ marker.clear();
+ }
+ if(err_lineno){
+ var lineno = parseInt(err_lineno) - 1;
+ var editor = global_editor.editor;
+ var line_length = editor.getLine(lineno).length;
+ marker = editor.markText({line: lineno, ch: 0}, {line: lineno, ch: line_length},
+ {className: "activeline", clearOnEnter:true});
+ }
+ else{
+ if(marker){
+ marker.clear();
+ }
+ }
+ }
}
} else {
reset_values();
@@ -125,6 +143,8 @@ function ajax_check_code(url, method_type, data_type, data, uid)
var global_editor = {};
var csrftoken = jQuery("[name=csrfmiddlewaretoken]").val();
+var err_lineno;
+var marker;
$(document).ready(function(){
if(is_exercise == "True" && can_skip == "False"){
setTimeout(function() {show_solution();}, delay_time*1000);
@@ -148,6 +168,7 @@ $(document).ready(function(){
mode: mode_dict[lang],
gutter: true,
lineNumbers: true,
+ styleSelectedText: true,
onChange: function (instance, changes) {
render();
}
diff --git a/yaksh/templates/yaksh/error_template.html b/yaksh/templates/yaksh/error_template.html
index 61657ae..301020e 100644
--- a/yaksh/templates/yaksh/error_template.html
+++ b/yaksh/templates/yaksh/error_template.html
@@ -3,7 +3,6 @@
{% endblock %}
{% load custom_filters %}
-
{% if error_message %}
{% for error in error_message %}
@@ -35,6 +34,7 @@
</tr>
<tr>
{% if error.traceback %}
+ <input type="hidden" id="err_lineno" value="{{error.line_no}}">
<td><b>Full Traceback: </b></td>
<td><pre>{{error.traceback}}</pre></td>
{% endif %}
diff --git a/yaksh/templates/yaksh/question.html b/yaksh/templates/yaksh/question.html
index b65073a..ebfe066 100644
--- a/yaksh/templates/yaksh/question.html
+++ b/yaksh/templates/yaksh/question.html
@@ -11,6 +11,10 @@
.CodeMirror{
border-style: groove;
}
+ .activeline {
+ background: #FBC2C4 !important;
+ color: #8a1f11 !important;
+ }
</style>
{% endblock %}
@@ -221,7 +225,7 @@ question_type = "{{ question.type }}"
{% if question.type == "integer" %}
Enter Integer:<br/>
- <input autofocus name="answer" type="number" id="integer" value={{ last_attempt|safe }} />
+ <input autofocus name="answer" type="number" id="integer" value="{{ last_attempt|safe }}" />
<br/><br/>
{% endif %}
@@ -233,7 +237,7 @@ question_type = "{{ question.type }}"
{% if question.type == "float" %}
Enter Decimal Value :<br/>
- <input autofocus name="answer" type="number" step="any" id="float" value={{ last_attempt|safe }} />
+ <input autofocus name="answer" type="number" step="any" id="float" value="{{ last_attempt|safe }}" />
<br/><br/>
{% endif %}