author    adityacp 2018-04-05 15:30:01 +0530
committer adityacp 2018-04-05 15:30:01 +0530
commit    032d496c7fa7298a0748885b0f1c8e2c24af67d8 (patch)
tree      1cacc6b570ddbc4c8de172ced8e494c8ac655f0d
parent    f5c24ccf8b0bde0fe5726728a64f1e3638cf170d (diff)
Change error_messages.py, python_assertion_evaluator.py and grader.py

- Pep8 changes
- Show code error message along with test case
- Add nose in python exec scope
-rw-r--r--  yaksh/error_messages.py              34
-rw-r--r--  yaksh/grader.py                      33
-rw-r--r--  yaksh/python_assertion_evaluator.py  16
3 files changed, 37 insertions(+), 46 deletions(-)
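The headline change in error_messages.py is that prettify_exceptions now prefers the actual assertion message over the generic fallback text and always attaches the test case to the error dict. A minimal sketch of the new behaviour, assuming the yaksh package is importable (the test case string below is hypothetical):

from yaksh.error_messages import prettify_exceptions

# With a real message available, it is now preferred over the generic
# "Expected answer ... did not match the output" fallback.
err = prettify_exceptions(
    "AssertionError",
    "assert_equal: 4 != 5",
    testcase="assert_equal(square(2), 5)",  # hypothetical test case
)
print(err["message"])    # the real assertion message, not the fallback
print(err["test_case"])  # the test case is now attached unconditionally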
diff --git a/yaksh/error_messages.py b/yaksh/error_messages.py
index 7ea8618..f2dc7c6 100644
--- a/yaksh/error_messages.py
+++ b/yaksh/error_messages.py
@@ -3,6 +3,7 @@ try:
except ImportError:
from itertools import izip_longest as zip_longest
+
def prettify_exceptions(exception, message, traceback=None, testcase=None):
err = {"type": "assertion",
"exception": exception,
@@ -13,23 +14,27 @@ def prettify_exceptions(exception, message, traceback=None, testcase=None):
err["traceback"] = None
if exception == 'AssertionError':
- value = ("Expected answer from the"
- + " test case did not match the output")
- err["message"] = value
+ value = ("Expected answer from the" +
+ " test case did not match the output")
+ if message:
+ err["message"] = message
+ else:
+ err["message"] = value
err["traceback"] = None
- if testcase:
- err["test_case"] = testcase
+ err["test_case"] = testcase
return err
+
def _get_incorrect_user_lines(exp_lines, user_lines):
err_line_numbers = []
for line_no, (expected_line, user_line) in \
- enumerate(zip_longest(exp_lines, user_lines)):
- if not user_line or not expected_line or \
- user_line.strip() != expected_line.strip():
+ enumerate(zip_longest(exp_lines, user_lines)):
+ if (not user_line or not expected_line or
+ user_line.strip() != expected_line.strip()):
err_line_numbers.append(line_no)
return err_line_numbers
-
+
+
def compare_outputs(expected_output, user_output, given_input=None):
given_lines = user_output.splitlines()
exp_lines = expected_output.splitlines()
@@ -44,18 +49,17 @@ def compare_outputs(expected_output, user_output, given_input=None):
msg["error_line_numbers"] = err_line_numbers
if ng != ne:
msg["error_msg"] = ("Incorrect Answer: "
- + "We had expected {} number of lines. "\
- .format(ne)
+ + "We had expected {} number of lines. ".format(ne)
+ "We got {} number of lines.".format(ng)
)
return False, msg
else:
if err_line_numbers:
msg["error_msg"] = ("Incorrect Answer: "
- + "Line number(s) {0} did not match."
- .format(", ".join(map(
- str,[x+1 for x in err_line_numbers]
- ))))
+ + "Line number(s) {0} did not match."
+ .format(", ".join(
+ map(str, [x+1 for x in err_line_numbers])
+ )))
return False, msg
else:
msg["error_msg"] = "Correct Answer"
diff --git a/yaksh/grader.py b/yaksh/grader.py
index 38cce8d..a721236 100644
--- a/yaksh/grader.py
+++ b/yaksh/grader.py
@@ -1,22 +1,12 @@
#!/usr/bin/env python
from __future__ import unicode_literals
import sys
-import pwd
import os
-import stat
import contextlib
-from os.path import isdir, dirname, abspath, join, isfile, exists
+from os.path import dirname, abspath
import signal
import traceback
-from multiprocessing import Process, Queue
-import subprocess
-import re
-try:
- from SimpleXMLRPCServer import SimpleXMLRPCServer
-except ImportError:
- # The above import will not work on Python-3.x.
- from xmlrpc.server import SimpleXMLRPCServer
# Local imports
from .settings import SERVER_TIMEOUT
@@ -26,11 +16,13 @@ from .error_messages import prettify_exceptions
MY_DIR = abspath(dirname(__file__))
registry = None
+
# Raised when the code times-out.
# c.f. http://pguides.net/python/timeout-a-function
class TimeoutException(Exception):
pass
+
@contextlib.contextmanager
def change_dir(path):
cur_dir = abspath(dirname(MY_DIR))
@@ -75,7 +67,6 @@ class Grader(object):
self.timeout_msg = msg
self.in_dir = in_dir if in_dir else MY_DIR
-
def evaluate(self, kwargs):
"""Evaluates given code with the test cases based on
given arguments in test_case_data.
@@ -122,7 +113,6 @@ class Grader(object):
test_case_instances.append(test_case_instance)
return test_case_instances
-
def safe_evaluate(self, test_case_instances):
"""
Handles code evaluation along with compilation, signal handling
@@ -155,20 +145,19 @@ class Grader(object):
test_case_instance.teardown()
except TimeoutException:
- error.append(prettify_exceptions("TimeoutException",
- self.timeout_msg
- )
- )
+ error.append(
+ prettify_exceptions("TimeoutException", self.timeout_msg)
+ )
except Exception:
exc_type, exc_value, exc_tb = sys.exc_info()
tb_list = traceback.format_exception(exc_type, exc_value, exc_tb)
if len(tb_list) > 2:
del tb_list[1:3]
- error.append(prettify_exceptions(exc_type.__name__,
- str(exc_value),
- "".join(tb_list),
- )
- )
+ error.append(
+ prettify_exceptions(
+ exc_type.__name__, str(exc_value), "".join(tb_list)
+ )
+ )
finally:
# Set back any original signal handler.
set_original_signal_handler(prev_handler)
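The TimeoutException handling above sits on top of the usual SIGALRM pattern (see the pguides.net link in the comment). A self-contained sketch of that pattern on POSIX systems; the names here are illustrative, not the grader's own helpers:

import signal

class TimeoutException(Exception):
    pass

def _alarm_handler(signum, frame):
    raise TimeoutException("Code took too long to run.")

# Install the handler, keeping the previous one so it can be
# restored afterwards, as the grader's finally-block does.
prev_handler = signal.signal(signal.SIGALRM, _alarm_handler)
signal.alarm(2)  # deliver SIGALRM after 2 seconds
try:
    while True:  # stands in for the student's code
        pass
except TimeoutException as exc:
    print(exc)
finally:
    signal.alarm(0)                              # cancel any pending alarm
    signal.signal(signal.SIGALRM, prev_handler)  # restore the old handler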
diff --git a/yaksh/python_assertion_evaluator.py b/yaksh/python_assertion_evaluator.py
index 440f422..8c7d451 100644
--- a/yaksh/python_assertion_evaluator.py
+++ b/yaksh/python_assertion_evaluator.py
@@ -1,10 +1,6 @@
#!/usr/bin/env python
import sys
import traceback
-import os
-import re
-from os.path import join
-import importlib
# Local imports
from .file_utils import copy_files, delete_files
@@ -43,6 +39,7 @@ class PythonAssertionEvaluator(BaseEvaluator):
submitted = compile(self.user_answer, '<string>', mode='exec')
self.exec_scope = {}
exec(submitted, self.exec_scope)
+ exec("from nose.tools import *", self.exec_scope)
return self.exec_scope
def check_code(self):
@@ -53,18 +50,19 @@ class PythonAssertionEvaluator(BaseEvaluator):
--------
Returns a tuple (success, error, test_case_weight)
- success - Boolean, indicating if code was executed successfully, correctly
+ success - Boolean, indicating if code was executed successfully,
+ correctly
weight - Float, indicating total weight of all successful test cases
error - String, error message if success is false
- returns (True, "Correct answer", 1.0) : If the student script passes all
- test cases/have same output, when compared to the instructor script
+ returns (True, "Correct answer", 1.0) : If the student script passes
+ all test cases/have same output, when compared to the instructor script
returns (False, error_msg, 0.0): If the student script fails a single
test/have dissimilar output, when compared to the instructor script.
- Returns (False, error_msg, 0.0): If mandatory arguments are not files or if
- the required permissions are not given to the file(s).
+ Returns (False, error_msg, 0.0): If mandatory arguments are not files
+ or if the required permissions are not given to the file(s).
"""
success = False
mark_fraction = 0.0
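The behavioural change in this file is the extra exec of the nose import: it drops assert_equal and friends into the same scope as the compiled student code, so instructor-written test cases can call them directly. A minimal sketch, assuming nose is installed (the student answer and test case below are hypothetical):

user_answer = "def square(x):\n    return x * x"

exec_scope = {}
exec(compile(user_answer, '<string>', mode='exec'), exec_scope)
exec("from nose.tools import *", exec_scope)

# An instructor test case can now use nose assertions directly
# against the student's function:
exec("assert_equal(square(3), 9)", exec_scope)
print("test case passed")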