From f5c24ccf8b0bde0fe5726728a64f1e3638cf170d Mon Sep 17 00:00:00 2001 From: adityacp Date: Thu, 5 Apr 2018 15:28:18 +0530 Subject: Add nose to codeserver requirements --- requirements/requirements-codeserver.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements/requirements-codeserver.txt b/requirements/requirements-codeserver.txt index 004e45b..963a836 100644 --- a/requirements/requirements-codeserver.txt +++ b/requirements/requirements-codeserver.txt @@ -4,3 +4,4 @@ six requests tornado==4.5.3 psutil +nose -- cgit From 032d496c7fa7298a0748885b0f1c8e2c24af67d8 Mon Sep 17 00:00:00 2001 From: adityacp Date: Thu, 5 Apr 2018 15:30:01 +0530 Subject: Change error_messages.py, python_assertion_evaluator.py and grader.py - Pep8 changes - Show code error message along with test case - Add nose in python exec scope --- yaksh/error_messages.py | 34 +++++++++++++++++++--------------- yaksh/grader.py | 33 +++++++++++---------------------- yaksh/python_assertion_evaluator.py | 16 +++++++--------- 3 files changed, 37 insertions(+), 46 deletions(-) diff --git a/yaksh/error_messages.py b/yaksh/error_messages.py index 7ea8618..f2dc7c6 100644 --- a/yaksh/error_messages.py +++ b/yaksh/error_messages.py @@ -3,6 +3,7 @@ try: except ImportError: from itertools import izip_longest as zip_longest + def prettify_exceptions(exception, message, traceback=None, testcase=None): err = {"type": "assertion", "exception": exception, @@ -13,23 +14,27 @@ def prettify_exceptions(exception, message, traceback=None, testcase=None): err["traceback"] = None if exception == 'AssertionError': - value = ("Expected answer from the" - + " test case did not match the output") - err["message"] = value + value = ("Expected answer from the" + + " test case did not match the output") + if message: + err["message"] = message + else: + err["message"] = value err["traceback"] = None - if testcase: - err["test_case"] = testcase + err["test_case"] = testcase return err + def _get_incorrect_user_lines(exp_lines, user_lines): err_line_numbers = [] for line_no, (expected_line, user_line) in \ - enumerate(zip_longest(exp_lines, user_lines)): - if not user_line or not expected_line or \ - user_line.strip() != expected_line.strip(): + enumerate(zip_longest(exp_lines, user_lines)): + if (not user_line or not expected_line or + user_line.strip() != expected_line.strip()): err_line_numbers.append(line_no) return err_line_numbers - + + def compare_outputs(expected_output, user_output, given_input=None): given_lines = user_output.splitlines() exp_lines = expected_output.splitlines() @@ -44,18 +49,17 @@ def compare_outputs(expected_output, user_output, given_input=None): msg["error_line_numbers"] = err_line_numbers if ng != ne: msg["error_msg"] = ("Incorrect Answer: " - + "We had expected {} number of lines. "\ - .format(ne) + + "We had expected {} number of lines. ".format(ne) + "We got {} number of lines.".format(ng) ) return False, msg else: if err_line_numbers: msg["error_msg"] = ("Incorrect Answer: " - + "Line number(s) {0} did not match." - .format(", ".join(map( - str,[x+1 for x in err_line_numbers] - )))) + + "Line number(s) {0} did not match." 
+ .format(", ".join( + map(str, [x+1 for x in err_line_numbers]) + ))) return False, msg else: msg["error_msg"] = "Correct Answer" diff --git a/yaksh/grader.py b/yaksh/grader.py index 38cce8d..a721236 100644 --- a/yaksh/grader.py +++ b/yaksh/grader.py @@ -1,22 +1,12 @@ #!/usr/bin/env python from __future__ import unicode_literals import sys -import pwd import os -import stat import contextlib -from os.path import isdir, dirname, abspath, join, isfile, exists +from os.path import dirname, abspath import signal import traceback -from multiprocessing import Process, Queue -import subprocess -import re -try: - from SimpleXMLRPCServer import SimpleXMLRPCServer -except ImportError: - # The above import will not work on Python-3.x. - from xmlrpc.server import SimpleXMLRPCServer # Local imports from .settings import SERVER_TIMEOUT @@ -26,11 +16,13 @@ from .error_messages import prettify_exceptions MY_DIR = abspath(dirname(__file__)) registry = None + # Raised when the code times-out. # c.f. http://pguides.net/python/timeout-a-function class TimeoutException(Exception): pass + @contextlib.contextmanager def change_dir(path): cur_dir = abspath(dirname(MY_DIR)) @@ -75,7 +67,6 @@ class Grader(object): self.timeout_msg = msg self.in_dir = in_dir if in_dir else MY_DIR - def evaluate(self, kwargs): """Evaluates given code with the test cases based on given arguments in test_case_data. @@ -122,7 +113,6 @@ class Grader(object): test_case_instances.append(test_case_instance) return test_case_instances - def safe_evaluate(self, test_case_instances): """ Handles code evaluation along with compilation, signal handling @@ -155,20 +145,19 @@ class Grader(object): test_case_instance.teardown() except TimeoutException: - error.append(prettify_exceptions("TimeoutException", - self.timeout_msg - ) - ) + error.append( + prettify_exceptions("TimeoutException", self.timeout_msg) + ) except Exception: exc_type, exc_value, exc_tb = sys.exc_info() tb_list = traceback.format_exception(exc_type, exc_value, exc_tb) if len(tb_list) > 2: del tb_list[1:3] - error.append(prettify_exceptions(exc_type.__name__, - str(exc_value), - "".join(tb_list), - ) - ) + error.append( + prettify_exceptions( + exc_type.__name__, str(exc_value), "".join(tb_list) + ) + ) finally: # Set back any original signal handler. 
set_original_signal_handler(prev_handler) diff --git a/yaksh/python_assertion_evaluator.py b/yaksh/python_assertion_evaluator.py index 440f422..8c7d451 100644 --- a/yaksh/python_assertion_evaluator.py +++ b/yaksh/python_assertion_evaluator.py @@ -1,10 +1,6 @@ #!/usr/bin/env python import sys import traceback -import os -import re -from os.path import join -import importlib # Local imports from .file_utils import copy_files, delete_files @@ -43,6 +39,7 @@ class PythonAssertionEvaluator(BaseEvaluator): submitted = compile(self.user_answer, '', mode='exec') self.exec_scope = {} exec(submitted, self.exec_scope) + exec("from nose.tools import *", self.exec_scope) return self.exec_scope def check_code(self): @@ -53,18 +50,19 @@ class PythonAssertionEvaluator(BaseEvaluator): -------- Returns a tuple (success, error, test_case_weight) - success - Boolean, indicating if code was executed successfully, correctly + success - Boolean, indicating if code was executed successfully, + correctly weight - Float, indicating total weight of all successful test cases error - String, error message if success is false - returns (True, "Correct answer", 1.0) : If the student script passes all - test cases/have same output, when compared to the instructor script + returns (True, "Correct answer", 1.0) : If the student script passes + all test cases/have same output, when compared to the instructor script returns (False, error_msg, 0.0): If the student script fails a single test/have dissimilar output, when compared to the instructor script. - Returns (False, error_msg, 0.0): If mandatory arguments are not files or if - the required permissions are not given to the file(s). + Returns (False, error_msg, 0.0): If mandatory arguments are not files + or if the required permissions are not given to the file(s). """ success = False mark_fraction = 0.0 -- cgit From 680b741cb066bae3e074c9feade61bf7792ef384 Mon Sep 17 00:00:00 2001 From: adityacp Date: Thu, 5 Apr 2018 15:31:55 +0530 Subject: Change in test_python_evaluation.py - Pep8 changes - Add assertion test using nose tools --- yaksh/evaluator_tests/test_python_evaluation.py | 521 ++++++++++++------------ 1 file changed, 251 insertions(+), 270 deletions(-) diff --git a/yaksh/evaluator_tests/test_python_evaluation.py b/yaksh/evaluator_tests/test_python_evaluation.py index 71d7732..886ed01 100644 --- a/yaksh/evaluator_tests/test_python_evaluation.py +++ b/yaksh/evaluator_tests/test_python_evaluation.py @@ -1,7 +1,6 @@ from __future__ import unicode_literals import unittest import os -import sys import tempfile import shutil from textwrap import dedent @@ -26,13 +25,13 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest): self.in_dir = tmp_in_dir_path self.test_case_data = [{"test_case_type": "standardtestcase", "test_case": 'assert(add(1,2)==3)', - 'weight': 0.0}, + 'weight': 0.0}, {"test_case_type": "standardtestcase", "test_case": 'assert(add(-1,2)==1)', - 'weight': 0.0}, + 'weight': 0.0}, {"test_case_type": "standardtestcase", "test_case": 'assert(add(-1,-2)==-3)', - 'weight': 0.0}, + 'weight': 0.0}, ] self.timeout_msg = ("Code took more than {0} seconds to run. 
" "You probably have an infinite loop in" @@ -46,14 +45,12 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest): def test_correct_answer(self): # Given user_answer = "def add(a,b):\n\treturn a + b" - kwargs = { - 'metadata': { - 'user_answer': user_answer, - 'file_paths': self.file_paths, - 'partial_grading': False, - 'language': 'python' - }, - 'test_case_data': self.test_case_data, + kwargs = {'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'python'}, + 'test_case_data': self.test_case_data, } # When @@ -66,14 +63,12 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest): def test_incorrect_answer(self): # Given user_answer = "def add(a,b):\n\treturn a - b" - kwargs = { - 'metadata': { - 'user_answer': user_answer, - 'file_paths': self.file_paths, - 'partial_grading': False, - 'language': 'python' - }, - 'test_case_data': self.test_case_data, + kwargs = {'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'python'}, + 'test_case_data': self.test_case_data, } # When @@ -85,13 +80,13 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest): given_test_case_list = [tc["test_case"] for tc in self.test_case_data] for error in result.get("error"): self.assertEqual(error['exception'], 'AssertionError') - self.assertEqual(error['message'], - "Expected answer from the test case did not match the output" - ) + self.assertEqual( + error['message'], + "Expected answer from the test case did not match the output" + ) error_testcase_list = [tc['test_case'] for tc in result.get('error')] self.assertEqual(error_testcase_list, given_test_case_list) - def test_partial_incorrect_answer(self): # Given user_answer = "def add(a,b):\n\treturn abs(a) + abs(b)" @@ -100,19 +95,17 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest): 'weight': 1.0}, {"test_case_type": "standardtestcase", "test_case": 'assert(add(-1,-2)==-3)', - 'weight': 1.0}, + 'weight': 1.0}, {"test_case_type": "standardtestcase", "test_case": 'assert(add(1,2)==3)', 'weight': 2.0} ] - kwargs = { - 'metadata': { - 'user_answer': user_answer, - 'file_paths': self.file_paths, - 'partial_grading': True, - 'language': 'python' - }, - 'test_case_data': test_case_data, + kwargs = {'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': True, + 'language': 'python'}, + 'test_case_data': test_case_data, } # When @@ -126,22 +119,22 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest): given_test_case_list.remove('assert(add(1,2)==3)') for error in result.get("error"): self.assertEqual(error['exception'], 'AssertionError') - self.assertEqual(error['message'], - "Expected answer from the test case did not match the output" - ) + self.assertEqual( + error['message'], + "Expected answer from the test case did not match the output" + ) error_testcase_list = [tc['test_case'] for tc in result.get('error')] self.assertEqual(error_testcase_list, given_test_case_list) + def test_infinite_loop(self): # Given user_answer = "def add(a, b):\n\twhile True:\n\t\tpass" - kwargs = { - 'metadata': { - 'user_answer': user_answer, - 'file_paths': self.file_paths, - 'partial_grading': False, - 'language': 'python' - }, - 'test_case_data': self.test_case_data, + kwargs = {'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'python'}, + 'test_case_data': self.test_case_data, } # When @@ 
-168,14 +161,12 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest): "SyntaxError", "invalid syntax" ] - kwargs = { - 'metadata': { - 'user_answer': user_answer, - 'file_paths': self.file_paths, - 'partial_grading': False, - 'language': 'python' - }, - 'test_case_data': self.test_case_data, + kwargs = {'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'python'}, + 'test_case_data': self.test_case_data, } # When @@ -201,14 +192,12 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest): "IndentationError", "indented block" ] - kwargs = { - 'metadata': { - 'user_answer': user_answer, - 'file_paths': self.file_paths, - 'partial_grading': False, - 'language': 'python' - }, - 'test_case_data': self.test_case_data, + kwargs = {'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'python'}, + 'test_case_data': self.test_case_data, } # When @@ -220,9 +209,9 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest): self.assertFalse(result.get("success")) self.assertEqual(5, len(err)) for msg in indent_error_msg: - self.assert_correct_output(msg, - result.get("error")[0]['traceback'] - ) + self.assert_correct_output( + msg, result.get("error")[0]['traceback'] + ) def test_name_error(self): # Given @@ -234,15 +223,13 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest): "defined" ] - kwargs = { - 'metadata': { - 'user_answer': user_answer, - 'file_paths': self.file_paths, - 'partial_grading': False, - 'language': 'python' - }, - 'test_case_data': self.test_case_data, - } + kwargs = {'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'python'}, + 'test_case_data': self.test_case_data, + } # When grader = Grader(self.in_dir) @@ -258,15 +245,13 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest): return add(3, 3) """) recursion_error_msg = "maximum recursion depth exceeded" - kwargs = { - 'metadata': { - 'user_answer': user_answer, - 'file_paths': self.file_paths, - 'partial_grading': False, - 'language': 'python' - }, - 'test_case_data': self.test_case_data, - } + kwargs = {'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'python'}, + 'test_case_data': self.test_case_data, + } # When grader = Grader(self.in_dir) @@ -289,15 +274,13 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest): "argument" ] - kwargs = { - 'metadata': { - 'user_answer': user_answer, - 'file_paths': self.file_paths, - 'partial_grading': False, - 'language': 'python' - }, - 'test_case_data': self.test_case_data, - } + kwargs = {'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'python'}, + 'test_case_data': self.test_case_data, + } # When grader = Grader(self.in_dir) @@ -323,15 +306,13 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest): "base" ] - kwargs = { - 'metadata': { - 'user_answer': user_answer, - 'file_paths': self.file_paths, - 'partial_grading': False, - 'language': 'python' - }, - 'test_case_data': self.test_case_data, - } + kwargs = {'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'python'}, + 'test_case_data': self.test_case_data, + } # When grader = Grader(self.in_dir) @@ -356,15 +337,13 @@ class 
PythonAssertionEvaluationTestCases(EvaluatorBaseTest): return f.read()[0] """) - kwargs = { - 'metadata': { - 'user_answer': user_answer, - 'file_paths': self.file_paths, - 'partial_grading': False, - 'language': 'python' - }, - 'test_case_data': self.test_case_data, - } + kwargs = {'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'python'}, + 'test_case_data': self.test_case_data, + } # When grader = Grader(self.in_dir) @@ -390,25 +369,23 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest): ] kwargs = {'metadata': { - 'user_answer': user_answer, - 'file_paths': self.file_paths, - 'partial_grading': False, - 'language': 'python' - }, - 'test_case_data': test_case_data, - } + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'python'}, + 'test_case_data': test_case_data, + } # When grader = Grader(self.in_dir) result = grader.evaluate(kwargs) - err = result.get("error")[0]['traceback'] + err = result.get("error")[0]['traceback'] # Then self.assertFalse(result.get("success")) for msg in syntax_error_msg: self.assert_correct_output(msg, err) - def test_multiple_testcase_error(self): """ Tests the user answer with an correct test case first and then with an incorrect test case """ @@ -418,7 +395,8 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest): "test_case": 'assert(palindrome("abba")==True)', "weight": 0.0}, {"test_case_type": "standardtestcase", - "test_case": 's="abbb"\nassert palindrome(S)==False', + "test_case": 's="abbb"\n' + 'assert palindrome(S)==False', "weight": 0.0} ] name_error_msg = ["Traceback", @@ -426,15 +404,13 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest): "NameError", "name 'S' is not defined" ] - kwargs = { - 'metadata': { - 'user_answer': user_answer, - 'file_paths': self.file_paths, - 'partial_grading': False, - 'language': 'python' - }, - 'test_case_data': test_case_data, - } + kwargs = {'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'python'}, + 'test_case_data': test_case_data, + } # When grader = Grader(self.in_dir) @@ -454,18 +430,15 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest): return type(a) """) test_case_data = [{"test_case_type": "standardtestcase", - "test_case": 'assert(strchar("hello")==str)', - "weight": 0.0 - },] - kwargs = { - 'metadata': { - 'user_answer': user_answer, - 'file_paths': self.file_paths, - 'partial_grading': False, - 'language': 'python' - }, - 'test_case_data': test_case_data, - } + "test_case": 'assert(strchar("hello")==str)', + "weight": 0.0}] + kwargs = {'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'python'}, + 'test_case_data': test_case_data, + } # When grader = Grader(self.in_dir) result = grader.evaluate(kwargs) @@ -473,6 +446,31 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest): # Then self.assertTrue(result.get("success")) + def test_incorrect_answer_with_nose_assert(self): + user_answer = dedent("""\ + def add(a, b): + return a - b + """) + test_case_data = [{"test_case_type": "standardtestcase", + "test_case": 'assert_equal(add(1, 2), 3)', + "weight": 0.0}] + kwargs = {'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'python'}, + 'test_case_data': test_case_data, + } + # When + grader = Grader(self.in_dir) + result = 
grader.evaluate(kwargs) + + # Then + self.assertFalse(result.get("success")) + error = result.get("error")[0] + self.assertEqual(error['exception'], 'AssertionError') + self.assertEqual(error['message'], '-1 != 3') + class PythonStdIOEvaluationTestCases(EvaluatorBaseTest): def setUp(self): @@ -501,13 +499,12 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest): """ ) kwargs = {'metadata': { - 'user_answer': user_answer, - 'file_paths': self.file_paths, - 'partial_grading': False, - 'language': 'python' - }, - 'test_case_data': self.test_case_data - } + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'python'}, + 'test_case_data': self.test_case_data + } # When grader = Grader(self.in_dir) @@ -534,13 +531,12 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest): ) kwargs = {'metadata': { - 'user_answer': user_answer, - 'file_paths': self.file_paths, - 'partial_grading': False, - 'language': 'python' - }, - 'test_case_data': self.test_case_data - } + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'python'}, + 'test_case_data': self.test_case_data + } # When grader = Grader(self.in_dir) @@ -551,11 +547,13 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest): def test_correct_answer_string(self): # Given - self.test_case_data = [{"test_case_type": "stdiobasedtestcase", - "expected_input": ("the quick brown fox jumps over the lazy dog\nthe"), - "expected_output": "2", - "weight": 0.0 - }] + self.test_case_data = [{ + "test_case_type": "stdiobasedtestcase", + "expected_input": ("the quick brown fox jumps over " + "the lazy dog\nthe"), + "expected_output": "2", + "weight": 0.0 + }] user_answer = dedent(""" from six.moves import input a = str(input()) @@ -565,13 +563,12 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest): ) kwargs = {'metadata': { - 'user_answer': user_answer, - 'file_paths': self.file_paths, - 'partial_grading': False, - 'language': 'python' - }, - 'test_case_data': self.test_case_data - } + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'python'}, + 'test_case_data': self.test_case_data + } # When grader = Grader(self.in_dir) @@ -594,13 +591,12 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest): """ ) kwargs = {'metadata': { - 'user_answer': user_answer, - 'file_paths': self.file_paths, - 'partial_grading': False, - 'language': 'python' - }, - 'test_case_data': self.test_case_data - } + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'python'}, + 'test_case_data': self.test_case_data + } # When grader = Grader(self.in_dir) @@ -629,13 +625,12 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest): """ ) kwargs = {'metadata': { - 'user_answer': user_answer, - 'file_paths': self.file_paths, - 'partial_grading': False, - 'language': 'python' - }, - 'test_case_data': self.test_case_data - } + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'python'}, + 'test_case_data': self.test_case_data + } # When grader = Grader(self.in_dir) @@ -646,24 +641,24 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest): def test_infinite_loop(self): # Given - self.test_case_data = [{"test_case_type": "stdiobasedtestcase", - "expected_input": "1\n2", - "expected_output": "3", - "weight": 0.0 - }] + self.test_case_data = [{ + "test_case_type": "stdiobasedtestcase", + "expected_input": "1\n2", 
+ "expected_output": "3", + "weight": 0.0 + }] timeout_msg = ("Code took more than {0} seconds to run. " - "You probably have an infinite loop in" - " your code.").format(SERVER_TIMEOUT) + "You probably have an infinite loop in" + " your code.").format(SERVER_TIMEOUT) user_answer = "while True:\n\tpass" kwargs = {'metadata': { - 'user_answer': user_answer, - 'file_paths': self.file_paths, - 'partial_grading': False, - 'language': 'python' - }, - 'test_case_data': self.test_case_data - } + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'python'}, + 'test_case_data': self.test_case_data + } # When grader = Grader(self.in_dir) @@ -675,7 +670,6 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest): ) self.assertFalse(result.get('success')) - def test_unicode_literal_bug(self): # Given user_answer = dedent("""\ @@ -687,15 +681,13 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest): "expected_output": "str", "weight": 0.0 }] - kwargs = { - 'metadata': { - 'user_answer': user_answer, - 'file_paths': self.file_paths, - 'partial_grading': False, - 'language': 'python' - }, - 'test_case_data': test_case_data, - } + kwargs = {'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'python'}, + 'test_case_data': test_case_data, + } # When grader = Grader(self.in_dir) result = grader.evaluate(kwargs) @@ -733,19 +725,17 @@ class PythonHookEvaluationTestCases(EvaluatorBaseTest): success, err, mark_fraction = True, "", 1.0 return success, err, mark_fraction """ - ) + ) test_case_data = [{"test_case_type": "hooktestcase", - "hook_code": hook_code,"weight": 1.0 - }] - kwargs = { - 'metadata': { - 'user_answer': user_answer, - 'file_paths': self.file_paths, - 'partial_grading': True, - 'language': 'python' - }, - 'test_case_data': test_case_data, + "hook_code": hook_code, "weight": 1.0 + }] + kwargs = {'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': True, + 'language': 'python'}, + 'test_case_data': test_case_data, } # When @@ -768,20 +758,18 @@ class PythonHookEvaluationTestCases(EvaluatorBaseTest): success, err, mark_fraction = True, "", 1.0 return success, err, mark_fraction """ - ) + ) test_case_data = [{"test_case_type": "hooktestcase", - "hook_code": hook_code,"weight": 1.0 - }] - - kwargs = { - 'metadata': { - 'user_answer': user_answer, - 'file_paths': self.file_paths, - 'partial_grading': False, - 'language': 'python' - }, - 'test_case_data': test_case_data, + "hook_code": hook_code, "weight": 1.0 + }] + + kwargs = {'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'python'}, + 'test_case_data': test_case_data, } # When @@ -805,21 +793,19 @@ class PythonHookEvaluationTestCases(EvaluatorBaseTest): success, err, mark_fraction = True, "", 1.0 return success, err, mark_fraction """ - ) + ) test_case_data = [{"test_case_type": "standardtestcase", "test_case": assert_test_case, 'weight': 1.0}, {"test_case_type": "hooktestcase", "hook_code": hook_code, 'weight': 1.0}, ] - kwargs = { - 'metadata': { - 'user_answer': user_answer, - 'file_paths': self.file_paths, - 'partial_grading': True, - 'language': 'python' - }, - 'test_case_data': test_case_data, + kwargs = {'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': True, + 'language': 'python'}, + 'test_case_data': test_case_data, } # When @@ -842,7 +828,7 @@ class 
PythonHookEvaluationTestCases(EvaluatorBaseTest): success, err, mark_fraction = True, "", 0.5 return success, err, mark_fraction """ - ) + ) hook_code_2 = dedent("""\ def check_answer(user_answer): success = False @@ -853,22 +839,19 @@ class PythonHookEvaluationTestCases(EvaluatorBaseTest): success, err, mark_fraction = True, "", 1.0 return success, err, mark_fraction """ - ) - + ) test_case_data = [{"test_case_type": "hooktestcase", "hook_code": hook_code_1, 'weight': 1.0}, {"test_case_type": "hooktestcase", "hook_code": hook_code_2, 'weight': 1.0}, ] - kwargs = { - 'metadata': { - 'user_answer': user_answer, - 'file_paths': self.file_paths, - 'partial_grading': True, - 'language': 'python' - }, - 'test_case_data': test_case_data, + kwargs = {'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': True, + 'language': 'python'}, + 'test_case_data': test_case_data, } # When @@ -892,19 +875,18 @@ class PythonHookEvaluationTestCases(EvaluatorBaseTest): success, err, mark_fraction = True, "", 1.0 return success, err, mark_fraction """ - ) + ) + test_case_data = [{"test_case_type": "hooktestcase", - "hook_code": hook_code,"weight": 1.0 - }] - - kwargs = { - 'metadata': { - 'user_answer': user_answer, - 'file_paths': self.file_paths, - 'partial_grading': False, - 'language': 'python' - }, - 'test_case_data': test_case_data, + "hook_code": hook_code, "weight": 1.0 + }] + + kwargs = {'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'python'}, + 'test_case_data': test_case_data, } # When @@ -931,19 +913,18 @@ class PythonHookEvaluationTestCases(EvaluatorBaseTest): success, err, mark_fraction = True, "", 1.0 return success, err, mark_fraction """ - ) + ) + test_case_data = [{"test_case_type": "hooktestcase", - "hook_code": hook_code,"weight": 1.0 - }] - kwargs = { - 'metadata': { - 'user_answer': user_answer, - 'file_paths': self.file_paths, - 'assign_files': [(self.tmp_file, False)], - 'partial_grading': False, - 'language': 'python' - }, - 'test_case_data': test_case_data, + "hook_code": hook_code, "weight": 1.0 + }] + kwargs = {'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'assign_files': [(self.tmp_file, False)], + 'partial_grading': False, + 'language': 'python'}, + 'test_case_data': test_case_data, } # When -- cgit From 463cd907d0e744d5bbbd5ed5f0d0dde1bd8ca162 Mon Sep 17 00:00:00 2001 From: adityacp Date: Fri, 6 Apr 2018 15:01:01 +0530 Subject: Add error line number in error output --- yaksh/error_messages.py | 4 +++- yaksh/evaluator_tests/test_python_evaluation.py | 1 + yaksh/grader.py | 5 +++-- yaksh/python_assertion_evaluator.py | 4 +++- 4 files changed, 10 insertions(+), 4 deletions(-) diff --git a/yaksh/error_messages.py b/yaksh/error_messages.py index f2dc7c6..7a18c22 100644 --- a/yaksh/error_messages.py +++ b/yaksh/error_messages.py @@ -4,7 +4,8 @@ except ImportError: from itertools import izip_longest as zip_longest -def prettify_exceptions(exception, message, traceback=None, testcase=None): +def prettify_exceptions(exception, message, traceback=None, + testcase=None, line_no=None): err = {"type": "assertion", "exception": exception, "traceback": traceback, @@ -22,6 +23,7 @@ def prettify_exceptions(exception, message, traceback=None, testcase=None): err["message"] = value err["traceback"] = None err["test_case"] = testcase + err["line_no"] = line_no return err diff --git a/yaksh/evaluator_tests/test_python_evaluation.py 
b/yaksh/evaluator_tests/test_python_evaluation.py index 886ed01..2748d8a 100644 --- a/yaksh/evaluator_tests/test_python_evaluation.py +++ b/yaksh/evaluator_tests/test_python_evaluation.py @@ -934,5 +934,6 @@ class PythonHookEvaluationTestCases(EvaluatorBaseTest): # Then self.assertTrue(result.get('success')) + if __name__ == '__main__': unittest.main() diff --git a/yaksh/grader.py b/yaksh/grader.py index a721236..d81470b 100644 --- a/yaksh/grader.py +++ b/yaksh/grader.py @@ -148,14 +148,15 @@ class Grader(object): error.append( prettify_exceptions("TimeoutException", self.timeout_msg) ) - except Exception: + except Exception as e: exc_type, exc_value, exc_tb = sys.exc_info() tb_list = traceback.format_exception(exc_type, exc_value, exc_tb) + line_no = e.lineno if len(tb_list) > 2: del tb_list[1:3] error.append( prettify_exceptions( - exc_type.__name__, str(exc_value), "".join(tb_list) + exc_type.__name__, str(exc_value), "".join(tb_list), line_no=line_no ) ) finally: diff --git a/yaksh/python_assertion_evaluator.py b/yaksh/python_assertion_evaluator.py index 8c7d451..1f2b82b 100644 --- a/yaksh/python_assertion_evaluator.py +++ b/yaksh/python_assertion_evaluator.py @@ -74,12 +74,14 @@ class PythonAssertionEvaluator(BaseEvaluator): except Exception: exc_type, exc_value, exc_tb = sys.exc_info() tb_list = traceback.format_exception(exc_type, exc_value, exc_tb) + line_no = traceback.extract_tb(exc_tb)[-1][1] if len(tb_list) > 2: del tb_list[1:3] err = prettify_exceptions(exc_type.__name__, str(exc_value), "".join(tb_list), - self.test_case + self.test_case, + line_no ) else: success = True -- cgit From c9aeb7b7e1484229782f0f8ab397890b27cfc905 Mon Sep 17 00:00:00 2001 From: adityacp Date: Fri, 6 Apr 2018 15:01:47 +0530 Subject: Highlight error line in code mirror for python --- yaksh/static/yaksh/js/requesthandler.js | 19 +++++++++++++++++++ yaksh/templates/yaksh/error_template.html | 2 +- yaksh/templates/yaksh/question.html | 8 ++++++-- 3 files changed, 26 insertions(+), 3 deletions(-) diff --git a/yaksh/static/yaksh/js/requesthandler.js b/yaksh/static/yaksh/js/requesthandler.js index a215ce4..f858317 100644 --- a/yaksh/static/yaksh/js/requesthandler.js +++ b/yaksh/static/yaksh/js/requesthandler.js @@ -75,6 +75,22 @@ function response_handler(method_type, content_type, data, uid){ var error_output = document.getElementById("error_panel"); error_output.innerHTML = res.error; focus_on_error(error_output); + err_lineno = $("#err_lineno").val(); + if(marker){ + marker.clear(); + } + if(err_lineno){ + var lineno = parseInt(err_lineno) - 1; + var editor = global_editor.editor; + var line_length = editor.getLine(lineno).length; + marker = editor.markText({line: lineno, ch: 0}, {line: lineno, ch: line_length}, + {className: "activeline", clearOnEnter:true}); + } + else{ + if(marker){ + marker.clear(); + } + } } } else { reset_values(); @@ -125,6 +141,8 @@ function ajax_check_code(url, method_type, data_type, data, uid) var global_editor = {}; var csrftoken = jQuery("[name=csrfmiddlewaretoken]").val(); +var err_lineno; +var marker; $(document).ready(function(){ if(is_exercise == "True" && can_skip == "False"){ setTimeout(function() {show_solution();}, delay_time*1000); @@ -148,6 +166,7 @@ $(document).ready(function(){ mode: mode_dict[lang], gutter: true, lineNumbers: true, + styleSelectedText: true, onChange: function (instance, changes) { render(); } diff --git a/yaksh/templates/yaksh/error_template.html b/yaksh/templates/yaksh/error_template.html index 61657ae..301020e 100644 --- 
a/yaksh/templates/yaksh/error_template.html +++ b/yaksh/templates/yaksh/error_template.html @@ -3,7 +3,6 @@ {% endblock %} {% load custom_filters %} - {% if error_message %} {% for error in error_message %} @@ -35,6 +34,7 @@ {% if error.traceback %} + Full Traceback:
{{error.traceback}}
{% endif %} diff --git a/yaksh/templates/yaksh/question.html b/yaksh/templates/yaksh/question.html index b65073a..ebfe066 100644 --- a/yaksh/templates/yaksh/question.html +++ b/yaksh/templates/yaksh/question.html @@ -11,6 +11,10 @@ .CodeMirror{ border-style: groove; } + .activeline { + background: #FBC2C4 !important; + color: #8a1f11 !important; + } {% endblock %} @@ -221,7 +225,7 @@ question_type = "{{ question.type }}" {% if question.type == "integer" %} Enter Integer:
- +

{% endif %} @@ -233,7 +237,7 @@ question_type = "{{ question.type }}" {% if question.type == "float" %} Enter Decimal Value :
- +

{% endif %} -- cgit From a3e540aa209be57318de6c3e2548c56e68fdeded Mon Sep 17 00:00:00 2001 From: adityacp Date: Fri, 6 Apr 2018 16:21:12 +0530 Subject: Change in grader.py and python_assertion_evaluator.py - Make pep8 change in grader.py - Add nose tools in the scope during check_code instead of compile_code --- yaksh/grader.py | 3 ++- yaksh/python_assertion_evaluator.py | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/yaksh/grader.py b/yaksh/grader.py index d81470b..9bc4e5c 100644 --- a/yaksh/grader.py +++ b/yaksh/grader.py @@ -156,7 +156,8 @@ class Grader(object): del tb_list[1:3] error.append( prettify_exceptions( - exc_type.__name__, str(exc_value), "".join(tb_list), line_no=line_no + exc_type.__name__, str(exc_value), "".join(tb_list), + line_no=line_no ) ) finally: diff --git a/yaksh/python_assertion_evaluator.py b/yaksh/python_assertion_evaluator.py index 1f2b82b..4b016a1 100644 --- a/yaksh/python_assertion_evaluator.py +++ b/yaksh/python_assertion_evaluator.py @@ -39,7 +39,6 @@ class PythonAssertionEvaluator(BaseEvaluator): submitted = compile(self.user_answer, '', mode='exec') self.exec_scope = {} exec(submitted, self.exec_scope) - exec("from nose.tools import *", self.exec_scope) return self.exec_scope def check_code(self): @@ -67,6 +66,7 @@ class PythonAssertionEvaluator(BaseEvaluator): success = False mark_fraction = 0.0 try: + exec("from nose.tools import *", self.exec_scope) _tests = compile(self.test_case, '', mode='exec') exec(_tests, self.exec_scope) except TimeoutException: -- cgit From 575da538337da6afb23cc159870853b6457797d3 Mon Sep 17 00:00:00 2001 From: adityacp Date: Fri, 13 Apr 2018 12:17:29 +0530 Subject: Change in python test, grader and request handler - Add try except in grader to get exceptions for python stdio and python assertion evaluation - Add additional tests - Add condition in request handler to avoid error if codemirror is not instantiated --- yaksh/evaluator_tests/test_python_evaluation.py | 32 +++++++++++++++++++++++-- yaksh/grader.py | 5 +++- yaksh/static/yaksh/js/requesthandler.js | 26 ++++++++++---------- 3 files changed, 48 insertions(+), 15 deletions(-) diff --git a/yaksh/evaluator_tests/test_python_evaluation.py b/yaksh/evaluator_tests/test_python_evaluation.py index 2748d8a..1933d17 100644 --- a/yaksh/evaluator_tests/test_python_evaluation.py +++ b/yaksh/evaluator_tests/test_python_evaluation.py @@ -317,12 +317,15 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest): # When grader = Grader(self.in_dir) result = grader.evaluate(kwargs) - err = result.get("error")[0]['traceback'] + errors = result.get("error") # Then self.assertFalse(result.get("success")) for msg in value_error_msg: - self.assert_correct_output(msg, err) + self.assert_correct_output(msg, errors[0]['traceback']) + for index, error in enumerate(errors): + self.assertEqual(error['test_case'], + self.test_case_data[index]['test_case']) def test_file_based_assert(self): # Given @@ -694,6 +697,31 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest): # Then self.assertTrue(result.get("success")) + def test_get_error_lineno(self): + user_answer = dedent("""\ + print(1/0) + """) + test_case_data = [{"test_case_type": "stdiobasedtestcase", + "expected_input": "", + "expected_output": "1", + "weight": 0.0 + }] + kwargs = {'metadata': { + 'user_answer': user_answer, + 'file_paths': self.file_paths, + 'partial_grading': False, + 'language': 'python'}, + 'test_case_data': test_case_data, + } + # When + grader = Grader(self.in_dir) + result = 
grader.evaluate(kwargs) + # Then + self.assertFalse(result.get("success")) + error = result.get("error")[0] + self.assertEqual(error['line_no'], 1) + self.assertEqual(error['exception'], "ZeroDivisionError") + class PythonHookEvaluationTestCases(EvaluatorBaseTest): diff --git a/yaksh/grader.py b/yaksh/grader.py index 9bc4e5c..2a246b8 100644 --- a/yaksh/grader.py +++ b/yaksh/grader.py @@ -151,7 +151,10 @@ class Grader(object): except Exception as e: exc_type, exc_value, exc_tb = sys.exc_info() tb_list = traceback.format_exception(exc_type, exc_value, exc_tb) - line_no = e.lineno + try: + line_no = e.lineno + except AttributeError: + line_no = traceback.extract_tb(exc_tb)[-1][1] if len(tb_list) > 2: del tb_list[1:3] error.append( diff --git a/yaksh/static/yaksh/js/requesthandler.js b/yaksh/static/yaksh/js/requesthandler.js index f858317..952de3a 100644 --- a/yaksh/static/yaksh/js/requesthandler.js +++ b/yaksh/static/yaksh/js/requesthandler.js @@ -75,21 +75,23 @@ function response_handler(method_type, content_type, data, uid){ var error_output = document.getElementById("error_panel"); error_output.innerHTML = res.error; focus_on_error(error_output); - err_lineno = $("#err_lineno").val(); - if(marker){ - marker.clear(); - } - if(err_lineno){ - var lineno = parseInt(err_lineno) - 1; - var editor = global_editor.editor; - var line_length = editor.getLine(lineno).length; - marker = editor.markText({line: lineno, ch: 0}, {line: lineno, ch: line_length}, - {className: "activeline", clearOnEnter:true}); - } - else{ + if(global_editor.editor){ + err_lineno = $("#err_lineno").val(); if(marker){ marker.clear(); } + if(err_lineno){ + var lineno = parseInt(err_lineno) - 1; + var editor = global_editor.editor; + var line_length = editor.getLine(lineno).length; + marker = editor.markText({line: lineno, ch: 0}, {line: lineno, ch: line_length}, + {className: "activeline", clearOnEnter:true}); + } + else{ + if(marker){ + marker.clear(); + } + } } } } else { -- cgit From 393a9d2a8ec116f6530512dfbe6e8769442667e3 Mon Sep 17 00:00:00 2001 From: adityacp Date: Mon, 7 May 2018 15:34:57 +0530 Subject: Specify version for nose package --- requirements/requirements-codeserver.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/requirements-codeserver.txt b/requirements/requirements-codeserver.txt index 963a836..11bc0a2 100644 --- a/requirements/requirements-codeserver.txt +++ b/requirements/requirements-codeserver.txt @@ -4,4 +4,4 @@ six requests tornado==4.5.3 psutil -nose +nose==1.3.7 -- cgit
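
Taken together, the commits above change the Python assertion evaluator in three related ways: nose.tools is imported into the exec scope used for the instructor test cases, the original assertion message is preferred over the generic "did not match" text, and the failing line number is pulled out of the traceback so the web client can highlight it in CodeMirror. The sketch below is a condensed, hypothetical illustration of that flow, not code from the patches; the run_test_case helper and its return tuple are invented for this note, and it assumes nose is installed, which is what the requirements change provides.

    # Condensed sketch of the evaluation flow implemented across the patches
    # above; run_test_case and its return tuple are illustrative only.
    import sys
    import traceback


    def run_test_case(user_answer, test_case):
        exec_scope = {}
        # Student code and nose's assertion helpers share one namespace, so an
        # instructor test case may call assert_equal(), assert_true(), etc.
        exec(compile(user_answer, '<string>', 'exec'), exec_scope)
        exec("from nose.tools import *", exec_scope)
        try:
            exec(compile(test_case, '<string>', 'exec'), exec_scope)
        except Exception as e:
            exc_type, exc_value, exc_tb = sys.exc_info()
            # SyntaxError and friends carry .lineno directly; otherwise fall
            # back to the last traceback frame, as grader.py now does.
            try:
                line_no = e.lineno
            except AttributeError:
                line_no = traceback.extract_tb(exc_tb)[-1][1]
            return False, exc_type.__name__, str(exc_value), line_no
        return True, None, None, None


    # For user_answer = "def add(a, b):\n    return a - b" and
    # test_case = "assert_equal(add(1, 2), 3)", this returns success=False
    # with exception 'AssertionError' and message '-1 != 3', the message
    # checked by the new test_incorrect_answer_with_nose_assert test.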
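
Similarly, the reworked compare_outputs/_get_incorrect_user_lines logic in yaksh/error_messages.py pads the shorter of the two outputs with zip_longest and records every mismatching line. A stripped-down, stand-alone rendition of that comparison follows; the function name incorrect_line_numbers is made up for this note.

    # Stand-alone rendition of the line comparison used by compare_outputs in
    # yaksh/error_messages.py; incorrect_line_numbers is an illustrative name.
    from itertools import zip_longest


    def incorrect_line_numbers(expected_output, user_output):
        mismatches = []
        pairs = zip_longest(expected_output.splitlines(),
                            user_output.splitlines())
        for line_no, (expected_line, user_line) in enumerate(pairs):
            # A missing line on either side, or differing stripped content,
            # counts as an error on that (0-based) line.
            if (not user_line or not expected_line or
                    user_line.strip() != expected_line.strip()):
                mismatches.append(line_no)
        return mismatches


    # The student-facing message reports 1-based numbers, e.g.
    # incorrect_line_numbers("1\n2\n3", "1\n5\n3") == [1], which is shown as
    # "Line number(s) 2 did not match."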