diff options
author | prathamesh | 2020-04-02 16:33:20 +0530 |
---|---|---|
committer | prathamesh | 2020-04-02 16:53:00 +0530 |
commit | 2e360f7d5772c3059a42db8915530cde952a01c6 (patch) | |
tree | f253fc25a4f449dda1fa7bcfccb816ecf119cb29 | |
parent | 61abccdcb0b0a44f4db60e3bb4d09d1ec5dc50e3 (diff) | |
download | online_test-2e360f7d5772c3059a42db8915530cde952a01c6.tar.gz online_test-2e360f7d5772c3059a42db8915530cde952a01c6.tar.bz2 online_test-2e360f7d5772c3059a42db8915530cde952a01c6.zip |
Show prettified R error messages and fix filter
The R language is now available in the filter form; its values are taken from the models.
R messages are prettified like python assertion messages.
Text up to the first colon in the R error message is removed, as
it is simply a filename or other unwanted text.
-rw-r--r-- | yaksh/evaluator_tests/test_r_evaluation.py | 9 | ||||
-rw-r--r-- | yaksh/forms.py | 24 | ||||
-rw-r--r-- | yaksh/r_code_evaluator.py | 14 |
3 files changed, 16 insertions, 31 deletions
diff --git a/yaksh/evaluator_tests/test_r_evaluation.py b/yaksh/evaluator_tests/test_r_evaluation.py index b161dc9..b4b81ae 100644 --- a/yaksh/evaluator_tests/test_r_evaluation.py +++ b/yaksh/evaluator_tests/test_r_evaluation.py @@ -94,7 +94,7 @@ class RAssertionEvaluationTestCase(EvaluatorBaseTest): } ''' ) - err = ['Error: input == output is not TRUE\nExecution halted\n'] + err = 'input == output is not TRUE\nExecution halted\n' kwargs = {'metadata': { 'user_answer': user_answer, 'file_paths': self.file_paths, @@ -109,7 +109,7 @@ class RAssertionEvaluationTestCase(EvaluatorBaseTest): errors = result.get('error') # Then self.assertFalse(result.get('success')) - self.assertEqual(errors, err) + self.assertEqual(errors[0]['message'], err) def test_error_code(self): # Given @@ -135,7 +135,7 @@ class RAssertionEvaluationTestCase(EvaluatorBaseTest): # Then self.assertFalse(result.get("success")) - self.assertIn("object 'a' not found", errors[0]) + self.assertIn("object 'a' not found", errors[0]['message']) def test_empty_function(self): # Given @@ -160,7 +160,8 @@ class RAssertionEvaluationTestCase(EvaluatorBaseTest): # Then self.assertFalse(result.get("success")) - self.assertIn("Error: is.null(obj) == FALSE is not TRUE", errors[0]) + err = errors[0]['message'] + self.assertIn("is.null(obj) == FALSE is not TRUE", err) def test_infinite_loop(self): # Given diff --git a/yaksh/forms.py b/yaksh/forms.py index c0f40ea..52ef75d 100644 --- a/yaksh/forms.py +++ b/yaksh/forms.py @@ -1,7 +1,7 @@ from django import forms from yaksh.models import ( get_model_class, Profile, Quiz, Question, Course, QuestionPaper, Lesson, - LearningModule, TestCase + LearningModule, TestCase, languages, question_types ) from grades.models import GradingSystem from django.contrib.auth import authenticate @@ -17,27 +17,9 @@ from string import punctuation, digits import pytz from .send_emails import generate_activation_key -languages = ( - ("select", "Select Language"), - ("python", "Python"), - 
("bash", "Bash"), - ("c", "C Language"), - ("cpp", "C++ Language"), - ("java", "Java Language"), - ("scilab", "Scilab"), -) +languages = (("select", "Select Language"),) + languages -question_types = ( - ("select", "Select Question Type"), - ("mcq", "Multiple Choice"), - ("mcc", "Multiple Correct Choices"), - ("code", "Code"), - ("upload", "Assignment Upload"), - ("integer", "Answer in Integer"), - ("string", "Answer in String"), - ("float", "Answer in Float"), - ("arrange", "Arrange in Correct Order"), -) +question_types = (("select", "Select Question Type"),) + question_types test_case_types = ( ("standardtestcase", "Standard Testcase"), diff --git a/yaksh/r_code_evaluator.py b/yaksh/r_code_evaluator.py index 11bc970..8eaeb38 100644 --- a/yaksh/r_code_evaluator.py +++ b/yaksh/r_code_evaluator.py @@ -7,6 +7,7 @@ import re # Local imports from .base_evaluator import BaseEvaluator from .file_utils import copy_files, delete_files +from .error_messages import prettify_exceptions class RCodeEvaluator(BaseEvaluator): @@ -49,9 +50,8 @@ class RCodeEvaluator(BaseEvaluator): # Throw message if there are commmands that terminates scilab add_err = "" if terminate_commands: - add_err = "Please do not use quit() q() in your\ - code.\n Otherwise your code will not be evaluated\ - correctly.\n" + add_err = "Please do not use quit() q() in your code.\ + \n Otherwise your code will not be evaluated.\n" cmd = 'Rscript main.r' ret = self._run_command(cmd, shell=True, stdout=subprocess.PIPE, @@ -66,10 +66,12 @@ class RCodeEvaluator(BaseEvaluator): success, err = True, None mark_fraction = 1.0 if self.partial_grading else 0.0 else: - err = add_err + stdout + err = stdout + add_err else: - err = add_err + stderr - + err = stderr + add_err + if err: + err = re.sub(r'.*?: ', '', err, count=1) + err = prettify_exceptions('Error', err) return success, err, mark_fraction def _remove_r_quit(self, string): |