author     Palaparthy Adityachandra    2020-07-02 13:28:48 +0530
committer  GitHub                      2020-07-02 13:28:48 +0530
commit     b541b3aa05bf19ed6d4d28373e186883e3c4e966 (patch)
tree       2d0b95b53a60ce6830fcefe115830e89e940845a /yaksh
parent     c9e0d0df8dcb3a14017cef5ff01832a5345629db (diff)
parent     46ef4a4a563b16bb96d09cb17496afb731e616ad (diff)
download   online_test-b541b3aa05bf19ed6d4d28373e186883e3c4e966.tar.gz
           online_test-b541b3aa05bf19ed6d4d28373e186883e3c4e966.tar.bz2
           online_test-b541b3aa05bf19ed6d4d28373e186883e3c4e966.zip
Merge pull request #719 from prathamesh920/hidden-testcases
Hide test cases
Diffstat (limited to 'yaksh')
-rw-r--r--  yaksh/bash_code_evaluator.py                      1
-rw-r--r--  yaksh/bash_stdio_evaluator.py                     1
-rw-r--r--  yaksh/cpp_code_evaluator.py                       5
-rw-r--r--  yaksh/cpp_stdio_evaluator.py                      1
-rw-r--r--  yaksh/evaluator_tests/test_bash_evaluation.py    31
-rw-r--r--  yaksh/evaluator_tests/test_c_cpp_evaluation.py   52
-rw-r--r--  yaksh/evaluator_tests/test_java_evaluation.py    19
-rw-r--r--  yaksh/evaluator_tests/test_python_evaluation.py  44
-rw-r--r--  yaksh/evaluator_tests/test_r_evaluation.py        5
-rw-r--r--  yaksh/evaluator_tests/test_scilab_evaluation.py  13
-rw-r--r--  yaksh/grader.py                                   3
-rw-r--r--  yaksh/hook_evaluator.py                           1
-rw-r--r--  yaksh/java_code_evaluator.py                      5
-rw-r--r--  yaksh/java_stdio_evaluator.py                     1
-rw-r--r--  yaksh/models.py                                   7
-rw-r--r--  yaksh/python_assertion_evaluator.py               1
-rw-r--r--  yaksh/python_stdio_evaluator.py                   1
-rw-r--r--  yaksh/r_code_evaluator.py                         1
-rw-r--r--  yaksh/scilab_code_evaluator.py                    1
-rw-r--r--  yaksh/templates/yaksh/error_template.html         8
-rw-r--r--  yaksh/templates/yaksh/view_answerpaper.html       9
-rw-r--r--  yaksh/test_models.py                              8
22 files changed, 144 insertions, 74 deletions
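
The merged change threads a per-test-case "hidden" flag from the test case models through every evaluator and into the error dicts returned by the grader, so the templates can report that a hidden test case failed without revealing its contents. A minimal, self-contained sketch of the idea (illustrative only; TestCase, prettify and grade below are stand-ins, not the project's actual API):

    # Illustrative sketch of the merged behaviour: evaluator errors are
    # normalised to dicts and tagged with the test case's "hidden" flag,
    # so the UI can suppress the details of hidden test cases.
    from dataclasses import dataclass

    @dataclass
    class TestCase:            # stand-in for a yaksh test case instance
        weight: float
        hidden: bool

    def prettify(exc_type, message):
        # stand-in for yaksh.error_messages.prettify_exceptions
        return {"exception": exc_type, "message": message}

    def grade(test_case, success, err, mark_fraction, total=0.0, errors=None):
        errors = [] if errors is None else errors
        if not isinstance(err, dict):      # plain string errors become dicts
            err = prettify("Error", err)
        err["hidden"] = test_case.hidden   # tag the error for the template
        if success:
            total += mark_fraction * test_case.weight
        else:
            errors.append(err)             # hidden errors are skipped in the UI
        return total, errors

    print(grade(TestCase(weight=1.0, hidden=True), False, "assertion failed", 0.0))
    # -> (0.0, [{'exception': 'Error', 'message': 'assertion failed', 'hidden': True}])
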
diff --git a/yaksh/bash_code_evaluator.py b/yaksh/bash_code_evaluator.py
index 61cb9fa..f97d386 100644
--- a/yaksh/bash_code_evaluator.py
+++ b/yaksh/bash_code_evaluator.py
@@ -27,6 +27,7 @@ class BashCodeEvaluator(BaseEvaluator):
self.test_case_args = test_case_data.get('test_case_args')
self.weight = test_case_data.get('weight')
+ self.hidden = test_case_data.get('hidden')
def teardown(self):
# Delete the created file.
diff --git a/yaksh/bash_stdio_evaluator.py b/yaksh/bash_stdio_evaluator.py
index f445e09..8d14758 100644
--- a/yaksh/bash_stdio_evaluator.py
+++ b/yaksh/bash_stdio_evaluator.py
@@ -23,6 +23,7 @@ class BashStdIOEvaluator(StdIOEvaluator):
self.expected_input = test_case_data.get('expected_input')
self.expected_output = test_case_data.get('expected_output')
self.weight = test_case_data.get('weight')
+ self.hidden = test_case_data.get('hidden')
def teardown(self):
os.remove(self.submit_code_path)
diff --git a/yaksh/cpp_code_evaluator.py b/yaksh/cpp_code_evaluator.py
index d249c66..0f19596 100644
--- a/yaksh/cpp_code_evaluator.py
+++ b/yaksh/cpp_code_evaluator.py
@@ -8,6 +8,7 @@ import subprocess
from .file_utils import copy_files, delete_files
from .base_evaluator import BaseEvaluator
from .grader import CompilationError, TestCaseError
+from .error_messages import prettify_exceptions
class CppCodeEvaluator(BaseEvaluator):
@@ -29,6 +30,7 @@ class CppCodeEvaluator(BaseEvaluator):
# Set test case data values
self.test_case = test_case_data.get('test_case')
self.weight = test_case_data.get('weight')
+ self.hidden = test_case_data.get('hidden')
def teardown(self):
# Delete the created file.
@@ -142,7 +144,8 @@ class CppCodeEvaluator(BaseEvaluator):
mark_fraction = 1.0 if self.partial_grading else 0.0
else:
err = "{0} \n {1}".format(stdout, stderr)
- raise AssertionError(err)
+ err = prettify_exceptions('AssertionError', err)
+ return success, err, mark_fraction
else:
err = "Test case Error:"
try:
diff --git a/yaksh/cpp_stdio_evaluator.py b/yaksh/cpp_stdio_evaluator.py
index 3f44cb2..a48f701 100644
--- a/yaksh/cpp_stdio_evaluator.py
+++ b/yaksh/cpp_stdio_evaluator.py
@@ -24,6 +24,7 @@ class CppStdIOEvaluator(StdIOEvaluator):
self.expected_input = test_case_data.get('expected_input')
self.expected_output = test_case_data.get('expected_output')
self.weight = test_case_data.get('weight')
+ self.hidden = test_case_data.get('hidden')
def teardown(self):
if os.path.exists(self.submit_code_path):
diff --git a/yaksh/evaluator_tests/test_bash_evaluation.py b/yaksh/evaluator_tests/test_bash_evaluation.py
index f86bf24..031a9de 100644
--- a/yaksh/evaluator_tests/test_bash_evaluation.py
+++ b/yaksh/evaluator_tests/test_bash_evaluation.py
@@ -25,7 +25,7 @@ class BashAssertionEvaluationTestCases(EvaluatorBaseTest):
{"test_case": self.tc_data,
"test_case_args": self.tc_data_args,
"test_case_type": "standardtestcase",
- "weight": 0.0
+ "weight": 0.0, "hidden": False
}
]
self.in_dir = tempfile.mkdtemp()
@@ -66,7 +66,8 @@ class BashAssertionEvaluationTestCases(EvaluatorBaseTest):
{"test_case": tc_data,
"test_case_args": "",
"test_case_type": "standardtestcase",
- "weight": 0.0
+ "weight": 0.0,
+ "hidden": True
}
]
kwargs = {'metadata': {
@@ -129,7 +130,9 @@ class BashAssertionEvaluationTestCases(EvaluatorBaseTest):
# Then
self.assertFalse(result.get("success"))
- self.assert_correct_output("Error", result.get("error"))
+ self.assertFalse(result.get("error")[0]["hidden"])
+ self.assert_correct_output("Error",
+ result.get("error")[0]["message"])
def test_infinite_loop(self):
# Given
@@ -170,7 +173,8 @@ class BashAssertionEvaluationTestCases(EvaluatorBaseTest):
"test_case": self.tc_data,
"test_case_args": self.tc_data_args,
"test_case_type": "standardtestcase",
- "weight": 0.0
+ "weight": 0.0,
+ "hidden": True
}]
user_answer = ("#!/bin/bash\ncat $1")
kwargs = {'metadata': {
@@ -240,7 +244,7 @@ class BashStdIOEvaluationTestCases(EvaluatorBaseTest):
test_case_data = [{'expected_output': '1 2 3\n4 5 6\n7 8 9\n',
'expected_input': '1,2,3\n4,5,6\n7,8,9',
'test_case_type': 'stdiobasedtestcase',
- 'weight': 0.0
+ 'weight': 0.0,
}]
kwargs = {
'metadata': {
@@ -270,7 +274,8 @@ class BashStdIOEvaluationTestCases(EvaluatorBaseTest):
'expected_output': '11',
'expected_input': '5\n6',
'test_case_type': 'stdiobasedtestcase',
- 'weight': 0.0
+ 'weight': 0.0,
+ 'hidden': True
}]
kwargs = {
'metadata': {
@@ -288,6 +293,7 @@ class BashStdIOEvaluationTestCases(EvaluatorBaseTest):
# Then
result_error = result.get('error')[0].get('error_msg')
self.assert_correct_output("Incorrect", result_error)
+ self.assertTrue(result.get('error')[0]['hidden'])
self.assertFalse(result.get('success'))
def test_stdout_only(self):
@@ -399,7 +405,8 @@ class BashHookEvaluationTestCases(EvaluatorBaseTest):
return success, err, mark_fraction
""")
test_case_data = [{"test_case_type": "hooktestcase",
- "hook_code": hook_code, "weight": 1.0}]
+ "hook_code": hook_code, "weight": 1.0,
+ "hidden": True}]
kwargs = {
'metadata': {
@@ -416,7 +423,9 @@ class BashHookEvaluationTestCases(EvaluatorBaseTest):
# Then
self.assertFalse(result.get('success'))
- self.assert_correct_output('Incorrect Answer', result.get('error'))
+ self.assertTrue(result.get('error')[0]['hidden'])
+ self.assert_correct_output('Incorrect Answer',
+ result.get('error')[0]['message'])
def test_assert_with_hook(self):
# Given
@@ -446,7 +455,8 @@ class BashHookEvaluationTestCases(EvaluatorBaseTest):
'weight': 1.0
},
{"test_case_type": "hooktestcase",
- "hook_code": hook_code, 'weight': 1.0},
+ "hook_code": hook_code, 'weight': 1.0,
+ 'hidden': True},
]
kwargs = {
'metadata': {
@@ -544,7 +554,8 @@ class BashHookEvaluationTestCases(EvaluatorBaseTest):
""")
test_case_data = [{"test_case_type": "hooktestcase",
- "hook_code": hook_code, "weight": 1.0}]
+ "hook_code": hook_code, "weight": 1.0,
+ "hidden": False}]
kwargs = {
'metadata': {
diff --git a/yaksh/evaluator_tests/test_c_cpp_evaluation.py b/yaksh/evaluator_tests/test_c_cpp_evaluation.py
index 14ed808..b7c6018 100644
--- a/yaksh/evaluator_tests/test_c_cpp_evaluation.py
+++ b/yaksh/evaluator_tests/test_c_cpp_evaluation.py
@@ -54,7 +54,7 @@ class CAssertionEvaluationTestCases(EvaluatorBaseTest):
""")
self.test_case_data = [{"test_case": self.tc_data,
"test_case_type": "standardtestcase",
- "weight": 0.0
+ "weight": 0.0, "hidden": False
}]
self.in_dir = tmp_in_dir_path
self.timeout_msg = ("Code took more than {0} seconds to run. "
@@ -184,9 +184,9 @@ class CAssertionEvaluationTestCases(EvaluatorBaseTest):
check(50, result);
}
""")
- self.test_case_data = [{"test_case": self.tc_data,
+ test_case_data = [{"test_case": self.tc_data,
"test_case_type": "standardtestcase",
- "weight": 0.0
+ "weight": 0.0,
}]
user_answer = dedent("""
#include<stdio.h>
@@ -206,7 +206,7 @@ class CAssertionEvaluationTestCases(EvaluatorBaseTest):
'file_paths': self.file_paths,
'partial_grading': False,
'language': 'cpp'
- }, 'test_case_data': self.test_case_data,
+ }, 'test_case_data': test_case_data,
}
# When
@@ -257,9 +257,9 @@ class CAssertionEvaluationTestCases(EvaluatorBaseTest):
{
return a+b;
}""")
- self.test_case_data = [{"test_case": self.tc_data,
+ test_case_data = [{"test_case": self.tc_data,
"test_case_type": "standardtestcase",
- "weight": 0.0
+ "weight": 0.0,
}]
kwargs = {
'metadata': {
@@ -267,7 +267,7 @@ class CAssertionEvaluationTestCases(EvaluatorBaseTest):
'file_paths': self.file_paths,
'partial_grading': False,
'language': 'cpp'
- }, 'test_case_data': self.test_case_data,
+ }, 'test_case_data': test_case_data,
}
# When
@@ -287,6 +287,7 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest):
'expected_input': '5\n6',
'weight': 0.0,
'test_case_type': 'stdiobasedtestcase',
+ 'hidden': True
}]
self.in_dir = tempfile.mkdtemp()
self.timeout_msg = ("Code took more than {0} seconds to run. "
@@ -324,7 +325,7 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest):
def test_array_input(self):
# Given
- self.test_case_data = [{'expected_output': '561',
+ test_case_data = [{'expected_output': '561',
'expected_input': '5\n6\n1',
'weight': 0.0,
'test_case_type': 'stdiobasedtestcase',
@@ -344,7 +345,7 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest):
'file_paths': self.file_paths,
'partial_grading': False,
'language': 'cpp'
- }, 'test_case_data': self.test_case_data,
+ }, 'test_case_data': test_case_data,
}
# When
@@ -356,7 +357,7 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest):
def test_string_input(self):
# Given
- self.test_case_data = [{'expected_output': 'abc',
+ test_case_data = [{'expected_output': 'abc',
'expected_input': 'abc',
'weight': 0.0,
'test_case_type': 'stdiobasedtestcase',
@@ -374,7 +375,7 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest):
'file_paths': self.file_paths,
'partial_grading': False,
'language': 'cpp'
- }, 'test_case_data': self.test_case_data,
+ }, 'test_case_data': test_case_data,
}
# When
@@ -408,6 +409,7 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest):
# Then
lines_of_error = len(result.get('error')[0].get('error_line_numbers'))
result_error = result.get('error')[0].get('error_msg')
+ self.assertTrue(result.get('error')[0].get('hidden'))
self.assertFalse(result.get('success'))
self.assert_correct_output("Incorrect", result_error)
self.assertTrue(lines_of_error > 0)
@@ -472,7 +474,7 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest):
def test_only_stdout(self):
# Given
- self.test_case_data = [{'expected_output': '11',
+ test_case_data = [{'expected_output': '11',
'weight': 0.0,
'test_case_type': 'stdiobasedtestcase',
}]
@@ -488,7 +490,7 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest):
'file_paths': self.file_paths,
'partial_grading': False,
'language': 'cpp'
- }, 'test_case_data': self.test_case_data,
+ }, 'test_case_data': test_case_data,
}
# When
@@ -522,11 +524,12 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest):
result = grader.evaluate(kwargs)
# Then
+ self.assertFalse(result.get('hidden'))
self.assertTrue(result.get('success'))
def test_cpp_array_input(self):
# Given
- self.test_case_data = [{'expected_output': '561',
+ test_case_data = [{'expected_output': '561',
'expected_input': '5\n6\n1',
'weight': 0.0,
'test_case_type': 'stdiobasedtestcase',
@@ -547,7 +550,7 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest):
'file_paths': self.file_paths,
'partial_grading': False,
'language': 'cpp'
- }, 'test_case_data': self.test_case_data,
+ }, 'test_case_data': test_case_data,
}
# When
@@ -559,7 +562,7 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest):
def test_cpp_string_input(self):
# Given
- self.test_case_data = [{'expected_output': 'abc',
+ test_case_data = [{'expected_output': 'abc',
'expected_input': 'abc',
'weight': 0.0,
'test_case_type': 'stdiobasedtestcase',
@@ -578,7 +581,7 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest):
'file_paths': self.file_paths,
'partial_grading': False,
'language': 'cpp'
- }, 'test_case_data': self.test_case_data,
+ }, 'test_case_data': test_case_data,
}
# When
@@ -613,6 +616,7 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest):
# Then
lines_of_error = len(result.get('error')[0].get('error_line_numbers'))
result_error = result.get('error')[0].get('error_msg')
+ self.assertTrue(result.get('error')[0].get('hidden'))
self.assertFalse(result.get('success'))
self.assert_correct_output("Incorrect", result_error)
self.assertTrue(lines_of_error > 0)
@@ -675,7 +679,7 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest):
def test_cpp_only_stdout(self):
# Given
- self.test_case_data = [{'expected_output': '11',
+ test_case_data = [{'expected_output': '11',
'expected_input': '',
'weight': 0.0,
'test_case_type': 'stdiobasedtestcase',
@@ -693,7 +697,7 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest):
'file_paths': self.file_paths,
'partial_grading': False,
'language': 'cpp'
- }, 'test_case_data': self.test_case_data,
+ }, 'test_case_data': test_case_data,
}
# When
@@ -806,7 +810,8 @@ class CppHookEvaluationTestCases(EvaluatorBaseTest):
""")
test_case_data = [{"test_case_type": "hooktestcase",
- "hook_code": hook_code, "weight": 1.0}]
+ "hook_code": hook_code, "weight": 1.0,
+ "hidden": True}]
kwargs = {
'metadata': {
'user_answer': user_answer,
@@ -821,8 +826,10 @@ class CppHookEvaluationTestCases(EvaluatorBaseTest):
result = grader.evaluate(kwargs)
# Then
+ self.assertTrue(result.get('error')[0]['hidden'])
self.assertFalse(result.get('success'))
- self.assert_correct_output('Incorrect Answer', result.get('error'))
+ self.assert_correct_output('Incorrect Answer',
+ result.get('error')[0]['message'])
def test_assert_with_hook(self):
# Given
@@ -999,7 +1006,8 @@ class CppHookEvaluationTestCases(EvaluatorBaseTest):
""")
test_case_data = [{"test_case_type": "hooktestcase",
- "hook_code": hook_code, "weight": 1.0}]
+ "hook_code": hook_code, "weight": 1.0,
+ "hidden": False}]
kwargs = {
'metadata': {
diff --git a/yaksh/evaluator_tests/test_java_evaluation.py b/yaksh/evaluator_tests/test_java_evaluation.py
index eb09f2f..e762852 100644
--- a/yaksh/evaluator_tests/test_java_evaluation.py
+++ b/yaksh/evaluator_tests/test_java_evaluation.py
@@ -62,7 +62,7 @@ class JavaAssertionEvaluationTestCases(EvaluatorBaseTest):
self.test_case_data = [
{"test_case": self.tc_data,
"test_case_type": "standardtestcase",
- "weight": 0.0
+ "weight": 0.0, "hidden": False
}
]
self.in_dir = tmp_in_dir_path
@@ -119,6 +119,7 @@ class JavaAssertionEvaluationTestCases(EvaluatorBaseTest):
self.assertFalse(result.get('success'))
for error in errors:
self.assertEqual(error.get('exception'), 'AssertionError')
+ self.assertFalse(error.get('hidden'))
def test_error(self):
# Given
@@ -140,6 +141,7 @@ class JavaAssertionEvaluationTestCases(EvaluatorBaseTest):
self.assertFalse(result.get("success"))
for error in errors:
self.assertEqual(error.get('exception'), 'CompilationError')
+ self.assertFalse(result.get('hidden'))
def test_infinite_loop(self):
# Given
@@ -276,7 +278,7 @@ class JavaAssertionEvaluationTestCases(EvaluatorBaseTest):
"{\n\treturn a;\n\t}\n}")
self.test_case_data = [{"test_case": self.tc_data,
"test_case_type": "standardtestcase",
- "weight": 0.0
+ "weight": 0.0, "hidden": True
}]
kwargs = {
'metadata': {
@@ -408,6 +410,7 @@ class JavaStdIOEvaluationTestCases(EvaluatorBaseTest):
# Then
lines_of_error = len(result.get('error')[0].get('error_line_numbers'))
result_error = result.get('error')[0].get('error_msg')
+ self.assertFalse(result.get('error')[0].get('hidden'))
self.assertFalse(result.get('success'))
self.assert_correct_output("Incorrect", result_error)
self.assertTrue(lines_of_error > 0)
@@ -437,6 +440,7 @@ class JavaStdIOEvaluationTestCases(EvaluatorBaseTest):
self.assertFalse(result.get("success"))
for error in errors:
self.assertEqual(error.get('exception'), 'CompilationError')
+ self.assertFalse(error.get('hidden'))
def test_infinite_loop(self):
# Given
@@ -474,7 +478,7 @@ class JavaStdIOEvaluationTestCases(EvaluatorBaseTest):
# Given
self.test_case_data = [{'expected_output': '11',
'test_case_type': 'stdiobasedtestcase',
- 'weight': 0.0
+ 'weight': 0.0, 'hidden': False
}]
user_answer = dedent("""
class Test
@@ -677,7 +681,8 @@ class JavaHookEvaluationTestCases(EvaluatorBaseTest):
""")
test_case_data = [{"test_case_type": "hooktestcase",
- "hook_code": hook_code, "weight": 1.0
+ "hook_code": hook_code, "weight": 1.0,
+ "hidden": True
}]
kwargs = {
'metadata': {
@@ -693,8 +698,10 @@ class JavaHookEvaluationTestCases(EvaluatorBaseTest):
result = grader.evaluate(kwargs)
# Then
+ self.assertTrue(result.get('error')[0]['hidden'])
self.assertFalse(result.get('success'))
- self.assert_correct_output('Incorrect Answer', result.get('error'))
+ self.assert_correct_output('Incorrect Answer',
+ result.get('error')[0]['message'])
def test_assert_with_hook(self):
# Given
@@ -875,7 +882,7 @@ class JavaHookEvaluationTestCases(EvaluatorBaseTest):
""")
test_case_data = [{"test_case_type": "hooktestcase",
- "hook_code": hook_code, "weight": 1.0
+ "hook_code": hook_code, "weight": 1.0,
}]
kwargs = {
diff --git a/yaksh/evaluator_tests/test_python_evaluation.py b/yaksh/evaluator_tests/test_python_evaluation.py
index de973cf..343c8fb 100644
--- a/yaksh/evaluator_tests/test_python_evaluation.py
+++ b/yaksh/evaluator_tests/test_python_evaluation.py
@@ -25,13 +25,13 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
self.in_dir = tmp_in_dir_path
self.test_case_data = [{"test_case_type": "standardtestcase",
"test_case": 'assert(add(1,2)==3)',
- 'weight': 0.0},
+ 'weight': 0.0, 'hidden': True},
{"test_case_type": "standardtestcase",
"test_case": 'assert(add(-1,2)==1)',
- 'weight': 0.0},
+ 'weight': 0.0, 'hidden': True},
{"test_case_type": "standardtestcase",
"test_case": 'assert(add(-1,-2)==-3)',
- 'weight': 0.0},
+ 'weight': 0.0, 'hidden': True},
]
self.timeout_msg = ("Code took more than {0} seconds to run. "
"You probably have an infinite loop in"
@@ -80,6 +80,7 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
given_test_case_list = [tc["test_case"] for tc in self.test_case_data]
for error in result.get("error"):
self.assertEqual(error['exception'], 'AssertionError')
+ self.assertTrue(error['hidden'])
self.assertEqual(
error['message'],
"Expected answer from the test case did not match the output"
@@ -92,13 +93,13 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
user_answer = "def add(a,b):\n\treturn abs(a) + abs(b)"
test_case_data = [{"test_case_type": "standardtestcase",
"test_case": 'assert(add(-1,2)==1)',
- 'weight': 1.0},
+ 'weight': 1.0, 'hidden': False},
{"test_case_type": "standardtestcase",
"test_case": 'assert(add(-1,-2)==-3)',
- 'weight': 1.0},
+ 'weight': 1.0, 'hidden': False},
{"test_case_type": "standardtestcase",
"test_case": 'assert(add(1,2)==3)',
- 'weight': 2.0}
+ 'weight': 2.0, 'hidden': False}
]
kwargs = {'metadata': {
'user_answer': user_answer,
@@ -119,6 +120,7 @@ class PythonAssertionEvaluationTestCases(EvaluatorBaseTest):
given_test_case_list.remove('assert(add(1,2)==3)')
for error in result.get("error"):
self.assertEqual(error['exception'], 'AssertionError')
+ self.assertFalse(error['hidden'])
self.assertEqual(
error['message'],
"Expected answer from the test case did not match the output"
@@ -489,7 +491,7 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest):
def test_correct_answer_integer(self):
# Given
- self.test_case_data = [{"test_case_type": "stdiobasedtestcase",
+ test_case_data = [{"test_case_type": "stdiobasedtestcase",
"expected_input": "1\n2",
"expected_output": "3",
"weight": 0.0
@@ -505,7 +507,7 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest):
'file_paths': self.file_paths,
'partial_grading': False,
'language': 'python'},
- 'test_case_data': self.test_case_data
+ 'test_case_data': test_case_data
}
# When
@@ -517,7 +519,7 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest):
def test_correct_answer_list(self):
# Given
- self.test_case_data = [{"test_case_type": "stdiobasedtestcase",
+ test_case_data = [{"test_case_type": "stdiobasedtestcase",
"expected_input": "1,2,3\n5,6,7",
"expected_output": "[1, 2, 3, 5, 6, 7]",
"weight": 0.0
@@ -536,7 +538,7 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest):
'file_paths': self.file_paths,
'partial_grading': False,
'language': 'python'},
- 'test_case_data': self.test_case_data
+ 'test_case_data': test_case_data
}
# When
@@ -548,7 +550,7 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest):
def test_correct_answer_string(self):
# Given
- self.test_case_data = [{
+ test_case_data = [{
"test_case_type": "stdiobasedtestcase",
"expected_input": ("the quick brown fox jumps over "
"the lazy dog\nthe"),
@@ -567,7 +569,7 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest):
'file_paths': self.file_paths,
'partial_grading': False,
'language': 'python'},
- 'test_case_data': self.test_case_data
+ 'test_case_data': test_case_data
}
# When
@@ -579,10 +581,10 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest):
def test_incorrect_answer_integer(self):
# Given
- self.test_case_data = [{"test_case_type": "stdiobasedtestcase",
+ test_case_data = [{"test_case_type": "stdiobasedtestcase",
"expected_input": "1\n2",
"expected_output": "3",
- "weight": 0.0
+ "weight": 0.0, 'hidden': True
}]
user_answer = dedent("""
a = int(input())
@@ -595,7 +597,7 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest):
'file_paths': self.file_paths,
'partial_grading': False,
'language': 'python'},
- 'test_case_data': self.test_case_data
+ 'test_case_data': test_case_data
}
# When
@@ -604,6 +606,7 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest):
# Then
self.assertFalse(result.get('success'))
+ self.assertTrue(result.get('error')[0].get('hidden'))
self.assert_correct_output(
"Incorrect Answer: Line number(s) 1 did not match.",
result.get('error')[0].get('error_msg')
@@ -611,7 +614,7 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest):
def test_file_based_answer(self):
# Given
- self.test_case_data = [{"test_case_type": "stdiobasedtestcase",
+ test_case_data = [{"test_case_type": "stdiobasedtestcase",
"expected_input": "",
"expected_output": "2",
"weight": 0.0
@@ -629,7 +632,7 @@ class PythonStdIOEvaluationTestCases(EvaluatorBaseTest):
'file_paths': self.file_paths,
'partial_grading': False,
'language': 'python'},
- 'test_case_data': self.test_case_data
+ 'test_case_data': test_case_data
}
# When
@@ -786,7 +789,8 @@ class PythonHookEvaluationTestCases(EvaluatorBaseTest):
)
test_case_data = [{"test_case_type": "hooktestcase",
- "hook_code": hook_code, "weight": 1.0
+ "hook_code": hook_code, "weight": 1.0,
+ "hidden": True
}]
kwargs = {'metadata': {
@@ -803,7 +807,9 @@ class PythonHookEvaluationTestCases(EvaluatorBaseTest):
# Then
self.assertFalse(result.get('success'))
- self.assert_correct_output('Incorrect Answer', result.get('error'))
+ self.assertTrue(result.get('error')[0]['hidden'])
+ self.assert_correct_output('Incorrect Answer',
+ result.get('error')[0]['message'])
def test_assert_with_hook(self):
# Given
diff --git a/yaksh/evaluator_tests/test_r_evaluation.py b/yaksh/evaluator_tests/test_r_evaluation.py
index b4b81ae..a196d91 100644
--- a/yaksh/evaluator_tests/test_r_evaluation.py
+++ b/yaksh/evaluator_tests/test_r_evaluation.py
@@ -44,7 +44,7 @@ class RAssertionEvaluationTestCase(EvaluatorBaseTest):
)
self.test_case_data = [{"test_case": self.test_case,
"test_case_type": "standardtestcase",
- "weight": 0.0
+ "weight": 0.0, "hidden": True
}]
self.timeout_msg = ("Code took more than {0} seconds to run. "
"You probably have an infinite loop in"
@@ -108,6 +108,7 @@ class RAssertionEvaluationTestCase(EvaluatorBaseTest):
result = grader.evaluate(kwargs)
errors = result.get('error')
# Then
+ self.assertTrue(result.get("error")[0]['hidden'])
self.assertFalse(result.get('success'))
self.assertEqual(errors[0]['message'], err)
@@ -134,6 +135,7 @@ class RAssertionEvaluationTestCase(EvaluatorBaseTest):
errors = result.get('error')
# Then
+ self.assertTrue(result.get("error")[0]['hidden'])
self.assertFalse(result.get("success"))
self.assertIn("object 'a' not found", errors[0]['message'])
@@ -159,6 +161,7 @@ class RAssertionEvaluationTestCase(EvaluatorBaseTest):
errors = result.get('error')
# Then
+ self.assertTrue(result.get("error")[0]['hidden'])
self.assertFalse(result.get("success"))
err = errors[0]['message']
self.assertIn("is.null(obj) == FALSE is not TRUE", err)
diff --git a/yaksh/evaluator_tests/test_scilab_evaluation.py b/yaksh/evaluator_tests/test_scilab_evaluation.py
index d3f1dc8..b08d348 100644
--- a/yaksh/evaluator_tests/test_scilab_evaluation.py
+++ b/yaksh/evaluator_tests/test_scilab_evaluation.py
@@ -48,7 +48,7 @@ class ScilabEvaluationTestCases(EvaluatorBaseTest):
""")
self.test_case_data = [{"test_case": self.tc_data,
"test_case_type": "standardtestcase",
- "weight": 0.0
+ "weight": 0.0, 'hidden': True
}]
self.in_dir = tmp_in_dir_path
self.file_paths = None
@@ -92,9 +92,9 @@ class ScilabEvaluationTestCases(EvaluatorBaseTest):
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
-
self.assertFalse(result.get("success"))
- self.assert_correct_output('error', result.get("error"))
+ self.assertTrue(result.get("error")[0]['hidden'])
+ self.assert_correct_output('error', result.get("error")[0]['message'])
def test_incorrect_answer(self):
user_answer = ("funcprot(0)\nfunction[c]=add(a,b)"
@@ -110,10 +110,11 @@ class ScilabEvaluationTestCases(EvaluatorBaseTest):
grader = Grader(self.in_dir)
result = grader.evaluate(kwargs)
-
- lines_of_error = len(result.get('error')[0].splitlines())
+ lines_of_error = len(result.get('error')[0]['message'].splitlines())
self.assertFalse(result.get('success'))
- self.assert_correct_output("Message", result.get('error'))
+ self.assertTrue(result.get("error")[0]['hidden'])
+ self.assert_correct_output("Message",
+ result.get('error')[0]["message"])
self.assertTrue(lines_of_error > 1)
def test_infinite_loop(self):
diff --git a/yaksh/grader.py b/yaksh/grader.py
index c1be493..c0d81fa 100644
--- a/yaksh/grader.py
+++ b/yaksh/grader.py
@@ -143,6 +143,9 @@ class Grader(object):
test_case_instance.compile_code()
eval_result = test_case_instance.check_code()
test_case_success, err, mark_fraction = eval_result
+ if not isinstance(err, dict):
+ err = prettify_exceptions('Error', err)
+ err['hidden'] = test_case_instance.hidden
if test_case_success:
weight += mark_fraction * test_case_instance.weight
else:
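
After this hunk, every entry in the result's error list is a dict carrying a "hidden" key, which the updated templates and tests below rely on. Roughly the shape looks like this (an illustrative example with made-up values; exact keys vary by evaluator, e.g. stdio errors carry error_msg and error_line_numbers instead of message):

    # Approximate shape of one entry in result["error"] after this change.
    example_error = {
        "exception": "AssertionError",
        "message": "Expected answer from the test case did not match the output",
        "hidden": True,   # copied from test_case_instance.hidden by the grader
    }
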
diff --git a/yaksh/hook_evaluator.py b/yaksh/hook_evaluator.py
index 33c1549..ff428c3 100644
--- a/yaksh/hook_evaluator.py
+++ b/yaksh/hook_evaluator.py
@@ -25,6 +25,7 @@ class HookEvaluator(BaseEvaluator):
# Set test case data values
self.hook_code = test_case_data.get('hook_code')
self.weight = test_case_data.get('weight')
+ self.hidden = test_case_data.get('hidden')
def teardown(self):
# Delete the created file.
diff --git a/yaksh/java_code_evaluator.py b/yaksh/java_code_evaluator.py
index 35573c0..9689c25 100644
--- a/yaksh/java_code_evaluator.py
+++ b/yaksh/java_code_evaluator.py
@@ -8,6 +8,7 @@ import subprocess
from .base_evaluator import BaseEvaluator
from .file_utils import copy_files, delete_files
from .grader import CompilationError, TestCaseError
+from .error_messages import prettify_exceptions
class JavaCodeEvaluator(BaseEvaluator):
@@ -27,6 +28,7 @@ class JavaCodeEvaluator(BaseEvaluator):
# Set test case data values
self.test_case = test_case_data.get('test_case')
self.weight = test_case_data.get('weight')
+ self.hidden = test_case_data.get('hidden')
def teardown(self):
# Delete the created file.
@@ -150,7 +152,8 @@ class JavaCodeEvaluator(BaseEvaluator):
mark_fraction = 1.0 if self.partial_grading else 0.0
else:
err = stdout + "\n" + stderr
- raise AssertionError(err)
+ err = prettify_exceptions('AssertionError', err)
+ return success, err, mark_fraction
else:
err = "Test case Error:"
try:
diff --git a/yaksh/java_stdio_evaluator.py b/yaksh/java_stdio_evaluator.py
index 0d7e480..375676f 100644
--- a/yaksh/java_stdio_evaluator.py
+++ b/yaksh/java_stdio_evaluator.py
@@ -24,6 +24,7 @@ class JavaStdIOEvaluator(StdIOEvaluator):
self.expected_input = test_case_data.get('expected_input')
self.expected_output = test_case_data.get('expected_output')
self.weight = test_case_data.get('weight')
+ self.hidden = test_case_data.get('hidden')
def teardown(self):
if os.path.exists(self.submit_code_path):
diff --git a/yaksh/models.py b/yaksh/models.py
index 7d4dd98..1b076a8 100644
--- a/yaksh/models.py
+++ b/yaksh/models.py
@@ -2534,11 +2534,13 @@ class StandardTestCase(TestCase):
test_case = models.TextField()
weight = models.FloatField(default=1.0)
test_case_args = models.TextField(blank=True)
+ hidden = models.BooleanField(default=False)
def get_field_value(self):
return {"test_case_type": "standardtestcase",
"test_case": self.test_case,
"weight": self.weight,
+ "hidden": self.hidden,
"test_case_args": self.test_case_args}
def __str__(self):
@@ -2549,11 +2551,13 @@ class StdIOBasedTestCase(TestCase):
expected_input = models.TextField(default=None, blank=True, null=True)
expected_output = models.TextField(default=None)
weight = models.IntegerField(default=1.0)
+ hidden = models.BooleanField(default=False)
def get_field_value(self):
return {"test_case_type": "stdiobasedtestcase",
"expected_output": self.expected_output,
"expected_input": self.expected_input,
+ "hidden": self.hidden,
"weight": self.weight}
def __str__(self):
@@ -2599,10 +2603,11 @@ class HookTestCase(TestCase):
)
weight = models.FloatField(default=1.0)
+ hidden = models.BooleanField(default=False)
def get_field_value(self):
return {"test_case_type": "hooktestcase", "hook_code": self.hook_code,
- "weight": self.weight}
+ "hidden": self.hidden, "weight": self.weight}
def __str__(self):
return u'Hook Testcase | Correct: {0}'.format(self.hook_code)
diff --git a/yaksh/python_assertion_evaluator.py b/yaksh/python_assertion_evaluator.py
index 4b016a1..368206a 100644
--- a/yaksh/python_assertion_evaluator.py
+++ b/yaksh/python_assertion_evaluator.py
@@ -24,6 +24,7 @@ class PythonAssertionEvaluator(BaseEvaluator):
# Set test case data values
self.test_case = test_case_data.get('test_case')
self.weight = test_case_data.get('weight')
+ self.hidden = test_case_data.get('hidden')
def teardown(self):
# Delete the created file.
diff --git a/yaksh/python_stdio_evaluator.py b/yaksh/python_stdio_evaluator.py
index a1e8f72..272bf34 100644
--- a/yaksh/python_stdio_evaluator.py
+++ b/yaksh/python_stdio_evaluator.py
@@ -36,6 +36,7 @@ class PythonStdIOEvaluator(BaseEvaluator):
self.expected_input = test_case_data.get('expected_input')
self.expected_output = test_case_data.get('expected_output')
self.weight = test_case_data.get('weight')
+ self.hidden = test_case_data.get('hidden')
def teardown(self):
# Delete the created file.
diff --git a/yaksh/r_code_evaluator.py b/yaksh/r_code_evaluator.py
index 8eaeb38..8aeb7ec 100644
--- a/yaksh/r_code_evaluator.py
+++ b/yaksh/r_code_evaluator.py
@@ -24,6 +24,7 @@ class RCodeEvaluator(BaseEvaluator):
# Set test case data values
self.test_case = test_case_data.get('test_case')
self.weight = test_case_data.get('weight')
+ self.hidden = test_case_data.get('hidden')
def teardown(self):
# Delete the created file.
diff --git a/yaksh/scilab_code_evaluator.py b/yaksh/scilab_code_evaluator.py
index 9f26234..a3df443 100644
--- a/yaksh/scilab_code_evaluator.py
+++ b/yaksh/scilab_code_evaluator.py
@@ -23,6 +23,7 @@ class ScilabCodeEvaluator(BaseEvaluator):
# Set test case data values
self.test_case = test_case_data.get('test_case')
self.weight = test_case_data.get('weight')
+ self.hidden = test_case_data.get('hidden')
def teardown(self):
# Delete the created file.
diff --git a/yaksh/templates/yaksh/error_template.html b/yaksh/templates/yaksh/error_template.html
index 9cfeac1..b93d2f1 100644
--- a/yaksh/templates/yaksh/error_template.html
+++ b/yaksh/templates/yaksh/error_template.html
@@ -13,6 +13,9 @@
<div class="card-header alert-danger">Error No. {{ forloop.counter }}</div>
<div class="card-body ">
<div class="">
+ {% if error.hidden %}
+ <strong> Hidden test case failed </strong>
+ {% else %}
{% if not error.type %}
<pre><code> {{error|safe}} </code></pre>
@@ -87,10 +90,11 @@
</tr>
</table>
{% endif %}
- </div>
+ {% endif %}
+ </div>
</div>
</div>
<br>
</div>
{% endfor %}
-{% endif %}
\ No newline at end of file
+{% endif %}
diff --git a/yaksh/templates/yaksh/view_answerpaper.html b/yaksh/templates/yaksh/view_answerpaper.html
index d955192..905a111 100644
--- a/yaksh/templates/yaksh/view_answerpaper.html
+++ b/yaksh/templates/yaksh/view_answerpaper.html
@@ -159,12 +159,15 @@
<span class="badge badge-pill badge-primary">Test cases:
</span>
</h5>
+ <ul>
{% for testcase in question.get_test_cases %}
+ {% if not testcase.hidden %}
<strong>
- {{ forloop.counter }}. {{ testcase }}
+ <li> {{ testcase }} </li>
</strong>
- <br>
+ {% endif %}
{% endfor %}
+ </ul>
{% endif %}
<br>
<div class="card">
@@ -230,6 +233,7 @@
<div class="card-body">
{% with ans.error_list as err %}
{% for error in err %}
+ {% if not error.hidden %}
{% if error.type == 'stdio' %}
{% if error.given_input %}
<table class="table table-bordered table-responsive-sm">
@@ -293,6 +297,7 @@
{% else %}
<pre><code> {{error}} </code></pre>
{% endif %}
+ {% endif %}
{% endfor %}
{% endwith %}
{% if question.type == "code" %}
diff --git a/yaksh/test_models.py b/yaksh/test_models.py
index 4e6b1ae..37baf6e 100644
--- a/yaksh/test_models.py
+++ b/yaksh/test_models.py
@@ -431,7 +431,8 @@ class QuestionTestCases(unittest.TestCase):
self.test_case_upload_data = [{"test_case": "assert fact(3)==6",
"test_case_type": "standardtestcase",
"test_case_args": "",
- "weight": 1.0
+ "weight": 1.0,
+ "hidden": False
}]
questions_data = [{"snippet": "def fact()", "active": True,
"points": 1.0,
@@ -2029,7 +2030,8 @@ class TestCaseTestCases(unittest.TestCase):
{'test_case': 'assert myfunc(12, 13) == 15',
'test_case_type': 'standardtestcase',
'test_case_args': "",
- 'weight': 1.0
+ 'weight': 1.0,
+ 'hidden': False
}]
}
self.answer_data_json = json.dumps(answer_data)
@@ -2363,4 +2365,4 @@ class CommentModelTestCases(unittest.TestCase):
self.user1.delete()
self.course.delete()
self.post1.delete()
-        self.comment1.delete()
\ No newline at end of file
+ self.comment1.delete()