summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--yaksh/cpp_code_evaluator.py3
-rw-r--r--yaksh/evaluator_tests/test_c_cpp_evaluation.py65
-rw-r--r--yaksh/evaluator_tests/test_java_evaluation.py57
-rw-r--r--yaksh/java_code_evaluator.py2
-rw-r--r--yaksh/models.py2
-rw-r--r--yaksh/test_models.py34
6 files changed, 159 insertions, 4 deletions
diff --git a/yaksh/cpp_code_evaluator.py b/yaksh/cpp_code_evaluator.py
index 7ab06ec..8bd3beb 100644
--- a/yaksh/cpp_code_evaluator.py
+++ b/yaksh/cpp_code_evaluator.py
@@ -129,7 +129,6 @@ class CppCodeEvaluator(BaseEvaluator):
if stdnt_stderr == '':
proc, main_out, main_err = self.compiled_test_code
main_err = self._remove_null_substitute_char(main_err)
-
if main_err == '':
ret = self._run_command([self.ref_output_path],
stdin=None,
@@ -143,7 +142,7 @@ class CppCodeEvaluator(BaseEvaluator):
else:
err = "{0} \n {1}".format(stdout, stderr)
else:
- err = "Error:"
+ err = "Test case Error:"
try:
error_lines = main_err.splitlines()
for e in error_lines:
diff --git a/yaksh/evaluator_tests/test_c_cpp_evaluation.py b/yaksh/evaluator_tests/test_c_cpp_evaluation.py
index 877f708..5b49671 100644
--- a/yaksh/evaluator_tests/test_c_cpp_evaluation.py
+++ b/yaksh/evaluator_tests/test_c_cpp_evaluation.py
@@ -215,6 +215,71 @@ class CAssertionEvaluationTestCases(EvaluatorBaseTest):
# Then
self.assertTrue(result.get('success'))
+ def test_incorrect_testcase(self):
+ # Given
+ self.tc_data = dedent("""
+ #include <stdio.h>
+ #include <stdlib.h>
+
+ extern int add(int, int);
+
+ template <class T>
+
+ void check(T expect, T result)
+ {
+ if (expect == result)
+ {
+ printf("Correct: Expected %d got %d ",expect,result);
+ }
+ else
+ {
+ printf("Incorrect: Expected %d got %d ",expect,result);
+ exit (1);
+ }
+ }
+
+ int main(void)
+ {
+ int result;
+ result = add(0,0);
+ printf("Input submitted to the function: 0, 0");
+ check(0, result);
+ result = add(2,3);
+ printf("Input submitted to the function: 2 3");
+ check(5,result)
+ printf("All Correct");
+ return 0;
+ }
+ """)
+ user_answer = dedent("""\
+ int add(int a, int b)
+ {
+ return a+b;
+ }""")
+ self.test_case_data = [{"test_case": self.tc_data,
+ "test_case_type": "standardtestcase",
+ "weight": 0.0
+ }]
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'cpp'
+ }, 'test_case_data': self.test_case_data,
+ }
+
+ # When
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
+ # Then
+ err = result.get('error')[0]
+ lines_of_error = len(err.splitlines())
+ self.assertFalse(result.get('success'))
+ self.assertTrue(lines_of_error > 1)
+ self.assertIn("Test case Error", err)
+
class CppStdIOEvaluationTestCases(EvaluatorBaseTest):
def setUp(self):
diff --git a/yaksh/evaluator_tests/test_java_evaluation.py b/yaksh/evaluator_tests/test_java_evaluation.py
index 909d7ca..ab86dec 100644
--- a/yaksh/evaluator_tests/test_java_evaluation.py
+++ b/yaksh/evaluator_tests/test_java_evaluation.py
@@ -241,6 +241,63 @@ class JavaAssertionEvaluationTestCases(EvaluatorBaseTest):
# Then
self.assertTrue(result.get("success"))
+ def test_incorrect_testcase(self):
+ # Given
+ self.tc_data = dedent("""
+ class main
+ {
+ public static <E> void check(E expect, E result)
+ {
+ if(result.equals(expect))
+ {
+ System.out.println("Correct:Output expected "+expect+
+ "and got "+result);
+ }
+ else
+ {
+ System.out.println("Incorrect:Output expected "+expect+
+ "but got "+result);
+ System.exit(1);
+ }
+ }
+ public static void main(String arg[])
+ {
+ Test t = new Test();
+ int result, input, output;
+ input = 0; output = 0;
+ result = t.square_num(input);
+ System.out.println("Input submitted to the function: "+
+ input);
+ check(output, result)
+ }
+ }
+ """)
+ user_answer = ("class Test {\n\tint square_num(int a) "
+ "{\n\treturn a;\n\t}\n}")
+ self.test_case_data = [{"test_case": self.tc_data,
+ "test_case_type": "standardtestcase",
+ "weight": 0.0
+ }]
+ kwargs = {
+ 'metadata': {
+ 'user_answer': user_answer,
+ 'file_paths': self.file_paths,
+ 'partial_grading': False,
+ 'language': 'java'
+ }, 'test_case_data': self.test_case_data,
+ }
+
+ # When
+ grader = Grader(self.in_dir)
+ result = grader.evaluate(kwargs)
+
+ # Then
+ err = result.get('error')[0]
+ lines_of_error = len(err.splitlines())
+ self.assertFalse(result.get('success'))
+ self.assertTrue(lines_of_error > 1)
+ self.assertIn("Test case Error", err)
+
class JavaStdIOEvaluationTestCases(EvaluatorBaseTest):
def setUp(self):
diff --git a/yaksh/java_code_evaluator.py b/yaksh/java_code_evaluator.py
index 2612509..5f2288d 100644
--- a/yaksh/java_code_evaluator.py
+++ b/yaksh/java_code_evaluator.py
@@ -150,7 +150,7 @@ class JavaCodeEvaluator(BaseEvaluator):
else:
err = stdout + "\n" + stderr
else:
- err = "Error:"
+ err = "Test case Error:"
try:
error_lines = main_err.splitlines()
for e in error_lines:
diff --git a/yaksh/models.py b/yaksh/models.py
index 43f39e9..5d17dba 100644
--- a/yaksh/models.py
+++ b/yaksh/models.py
@@ -1036,7 +1036,7 @@ class Question(models.Model):
new_test_case.save()
except Exception:
- msg = "File not correct."
+ msg = "Unable to parse test case data"
except Exception as exc_msg:
msg = "Error Parsing Yaml: {0}".format(exc_msg)
return msg
diff --git a/yaksh/test_models.py b/yaksh/test_models.py
index 132537d..210456f 100644
--- a/yaksh/test_models.py
+++ b/yaksh/test_models.py
@@ -356,6 +356,23 @@ class QuestionTestCases(unittest.TestCase):
self.yaml_questions_data_with_missing_fields = yaml.safe_dump_all(
questions_data_with_missing_fields
)
+ self.bad_yaml_question_data = '''[{
+ "active": True, "points": 1.0, "description" "factorial of a no",
+ "language": "Python", "type": "Code",
+ "testcase": self.test_case_upload_data,
+ "summary": "bad yaml"
+ }]'''
+
+ self.test_case_without_type = [{"test_case": "assert fact(3)==6",
+ "test_case_args": "",
+ "weight": 1.0
+ }]
+ self.yaml_question_data_without_test_case_type = yaml.safe_dump_all([{
+ "active": True, "points": 1.0, "description": "factorial of a no",
+ "language": "Python", "type": "Code",
+ "testcase": self.test_case_without_type,
+ "summary": "bad yaml"
+ }])
def tearDown(self):
shutil.rmtree(self.load_tmp_path)
@@ -460,6 +477,23 @@ class QuestionTestCases(unittest.TestCase):
tags = question_data.tags.all().values_list("name", flat=True)
self.assertListEqual(list(tags), [])
+ def test_load_questions_with_bad_yaml(self):
+ """
+ Test if yaml file is parsed correctly
+ """
+ question = Question()
+ msg = question.load_questions(
+ self.bad_yaml_question_data,
+ self.user1
+ )
+ self.assertIn("Error Parsing Yaml", msg)
+
+ msg = question.load_questions(
+ self.yaml_question_data_without_test_case_type,
+ self.user1
+ )
+ self.assertEqual(msg, "Unable to parse test case data")
+
###############################################################################
class QuizTestCases(unittest.TestCase):