-rw-r--r--  yaksh/compare_stdio.py           43
-rw-r--r--  yaksh/python_stdio_evaluator.py  53
-rw-r--r--  yaksh/stdio_evaluator.py         20
3 files changed, 55 insertions(+), 61 deletions(-)
diff --git a/yaksh/compare_stdio.py b/yaksh/compare_stdio.py
new file mode 100644
index 0000000..ba258c3
--- /dev/null
+++ b/yaksh/compare_stdio.py
@@ -0,0 +1,43 @@
+try:
+    from itertools import zip_longest
+except ImportError:
+    from itertools import izip_longest as zip_longest  # Python 2 fallback
+
+class CompareOutputs(object):
+
+    def _incorrect_user_lines(self, exp_lines, user_lines):
+        # Collect 0-based indices of lines that are missing or do not match.
+        err_line_no = []
+        for i, (expected_line, user_line) in enumerate(zip_longest(exp_lines, user_lines)):
+            if expected_line is None or user_line is None:  # zip_longest padding
+                err_line_no.append(i)
+            elif user_line.strip() != expected_line.strip():
+                err_line_no.append(i)
+        return err_line_no
+
+    def compare_outputs(self, expected_output, user_output, given_input=None):
+        user_lines = user_output.splitlines()
+        exp_lines = expected_output.splitlines()
+        # Structured result returned along with the pass/fail flag so the
+        # caller can highlight exactly which lines differed.
+        msg = {"given_input": given_input,
+               "expected_output": exp_lines,
+               "user_output": user_lines
+               }
+        num_user = len(user_lines)
+        num_exp = len(exp_lines)
+        if num_user != num_exp:
+            err_line_no = self._incorrect_user_lines(exp_lines, user_lines)
+            msg["error_no"] = err_line_no
+            msg["error"] = "Expected {0} line(s) of output but got {1} line(s).".format(num_exp, num_user)
+            return False, msg
+        else:
+            err_line_no = self._incorrect_user_lines(exp_lines, user_lines)
+            if err_line_no:
+                msg["error_no"] = err_line_no
+                msg["error"] = "Line number(s) {0} did not match."\
+                    .format(", ".join(str(x + 1) for x in err_line_no))
+                return False, msg
+            else:
+                msg["error"] = "Correct answer"
+                return True, msg
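
For reference, a minimal sketch of how the new comparator might be exercised on its own; the sample strings and the standalone import path are illustrative, not part of this change:

    from yaksh.compare_stdio import CompareOutputs

    compare = CompareOutputs()

    # Matching outputs: success is True and msg["error"] is "Correct answer".
    success, msg = compare.compare_outputs("1\n4\n9", "1\n4\n9")

    # Fewer lines than expected: success is False and msg["error_no"] lists the
    # 0-based indices of the missing or mismatched lines.
    success, msg = compare.compare_outputs("1\n4\n9", "1\n4", given_input="3")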
diff --git a/yaksh/python_stdio_evaluator.py b/yaksh/python_stdio_evaluator.py
index ec5ed71..d6201f9 100644
--- a/yaksh/python_stdio_evaluator.py
+++ b/yaksh/python_stdio_evaluator.py
@@ -6,14 +6,10 @@ try:
except ImportError:
    from io import StringIO
-try:
-    from itertools import zip_longest
-except ImportError:
-    from itertools import izip_longest as zip_longest
-
# Local imports
from .file_utils import copy_files, delete_files
from .base_evaluator import BaseEvaluator
+from .compare_stdio import CompareOutputs
@contextmanager
@@ -25,44 +21,6 @@ def redirect_stdout():
    finally:
        sys.stdout = old_target  # restore to the previous value
-def _incorrect_user_lines(exp_lines, user_lines):
-    err_line_no = []
-    for i, (expected_line, user_line) in enumerate(zip_longest(exp_lines, user_lines)):
-        if not user_line or not expected_line:
-            err_line_no.append(i)
-        else:
-            if user_line.strip() != expected_line.strip():
-                err_line_no.append(i)
-    return err_line_no
-
-def compare_outputs(expected_output, user_output,given_input=None):
-    given_lines = user_output.splitlines()
-    exp_lines = expected_output.splitlines()
-    # if given_input:
-    #     given_input = given_input.splitlines()
-    msg = {"given_input":given_input,
-           "expected_output": exp_lines,
-           "user_output":given_lines
-           }
-    ng = len(given_lines)
-    ne = len(exp_lines)
-    if ng != ne:
-        err_line_no = _incorrect_user_lines(exp_lines, given_lines)
-        msg["error_no"] = err_line_no
-        msg["error"] = "We had expected {0} number of lines. We got {1} number of lines.".format(ne, ng)
-        return False, msg
-    else:
-        err_line_no = _incorrect_user_lines(exp_lines, given_lines)
-        if err_line_no:
-            msg["error_no"] = err_line_no
-            msg["error"] = "Line number(s) {0} did not match."\
-                .format(", ".join(map(str,[x+1 for x in err_line_no])))
-            return False, msg
-        else:
-            msg["error"] = "Correct answer"
-            return True, msg
-
-
class PythonStdIOEvaluator(BaseEvaluator):
    """Tests the Python code obtained from Code Server"""
    def __init__(self, metadata, test_case_data):
@@ -100,8 +58,9 @@ class PythonStdIOEvaluator(BaseEvaluator):
    def check_code(self):
        mark_fraction = self.weight
-        success, err = compare_outputs(self.expected_output,
-                                       self.output_value,
-                                       self.expected_input
-                                       )
+        compare = CompareOutputs()
+        success, err = compare.compare_outputs(self.expected_output,
+                                               self.output_value,
+                                               self.expected_input
+                                               )
        return success, err, mark_fraction
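
With the shared comparator, check_code now returns the structured message rather than a pre-formatted string. A rough illustration of the tuple a caller might receive when one line differs; the values are made up, the keys are those set by CompareOutputs.compare_outputs, and mark_fraction is simply self.weight:

    success, err, mark_fraction = (
        False,
        {"given_input": None,
         "expected_output": ["1", "4", "9"],
         "user_output": ["1", "4", "10"],
         "error_no": [2],
         "error": "Line number(s) 3 did not match."},
        1.0,
    )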
diff --git a/yaksh/stdio_evaluator.py b/yaksh/stdio_evaluator.py
index 554d4c5..a5785ea 100644
--- a/yaksh/stdio_evaluator.py
+++ b/yaksh/stdio_evaluator.py
@@ -5,6 +5,7 @@ import signal
# Local imports
from .base_evaluator import BaseEvaluator
from .grader import TimeoutException
+from .compare_stdio import CompareOutputs
class StdIOEvaluator(BaseEvaluator):
@@ -20,18 +21,9 @@ class StdIOEvaluator(BaseEvaluator):
            os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
            raise
        expected_output = expected_output.replace("\r", "")
-        if not expected_input:
-            error_msg = "Expected Output is\n{0} ".\
-                format(str(expected_output))
-        else:
-            error_msg = "Given Input is\n{0}\nExpected Output is\n{1}".\
-                format(expected_input, str(expected_output))
-        if output_err == '':
-            if user_output == expected_output:
-                success, err = True, None
-            else:
-                err = "Incorrect answer:\n" + error_msg +\
-                    "\nYour output is\n{0}".format(str(user_output))
-        else:
-            err = "Error:\n{0}".format(output_err)
+        compare = CompareOutputs()
+        success, err = compare.compare_outputs(expected_output,
+                                               user_output,
+                                               expected_input
+                                               )
        return success, err
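
Because evaluate_stdio now returns the same structured dict instead of a plain error string, any front end that previously displayed that string has to format the dict itself. A hypothetical helper, not part of this commit, that rebuilds a text report from the message might look like:

    def render_stdio_message(msg):
        """Turn a CompareOutputs message dict into display text."""
        parts = []
        if msg.get("given_input") is not None:
            parts.append("Given input:\n{0}".format(msg["given_input"]))
        parts.append("Expected output:\n{0}".format("\n".join(msg["expected_output"])))
        parts.append("Your output:\n{0}".format("\n".join(msg["user_output"])))
        parts.append(msg["error"])
        return "\n\n".join(parts)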