author Prabhu Ramachandran 2015-09-29 13:45:06 +0530
committer Prabhu Ramachandran 2015-09-29 13:45:06 +0530
commit ebbf135af98720f1979cd28a9108817bac385ce7 (patch)
tree e812ac5466ad043f867c58bc363df823522a8468 /yaksh/bash_code_evaluator.py
parent 31f5e743031d105b0406e9587dc33bb065cd6e4d (diff)
parent 53be4bc2ec40a9a84ff5ea73db2fbea0a07f5338 (diff)
Merge pull request #56 from ankitjavalkar/examtime
Start and Expiry times for Quizzes
Diffstat (limited to 'yaksh/bash_code_evaluator.py')
-rw-r--r-- yaksh/bash_code_evaluator.py | 122
1 file changed, 122 insertions(+), 0 deletions(-)
diff --git a/yaksh/bash_code_evaluator.py b/yaksh/bash_code_evaluator.py
new file mode 100644
index 0000000..a468fd7
--- /dev/null
+++ b/yaksh/bash_code_evaluator.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python
+import traceback
+import pwd
+import os
+from os.path import join, isfile
+import subprocess
+
+# local imports
+from code_evaluator import CodeEvaluator
+
+
+class BashCodeEvaluator(CodeEvaluator):
+ """Tests the Bash code obtained from Code Server"""
+    def __init__(self, test_case_data, test, language, user_answer,
+                 ref_code_path=None, in_dir=None):
+        super(BashCodeEvaluator, self).__init__(test_case_data, test,
+                                                language, user_answer,
+                                                ref_code_path, in_dir)
+ self.test_case_args = self._setup()
+
+ # Private Protocol ##########
+ def _setup(self):
+ super(BashCodeEvaluator, self)._setup()
+
+ self.submit_path = self.create_submit_code_file('submit.sh')
+ self._set_file_as_executable(self.submit_path)
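+        # ref_code_path is expected to hold two comma-separated paths:
+        # the reference script and the test case file, for example
+        # "ref.sh,test_cases.txt" (illustrative names).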
+ get_ref_path, get_test_case_path = self.ref_code_path.strip().split(',')
+ get_ref_path = get_ref_path.strip()
+ get_test_case_path = get_test_case_path.strip()
+ ref_path, test_case_path = self._set_test_code_file_path(get_ref_path,
+ get_test_case_path)
+
+ return ref_path, self.submit_path, test_case_path
+
+ def _teardown(self):
+ # Delete the created file.
+ super(BashCodeEvaluator, self)._teardown()
+ os.remove(self.submit_path)
+
+ def _check_code(self, ref_path, submit_path,
+ test_case_path=None):
+        """Validate the student script against the instructor script.
+
+        Test cases can optionally be provided. The first argument,
+        ref_path, is the path to the instructor script; it is assumed
+        to have executable permission. The second argument,
+        submit_path, is the path to the student script; it is also
+        assumed to have executable permission. The third, optional
+        argument is the path to a file of test cases. Each line in
+        this file is one test case, and its whitespace-separated
+        fields are passed to both scripts as command-line arguments.
+
+        Returns
+        -------
+
+        (True, "Correct answer"): if the student script produces the
+        same output as the instructor script for every test case.
+
+        (False, error_msg): if the student script's output differs
+        from the instructor script's for any test case.
+
+        (False, error_msg): if a mandatory argument is not a file or
+        a required permission is missing.
+        """
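+        # Example test case file (hypothetical test_cases.txt):
+        #     3 4
+        #     10 20
+        # The line "3 4" invokes each script as: <script> 3 4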
+ if not isfile(ref_path):
+ return False, "No file at %s or Incorrect path" % ref_path
+ if not isfile(submit_path):
+ return False, "No file at %s or Incorrect path" % submit_path
+ if not os.access(ref_path, os.X_OK):
+ return False, "Script %s is not executable" % ref_path
+ if not os.access(submit_path, os.X_OK):
+ return False, "Script %s is not executable" % submit_path
+
+        # With no test case file, compare the output of a single run
+        # of each script.
+        if not test_case_path:
+ ret = self._run_command(ref_path, stdin=None,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ proc, inst_stdout, inst_stderr = ret
+ ret = self._run_command(submit_path, stdin=None,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ proc, stdnt_stdout, stdnt_stderr = ret
+ if inst_stdout == stdnt_stdout:
+ return True, "Correct answer"
+ else:
+                err = "Error: expected %s, got %s" % (inst_stdout + inst_stderr,
+                                                      stdnt_stdout + stdnt_stderr)
+ return False, err
+ else:
+ if not isfile(test_case_path):
+ return False, "No test case at %s" % test_case_path
+            if not os.access(test_case_path, os.R_OK):
+                return False, "Test case file %s is not readable" % test_case_path
+            # valid_answer starts out True so the loop can stop running
+            # test cases once one fails.
+            valid_answer = True
+            loop_count = 0
+            with open(test_case_path) as test_case_file:
+                test_cases = test_case_file.readlines()
+            num_lines = len(test_cases)
+            # Catch degenerate inputs such as an empty test case file,
+            # which would otherwise be reported as a correct answer.
+            if num_lines == 0:
+                return False, "Test case file %s is empty" % test_case_path
+ for test_case in test_cases:
+ loop_count += 1
+ if valid_answer:
+                    args = [ref_path] + test_case.split()
+ ret = self._run_command(args, stdin=None,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ proc, inst_stdout, inst_stderr = ret
+                    args = [submit_path] + test_case.split()
+ ret = self._run_command(args, stdin=None,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ proc, stdnt_stdout, stdnt_stderr = ret
+ valid_answer = inst_stdout == stdnt_stdout
+            # This check must run after the loop; placing it inside the
+            # loop would return before all test cases are exercised.
+            if valid_answer and (num_lines == loop_count):
+                return True, "Correct answer"
+            else:
+                err = "Error: expected %s, got %s" % (inst_stdout + inst_stderr,
+                                                      stdnt_stdout + stdnt_stderr)
+                return False, err
+
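The checking protocol this file implements is easy to exercise on its own. Below is a minimal standalone sketch, assuming hypothetical file names ref.sh, submit.sh, and test_cases.txt, and calling subprocess directly instead of the evaluator's _run_command helper; it reproduces the per-line argument passing and stdout comparison that _check_code performs.

    #!/usr/bin/env python
    import subprocess


    def compare_scripts(ref_path, submit_path, test_case_path):
        # Each line of the test case file supplies one set of
        # command-line arguments for both scripts.
        with open(test_case_path) as test_case_file:
            test_cases = test_case_file.readlines()
        for test_case in test_cases:
            args = test_case.split()
            ref_out, _ = subprocess.Popen(
                [ref_path] + args,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE).communicate()
            stdnt_out, _ = subprocess.Popen(
                [submit_path] + args,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE).communicate()
            # The two scripts agree on a test case iff stdout matches.
            if ref_out != stdnt_out:
                return False, "Error: expected %r, got %r" % (ref_out,
                                                              stdnt_out)
        return True, "Correct answer"


    if __name__ == '__main__':
        print(compare_scripts('ref.sh', 'submit.sh', 'test_cases.txt'))

As in the evaluator itself, both scripts must carry executable permission, and each line of the test case file maps to exactly one invocation of each script.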