#!/usr/bin/env python
import os
from os.path import isfile
import subprocess

# Local imports
from code_evaluator import CodeEvaluator


class BashCodeEvaluator(CodeEvaluator):
    """Tests the Bash code obtained from the code server."""

    def __init__(self, test_case_data, test, language, user_answer,
                 ref_code_path=None, in_dir=None):
        super(BashCodeEvaluator, self).__init__(test_case_data, test,
                                                language, user_answer,
                                                ref_code_path, in_dir)
        self.submit_path = self.create_submit_code_file('submit.sh')
        self.test_case_args = self._setup()

    # Private Protocol ##########

    def _setup(self):
        super(BashCodeEvaluator, self)._setup()
        self.set_file_as_executable(self.submit_path)
        # ref_code_path holds a comma-separated pair:
        # "<instructor script path>,<test case file path>"
        get_ref_path, get_test_case_path = self.ref_code_path.strip().split(',')
        get_ref_path = get_ref_path.strip()
        get_test_case_path = get_test_case_path.strip()
        ref_path, test_case_path = self.set_test_code_file_path(
            get_ref_path, get_test_case_path)
        return ref_path, self.submit_path, test_case_path

    def _teardown(self):
        # Delete the file created for the submitted code.
        super(BashCodeEvaluator, self)._teardown()
        os.remove(self.submit_path)

    def _check_code(self, ref_path, submit_path, test_case_path=None):
        """Validate the student script against the instructor script.

        The first argument, ref_path, is the path to the instructor
        script; it is assumed to have executable permission.  The second
        argument, submit_path, is the path to the student script; it is
        also assumed to have executable permission.  The third, optional
        argument is the path to a test case file: each line of this file
        is one test case, and each test case is passed to both scripts
        as command line arguments.

        Returns
        -------
        (True, "Correct answer"): if the student script produces the
            same output as the instructor script for every test case.
        (False, error_msg): if the student script produces different
            output for any test case, if a mandatory argument is not a
            file, or if the required permissions are missing.
        """
        if not isfile(ref_path):
            return False, "No file at %s or incorrect path" % ref_path
        if not isfile(submit_path):
            return False, "No file at %s or incorrect path" % submit_path
        if not os.access(ref_path, os.X_OK):
            return False, "Script %s is not executable" % ref_path
        if not os.access(submit_path, os.X_OK):
            return False, "Script %s is not executable" % submit_path

        if not test_case_path:
            # No test case file: compare a single run of each script.
            ret = self.run_command(ref_path, stdin=None,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
            proc, inst_stdout, inst_stderr = ret
            ret = self.run_command(submit_path, stdin=None,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
            proc, stdnt_stdout, stdnt_stderr = ret
            if inst_stdout == stdnt_stdout:
                return True, "Correct answer"
            err = "Error: expected %s, got %s" % (inst_stdout + inst_stderr,
                                                  stdnt_stdout + stdnt_stderr)
            return False, err
        if not isfile(test_case_path):
            return False, "No test case file at %s" % test_case_path
        if not os.access(test_case_path, os.R_OK):
            return False, "Test case file %s is not readable" % test_case_path

        with open(test_case_path) as test_case_file:
            test_cases = test_case_file.readlines()
        for test_case in test_cases:
            # Run both scripts with the test case as command line arguments.
            args = [ref_path] + test_case.split()
            ret = self.run_command(args, stdin=None,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
            proc, inst_stdout, inst_stderr = ret
            args = [submit_path] + test_case.split()
            ret = self.run_command(args, stdin=None,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
            proc, stdnt_stdout, stdnt_stderr = ret
            if inst_stdout != stdnt_stdout:
                # Stop at the first test case whose output differs.
                err = "Error: expected %s, got %s" % (
                    inst_stdout + inst_stderr,
                    stdnt_stdout + stdnt_stderr)
                return False, err
        return True, "Correct answer"
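

# Usage sketch (illustrative only, not part of the original module).  The
# entry point that normally drives _check_code lives in the CodeEvaluator
# base class, which is not shown here, so the direct call below and the
# file paths are assumptions.
#
#     evaluator = BashCodeEvaluator(
#         test_case_data=[], test=None, language="bash",
#         user_answer="#!/bin/bash\necho $(($1 + $2))",
#         ref_code_path="bash_files/sample.sh,bash_files/sample.args",
#         in_dir="/tmp/evaluation",
#     )
#     success, message = evaluator._check_code(*evaluator.test_case_args)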