summaryrefslogtreecommitdiff
path: root/testapp/exam/evaluate_bash.py
blob: 57c89ae57da19d30c8fda313035b21e6029f7803 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
#!/usr/bin/env python
import traceback
import pwd
import os
from os.path import join, isfile
import subprocess
import importlib

# local imports
from code_server import TestCode
from registry import registry


class EvaluateBash(TestCode):
    """Tests the Bash code obtained from Code Server.

    A submission is written to disk, then its output is compared against a
    reference (instructor) script, optionally once per line of a test-case
    file (each line is passed to both scripts as command-line arguments).
    """

    def evaluate_code(self):
        """Evaluate the submitted Bash code against the reference script.

        Returns
        -------
        (success, err) : (bool, str)
            success is True when the submission's output matches the
            reference script's output; err carries the verdict message.
        """
        submit_path = self._create_submit_code_file('submit.sh')
        get_ref_path, get_test_case_path = self.ref_code_path.strip().split(',')
        ref_path, test_case_path = self._set_test_code_file_path(get_ref_path,
                                                                 get_test_case_path)
        try:
            success, err = self.check_bash_script(ref_path, submit_path,
                                                  test_case_path)
        finally:
            # Delete the created file even if the check raises, so failed
            # evaluations do not leak submission files on disk.
            os.remove(submit_path)

        return success, err

    def check_bash_script(self, ref_path, submit_path,
                          test_case_path=None):
        """ Function validates student script using instructor script as
        reference. Test cases can optionally be provided.  The first argument
        ref_path, is the path to instructor script, it is assumed to
        have executable permission.  The second argument submit_path, is
        the path to the student script, it is assumed to have executable
        permission.  The Third optional argument is the path to test the
        scripts.  Each line in this file is a test case and each test case is
        passed to the script as standard arguments.

        Returns
        --------

        returns (True, "Correct answer") : If the student script passes all
        test cases/have same output, when compared to the instructor script

        returns (False, error_msg): If the student script fails a single
        test/have dissimilar output, when compared to the instructor script.

        Returns (False, error_msg): If mandatory arguments are not files or if
        the required permissions are not given to the file(s).

        """
        if not isfile(ref_path):
            return False, "No file at %s or Incorrect path" % ref_path
        if not isfile(submit_path):
            return False, "No file at %s or Incorrect path" % submit_path
        if not os.access(ref_path, os.X_OK):
            return False, "Script %s is not executable" % ref_path
        if not os.access(submit_path, os.X_OK):
            return False, "Script %s is not executable" % submit_path

        # BUG FIX: the original tested `test_case_path is None or ""`, which
        # is equivalent to `test_case_path is None` because the literal ""
        # is falsy on its own — an empty-string path slipped through to the
        # test-case branch.  `not test_case_path` covers both None and "".
        if not test_case_path:
            ret = self._run_command(ref_path, stdin=None,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
            proc, inst_stdout, inst_stderr = ret
            ret = self._run_command(submit_path, stdin=None,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
            proc, stdnt_stdout, stdnt_stderr = ret
            if inst_stdout == stdnt_stdout:
                return True, "Correct answer"
            # Report what was actually compared (stdout), plus stderr for
            # diagnostics — the original misleadingly showed only stderr.
            err = "Error: expected %s, got %s" % (inst_stdout + inst_stderr,
                                                  stdnt_stdout + stdnt_stderr)
            return False, err

        if not isfile(test_case_path):
            return False, "No test case at %s" % test_case_path
        # BUG FIX: the original checked ref_path's readability here, while
        # both the intent and the error message refer to test_case_path.
        if not os.access(test_case_path, os.R_OK):
            return False, "Test script %s, not readable" % test_case_path
        # `with` guarantees the test-case file is closed (the original
        # leaked the file handle).
        with open(test_case_path) as test_case_file:
            test_cases = test_case_file.readlines()
        for test_case in test_cases:
            # Each test-case line is split into arguments for both scripts.
            args = [ref_path] + test_case.split()
            ret = self._run_command(args, stdin=None,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
            proc, inst_stdout, inst_stderr = ret
            args = [submit_path] + test_case.split()
            ret = self._run_command(args, stdin=None,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
            proc, stdnt_stdout, stdnt_stderr = ret
            # Stop at the first mismatch instead of looping over the
            # remaining cases with a spent `valid_answer` flag.
            if inst_stdout != stdnt_stdout:
                err = "Error:expected %s, got %s" % (inst_stdout + inst_stderr,
                                                     stdnt_stdout + stdnt_stderr)
                return False, err
        # All test cases matched (an empty test-case file also passes,
        # matching the original's behaviour).
        return True, "Correct answer"

registry.register('bash', evaluate_bash, EvaluateBash)