Diffstat (limited to 'yaksh')
27 files changed, 538 insertions, 108 deletions
diff --git a/yaksh/bash_code_evaluator.py b/yaksh/bash_code_evaluator.py index a0af0e2..bce7f07 100644 --- a/yaksh/bash_code_evaluator.py +++ b/yaksh/bash_code_evaluator.py @@ -9,6 +9,7 @@ import importlib # local imports from code_evaluator import CodeEvaluator +from file_utils import copy_files, delete_files class BashCodeEvaluator(CodeEvaluator): @@ -22,8 +23,10 @@ class BashCodeEvaluator(CodeEvaluator): # Delete the created file. super(BashCodeEvaluator, self).teardown() os.remove(self.submit_code_path) + if self.files: + delete_files(self.files) - def check_code(self, user_answer, test_case): + def check_code(self, user_answer, file_paths, test_case): """ Function validates student script using instructor script as reference. Test cases can optionally be provided. The first argument ref_path, is the path to instructor script, it is assumed to @@ -53,6 +56,9 @@ class BashCodeEvaluator(CodeEvaluator): clean_ref_code_path, clean_test_case_path = \ self._set_test_code_file_path(get_ref_path, get_test_case_path) + self.files = [] + if file_paths: + self.files = copy_files(file_paths) if not isfile(clean_ref_code_path): msg = "No file at %s or Incorrect path" % clean_ref_code_path return False, msg @@ -67,6 +73,7 @@ class BashCodeEvaluator(CodeEvaluator): return False, msg success = False + user_answer = user_answer.replace("\r", "") self.write_to_submit_code_file(self.submit_code_path, user_answer) if clean_test_case_path is None or "": @@ -114,6 +121,8 @@ class BashCodeEvaluator(CodeEvaluator): stderr=subprocess.PIPE ) proc, inst_stdout, inst_stderr = ret + if file_paths: + self.files = copy_files(file_paths) args = [self.submit_code_path] + \ [x for x in test_case.split()] ret = self._run_command(args, @@ -126,7 +135,7 @@ class BashCodeEvaluator(CodeEvaluator): return True, "Correct answer" else: err = ("Error:expected" - " %s, got %s").format(inst_stdout+inst_stderr, + " {0}, got {1}").format(inst_stdout+inst_stderr, stdnt_stdout+stdnt_stderr ) return False, err diff --git a/yaksh/bash_files/sample1.args b/yaksh/bash_files/sample1.args new file mode 100644 index 0000000..541cb64 --- /dev/null +++ b/yaksh/bash_files/sample1.args @@ -0,0 +1 @@ +test.txt
\ No newline at end of file diff --git a/yaksh/bash_files/sample1.sh b/yaksh/bash_files/sample1.sh new file mode 100755 index 0000000..965874b --- /dev/null +++ b/yaksh/bash_files/sample1.sh @@ -0,0 +1,2 @@ +#!/bin/bash +cat $1 diff --git a/yaksh/bash_stdio_evaluator.py b/yaksh/bash_stdio_evaluator.py index 5431e5d..56f2e35 100644 --- a/yaksh/bash_stdio_evaluator.py +++ b/yaksh/bash_stdio_evaluator.py @@ -6,7 +6,7 @@ from os.path import isfile #local imports from code_evaluator import CodeEvaluator from stdio_evaluator import Evaluator - +from file_utils import copy_files, delete_files class BashStdioEvaluator(CodeEvaluator): """Evaluates Bash StdIO based code""" @@ -18,8 +18,13 @@ class BashStdioEvaluator(CodeEvaluator): def teardown(self): super(BashStdioEvaluator, self).teardown() os.remove(self.submit_code_path) + if self.files: + delete_files(self.files) - def compile_code(self, user_answer, expected_input, expected_output): + def compile_code(self, user_answer, file_paths, expected_input, expected_output): + self.files = [] + if file_paths: + self.files = copy_files(file_paths) if not isfile(self.submit_code_path): msg = "No file at %s or Incorrect path" % self.submit_code_path return False, msg @@ -27,7 +32,7 @@ class BashStdioEvaluator(CodeEvaluator): user_answer = user_answer.replace("\r", "") self.write_to_submit_code_file(self.submit_code_path, user_answer) - def check_code(self, user_answer, expected_input, expected_output): + def check_code(self, user_answer, file_paths, expected_input, expected_output): success = False expected_input = str(expected_input).replace('\r', '') proc = subprocess.Popen("bash ./Test.sh", diff --git a/yaksh/c_cpp_files/file_data.c b/yaksh/c_cpp_files/file_data.c new file mode 100644 index 0000000..1c0ab12 --- /dev/null +++ b/yaksh/c_cpp_files/file_data.c @@ -0,0 +1,25 @@ +#include <stdio.h> +#include <stdlib.h> + +extern int ans(); + +template <class T> +void check(T expect,T result) +{ + if (expect == result) + { + printf("\nCorrect:\n Expected %d got %d \n",expect,result); + } + else + { + printf("\nIncorrect:\n Expected %d got %d \n",expect,result); + exit (0); + } +} + +int main(void) +{ + int result; + result = ans(); + check(50, result); +} diff --git a/yaksh/code_evaluator.py b/yaksh/code_evaluator.py index aab99eb..2fb429f 100644 --- a/yaksh/code_evaluator.py +++ b/yaksh/code_evaluator.py @@ -88,7 +88,7 @@ class CodeEvaluator(object): def setup(self): self._change_dir(self.in_dir) - def safe_evaluate(self, user_answer, test_case_data): + def safe_evaluate(self, user_answer, test_case_data, file_paths=None): """ Handles code evaluation along with compilation, signal handling and Exception handling @@ -101,8 +101,8 @@ class CodeEvaluator(object): # Do whatever testing needed. 
try: for test_case in test_case_data: - self.compile_code(user_answer, **test_case) - success, err = self.check_code(user_answer, **test_case) + self.compile_code(user_answer, file_paths, **test_case) + success, err = self.check_code(user_answer, file_paths, **test_case) if not success: break @@ -124,7 +124,7 @@ class CodeEvaluator(object): def check_code(self): raise NotImplementedError("check_code method not implemented") - def compile_code(self, user_answer, **kwargs): + def compile_code(self, user_answer, file_paths, **kwargs): pass def create_submit_code_file(self, file_name): @@ -136,7 +136,6 @@ class CodeEvaluator(object): return submit_path - def write_to_submit_code_file(self, file_path, user_answer): """ Write the code (`answer`) to a file""" submit_f = open(file_path, 'w') diff --git a/yaksh/cpp_code_evaluator.py b/yaksh/cpp_code_evaluator.py index b869442..c65242d 100644 --- a/yaksh/cpp_code_evaluator.py +++ b/yaksh/cpp_code_evaluator.py @@ -8,6 +8,7 @@ import importlib # local imports from code_evaluator import CodeEvaluator +from file_utils import copy_files, delete_files class CppCodeEvaluator(CodeEvaluator): @@ -22,6 +23,12 @@ class CppCodeEvaluator(CodeEvaluator): super(CppCodeEvaluator, self).teardown() # Delete the created file. os.remove(self.submit_code_path) + if os.path.exists(self.ref_output_path): + os.remove(self.ref_output_path) + if os.path.exists(self.user_output_path): + os.remove(self.user_output_path) + if self.files: + delete_files(self.files) def set_file_paths(self): user_output_path = os.getcwd() + '/output' @@ -38,14 +45,16 @@ class CppCodeEvaluator(CodeEvaluator): ref_output_path) return compile_command, compile_main - def compile_code(self, user_answer, test_case): + def compile_code(self, user_answer, file_paths, test_case): + self.files = [] if self.compiled_user_answer and self.compiled_test_code: return None else: ref_code_path = test_case clean_ref_code_path, clean_test_case_path = \ self._set_test_code_file_path(ref_code_path) - + if file_paths: + self.files = copy_files(file_paths) if not isfile(clean_ref_code_path): msg = "No file at %s or Incorrect path" % clean_ref_code_path return False, msg @@ -57,7 +66,7 @@ class CppCodeEvaluator(CodeEvaluator): self.user_output_path, self.ref_output_path = self.set_file_paths() self.compile_command, self.compile_main = self.get_commands( clean_ref_code_path, - self.user_output_path, + self.user_output_path, self.ref_output_path ) self.compiled_user_answer = self._run_command( @@ -76,7 +85,7 @@ class CppCodeEvaluator(CodeEvaluator): return self.compiled_user_answer, self.compiled_test_code - def check_code(self, user_answer, test_case): + def check_code(self, user_answer, file_paths, test_case): """ Function validates student code using instructor code as reference.The first argument ref_code_path, is the path to instructor code, it is assumed to have executable permission. 
@@ -117,7 +126,6 @@ class CppCodeEvaluator(CodeEvaluator): success, err = True, "Correct answer" else: err = stdout + "\n" + stderr - os.remove(self.ref_output_path) else: err = "Error:" try: @@ -129,7 +137,6 @@ class CppCodeEvaluator(CodeEvaluator): err = err + "\n" + e except: err = err + "\n" + main_err - os.remove(self.user_output_path) else: err = "Compilation Error:" try: diff --git a/yaksh/cpp_stdio_evaluator.py b/yaksh/cpp_stdio_evaluator.py index db49adf..4ea1bbf 100644 --- a/yaksh/cpp_stdio_evaluator.py +++ b/yaksh/cpp_stdio_evaluator.py @@ -6,6 +6,7 @@ from os.path import isfile #local imports from code_evaluator import CodeEvaluator from stdio_evaluator import Evaluator +from file_utils import copy_files, delete_files class CppStdioEvaluator(CodeEvaluator): @@ -18,6 +19,8 @@ class CppStdioEvaluator(CodeEvaluator): def teardown(self): super(CppStdioEvaluator, self).teardown() os.remove(self.submit_code_path) + if self.files: + delete_files(self.files) def set_file_paths(self): user_output_path = os.getcwd() + '/output' @@ -31,8 +34,11 @@ class CppStdioEvaluator(CodeEvaluator): ref_output_path) return compile_command, compile_main - def compile_code(self, user_answer, expected_input, expected_output): + def compile_code(self, user_answer, file_paths, expected_input, expected_output): + self.files = [] + if file_paths: + self.files = copy_files(file_paths) if not isfile(self.submit_code_path): msg = "No file at %s or Incorrect path" % self.submit_code_path return False, msg @@ -56,7 +62,7 @@ class CppStdioEvaluator(CodeEvaluator): ) return self.compiled_user_answer, self.compiled_test_code - def check_code(self, user_answer, expected_input, expected_output): + def check_code(self, user_answer, file_paths, expected_input, expected_output): success = False proc, stdnt_out, stdnt_stderr = self.compiled_user_answer stdnt_stderr = self._remove_null_substitute_char(stdnt_stderr) diff --git a/yaksh/evaluator_tests/test_bash_evaluation.py b/yaksh/evaluator_tests/test_bash_evaluation.py index ee6c1f0..addc5e6 100644 --- a/yaksh/evaluator_tests/test_bash_evaluation.py +++ b/yaksh/evaluator_tests/test_bash_evaluation.py @@ -11,10 +11,11 @@ class BashAssertionEvaluationTestCases(unittest.TestCase): self.test_case_data = [ {"test_case": "bash_files/sample.sh,bash_files/sample.args"} ] - self.in_dir = "/tmp" + self.in_dir = os.getcwd() self.timeout_msg = ("Code took more than {0} seconds to run. 
" "You probably have an infinite loop in your" " code.").format(SERVER_TIMEOUT) + self.file_paths = None def test_correct_answer(self): user_answer = ("#!/bin/bash\n[[ $# -eq 2 ]]" @@ -22,7 +23,8 @@ class BashAssertionEvaluationTestCases(unittest.TestCase): ) get_class = BashCodeEvaluator(self.in_dir) kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data + 'test_case_data': self.test_case_data, + 'file_paths': self.file_paths } result = get_class.evaluate(**kwargs) self.assertTrue(result.get('success')) @@ -33,7 +35,8 @@ class BashAssertionEvaluationTestCases(unittest.TestCase): "&& echo $(( $1 - $2 )) && exit $(( $1 - $2 ))") get_class = BashCodeEvaluator(self.in_dir) kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data + 'test_case_data': self.test_case_data, + 'file_paths': self.file_paths } result = get_class.evaluate(**kwargs) self.assertFalse(result.get("success")) @@ -44,12 +47,27 @@ class BashAssertionEvaluationTestCases(unittest.TestCase): " do echo "" > /dev/null ; done") get_class = BashCodeEvaluator(self.in_dir) kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data + 'test_case_data': self.test_case_data, + 'file_paths': self.file_paths } result = get_class.evaluate(**kwargs) self.assertFalse(result.get("success")) self.assertEquals(result.get("error"), self.timeout_msg) + def test_file_based_assert(self): + self.file_paths = [(os.getcwd()+"/yaksh/test.txt", False)] + self.test_case_data = [ + {"test_case": "bash_files/sample1.sh,bash_files/sample1.args"} + ] + user_answer = ("#!/bin/bash\ncat $1") + get_class = BashCodeEvaluator() + kwargs = {'user_answer': user_answer, + 'test_case_data': self.test_case_data, + 'file_paths': self.file_paths + } + result = get_class.evaluate(**kwargs) + self.assertTrue(result.get("success")) + self.assertEquals(result.get("error"), "Correct answer") class BashStdioEvaluationTestCases(unittest.TestCase): def setUp(self): diff --git a/yaksh/evaluator_tests/test_c_cpp_evaluation.py b/yaksh/evaluator_tests/test_c_cpp_evaluation.py index ff3cddf..0042d0f 100644 --- a/yaksh/evaluator_tests/test_c_cpp_evaluation.py +++ b/yaksh/evaluator_tests/test_c_cpp_evaluation.py @@ -9,16 +9,18 @@ from textwrap import dedent class CAssertionEvaluationTestCases(unittest.TestCase): def setUp(self): self.test_case_data = [{"test_case": "c_cpp_files/main.cpp"}] - self.in_dir = "/tmp" + self.in_dir = os.getcwd() self.timeout_msg = ("Code took more than {0} seconds to run. 
" "You probably have an infinite loop in your" " code.").format(SERVER_TIMEOUT) + self.file_paths = None def test_correct_answer(self): user_answer = "int add(int a, int b)\n{return a+b;}" get_class = CppCodeEvaluator(self.in_dir) kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data + 'test_case_data': self.test_case_data, + 'file_paths': self.file_paths } result = get_class.evaluate(**kwargs) self.assertTrue(result.get('success')) @@ -28,7 +30,8 @@ class CAssertionEvaluationTestCases(unittest.TestCase): user_answer = "int add(int a, int b)\n{return a-b;}" get_class = CppCodeEvaluator(self.in_dir) kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data + 'test_case_data': self.test_case_data, + 'file_paths': self.file_paths } result = get_class.evaluate(**kwargs) self.assertFalse(result.get('success')) @@ -39,7 +42,8 @@ class CAssertionEvaluationTestCases(unittest.TestCase): user_answer = "int add(int a, int b)\n{return a+b}" get_class = CppCodeEvaluator(self.in_dir) kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data + 'test_case_data': self.test_case_data, + 'file_paths': self.file_paths } result = get_class.evaluate(**kwargs) self.assertFalse(result.get("success")) @@ -49,17 +53,43 @@ class CAssertionEvaluationTestCases(unittest.TestCase): user_answer = "int add(int a, int b)\n{while(1>0){}}" get_class = CppCodeEvaluator(self.in_dir) kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data + 'test_case_data': self.test_case_data, + 'file_paths': self.file_paths } result = get_class.evaluate(**kwargs) self.assertFalse(result.get("success")) self.assertEquals(result.get("error"), self.timeout_msg) + def test_file_based_assert(self): + self.file_paths = [(os.getcwd()+"/yaksh/test.txt", False)] + self.test_case_data = [{"test_case": "c_cpp_files/file_data.c"}] + user_answer = dedent(""" + #include<stdio.h> + char ans() + { + FILE *fp; + char buff[255]; + fp = fopen("test.txt", "r"); + fscanf(fp, "%s", buff); + fclose(fp); + return buff[0]; + } + """) + get_class = CppCodeEvaluator(self.in_dir) + kwargs = {'user_answer': user_answer, + 'test_case_data': self.test_case_data, + 'file_paths': self.file_paths + } + result = get_class.evaluate(**kwargs) + self.assertTrue(result.get('success')) + self.assertEquals(result.get('error'), "Correct answer") + class CppStdioEvaluationTestCases(unittest.TestCase): def setUp(self): self.test_case_data = [{'expected_output': '11', 'expected_input': '5\n6'}] + self.in_dir = os.getcwd() self.timeout_msg = ("Code took more than {0} seconds to run. " "You probably have an infinite loop in" " your code.").format(SERVER_TIMEOUT) diff --git a/yaksh/evaluator_tests/test_java_evaluation.py b/yaksh/evaluator_tests/test_java_evaluation.py index c98a938..74ac677 100644 --- a/yaksh/evaluator_tests/test_java_evaluation.py +++ b/yaksh/evaluator_tests/test_java_evaluation.py @@ -12,11 +12,12 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase): self.test_case_data = [ {"test_case": "java_files/main_square.java"} ] - self.in_dir = "/tmp" + self.in_dir = os.getcwd() evaluator.SERVER_TIMEOUT = 9 self.timeout_msg = ("Code took more than {0} seconds to run. 
" "You probably have an infinite loop in" " your code.").format(evaluator.SERVER_TIMEOUT) + self.file_paths = None def tearDown(self): evaluator.SERVER_TIMEOUT = 2 @@ -25,7 +26,8 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase): user_answer = "class Test {\n\tint square_num(int a) {\n\treturn a*a;\n\t}\n}" get_class = JavaCodeEvaluator(self.in_dir) kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data + 'test_case_data': self.test_case_data, + 'file_paths': self.file_paths } result = get_class.evaluate(**kwargs) self.assertEquals(result.get('error'), "Correct answer") @@ -35,7 +37,8 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase): user_answer = "class Test {\n\tint square_num(int a) {\n\treturn a;\n\t}\n}" get_class = JavaCodeEvaluator(self.in_dir) kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data + 'test_case_data': self.test_case_data, + 'file_paths': self.file_paths } result = get_class.evaluate(**kwargs) self.assertFalse(result.get('success')) @@ -46,7 +49,8 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase): user_answer = "class Test {\n\tint square_num(int a) {\n\treturn a*a" get_class = JavaCodeEvaluator(self.in_dir) kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data + 'test_case_data': self.test_case_data, + 'file_paths': self.file_paths } result = get_class.evaluate(**kwargs) self.assertFalse(result.get("success")) @@ -56,12 +60,44 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase): user_answer = "class Test {\n\tint square_num(int a) {\n\t\twhile(0==0){\n\t\t}\n\t}\n}" get_class = JavaCodeEvaluator(self.in_dir) kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data + 'test_case_data': self.test_case_data, + 'file_paths': self.file_paths } result = get_class.evaluate(**kwargs) self.assertFalse(result.get("success")) self.assertEquals(result.get("error"), self.timeout_msg) + def test_file_based_assert(self): + self.file_paths = [(os.getcwd()+"/yaksh/test.txt", False)] + self.test_case_data = [ + {"test_case": "java_files/read_file.java"} + ] + user_answer = dedent(""" + import java.io.BufferedReader; + import java.io.FileReader; + import java.io.IOException; + class Test{ + String readFile() throws IOException { + BufferedReader br = new BufferedReader(new FileReader("test.txt")); + try { + StringBuilder sb = new StringBuilder(); + String line = br.readLine(); + while (line != null) { + sb.append(line); + line = br.readLine();} + return sb.toString(); + } finally { + br.close(); + }}} + """) + get_class = JavaCodeEvaluator(self.in_dir) + kwargs = {'user_answer': user_answer, + 'test_case_data': self.test_case_data, + 'file_paths': self.file_paths + } + result = get_class.evaluate(**kwargs) + self.assertTrue(result.get("success")) + self.assertEquals(result.get("error"), "Correct answer") class JavaStdioEvaluationTestCases(unittest.TestCase): diff --git a/yaksh/evaluator_tests/test_python_evaluation.py b/yaksh/evaluator_tests/test_python_evaluation.py index 40562db..eea9403 100644 --- a/yaksh/evaluator_tests/test_python_evaluation.py +++ b/yaksh/evaluator_tests/test_python_evaluation.py @@ -15,13 +15,15 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): self.timeout_msg = ("Code took more than {0} seconds to run. 
" "You probably have an infinite loop in" " your code.").format(SERVER_TIMEOUT) + self.file_paths = None def test_correct_answer(self): user_answer = "def add(a,b):\n\treturn a + b" get_class = PythonAssertionEvaluator() - kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data - } + kwargs = {'user_answer': user_answer, + 'test_case_data': self.test_case_data, + 'file_paths': self.file_paths + } result = get_class.evaluate(**kwargs) self.assertTrue(result.get('success')) self.assertEqual(result.get('error'), "Correct answer") @@ -30,8 +32,9 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): user_answer = "def add(a,b):\n\treturn a - b" get_class = PythonAssertionEvaluator() kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data - } + 'test_case_data': self.test_case_data, + 'file_paths': self.file_paths + } result = get_class.evaluate(**kwargs) self.assertFalse(result.get('success')) self.assertEqual(result.get('error'), @@ -42,13 +45,14 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): user_answer = "def add(a, b):\n\twhile True:\n\t\tpass" get_class = PythonAssertionEvaluator() kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data + 'test_case_data': self.test_case_data, + 'file_paths': self.file_paths } result = get_class.evaluate(**kwargs) self.assertFalse(result.get('success')) self.assertEqual(result.get('error'), self.timeout_msg) - def test_syntax_error(self): + user_answer = dedent(""" def add(a, b); return a + b @@ -63,7 +67,8 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): ] get_class = PythonAssertionEvaluator() kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data + 'test_case_data': self.test_case_data, + 'file_paths': self.file_paths } result = get_class.evaluate(**kwargs) err = result.get("error").splitlines() @@ -86,7 +91,8 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): ] get_class = PythonAssertionEvaluator() kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data + 'test_case_data': self.test_case_data, + 'file_paths': self.file_paths } result = get_class.evaluate(**kwargs) err = result.get("error").splitlines() @@ -105,7 +111,8 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): ] get_class = PythonAssertionEvaluator() kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data + 'test_case_data': self.test_case_data, + 'file_paths': self.file_paths } result = get_class.evaluate(**kwargs) err = result.get("error").splitlines() @@ -126,7 +133,8 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): ] get_class = PythonAssertionEvaluator() kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data + 'test_case_data': self.test_case_data, + 'file_paths': self.file_paths } result = get_class.evaluate(**kwargs) err = result.get("error").splitlines() @@ -148,7 +156,8 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): ] get_class = PythonAssertionEvaluator() kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data + 'test_case_data': self.test_case_data, + 'file_paths': self.file_paths } result = get_class.evaluate(**kwargs) err = result.get("error").splitlines() @@ -171,7 +180,8 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): ] get_class = PythonAssertionEvaluator() kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data + 'test_case_data': self.test_case_data, + 
'file_paths': self.file_paths } result = get_class.evaluate(**kwargs) err = result.get("error").splitlines() @@ -180,6 +190,22 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase): for msg in value_error_msg: self.assertIn(msg, result.get("error")) + def test_file_based_assert(self): + self.test_case_data = [{"test_case": "assert(ans()=='2')"}] + self.file_paths = [(os.getcwd()+"/yaksh/test.txt", False)] + user_answer = dedent(""" + def ans(): + with open("test.txt") as f: + return f.read()[0] + """) + get_class = PythonAssertionEvaluator() + kwargs = {'user_answer': user_answer, + 'test_case_data': self.test_case_data, + 'file_paths': self.file_paths + } + result = get_class.evaluate(**kwargs) + self.assertEqual(result.get('error'), "Correct answer") + self.assertTrue(result.get('success')) class PythonStdoutEvaluationTestCases(unittest.TestCase): def setUp(self): @@ -190,12 +216,14 @@ class PythonStdoutEvaluationTestCases(unittest.TestCase): self.timeout_msg = ("Code took more than {0} seconds to run. " "You probably have an infinite loop" " in your code.").format(SERVER_TIMEOUT) + self.file_paths = None def test_correct_answer(self): user_answer = "a,b=0,1\nfor i in range(5):\n\tprint a,\n\ta,b=b,a+b" get_class = PythonStdioEvaluator() kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data + 'test_case_data': self.test_case_data, + 'file_paths': self.file_paths } result = get_class.evaluate(**kwargs) self.assertEqual(result.get('error'), "Correct Answer") @@ -204,8 +232,9 @@ class PythonStdoutEvaluationTestCases(unittest.TestCase): def test_incorrect_answer(self): user_answer = "a,b=0,1\nfor i in range(5):\n\tprint b,\n\ta,b=b,a+b" get_class = PythonStdioEvaluator() - kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data + kwargs = {'user_answer': user_answer, + 'test_case_data': self.test_case_data, + 'file_paths': self.file_paths } result = get_class.evaluate(**kwargs) self.assertFalse(result.get('success')) @@ -217,11 +246,7 @@ class PythonStdoutEvaluationTestCases(unittest.TestCase): kwargs = {'user_answer': user_answer, 'test_case_data': self.test_case_data } - result = get_class.evaluate(**kwargs) - self.assertFalse(result.get('success')) - self.assertEqual(result.get('error'), self.timeout_msg) - - + class PythonStdIOEvaluator(unittest.TestCase): def setUp(self): @@ -305,12 +330,30 @@ class PythonStdIOEvaluator(unittest.TestCase): get_class = PythonStdioEvaluator() kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data + 'test_case_data': self.test_case_data, } result = get_class.evaluate(**kwargs) self.assertFalse(result.get('success')) self.assertIn("Incorrect Answer", result.get('error')) + def test_file_based_answer(self): + self.test_case_data = [{"expected_input": "", "expected_output": "2"}] + self.file_paths = [(os.getcwd()+"/yaksh/test.txt", False)] + + user_answer = dedent(""" + with open("test.txt") as f: + a = f.read() + print a[0] + """ + ) + get_class = PythonStdioEvaluator() + kwargs = {'user_answer': user_answer, + 'test_case_data': self.test_case_data, + 'file_paths': self.file_paths + } + result = get_class.evaluate(**kwargs) + self.assertEqual(result.get('error'), "Correct Answer") + self.assertTrue(result.get('success')) if __name__ == '__main__': unittest.main() diff --git a/yaksh/evaluator_tests/test_scilab_evaluation.py b/yaksh/evaluator_tests/test_scilab_evaluation.py index 242f260..f5e3767 100644 --- a/yaksh/evaluator_tests/test_scilab_evaluation.py +++ 
b/yaksh/evaluator_tests/test_scilab_evaluation.py @@ -7,17 +7,19 @@ from yaksh.settings import SERVER_TIMEOUT class ScilabEvaluationTestCases(unittest.TestCase): def setUp(self): self.test_case_data = [{"test_case": "scilab_files/test_add.sce"}] - self.in_dir = "/tmp" + self.in_dir = os.getcwd() self.timeout_msg = ("Code took more than {0} seconds to run. " "You probably have an infinite loop" " in your code.").format(SERVER_TIMEOUT) + self.file_paths = None def test_correct_answer(self): user_answer = ("funcprot(0)\nfunction[c]=add(a,b)" "\n\tc=a+b;\nendfunction") get_class = ScilabCodeEvaluator(self.in_dir) kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data + 'test_case_data': self.test_case_data, + 'file_paths': self.file_paths } result = get_class.evaluate(**kwargs) self.assertEquals(result.get('error'), "Correct answer") @@ -28,7 +30,8 @@ class ScilabEvaluationTestCases(unittest.TestCase): "\n\tc=a+b;\ndis(\tendfunction") get_class = ScilabCodeEvaluator(self.in_dir) kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data + 'test_case_data': self.test_case_data, + 'file_paths': self.file_paths } result = get_class.evaluate(**kwargs) self.assertFalse(result.get("success")) @@ -40,7 +43,8 @@ class ScilabEvaluationTestCases(unittest.TestCase): "\n\tc=a-b;\nendfunction") get_class = ScilabCodeEvaluator(self.in_dir) kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data + 'test_case_data': self.test_case_data, + 'file_paths': self.file_paths } result = get_class.evaluate(**kwargs) self.assertFalse(result.get('success')) @@ -52,7 +56,8 @@ class ScilabEvaluationTestCases(unittest.TestCase): "\n\tc=a;\nwhile(1==1)\nend\nendfunction") get_class = ScilabCodeEvaluator(self.in_dir) kwargs = {'user_answer': user_answer, - 'test_case_data': self.test_case_data + 'test_case_data': self.test_case_data, + 'file_paths': self.file_paths } result = get_class.evaluate(**kwargs) self.assertFalse(result.get("success")) diff --git a/yaksh/file_utils.py b/yaksh/file_utils.py new file mode 100644 index 0000000..8f6f6e5 --- /dev/null +++ b/yaksh/file_utils.py @@ -0,0 +1,33 @@ +import shutil +import os +import zipfile + + +def copy_files(file_paths): + """ Copy Files to current directory, takes + tuple with file paths and extract status""" + + files = [] + for src in file_paths: + file_path, extract = src + file_name = os.path.basename(file_path) + files.append(file_name) + shutil.copy(file_path, os.getcwd()) + if extract and zipfile.is_zipfile(file_name): + unzip = zipfile.ZipFile(file_name) + for zip_files in unzip.namelist(): + files.append(zip_files) + unzip.extractall() + unzip.close() + return files + + +def delete_files(files): + """ Delete Files from current directory """ + + for file in files: + if os.path.exists(file): + if os.path.isfile(file): + os.remove(file) + else: + shutil.rmtree(file) diff --git a/yaksh/forms.py b/yaksh/forms.py index 6bea0c8..4a20102 100644 --- a/yaksh/forms.py +++ b/yaksh/forms.py @@ -175,6 +175,11 @@ class QuestionForm(forms.ModelForm): exclude = ['user'] +class FileForm(forms.Form): + file_field = forms.FileField(widget=forms.ClearableFileInput(attrs={'multiple': True}), + required=False) + + class RandomQuestionForm(forms.Form): question_type = forms.CharField(max_length=8, widget=forms.Select\ (choices=question_types)) @@ -211,7 +216,7 @@ class ProfileForm(forms.ModelForm): class Meta: model = Profile fields = ['first_name', 'last_name', 'institute', - 'department', 'roll_number', 'position', 
'timezone'] + 'department', 'roll_number', 'position', 'timezone'] first_name = forms.CharField(max_length=30) last_name = forms.CharField(max_length=30) diff --git a/yaksh/java_code_evaluator.py b/yaksh/java_code_evaluator.py index c64aa3b..ff76317 100644 --- a/yaksh/java_code_evaluator.py +++ b/yaksh/java_code_evaluator.py @@ -8,6 +8,7 @@ import importlib # local imports from code_evaluator import CodeEvaluator +from file_utils import copy_files, delete_files class JavaCodeEvaluator(CodeEvaluator): @@ -21,7 +22,13 @@ class JavaCodeEvaluator(CodeEvaluator): def teardown(self): super(JavaCodeEvaluator, self).teardown() # Delete the created file. - os.remove(self.submit_code_path) + os.remove(self.submit_code_path) + if os.path.exists(self.user_output_path): + os.remove(self.user_output_path) + if os.path.exists(self.ref_output_path): + os.remove(self.ref_output_path) + if self.files: + delete_files(self.files) def get_commands(self, clean_ref_code_path, user_code_directory): compile_command = 'javac {0}'.format(self.submit_code_path), @@ -35,14 +42,16 @@ class JavaCodeEvaluator(CodeEvaluator): output_path = "{0}{1}.class".format(directory, file_name) return output_path - def compile_code(self, user_answer, test_case): + def compile_code(self, user_answer, file_paths, test_case): + self.files = [] if self.compiled_user_answer and self.compiled_test_code: return None else: ref_code_path = test_case clean_ref_code_path, clean_test_case_path = \ self._set_test_code_file_path(ref_code_path) - + if file_paths: + self.files = copy_files(file_paths) if not isfile(clean_ref_code_path): msg = "No file at %s or Incorrect path" % clean_ref_code_path return False, msg @@ -83,7 +92,7 @@ class JavaCodeEvaluator(CodeEvaluator): return self.compiled_user_answer, self.compiled_test_code - def check_code(self, user_answer, test_case): + def check_code(self, user_answer, file_paths, test_case): """ Function validates student code using instructor code as reference.The first argument ref_code_path, is the path to instructor code, it is assumed to have executable permission. 
@@ -123,7 +132,6 @@ class JavaCodeEvaluator(CodeEvaluator): success, err = True, "Correct answer" else: err = stdout + "\n" + stderr - os.remove(self.ref_output_path) else: err = "Error:" try: @@ -135,7 +143,6 @@ class JavaCodeEvaluator(CodeEvaluator): err = err + "\n" + e except: err = err + "\n" + main_err - os.remove(self.user_output_path) else: err = "Compilation Error:" try: @@ -147,5 +154,4 @@ class JavaCodeEvaluator(CodeEvaluator): err = err + "\n" + e except: err = err + "\n" + stdnt_stderr - return success, err diff --git a/yaksh/java_files/read_file.java b/yaksh/java_files/read_file.java new file mode 100644 index 0000000..21a5836 --- /dev/null +++ b/yaksh/java_files/read_file.java @@ -0,0 +1,26 @@ +class read_file +{ + public static <E> void check(E expect, E result) + { + if(result.equals(expect)) + { + System.out.println("Correct:\nOutput expected "+expect+" and got "+result); + } + else + { + System.out.println("Incorrect:\nOutput expected "+expect+" but got "+result); + System.exit(1); + } + } + public static void main(String arg[]) + { + String result = ""; + Test t = new Test(); + try{ + result = t.readFile();} + catch(Exception e){ + System.out.print(e); + } + check("2", result); + } +} diff --git a/yaksh/java_stdio_evaluator.py b/yaksh/java_stdio_evaluator.py index 2ab2981..27dd4a9 100644 --- a/yaksh/java_stdio_evaluator.py +++ b/yaksh/java_stdio_evaluator.py @@ -6,6 +6,7 @@ from os.path import isfile #local imports from code_evaluator import CodeEvaluator from stdio_evaluator import Evaluator +from file_utils import copy_files, delete_files class JavaStdioEvaluator(CodeEvaluator): @@ -18,6 +19,8 @@ class JavaStdioEvaluator(CodeEvaluator): def teardown(self): super(JavaStdioEvaluator, self).teardown() os.remove(self.submit_code_path) + if self.files: + delete_files(self.files) def set_file_paths(self, directory, file_name): output_path = "{0}{1}.class".format(directory, file_name) @@ -27,10 +30,13 @@ class JavaStdioEvaluator(CodeEvaluator): compile_command = 'javac {0}'.format(self.submit_code_path) return compile_command - def compile_code(self, user_answer, expected_input, expected_output): + def compile_code(self, user_answer, file_paths, expected_input, expected_output): + self.files = [] if not isfile(self.submit_code_path): msg = "No file at %s or Incorrect path" % self.submit_code_path return False, msg + if file_paths: + self.files = copy_files(file_paths) user_code_directory = os.getcwd() + '/' self.write_to_submit_code_file(self.submit_code_path, user_answer) self.user_output_path = self.set_file_paths(user_code_directory, @@ -44,7 +50,7 @@ class JavaStdioEvaluator(CodeEvaluator): ) return self.compiled_user_answer - def check_code(self, user_answer, expected_input, expected_output): + def check_code(self, user_answer, file_paths, expected_input, expected_output): success = False proc, stdnt_out, stdnt_stderr = self.compiled_user_answer stdnt_stderr = self._remove_null_substitute_char(stdnt_stderr) diff --git a/yaksh/models.py b/yaksh/models.py index 3fd7508..bdd3875 100644 --- a/yaksh/models.py +++ b/yaksh/models.py @@ -11,6 +11,8 @@ from django.contrib.contenttypes.models import ContentType from taggit.managers import TaggableManager from django.utils import timezone import pytz +import os +import shutil languages = ( ("python", "Python"), @@ -62,6 +64,9 @@ def has_profile(user): """ check if user has profile """ return True if hasattr(user, 'profile') else False +def get_upload_dir(instance, filename): + return "question_%s/%s" % (instance.question.id, 
filename) + ############################################################################### class CourseManager(models.Manager): @@ -216,6 +221,10 @@ class Question(models.Model): question_data['test_case_data'] = test_case_data question_data['user_answer'] = user_answer + files = FileUpload.objects.filter(question=self) + if files: + question_data['file_paths'] = [(file.file.path, file.extract) + for file in files] return json.dumps(question_data) @@ -252,7 +261,7 @@ class Question(models.Model): model=self.test_case_type ) test_cases = test_case_ctype.get_all_objects_for_this_type( - question=self, + question=self, **kwargs ) @@ -263,7 +272,7 @@ class Question(models.Model): model=self.test_case_type ) test_case = test_case_ctype.get_object_for_this_type( - question=self, + question=self, **kwargs ) @@ -274,6 +283,27 @@ class Question(models.Model): ############################################################################### +class FileUpload(models.Model): + file = models.FileField(upload_to=get_upload_dir, blank=True) + question = models.ForeignKey(Question, related_name="question") + extract = models.BooleanField(default=False) + + def remove(self): + if os.path.exists(self.file.path): + os.remove(self.file.path) + if os.listdir(os.path.dirname(self.file.path)) == []: + os.rmdir(os.path.dirname(self.file.path)) + self.delete() + + def set_extract_status(self): + if self.extract: + self.extract = False + else: + self.extract = True + self.save() + + +############################################################################### class Answer(models.Model): """Answers submitted by the users.""" diff --git a/yaksh/python_assertion_evaluator.py b/yaksh/python_assertion_evaluator.py index bf6a4be..04a4e69 100644 --- a/yaksh/python_assertion_evaluator.py +++ b/yaksh/python_assertion_evaluator.py @@ -7,6 +7,7 @@ import importlib # local imports from code_evaluator import CodeEvaluator, TimeoutException +from file_utils import copy_files, delete_files class PythonAssertionEvaluator(CodeEvaluator): @@ -16,7 +17,16 @@ class PythonAssertionEvaluator(CodeEvaluator): super(PythonAssertionEvaluator, self).setup() self.exec_scope = None - def compile_code(self, user_answer, test_case): + def teardown(self): + super(PythonAssertionEvaluator, self).teardown() + # Delete the created file. 
+ if self.files: + delete_files(self.files) + + def compile_code(self, user_answer, file_paths, test_case): + self.files = [] + if file_paths: + self.files = copy_files(file_paths) if self.exec_scope: return None else: @@ -25,7 +35,7 @@ class PythonAssertionEvaluator(CodeEvaluator): exec submitted in self.exec_scope return self.exec_scope - def check_code(self, user_answer, test_case): + def check_code(self, user_answer, file_paths, test_case): success = False try: tb = None @@ -42,6 +52,5 @@ class PythonAssertionEvaluator(CodeEvaluator): else: success = True err = 'Correct answer' - del tb return success, err diff --git a/yaksh/python_stdio_evaluator.py b/yaksh/python_stdio_evaluator.py index aeec744..4a02267 100644 --- a/yaksh/python_stdio_evaluator.py +++ b/yaksh/python_stdio_evaluator.py @@ -9,7 +9,7 @@ from ast import literal_eval # local imports from code_evaluator import CodeEvaluator from StringIO import StringIO - +from file_utils import copy_files, delete_files from textwrap import dedent @contextmanager def redirect_stdout(): @@ -24,7 +24,17 @@ def redirect_stdout(): class PythonStdioEvaluator(CodeEvaluator): """Tests the Python code obtained from Code Server""" - def compile_code(self, user_answer, expected_input, expected_output): + def teardown(self): + super(PythonStdioEvaluator, self).teardown() + # Delete the created file. + if self.files: + delete_files(self.files) + + + def compile_code(self, user_answer, file_paths, expected_input, expected_output): + self.files = [] + if file_paths: + self.files = copy_files(file_paths) submitted = compile(user_answer, '<string>', mode='exec') if expected_input: input_buffer = StringIO() @@ -37,7 +47,7 @@ class PythonStdioEvaluator(CodeEvaluator): self.output_value = output_buffer.getvalue().rstrip("\n") return self.output_value - def check_code(self, user_answer, expected_input, expected_output): + def check_code(self, user_answer, file_paths, expected_input, expected_output): success = False tb = None @@ -47,10 +57,11 @@ class PythonStdioEvaluator(CodeEvaluator): else: success = False err = dedent(""" - Incorrect Answer: - Given input - {0}, - Expected output - {1} and your output - {2} - """ + Incorrect Answer: + Given input - {0} + Expected output - {1} + Your output - {2} + """ .format(expected_input, expected_output, self.output_value ) diff --git a/yaksh/python_stdout_evaluator.py b/yaksh/python_stdout_evaluator.py new file mode 100644 index 0000000..8f69b24 --- /dev/null +++ b/yaksh/python_stdout_evaluator.py @@ -0,0 +1,65 @@ +#!/usr/bin/env python +import sys +import traceback +import os +from os.path import join +import importlib +from contextlib import contextmanager + +# local imports +from code_evaluator import CodeEvaluator +from file_utils import copy_files, delete_files + + +@contextmanager +def redirect_stdout(): + from StringIO import StringIO + new_target = StringIO() + + old_target, sys.stdout = sys.stdout, new_target # replace sys.stdout + try: + yield new_target # run some code with the replaced stdout + finally: + sys.stdout = old_target # restore to the previous value + + +class PythonStdoutEvaluator(CodeEvaluator): + """Tests the Python code obtained from Code Server""" + + def teardown(self): + super(PythonStdoutEvaluator, self).teardown() + # Delete the created file. 
+ if self.files: + delete_files(self.files) + + def compile_code(self, user_answer, file_paths, expected_output): + self.files = [] + if file_paths: + self.files = copy_files(file_paths) + if hasattr(self, 'output_value'): + return None + else: + submitted = compile(user_answer, '<string>', mode='exec') + with redirect_stdout() as output_buffer: + exec_scope = {} + exec submitted in exec_scope + self.output_value = output_buffer.getvalue() + return self.output_value + + def check_code(self, user_answer, file_paths, expected_output): + success = False + tb = None + if expected_output in user_answer: + success = False + err = ("Incorrect Answer: Please avoid " + "printing the expected output directly" + ) + elif self.output_value == expected_output: + success = True + err = "Correct answer" + + else: + success = False + err = "Incorrect Answer" + del tb + return success, err diff --git a/yaksh/scilab_code_evaluator.py b/yaksh/scilab_code_evaluator.py index 91b4cb3..53fa343 100644 --- a/yaksh/scilab_code_evaluator.py +++ b/yaksh/scilab_code_evaluator.py @@ -8,6 +8,7 @@ import importlib # local imports from code_evaluator import CodeEvaluator +from file_utils import copy_files, delete_files class ScilabCodeEvaluator(CodeEvaluator): @@ -21,8 +22,13 @@ class ScilabCodeEvaluator(CodeEvaluator): super(ScilabCodeEvaluator, self).teardown() # Delete the created file. os.remove(self.submit_code_path) + if self.files: + delete_files(self.files) - def check_code(self, user_answer, test_case): + def check_code(self, user_answer, file_paths, test_case): + self.files = [] + if file_paths: + self.files = copy_files(file_paths) ref_code_path = test_case clean_ref_path, clean_test_case_path = \ self._set_test_code_file_path(ref_code_path) @@ -60,7 +66,6 @@ class ScilabCodeEvaluator(CodeEvaluator): err = add_err + stdout else: err = add_err + stderr - return success, err def _remove_scilab_exit(self, string): diff --git a/yaksh/templates/yaksh/add_question.html b/yaksh/templates/yaksh/add_question.html index 255deaa..f003256 100644 --- a/yaksh/templates/yaksh/add_question.html +++ b/yaksh/templates/yaksh/add_question.html @@ -15,19 +15,27 @@ {% block onload %} onload='javascript:textareaformat();' {% endblock %} {% block manage %} -<form action="" method="post" name=frm onSubmit="return autosubmit();"> +<form action="" method="post" name=frm onSubmit="return autosubmit();" enctype="multipart/form-data"> {% csrf_token %} <center><table class=span1> <tr><td>Summary: <td>{{ form.summary }}{{ form.summary.errors }} <tr><td> Language: <td> {{form.language}}{{form.language.errors}} <tr><td> Active: <td> {{ form.active }}{{form.active.errors}} Type: {{ form.type }}{{form.type.errors}} - <tr><td>Points:<td><button class="btn-mini" type="button" onClick="increase(frm);">+</button>{{ form.points }}<button class="btn-mini" type="button" onClick="decrease(frm);">-</button>{{ form.points.errors }} + <tr><td>Points:<td><button class="btn-mini" type="button" onClick="increase(frm);">+</button>{{form.points }}<button class="btn-mini" type="button" onClick="decrease(frm);">-</button>{{ form.points.errors }} <tr><td><strong>Rendered: </strong><td><p id='my'></p> <tr><td>Description: <td>{{ form.description}} {{form.description.errors}} <tr><td>Tags: <td>{{ form.tags }} <tr><td>Snippet: <td>{{ form.snippet }} <tr><td> Test Case Type: <td> {{ form.test_case_type }}{{ form.test_case_type.errors }} - + <tr><td> File: <td> {{ upload_form.file_field }}{{ upload_form.file_field.errors }} + {% if uploaded_files %}<br><b>Uploaded 
files:</b><br>Check the box to delete or extract files<br> + {% for file in uploaded_files %} + <input type="checkbox" name="clear" value="{{file.id}}"> delete</input> + <input type="checkbox" name="extract" value="{{file.id}}" >{% if file.extract %} dont extract{% else %} + extract{% endif %}</input><br> + <a href="{{file.file.url}}">{{ file.file.name }}</a> + <br> + {% endfor %}{% endif %} <div class="form-group"> {{ test_case_formset.management_form }} @@ -43,6 +51,7 @@ <center> <button class="btn" type="submit" name="save_question">Save & Add Testcase</button> <button class="btn" type="button" name="button" onClick='location.replace("{{URL_ROOT}}/exam/manage/questions/");'>Back to Questions</button> + <button class="btn" type="submit" name="delete_files">Delete Selected Files</button> </center> </form> {% endblock %} diff --git a/yaksh/templates/yaksh/question.html b/yaksh/templates/yaksh/question.html index 40d4482..2d52009 100644 --- a/yaksh/templates/yaksh/question.html +++ b/yaksh/templates/yaksh/question.html @@ -142,7 +142,14 @@ function call_skip(url) <h4><u> {{ question.summary }} </u><font class=pull-right>(Marks : {{ question.points }}) </font></h4><br> <font size=3 face=arial> {{ question.description|safe }} </font> <br><font size=3 face=arial> Language: {{ question.language }} </font><br> - + {% if files %} + <h4>Files to download for this question</h4> + {% for file in files %} + {% if file.question_id == question.id %} + <h5><a href="{{file.file.url}}">{{file.file.name}}</a></h5> + {% endif %} + {% endfor %} + {% endif %} {% if question.type == "code" %} <br><h4>Output:</h4></br> {% if error_message %} diff --git a/yaksh/test.txt b/yaksh/test.txt new file mode 100644 index 0000000..0cfbf08 --- /dev/null +++ b/yaksh/test.txt @@ -0,0 +1 @@ +2 diff --git a/yaksh/views.py b/yaksh/views.py index 56746b0..e1ec44e 100644 --- a/yaksh/views.py +++ b/yaksh/views.py @@ -22,14 +22,15 @@ import pytz from taggit.models import Tag from itertools import chain import json + # Local imports. 
-from yaksh.models import get_model_class, Quiz, Question, QuestionPaper,\ - QuestionSet, Course, Profile, Answer, AnswerPaper, User, TestCase,\ - has_profile +from yaksh.models import get_model_class, Quiz, Question, QuestionPaper, QuestionSet, Course +from yaksh.models import Profile, Answer, AnswerPaper, User, TestCase, FileUpload,\ + has_profile from yaksh.forms import UserRegisterForm, UserLoginForm, QuizForm,\ - QuestionForm, RandomQuestionForm,\ - QuestionFilterForm, CourseForm, ProfileForm, UploadFileForm,\ - get_object_form + QuestionForm, RandomQuestionForm,\ + QuestionFilterForm, CourseForm, ProfileForm, UploadFileForm,\ + get_object_form, FileForm from yaksh.xmlrpc_clients import code_server from settings import URL_ROOT from yaksh.models import AssignmentUpload @@ -151,19 +152,27 @@ def add_question(request): if request.method == "POST" and 'save_question' in request.POST: question_form = QuestionForm(request.POST) + form = FileForm(request.POST, request.FILES) if question_form.is_valid(): new_question = question_form.save(commit=False) new_question.user = user new_question.save() + files = request.FILES.getlist('file_field') + if files: + for file in files: + FileUpload.objects.get_or_create(question=new_question, file=file) return my_redirect("/exam/manage/addquestion/{0}".format(new_question.id)) else: return my_render_to_response('yaksh/add_question.html', - {'form': question_form}, + {'form': question_form, + 'upload_form': form}, context_instance=ci) else: question_form = QuestionForm() + form = FileForm() return my_render_to_response('yaksh/add_question.html', - {'form': question_form}, + {'form': question_form, + 'upload_form': form}, context_instance=ci) @login_required @@ -176,9 +185,24 @@ def edit_question(request, question_id=None): raise Http404('No Question Found') question_instance = Question.objects.get(id=question_id) - + if request.method == "POST" and 'delete_files' in request.POST: + remove_files_id = request.POST.getlist('clear') + if remove_files_id: + files = FileUpload.objects.filter(id__in=remove_files_id) + for file in files: + file.remove() if request.method == "POST" and 'save_question' in request.POST: question_form = QuestionForm(request.POST, instance=question_instance) + form = FileForm(request.POST, request.FILES) + files = request.FILES.getlist('file_field') + extract_files_id = request.POST.getlist('extract') + if files: + for file in files: + FileUpload.objects.get_or_create(question=question_instance, file=file) + if extract_files_id: + files = FileUpload.objects.filter(id__in=extract_files_id) + for file in files: + file.set_extract_status() if question_form.is_valid(): new_question = question_form.save(commit=False) test_case_type = question_form.cleaned_data.get('test_case_type') @@ -196,23 +220,29 @@ def edit_question(request, question_id=None): test_case_model_class = get_model_class(test_case_type) TestCaseInlineFormSet = inlineformset_factory(Question, test_case_model_class, form=test_case_form_class, extra=1) test_case_formset = TestCaseInlineFormSet(request.POST, request.FILES, instance=question_instance) + uploaded_files = FileUpload.objects.filter(question_id=question_instance.id) return my_render_to_response('yaksh/add_question.html', {'form': question_form, 'test_case_formset': test_case_formset, - 'question_id': question_id}, + 'question_id': question_id, + 'upload_form': form, + 'uploaded_files': uploaded_files}, context_instance=ci) else: question_form = QuestionForm(instance=question_instance) + form = FileForm() 
test_case_type = question_instance.test_case_type test_case_form_class = get_object_form(model=test_case_type, exclude_fields=['question']) test_case_model_class = get_model_class(test_case_type) TestCaseInlineFormSet = inlineformset_factory(Question, test_case_model_class, form=test_case_form_class, extra=1) test_case_formset = TestCaseInlineFormSet(instance=question_instance) - + uploaded_files = FileUpload.objects.filter(question_id=question_instance.id) return my_render_to_response('yaksh/add_question.html', {'form': question_form, 'test_case_formset': test_case_formset, - 'question_id': question_id}, + 'question_id': question_id, + 'upload_form': form, + 'uploaded_files': uploaded_files}, context_instance=ci) @login_required @@ -405,18 +435,14 @@ def show_question(request, question, paper, error_message=None): reason='Your time is up!' return complete(request, reason, paper.attempt_number, paper.question_paper.id) test_cases = question.get_test_cases() - context = {'question': question, - 'paper': paper, - 'error_message': error_message, - 'test_cases': test_cases, - 'last_attempt': question.snippet.encode('unicode-escape') - } - + files = FileUpload.objects.filter(question_id=question.id) + context = {'question': question, 'paper': paper, 'error_message': error_message, + 'test_cases': test_cases, 'files': files, + 'last_attempt': question.snippet.encode('unicode-escape')} answers = paper.get_previous_answers(question) if answers: last_attempt = answers[0].answer context['last_attempt'] = last_attempt.encode('unicode-escape') - # context['last_attempt'] = answers[0].answer.encode('unicode-escape') ci = RequestContext(request) return my_render_to_response('yaksh/question.html', context, context_instance=ci) @@ -832,7 +858,12 @@ def show_all_questions(request): if request.POST.get('delete') == 'delete': data = request.POST.getlist('question') if data is not None: - question = Question.objects.filter(id__in=data, user_id=user.id).delete() + questions = Question.objects.filter(id__in=data, user_id=user.id) + files = FileUpload.objects.filter(question_id__in=questions) + if files: + for file in files: + file.remove() + questions.delete() if request.POST.get('upload') == 'upload': form = UploadFileForm(request.POST, request.FILES) |
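
The new yaksh/file_utils.py helpers above are the common mechanism every evaluator now uses to stage question files in its working directory and remove them afterwards. A minimal sketch of the intended call pattern, assuming file_utils is importable from the current directory the way the evaluators import it; the example path is hypothetical:

from file_utils import copy_files, delete_files

# file_paths is a list of (absolute path, extract) tuples, the same shape
# Question.consolidate_answer_data() produces from FileUpload records.
file_paths = [("/tmp/question_1/test.txt", False)]   # hypothetical path

copied = copy_files(file_paths)      # copies into os.getcwd(); archives are
                                     # unzipped there when extract is True
try:
    # run the submitted code here; it can open "test.txt" relative to
    # the current working directory
    pass
finally:
    delete_files(copied)             # removes copied files and extracted dirs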
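
CodeEvaluator.safe_evaluate() now forwards an optional file_paths argument into every compile_code()/check_code() call, and each concrete evaluator follows the same contract: copy the files, remember them in self.files, and delete them in teardown(). A sketch of that contract for a hypothetical subclass (the class name and the trivial check are illustrative, not code from the diff):

from code_evaluator import CodeEvaluator
from file_utils import copy_files, delete_files


class FileAwareEvaluator(CodeEvaluator):         # hypothetical subclass
    def teardown(self):
        super(FileAwareEvaluator, self).teardown()
        if self.files:                           # populated in compile_code
            delete_files(self.files)

    def compile_code(self, user_answer, file_paths, **kwargs):
        self.files = []
        if file_paths:
            self.files = copy_files(file_paths)  # stage files in the cwd

    def check_code(self, user_answer, file_paths, test_case):
        # evaluate user_answer against test_case here
        return True, "Correct answer"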
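
The C++ and Java evaluators also stop removing their intermediate output files inside check_code() and instead clean everything up in teardown(), guarded by os.path.exists() so a failed compilation does not raise when the outputs were never produced. The guard in isolation (remove_if_exists is a hypothetical helper, not code from the diff):

import os


def remove_if_exists(*paths):
    """Delete build artifacts, skipping any that were never created."""
    for path in paths:
        if os.path.exists(path):
            os.remove(path)

# CppCodeEvaluator.teardown() / JavaCodeEvaluator.teardown() now do the
# equivalent of:
#     remove_if_exists(self.user_output_path, self.ref_output_path)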
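
On the Django side, attached files live in the new FileUpload model; get_upload_dir() stores each upload under question_<question id>/<file name>, and Question.consolidate_answer_data() packs them into the JSON sent to the code server as (path, extract) pairs. A sketch of how those pieces fit together, assuming a saved question with id 1 exists:

from yaksh.models import Question, FileUpload

question = Question.objects.get(id=1)                 # hypothetical id

# The same (path, extract) pairs that consolidate_answer_data() builds:
uploads = FileUpload.objects.filter(question=question)
file_paths = [(f.file.path, f.extract) for f in uploads]

for f in uploads:
    if f.file.name.endswith(".zip") and not f.extract:
        f.set_extract_status()      # toggles the extract flag and saves
    # f.remove() would delete the file from disk (and its directory,
    # if empty) before deleting the database row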
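
Uploads reach the server through the new FileForm, whose single file_field accepts multiple files (ClearableFileInput with multiple=True); add_question/edit_question then read them with request.FILES.getlist('file_field') and attach each one to the question. A condensed sketch of that handling (handle_question_files is an illustrative helper, not a function in the diff):

from yaksh.forms import FileForm
from yaksh.models import FileUpload


def handle_question_files(request, question):
    """Attach every file uploaded through FileForm to `question`."""
    form = FileForm(request.POST, request.FILES)
    for uploaded in request.FILES.getlist('file_field'):
        # get_or_create mirrors the view and avoids duplicate rows
        FileUpload.objects.get_or_create(question=question, file=uploaded)
    return form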
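
Every evaluator gains a test_file_based_assert test built on the same recipe: point file_paths at yaksh/test.txt (whose only content is the character 2) and use a reference test case that reads the copied file. The Python flavour of that test, condensed; the import path is assumed to follow the existing test modules:

import os
import unittest
from textwrap import dedent

from yaksh.python_assertion_evaluator import PythonAssertionEvaluator


class FileBasedAssertTest(unittest.TestCase):
    def test_file_based_assert(self):
        test_case_data = [{"test_case": "assert(ans()=='2')"}]
        file_paths = [(os.getcwd() + "/yaksh/test.txt", False)]
        user_answer = dedent("""
            def ans():
                with open("test.txt") as f:
                    return f.read()[0]
            """)
        result = PythonAssertionEvaluator().evaluate(
            user_answer=user_answer,
            test_case_data=test_case_data,
            file_paths=file_paths)
        self.assertTrue(result.get('success'))
        self.assertEqual(result.get('error'), "Correct answer")


if __name__ == '__main__':
    unittest.main()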