author     adityacp  2017-01-04 11:42:32 +0530
committer  adityacp  2017-01-04 11:42:32 +0530
commit     58481cdcba38e8e602b8e2d3e5172f9e653f93a9 (patch)
tree       21312113c078b6b22f24d5bab052a52bd185a3c9
parent     b271e3b33f673c70114893bf461d2a6116dd7cf7 (diff)
parent     ef6a61b1938ec399efb6d66b914f245afa3ed5ff (diff)
Merge https://github.com/fossee/online_test into fix_management_commands
62 files changed, 488 insertions, 1142 deletions
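
Note on the central change in this merge, visible in yaksh/grader.py and the evaluators below: each evaluator now returns a mark fraction of 1.0 when partial grading is enabled (and 0.0 otherwise), instead of returning its own weight, and the Grader accumulates mark_fraction * test_case.weight per passing test case. The sketch below illustrates that accumulation only; it is not the actual yaksh code, and the function name aggregate_weight and the tuple layout are illustrative assumptions.

# Minimal sketch (assumed names, not the real Grader API) of how the
# merged code aggregates weights: evaluators report a mark fraction,
# the grader applies the test case weight.
def aggregate_weight(test_case_results):
    """test_case_results: list of (success, mark_fraction, weight)."""
    total = 0.0
    errors = []
    for success, mark_fraction, weight in test_case_results:
        if success:
            # Weight is applied here, not inside the evaluator,
            # mirroring `weight += mark_fraction * test_case_instance.weight`.
            total += mark_fraction * weight
        else:
            errors.append("test case failed")
    return total, errors

# Example: two passing cases with weights 1.0 and 2.0 under partial
# grading (mark_fraction == 1.0) plus one failure give a total of 3.0.
print(aggregate_weight([(True, 1.0, 1.0), (True, 1.0, 2.0), (False, 0.0, 1.5)]))
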
diff --git a/Quiz_instructions.txt b/Quiz_instructions.txt deleted file mode 100644 index 34e010f..0000000 --- a/Quiz_instructions.txt +++ /dev/null @@ -1,18 +0,0 @@ -<p> -This examination system has been developed with the intention of making you -learn programming and be assessed in an interactive and fun manner. -You will be presented with a series of programming questions and problems that -you will answer online and get immediate feedback for. -</p> -<p> Here are some important instructions and rules that you should understand carefully.</p> -<ul> -<li>For any programming questions, you can submit solutions as many times as you want without a penalty. You may skip questions and solve them later. -</li> -<li> You <strong>may</strong> use your computer's Python/IPython shell or an editor to solve the problem and cut/paste the solution to the web interface. -</li> -<li> <strong>You are <strong>not allowed</strong> to use any internet resources, i.e. no google etc.</strong> </li> -<li> Do not copy or share the questions or answers with anyone until the exam is complete <strong>for everyone</strong>.</li> -<li> <strong>All</strong> your attempts at the questions are logged. Do not try to outsmart and break the testing system. If you do, we know who you are and we will expel you from the course. You have been warned. -</li> -</ul> -<p> We hope you enjoy taking this exam !!!</p> diff --git a/yaksh/base_evaluator.py b/yaksh/base_evaluator.py index ce1647f..071008f 100644 --- a/yaksh/base_evaluator.py +++ b/yaksh/base_evaluator.py @@ -62,15 +62,6 @@ class BaseEvaluator(object): submit_f.write(user_answer.lstrip()) submit_f.close() - def _set_test_code_file_path(self, ref_path=None, test_case_path=None): - if ref_path and not ref_path.startswith('/'): - ref_path = join(MY_DIR, ref_path) - - if test_case_path and not test_case_path.startswith('/'): - test_case_path = join(MY_DIR, test_case_path) - - return ref_path, test_case_path - def _set_file_as_executable(self, fname): os.chmod(fname, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP diff --git a/yaksh/bash_code_evaluator.py b/yaksh/bash_code_evaluator.py index 975af82..a4f1389 100644 --- a/yaksh/bash_code_evaluator.py +++ b/yaksh/bash_code_evaluator.py @@ -17,6 +17,9 @@ class BashCodeEvaluator(BaseEvaluator): # Private Protocol ########## def __init__(self, metadata, test_case_data): self.files = [] + self.submit_code_path = "" + self.test_code_path = "" + self.tc_args_path= "" # Set metadata values self.user_answer = metadata.get('user_answer') @@ -25,11 +28,18 @@ class BashCodeEvaluator(BaseEvaluator): # Set test case data values self.test_case = test_case_data.get('test_case') + self.test_case_args = test_case_data.get('test_case_args') + self.weight = test_case_data.get('weight') def teardown(self): # Delete the created file. - os.remove(self.submit_code_path) + if os.path.exists(self.submit_code_path): + os.remove(self.submit_code_path) + if os.path.exists(self.test_code_path): + os.remove(self.test_code_path) + if os.path.exists(self.tc_args_path): + os.remove(self.tc_args_path) if self.files: delete_files(self.files) @@ -58,18 +68,20 @@ class BashCodeEvaluator(BaseEvaluator): Returns (False, error_msg, 0.0): If mandatory arguments are not files or if the required permissions are not given to the file(s). 
""" - ref_code_path = self.test_case success = False mark_fraction = 0.0 - self.submit_code_path = self.create_submit_code_file('submit.sh') self._set_file_as_executable(self.submit_code_path) - - get_ref_path, get_test_case_path = ref_code_path.strip().split(',') - get_ref_path = get_ref_path.strip() - get_test_case_path = get_test_case_path.strip() - clean_ref_code_path, clean_test_case_path = \ - self._set_test_code_file_path(get_ref_path, get_test_case_path) + self.test_code_path = self.create_submit_code_file('main.sh') + self._set_file_as_executable(self.test_code_path) + if self.test_case_args: + self.tc_args_path = self.create_submit_code_file('main.args') + self.write_to_submit_code_file(self.tc_args_path, self.test_case_args) + self.user_answer = self.user_answer.replace("\r", "") + self.test_case = self.test_case.replace("\r", "") + self.write_to_submit_code_file(self.submit_code_path, self.user_answer) + self.write_to_submit_code_file(self.test_code_path, self.test_case) + clean_ref_code_path, clean_test_case_path = self.test_code_path, self.tc_args_path if self.file_paths: self.files = copy_files(self.file_paths) @@ -86,10 +98,7 @@ class BashCodeEvaluator(BaseEvaluator): msg = "Script %s is not executable" % self.submit_code_path return False, msg, 0.0 - self.user_answer = self.user_answer.replace("\r", "") - self.write_to_submit_code_file(self.submit_code_path, self.user_answer) - - if clean_test_case_path is None or "": + if not clean_test_case_path: ret = self._run_command(clean_ref_code_path, stdin=None, stdout=subprocess.PIPE, @@ -103,11 +112,11 @@ class BashCodeEvaluator(BaseEvaluator): ) proc, stdnt_stdout, stdnt_stderr = ret if inst_stdout == stdnt_stdout: - mark_fraction = float(self.weight) if self.partial_grading else 0.0 + mark_fraction = 1.0 if self.partial_grading else 0.0 return True, None, mark_fraction else: - err = "Error: expected %s, got %s" % (inst_stderr, - stdnt_stderr + err = "Error: expected %s, got %s" % (inst_stdout + inst_stderr, + stdnt_stdout + stdnt_stderr ) return False, err, 0.0 else: @@ -146,7 +155,7 @@ class BashCodeEvaluator(BaseEvaluator): proc, stdnt_stdout, stdnt_stderr = ret valid_answer = inst_stdout == stdnt_stdout if valid_answer and (num_lines == loop_count): - mark_fraction = float(self.weight) if self.partial_grading else 0.0 + mark_fraction = 1.0 if self.partial_grading else 0.0 return True, None, mark_fraction else: err = ("Error:expected" diff --git a/yaksh/bash_files/sample.args b/yaksh/bash_files/sample.args deleted file mode 100644 index 4d9f00d..0000000 --- a/yaksh/bash_files/sample.args +++ /dev/null @@ -1,2 +0,0 @@ -1 2 -2 1 diff --git a/yaksh/bash_files/sample.sh b/yaksh/bash_files/sample.sh deleted file mode 100755 index e935cb3..0000000 --- a/yaksh/bash_files/sample.sh +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/bash -[[ $# -eq 2 ]] && echo $(( $1 + $2 )) && exit $(( $1 + $2 )) diff --git a/yaksh/bash_files/sample1.args b/yaksh/bash_files/sample1.args deleted file mode 100644 index 541cb64..0000000 --- a/yaksh/bash_files/sample1.args +++ /dev/null @@ -1 +0,0 @@ -test.txt
\ No newline at end of file diff --git a/yaksh/bash_files/sample1.sh b/yaksh/bash_files/sample1.sh deleted file mode 100755 index 965874b..0000000 --- a/yaksh/bash_files/sample1.sh +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/bash -cat $1 diff --git a/yaksh/bash_files/sample2.args b/yaksh/bash_files/sample2.args deleted file mode 100755 index cf4499d..0000000 --- a/yaksh/bash_files/sample2.args +++ /dev/null @@ -1 +0,0 @@ -file1.csv file2.csv file3.csv diff --git a/yaksh/bash_files/sample2.sh b/yaksh/bash_files/sample2.sh deleted file mode 100755 index 5dc55b8..0000000 --- a/yaksh/bash_files/sample2.sh +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/bash -cat $1 | cut -d: -f2 | paste -d: $3 - $2 diff --git a/yaksh/bash_stdio_evaluator.py b/yaksh/bash_stdio_evaluator.py index 50ee0d6..334620d 100644 --- a/yaksh/bash_stdio_evaluator.py +++ b/yaksh/bash_stdio_evaluator.py @@ -55,5 +55,5 @@ class BashStdIOEvaluator(StdIOEvaluator): self.expected_input, self.expected_output ) - mark_fraction = float(self.weight) if self.partial_grading and success else 0.0 + mark_fraction = 1.0 if self.partial_grading and success else 0.0 return success, err, mark_fraction diff --git a/yaksh/c_cpp_files/file_data.c b/yaksh/c_cpp_files/file_data.c deleted file mode 100644 index 1c0ab12..0000000 --- a/yaksh/c_cpp_files/file_data.c +++ /dev/null @@ -1,25 +0,0 @@ -#include <stdio.h> -#include <stdlib.h> - -extern int ans(); - -template <class T> -void check(T expect,T result) -{ - if (expect == result) - { - printf("\nCorrect:\n Expected %d got %d \n",expect,result); - } - else - { - printf("\nIncorrect:\n Expected %d got %d \n",expect,result); - exit (0); - } -} - -int main(void) -{ - int result; - result = ans(); - check(50, result); -} diff --git a/yaksh/c_cpp_files/main.cpp b/yaksh/c_cpp_files/main.cpp deleted file mode 100755 index ebe1f08..0000000 --- a/yaksh/c_cpp_files/main.cpp +++ /dev/null @@ -1,32 +0,0 @@ -#include <stdio.h> -#include <stdlib.h> - -extern int add(int, int); - -template <class T> - -void check(T expect, T result) -{ - if (expect == result) - { - printf("\nCorrect:\n Expected %d got %d \n",expect,result); - } - else - { - printf("\nIncorrect:\n Expected %d got %d \n",expect,result); - exit (1); - } -} - -int main(void) -{ - int result; - result = add(0,0); - printf("Input submitted to the function: 0, 0"); - check(0, result); - result = add(2,3); - printf("Input submitted to the function: 2 3"); - check(5,result); - printf("All Correct\n"); - return 0; -} diff --git a/yaksh/c_cpp_files/main2.c b/yaksh/c_cpp_files/main2.c deleted file mode 100755 index a62195f..0000000 --- a/yaksh/c_cpp_files/main2.c +++ /dev/null @@ -1,30 +0,0 @@ -#include <stdio.h> -#include <stdlib.h> - -extern int add(int, int, int); - -template <class T> -void check(T expect,T result) -{ - if (expect == result) - { - printf("\nCorrect:\n Expected %d got %d \n",expect,result); - } - else - { - printf("\nIncorrect:\n Expected %d got %d \n",expect,result); - exit (1); - } -} - -int main(void) -{ - int result; - result = add(0,0,0); - printf("Input submitted to the function: 0, 0, 0"); - check(0, result); - result = add(2,3,3); - printf("Input submitted to the function: 2, 3, 3"); - check(8,result); - printf("All Correct\n"); -} diff --git a/yaksh/c_cpp_files/main_array_check.cpp b/yaksh/c_cpp_files/main_array_check.cpp deleted file mode 100755 index ea34fdd..0000000 --- a/yaksh/c_cpp_files/main_array_check.cpp +++ /dev/null @@ -1,34 +0,0 @@ -#include <stdio.h> -#include <stdlib.h> - -extern bool array_check(int [], int); - -template 
<class T> - -void check(T expect,T result) -{ - if (expect == result) - { - printf("\nCorrect:\n Expected %d got %d \n",expect,result); - } - else - { - printf("\nIncorrect:\n Expected %d got %d \n",expect,result); - exit (1); - } -} - -int main(void) -{ - bool result; - int a[] = {1,2,3,0,0}; - result = array_check(a, 2); - printf("Input submitted to the function: {1, 2, 3, 0, 0} and index 2"); - check(false, result); - int b[] = {1,2,3,4,5}; - result = array_check(b, 3); - printf("Input submitted to the function: {1, 2, 3, 4, 5} and index 3"); - check(true, result); - printf("All Correct\n"); - return 0; -} diff --git a/yaksh/c_cpp_files/main_array_check_all.cpp b/yaksh/c_cpp_files/main_array_check_all.cpp deleted file mode 100755 index 140578e..0000000 --- a/yaksh/c_cpp_files/main_array_check_all.cpp +++ /dev/null @@ -1,34 +0,0 @@ -#include <stdio.h> -#include <stdlib.h> - -extern bool array_check_all(int []); - -template <class T> - -void check(T expect,T result) -{ - if (expect == result) - { - printf("\nCorrect:\n Expected %d got %d \n",expect,result); - } - else - { - printf("\nIncorrect:\n Expected %d got %d \n",expect,result); - exit (1); - } -} - -int main(void) -{ - bool result; - int a[] = {1,2,3,2,8}; - result = array_check_all(a); - printf("Input submitted to the function: {1, 2, 3, 2, 8}"); - check(false, result); - int b[] = {4,2,32,4,56}; - result = array_check_all(b); - printf("Input submitted to the function: {4, 2, 32, 4, 56}"); - check(true, result); - printf("All Correct\n"); - return 0; -} diff --git a/yaksh/c_cpp_files/main_array_sum.cpp b/yaksh/c_cpp_files/main_array_sum.cpp deleted file mode 100755 index 55b2ebf..0000000 --- a/yaksh/c_cpp_files/main_array_sum.cpp +++ /dev/null @@ -1,34 +0,0 @@ -#include <stdio.h> -#include <stdlib.h> - -extern int array_sum(int []); - -template <class T> - -void check(T expect,T result) -{ - if (expect == result) - { - printf("\nCorrect:\n Expected %d got %d \n",expect,result); - } - else - { - printf("\nIncorrect:\n Expected %d got %d \n",expect,result); - exit (1); - } -} - -int main(void) -{ - int result; - int a[] = {1,2,3,0,0}; - result = array_sum(a); - printf("Input submitted to the function: {1, 2, 3, 0, 0}"); - check(6, result); - int b[] = {1,2,3,4,5}; - result = array_sum(b); - printf("Input submitted to the function: {1, 2, 3, 4, 5}"); - check(15,result); - printf("All Correct\n"); - return 0; -} diff --git a/yaksh/c_cpp_files/main_blackJack.cpp b/yaksh/c_cpp_files/main_blackJack.cpp deleted file mode 100755 index cc54e78..0000000 --- a/yaksh/c_cpp_files/main_blackJack.cpp +++ /dev/null @@ -1,41 +0,0 @@ -#include <stdio.h> -#include <stdlib.h> - -extern int blackJack(int, int); - -template <class T> - -void check(T expect, T result) -{ - if (expect == result) - { - printf("\nCorrect:\n Expected %d got %d \n",expect,result); - } - else - { - printf("\nIncorrect:\n Expected %d got %d \n",expect,result); - exit (1); - } -} - -int main(void) -{ - int result; - result = blackJack(11, 12); - printf("Input submitted to the function: 11, 12"); - check(12, result); - result = blackJack(15, 19); - printf("Input submitted to the function: 15, 19"); - check(19, result); - result = blackJack(10, 21); - printf("Input submitted to the function: 10, 21"); - check(21, result); - result = blackJack(31, 22); - printf("Input submitted to the function: 31, 22"); - check(0, result); - result = blackJack(91, 61); - printf("Input submitted to the function: 91, 61"); - check(0, result); - printf("All Correct\n"); - return 0; -} diff --git 
a/yaksh/c_cpp_files/main_check_digit.cpp b/yaksh/c_cpp_files/main_check_digit.cpp deleted file mode 100755 index d3bf3d6..0000000 --- a/yaksh/c_cpp_files/main_check_digit.cpp +++ /dev/null @@ -1,32 +0,0 @@ -#include <stdio.h> -#include <stdlib.h> - -extern bool check_digit(int, int); - -template <class T> - -void check(T expect, T result) -{ - if (expect == result) - { - printf("\nCorrect:\n Expected %d got %d \n",expect,result); - } - else - { - printf("\nIncorrect:\n Expected %d got %d \n",expect,result); - exit (1); - } -} - -int main(void) -{ - bool result; - result = check_digit(12, 23); - printf("Input submitted to the function: 12, 23"); - check(true, result); - result = check_digit(22, 11); - printf("Input submitted to the function: 121"); - check(false, result); - printf("All Correct\n"); - return 0; -} diff --git a/yaksh/c_cpp_files/main_count667.cpp b/yaksh/c_cpp_files/main_count667.cpp deleted file mode 100755 index f146e8c..0000000 --- a/yaksh/c_cpp_files/main_count667.cpp +++ /dev/null @@ -1,42 +0,0 @@ -#include <stdio.h> -#include <stdlib.h> - -extern int count667(int[]); - -template <class T> - -void check(T expect, T result) -{ - if (expect == result) - { - printf("\nCorrect:\n Expected %d got %d \n",expect,result); - } - else - { - printf("\nIncorrect:\n Expected %d got %d \n",expect,result); - exit (1); - } -} - -int main(void) -{ - int result; - int arr[5] = {2,6,4,5,6}; - result = count667(arr); - printf("Input submitted to the function: [2, 6, 4, 5,6]"); - check(0, result); - int arr2[5] = {6,6,2,17,9}; - result = count667(arr2); - printf("Input submitted to the function: [6, 6, 2, 17, 9]"); - check(1, result); - int arr3[5] = {6,6,6,7,1}; - result = count667(arr3); - printf("Input submitted to the function: [6, 6, 7, 2, 1]"); - check(3, result); - int arr4[5] = {6,7,7,6,6}; - result = count667(arr4); - printf("Input submitted to the function: [6, 7, 7, 6, 6]"); - check(2, result); - printf("All Correct\n"); - return 0; -} diff --git a/yaksh/c_cpp_files/main_count7.cpp b/yaksh/c_cpp_files/main_count7.cpp deleted file mode 100755 index 982e930..0000000 --- a/yaksh/c_cpp_files/main_count7.cpp +++ /dev/null @@ -1,42 +0,0 @@ -#include <stdio.h> -#include <stdlib.h> - -extern int count7(int[]); - -template <class T> - -void check(T expect, T result) -{ - if (expect == result) - { - printf("\nCorrect:\n Expected %d got %d \n",expect,result); - } - else - { - printf("\nIncorrect:\n Expected %d got %d \n",expect,result); - exit (1); - } -} - -int main(void) -{ - int result; - int arr[4] = {2,3,4,5}; - result = count7(arr); - printf("Input submitted to the function: [2, 3, 4, 5]"); - check(0, result); - int arr2[4] = {1,2,17,9}; - result = count7(arr2); - printf("Input submitted to the function: [1, 2, 17, 9]"); - check(0, result); - int arr3[4] = {7,9,2,1}; - result = count7(arr3); - printf("Input submitted to the function: [7, 9, 2, 1]"); - check(1, result); - int arr4[4] = {1,7,7,7}; - result = count7(arr4); - printf("Input submitted to the function: [1, 7, 7, 7]"); - check(3, result); - printf("All Correct\n"); - return 0; -} diff --git a/yaksh/c_cpp_files/main_fact.cpp b/yaksh/c_cpp_files/main_fact.cpp deleted file mode 100755 index a4ff230..0000000 --- a/yaksh/c_cpp_files/main_fact.cpp +++ /dev/null @@ -1,32 +0,0 @@ -#include <stdio.h> -#include <stdlib.h> - -extern int factorial(int); - -template <class T> - -void check(T expect, T result) -{ - if (expect == result) - { - printf("\nCorrect:\n Expected %d got %d \n",expect,result); - } - else - { - printf("\nIncorrect:\n 
Expected %d got %d \n",expect,result); - exit (1); - } -} - -int main(void) -{ - int result; - result = factorial(0); - printf("Input submitted to the function: 0"); - check(1, result); - result = factorial(3); - printf("Input submitted to the function: 3"); - check(6, result); - printf("All Correct\n"); - return 0; -} diff --git a/yaksh/c_cpp_files/main_greatest.cpp b/yaksh/c_cpp_files/main_greatest.cpp deleted file mode 100755 index 6d0a7c2..0000000 --- a/yaksh/c_cpp_files/main_greatest.cpp +++ /dev/null @@ -1,44 +0,0 @@ -#include <stdio.h> -#include <stdlib.h> - -extern int greatest(int, int, int); - -template <class T> - -void check(T expect, T result) -{ - if (expect == result) - { - printf("\nCorrect:\n Expected %d got %d \n",expect,result); - } - else - { - printf("\nIncorrect:\n Expected %d got %d \n",expect,result); - exit (1); - } -} - -int main(void) -{ - int result; - result = greatest(1, 2, 3); - printf("Input submitted to the function: 1, 2, 3"); - check(3, result); - result = greatest(5, 9, 2); - printf("Input submitted to the function: 5, 9, 2"); - check(9, result); - result = greatest(7, 2, 4); - printf("Input submitted to the function: 7, 2, 4"); - check(7, result); - result = greatest(11, 2, 45); - printf("Input submitted to the function: 11, 2, 45"); - check(45, result); - result = greatest(2, 7, 0); - printf("Input submitted to the function: 2, 7, 0"); - check(7, result); - result = greatest(9, 6, 5); - printf("Input submitted to the function: 9, 6, 5"); - check(9, result); - printf("All Correct\n"); - return 0; -} diff --git a/yaksh/c_cpp_files/main_hello_name.c b/yaksh/c_cpp_files/main_hello_name.c deleted file mode 100755 index 71b83a2..0000000 --- a/yaksh/c_cpp_files/main_hello_name.c +++ /dev/null @@ -1,29 +0,0 @@ -#include <stdio.h> -#include <stdlib.h> - - -void check(char expect[], char result[]) -{ - if (expect == result) - { - printf("Correct:expected %s got %s \n",expect,result); - } - else - { - printf("ERROR:expected %s got %s \n",expect,result); - exit (0); - } -} - -int main(void) -{ - char result[20]; - char A[20]=" pratham"; - char B[20]=" sir"; - result[20] = message(A); - printf("%s",result); - check("hello pratham", result); - result[20] = message(B); - check("hello sir",result); - printf("All Correct\n"); -} diff --git a/yaksh/c_cpp_files/main_lessThan9.cpp b/yaksh/c_cpp_files/main_lessThan9.cpp deleted file mode 100755 index 722b4bb..0000000 --- a/yaksh/c_cpp_files/main_lessThan9.cpp +++ /dev/null @@ -1,38 +0,0 @@ -#include <stdio.h> -#include <stdlib.h> - -extern bool lessThan9(int); - -template <class T> - -void check(T expect, T result) -{ - if (expect == result) - { - printf("\nCorrect:\n Expected %d got %d \n",expect,result); - } - else - { - printf("\nIncorrect:\n Expected %d got %d \n",expect,result); - exit (1); - } -} - -int main(void) -{ - bool result; - result = lessThan9(10); - printf("Input submitted to the function: 10"); - check(false, result); - result = lessThan9(17); - printf("Input submitted to the function: 17"); - check(true, result); - result = lessThan9(16); - printf("Input submitted to the function: 16"); - check(true, result); - result = lessThan9(15); - printf("Input submitted to the function: 15"); - check(false, result); - printf("All Correct\n"); - return 0; -} diff --git a/yaksh/c_cpp_files/main_mean.cpp b/yaksh/c_cpp_files/main_mean.cpp deleted file mode 100755 index 21a4b1a..0000000 --- a/yaksh/c_cpp_files/main_mean.cpp +++ /dev/null @@ -1,38 +0,0 @@ -#include <stdio.h> -#include <stdlib.h> - -extern bool mean(int, 
int , int); - -template <class T> - -void check(T expect, T result) -{ - if (expect == result) - { - printf("\nCorrect:\n Expected %d got %d \n",expect,result); - } - else - { - printf("\nIncorrect:\n Expected %d got %d \n",expect,result); - exit (1); - } -} - -int main(void) -{ - bool result; - result = mean(11, 11, 11); - printf("Input submitted to the function: 11, 121, 11"); - check(true, result); - result = mean(16, 12, 9); - printf("Input submitted to the function: 16, 144, 9"); - check(true, result); - result = mean(19, 221, 9); - printf("Input submitted to the function: 19, 221, 9"); - check(false, result); - result = mean(34, 12, 3); - printf("Input submitted to the function: 11, 121, 11"); - check(false, result); - printf("All Correct\n"); - return 0; -} diff --git a/yaksh/c_cpp_files/main_palindrome.cpp b/yaksh/c_cpp_files/main_palindrome.cpp deleted file mode 100755 index 0e66928..0000000 --- a/yaksh/c_cpp_files/main_palindrome.cpp +++ /dev/null @@ -1,32 +0,0 @@ -#include <stdio.h> -#include <stdlib.h> - -extern bool palindrome(int); - -template <class T> - -void check(T expect, T result) -{ - if (expect == result) - { - printf("\nCorrect:\n Expected %d got %d \n",expect,result); - } - else - { - printf("\nIncorrect:\n Expected %d got %d \n",expect,result); - exit (1); - } -} - -int main(void) -{ - bool result; - result = palindrome(123); - printf("Input submitted to the function: 123"); - check(false, result); - result = palindrome(121); - printf("Input submitted to the function: 121"); - check(true, result); - printf("All Correct\n"); - return 0; -} diff --git a/yaksh/c_cpp_files/main_roundTo10.cpp b/yaksh/c_cpp_files/main_roundTo10.cpp deleted file mode 100755 index 12c961d..0000000 --- a/yaksh/c_cpp_files/main_roundTo10.cpp +++ /dev/null @@ -1,41 +0,0 @@ -#include <stdio.h> -#include <stdlib.h> - -extern int roundTo10(int,int,int); - -template <class T> - -void check(T expect, T result) -{ - if (expect == result) - { - printf("\nCorrect:\n Expected %d got %d \n",expect,result); - } - else - { - printf("\nIncorrect:\n Expected %d got %d \n",expect,result); - exit (1); - } -} - -int main(void) -{ - int result; - result = roundTo10(10, 22, 39); - printf("Input submitted to the function: 10, 22, 39"); - check(70, result); - result = roundTo10(45, 42, 39); - printf("Input submitted to the function: 45, 42, 39"); - check(130, result); - result = roundTo10(7, 3, 9); - printf("Input submitted to the function: 7, 3, 9"); - check(20, result); - result = roundTo10(1, 2, 3); - printf("Input submitted to the function: 1, 2, 3"); - check(0, result); - result = roundTo10(30, 40, 50); - printf("Input submitted to the function: 30, 40, 50"); - check(120, result); - printf("All Correct\n"); - return 0; -} diff --git a/yaksh/c_cpp_files/main_specialSum.cpp b/yaksh/c_cpp_files/main_specialSum.cpp deleted file mode 100755 index d614536..0000000 --- a/yaksh/c_cpp_files/main_specialSum.cpp +++ /dev/null @@ -1,41 +0,0 @@ -#include <stdio.h> -#include <stdlib.h> - -extern int specialSum(int,int,int); - -template <class T> - -void check(T expect, T result) -{ - if (expect == result) - { - printf("\nCorrect:\n Expected %d got %d \n",expect,result); - } - else - { - printf("\nIncorrect:\n Expected %d got %d \n",expect,result); - exit (1); - } -} - -int main(void) -{ - int result; - result = specialSum(10, 2, 9); - printf("Input submitted to the function: 10, 2, 9"); - check(21, result); - result = specialSum(1, 21, 9); - printf("Input submitted to the function: 1, 21, 9"); - check(1, result); - result 
= specialSum(21, 2, 3); - printf("Input submitted to the function: 21, 2, 3"); - check(0, result); - result = specialSum(10, 2, 21); - printf("Input submitted to the function: 10, 2, 21"); - check(12, result); - result = specialSum(10, 2, 6); - printf("Input submitted to the function: 10, 2, 6"); - check(18, result); - printf("All Correct\n"); - return 0; -} diff --git a/yaksh/c_cpp_files/main_within.cpp b/yaksh/c_cpp_files/main_within.cpp deleted file mode 100755 index 50f9ad0..0000000 --- a/yaksh/c_cpp_files/main_within.cpp +++ /dev/null @@ -1,38 +0,0 @@ -#include <stdio.h> -#include <stdlib.h> - -extern bool within(int, int, int); - -template <class T> - -void check(T expect, T result) -{ - if (expect == result) - { - printf("\nCorrect:\n Expected %d got %d \n",expect,result); - } - else - { - printf("\nIncorrect:\n Expected %d got %d \n",expect,result); - exit (1); - } -} - -int main(void) -{ - bool result; - result = within(12, 3, 20); - printf("Input submitted to the function: 12, 3, 20"); - check(true, result); - result = within(12, 13, 20); - printf("Input submitted to the function: 12, 13, 20"); - check(false, result); - result = within(29, 13, 120); - printf("Input submitted to the function: 29, 13, 120"); - check(true, result); - result = within(12, 12, 20); - printf("Input submitted to the function: 12, 3, 20"); - check(false, result); - printf("All Correct\n"); - return 0; -} diff --git a/yaksh/cpp_code_evaluator.py b/yaksh/cpp_code_evaluator.py index 91ba703..4c8e938 100644 --- a/yaksh/cpp_code_evaluator.py +++ b/yaksh/cpp_code_evaluator.py @@ -15,11 +15,12 @@ class CppCodeEvaluator(BaseEvaluator): """Tests the C code obtained from Code Server""" def __init__(self, metadata, test_case_data): self.files = [] - self.submit_code_path = '' self.compiled_user_answer = None self.compiled_test_code = None self.user_output_path = "" self.ref_output_path = "" + self.submit_code_path = "" + self.test_code_path = "" # Set metadata values self.user_answer = metadata.get('user_answer') @@ -32,11 +33,14 @@ class CppCodeEvaluator(BaseEvaluator): def teardown(self): # Delete the created file. 
- os.remove(self.submit_code_path) + if os.path.exists(self.submit_code_path): + os.remove(self.submit_code_path) if os.path.exists(self.ref_output_path): os.remove(self.ref_output_path) if os.path.exists(self.user_output_path): os.remove(self.user_output_path) + if os.path.exists(self.test_code_path): + os.remove(self.test_code_path) if self.files: delete_files(self.files) @@ -59,10 +63,11 @@ class CppCodeEvaluator(BaseEvaluator): if self.compiled_user_answer and self.compiled_test_code: return None else: - ref_code_path = self.test_case - clean_ref_code_path, clean_test_case_path = \ - self._set_test_code_file_path(ref_code_path) self.submit_code_path = self.create_submit_code_file('submit.c') + self.test_code_path = self.create_submit_code_file('main.c') + self.write_to_submit_code_file(self.submit_code_path, self.user_answer) + self.write_to_submit_code_file(self.test_code_path, self.test_case) + clean_ref_code_path = self.test_code_path if self.file_paths: self.files = copy_files(self.file_paths) if not isfile(clean_ref_code_path): @@ -72,7 +77,6 @@ class CppCodeEvaluator(BaseEvaluator): msg = "No file at %s or Incorrect path" % self.submit_code_path return False, msg - self.write_to_submit_code_file(self.submit_code_path, self.user_answer) self.user_output_path, self.ref_output_path = self.set_file_paths() self.compile_command, self.compile_main = self.get_commands( clean_ref_code_path, @@ -135,7 +139,7 @@ class CppCodeEvaluator(BaseEvaluator): proc, stdout, stderr = ret if proc.returncode == 0: success, err = True, None - mark_fraction = float(self.weight) if self.partial_grading else 0.0 + mark_fraction = 1.0 if self.partial_grading else 0.0 else: err = "{0} \n {1}".format(stdout, stderr) else: diff --git a/yaksh/cpp_stdio_evaluator.py b/yaksh/cpp_stdio_evaluator.py index c318a82..b302fa4 100644 --- a/yaksh/cpp_stdio_evaluator.py +++ b/yaksh/cpp_stdio_evaluator.py @@ -13,7 +13,6 @@ class CppStdIOEvaluator(StdIOEvaluator): """Evaluates C StdIO based code""" def __init__(self, metadata, test_case_data): self.files = [] - self.submit_code_path = self.create_submit_code_file('submit.c') # Set metadata values self.user_answer = metadata.get('user_answer') @@ -43,6 +42,7 @@ class CppStdIOEvaluator(StdIOEvaluator): return compile_command, compile_main def compile_code(self): + self.submit_code_path = self.create_submit_code_file('submit.c') if self.file_paths: self.files = copy_files(file_paths) if not isfile(self.submit_code_path): @@ -112,5 +112,5 @@ class CppStdIOEvaluator(StdIOEvaluator): err = err + "\n" + e except: err = err + "\n" + stdnt_stderr - mark_fraction = float(self.weight) if self.partial_grading and success else 0.0 + mark_fraction = 1.0 if self.partial_grading and success else 0.0 return success, err, mark_fraction diff --git a/yaksh/evaluator_tests/test_bash_evaluation.py b/yaksh/evaluator_tests/test_bash_evaluation.py index abadf26..4b551d7 100644 --- a/yaksh/evaluator_tests/test_bash_evaluation.py +++ b/yaksh/evaluator_tests/test_bash_evaluation.py @@ -13,10 +13,17 @@ from textwrap import dedent class BashAssertionEvaluationTestCases(EvaluatorBaseTest): def setUp(self): - with open('/tmp/test.txt', 'wb') as f: + self.f_path = os.path.join(tempfile.gettempdir(), "test.txt") + with open(self.f_path, 'wb') as f: f.write('2'.encode('ascii')) + self.tc_data = dedent(""" + #!/bin/bash + [[ $# -eq 2 ]] && echo $(( $1 + $2 )) && exit $(( $1 + $2 )) + """) + self.tc_data_args = "1 2\n2 1" self.test_case_data = [ - {"test_case": "bash_files/sample.sh,bash_files/sample.args", + 
{"test_case": self.tc_data, + "test_case_args": self.tc_data_args, "test_case_type": "standardtestcase", "weight": 0.0 } @@ -28,10 +35,11 @@ class BashAssertionEvaluationTestCases(EvaluatorBaseTest): self.file_paths = None def tearDown(self): - os.remove('/tmp/test.txt') + os.remove(self.f_path) shutil.rmtree(self.in_dir) def test_correct_answer(self): + # Given user_answer = ("#!/bin/bash\n[[ $# -eq 2 ]]" " && echo $(( $1 + $2 )) && exit $(( $1 + $2 ))" ) @@ -45,12 +53,15 @@ class BashAssertionEvaluationTestCases(EvaluatorBaseTest): 'test_case_data': self.test_case_data, } + # When grader = Grader(self.in_dir) result = grader.evaluate(kwargs) + # Then self.assertTrue(result.get('success')) def test_error(self): + # Given user_answer = ("#!/bin/bash\n[[ $# -eq 2 ]] " "&& echo $(( $1 - $2 )) && exit $(( $1 - $2 ))") kwargs = { @@ -63,13 +74,16 @@ class BashAssertionEvaluationTestCases(EvaluatorBaseTest): 'test_case_data': self.test_case_data, } + # When grader = Grader(self.in_dir) result = grader.evaluate(kwargs) + # Then self.assertFalse(result.get("success")) self.assert_correct_output("Error", result.get("error")) def test_infinite_loop(self): + # Given user_answer = ("#!/bin/bash\nwhile [ 1 ] ;" " do echo "" > /dev/null ; done") kwargs = { @@ -82,16 +96,25 @@ class BashAssertionEvaluationTestCases(EvaluatorBaseTest): 'test_case_data': self.test_case_data, } + # When grader = Grader(self.in_dir) result = grader.evaluate(kwargs) + # Then self.assertFalse(result.get("success")) self.assert_correct_output(self.timeout_msg, result.get("error")) def test_file_based_assert(self): - self.file_paths = [('/tmp/test.txt', False)] + # Given + self.file_paths = [(self.f_path, False)] + self.tc_data = dedent(""" + #!/bin/bash + cat $1 + """) + self.tc_data_args = "test.txt" self.test_case_data = [ - {"test_case": "bash_files/sample1.sh,bash_files/sample1.args", + {"test_case": self.tc_data, + "test_case_args": self.tc_data_args, "test_case_type": "standardtestcase", "weight": 0.0 } @@ -107,9 +130,11 @@ class BashAssertionEvaluationTestCases(EvaluatorBaseTest): 'test_case_data': self.test_case_data, } + # When grader = Grader(self.in_dir) result = grader.evaluate(kwargs) + # Then self.assertTrue(result.get("success")) class BashStdIOEvaluationTestCases(EvaluatorBaseTest): @@ -122,6 +147,7 @@ class BashStdIOEvaluationTestCases(EvaluatorBaseTest): def test_correct_answer(self): + # Given user_answer = dedent(""" #!/bin/bash read A read B @@ -143,12 +169,15 @@ class BashStdIOEvaluationTestCases(EvaluatorBaseTest): 'test_case_data': test_case_data, } + # When grader = Grader(self.in_dir) result = grader.evaluate(kwargs) + # Then self.assertTrue(result.get('success')) def test_array_input(self): + # Given user_answer = dedent(""" readarray arr; COUNTER=0 while [ $COUNTER -lt 3 ]; do @@ -172,12 +201,15 @@ class BashStdIOEvaluationTestCases(EvaluatorBaseTest): 'test_case_data': test_case_data, } + # When grader = Grader(self.in_dir) result = grader.evaluate(kwargs) + # Then self.assertTrue(result.get('success')) def test_incorrect_answer(self): + # Given user_answer = dedent(""" #!/bin/bash read A read B @@ -199,12 +231,16 @@ class BashStdIOEvaluationTestCases(EvaluatorBaseTest): 'test_case_data': test_case_data, } + # When grader = Grader(self.in_dir) result = grader.evaluate(kwargs) + + # Then self.assert_correct_output("Incorrect", result.get('error')) self.assertFalse(result.get('success')) def test_stdout_only(self): + # Given user_answer = dedent(""" #!/bin/bash A=6 B=4 @@ -226,8 +262,11 @@ class 
BashStdIOEvaluationTestCases(EvaluatorBaseTest): 'test_case_data': test_case_data, } + # When grader = Grader(self.in_dir) result = grader.evaluate(kwargs) + + # Then self.assertTrue(result.get('success')) if __name__ == '__main__': diff --git a/yaksh/evaluator_tests/test_c_cpp_evaluation.py b/yaksh/evaluator_tests/test_c_cpp_evaluation.py index ec59a6b..d734cf2 100644 --- a/yaksh/evaluator_tests/test_c_cpp_evaluation.py +++ b/yaksh/evaluator_tests/test_c_cpp_evaluation.py @@ -15,10 +15,45 @@ from yaksh.settings import SERVER_TIMEOUT class CAssertionEvaluationTestCases(EvaluatorBaseTest): def setUp(self): - with open('/tmp/test.txt', 'wb') as f: + self.f_path = os.path.join(tempfile.gettempdir(), "test.txt") + with open(self.f_path, 'wb') as f: f.write('2'.encode('ascii')) tmp_in_dir_path = tempfile.mkdtemp() - self.test_case_data = [{"test_case": "c_cpp_files/main.cpp", + self.tc_data = dedent(""" + #include <stdio.h> + #include <stdlib.h> + + extern int add(int, int); + + template <class T> + + void check(T expect, T result) + { + if (expect == result) + { + printf("Correct: Expected %d got %d ",expect,result); + } + else + { + printf("Incorrect: Expected %d got %d ",expect,result); + exit (1); + } + } + + int main(void) + { + int result; + result = add(0,0); + printf("Input submitted to the function: 0, 0"); + check(0, result); + result = add(2,3); + printf("Input submitted to the function: 2 3"); + check(5,result); + printf("All Correct"); + return 0; + } + """) + self.test_case_data = [{"test_case": self.tc_data, "test_case_type": "standardtestcase", "weight": 0.0 }] @@ -29,10 +64,11 @@ class CAssertionEvaluationTestCases(EvaluatorBaseTest): self.file_paths = None def tearDown(self): - os.remove('/tmp/test.txt') + os.remove(self.f_path) shutil.rmtree(self.in_dir) def test_correct_answer(self): + # Given user_answer = "int add(int a, int b)\n{return a+b;}" kwargs = { 'metadata': { @@ -44,12 +80,15 @@ class CAssertionEvaluationTestCases(EvaluatorBaseTest): 'test_case_data': self.test_case_data, } + # When grader = Grader(self.in_dir) result = grader.evaluate(kwargs) + # Then self.assertTrue(result.get('success')) def test_incorrect_answer(self): + # Given user_answer = "int add(int a, int b)\n{return a-b;}" kwargs = { 'metadata': { @@ -61,15 +100,18 @@ class CAssertionEvaluationTestCases(EvaluatorBaseTest): 'test_case_data': self.test_case_data, } + # When grader = Grader(self.in_dir) result = grader.evaluate(kwargs) + # Then lines_of_error = len(result.get('error')[0].splitlines()) self.assertFalse(result.get('success')) self.assert_correct_output("Incorrect:", result.get('error')) self.assertTrue(lines_of_error > 1) def test_compilation_error(self): + # Given user_answer = "int add(int a, int b)\n{return a+b}" kwargs = { 'metadata': { @@ -81,13 +123,16 @@ class CAssertionEvaluationTestCases(EvaluatorBaseTest): 'test_case_data': self.test_case_data, } + # When grader = Grader(self.in_dir) result = grader.evaluate(kwargs) + # Then self.assertFalse(result.get("success")) self.assert_correct_output("Compilation Error", result.get("error")) def test_infinite_loop(self): + # Given user_answer = "int add(int a, int b)\n{while(1>0){}}" kwargs = { 'metadata': { @@ -99,15 +144,45 @@ class CAssertionEvaluationTestCases(EvaluatorBaseTest): 'test_case_data': self.test_case_data, } + # When grader = Grader(self.in_dir) result = grader.evaluate(kwargs) + # Then self.assertFalse(result.get("success")) self.assert_correct_output(self.timeout_msg, result.get("error")) def test_file_based_assert(self): 
- self.file_paths = [('/tmp/test.txt', False)] - self.test_case_data = [{"test_case": "c_cpp_files/file_data.c", + # Given + self.file_paths = [(self.f_path, False)] + self.tc_data = dedent(""" + #include <stdio.h> + #include <stdlib.h> + + extern int ans(); + + template <class T> + void check(T expect,T result) + { + if (expect == result) + { + printf("Correct: Expected %d got %d ",expect,result); + } + else + { + printf("Incorrect: Expected %d got %d ",expect,result); + exit (0); + } + } + + int main(void) + { + int result; + result = ans(); + check(50, result); + } + """) + self.test_case_data = [{"test_case": self.tc_data, "test_case_type": "standardtestcase", "weight": 0.0 }] @@ -133,11 +208,14 @@ class CAssertionEvaluationTestCases(EvaluatorBaseTest): 'test_case_data': self.test_case_data, } + # When grader = Grader(self.in_dir) result = grader.evaluate(kwargs) + # Then self.assertTrue(result.get('success')) + class CppStdIOEvaluationTestCases(EvaluatorBaseTest): def setUp(self): self.test_case_data = [{'expected_output': '11', @@ -151,7 +229,11 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest): " your code.").format(SERVER_TIMEOUT) self.file_paths = None + def tearDown(self): + shutil.rmtree(self.in_dir) + def test_correct_answer(self): + # Given user_answer = dedent(""" #include<stdio.h> int main(void){ @@ -169,12 +251,15 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest): 'test_case_data': self.test_case_data, } + # When grader = Grader(self.in_dir) result = grader.evaluate(kwargs) + # Then self.assertTrue(result.get('success')) def test_array_input(self): + # Given self.test_case_data = [{'expected_output': '561', 'expected_input': '5\n6\n1', 'weight': 0.0, @@ -199,12 +284,15 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest): 'test_case_data': self.test_case_data, } + # When grader = Grader(self.in_dir) result = grader.evaluate(kwargs) + # Then self.assertTrue(result.get('success')) def test_string_input(self): + # Given self.test_case_data = [{'expected_output': 'abc', 'expected_input': 'abc', 'weight': 0.0, @@ -227,12 +315,15 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest): 'test_case_data': self.test_case_data, } + # When grader = Grader(self.in_dir) result = grader.evaluate(kwargs) + # Then self.assertTrue(result.get('success')) def test_incorrect_answer(self): + # Given user_answer = dedent(""" #include<stdio.h> int main(void){ @@ -249,15 +340,18 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest): 'test_case_data': self.test_case_data, } + # When grader = Grader(self.in_dir) result = grader.evaluate(kwargs) + # Then lines_of_error = len(result.get('error')[0].splitlines()) self.assertFalse(result.get('success')) self.assert_correct_output("Incorrect", result.get('error')) self.assertTrue(lines_of_error > 1) def test_error(self): + # Given user_answer = dedent(""" #include<stdio.h> int main(void){ @@ -274,13 +368,16 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest): 'test_case_data': self.test_case_data, } + # When grader = Grader(self.in_dir) result = grader.evaluate(kwargs) + # Then self.assertFalse(result.get("success")) self.assert_correct_output("Compilation Error", result.get("error")) def test_infinite_loop(self): + # Given user_answer = dedent(""" #include<stdio.h> int main(void){ @@ -297,13 +394,16 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest): 'test_case_data': self.test_case_data, } + # When grader = Grader(self.in_dir) result = grader.evaluate(kwargs) + # Then self.assertFalse(result.get("success")) 
self.assert_correct_output(self.timeout_msg, result.get("error")) def test_only_stdout(self): + # Given self.test_case_data = [{'expected_output': '11', 'expected_input': '', 'weight': 0.0, @@ -325,12 +425,15 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest): 'test_case_data': self.test_case_data, } + # When grader = Grader(self.in_dir) result = grader.evaluate(kwargs) + # Then self.assertTrue(result.get('success')) def test_cpp_correct_answer(self): + # Given user_answer = dedent(""" #include<iostream> using namespace std; @@ -349,12 +452,15 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest): 'test_case_data': self.test_case_data, } + # When grader = Grader(self.in_dir) result = grader.evaluate(kwargs) + # Then self.assertTrue(result.get('success')) def test_cpp_array_input(self): + # Given self.test_case_data = [{'expected_output': '561', 'expected_input': '5\n6\n1', 'weight': 0.0, @@ -380,12 +486,15 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest): 'test_case_data': self.test_case_data, } + # When grader = Grader(self.in_dir) result = grader.evaluate(kwargs) + # Then self.assertTrue(result.get('success')) def test_cpp_string_input(self): + # Given self.test_case_data = [{'expected_output': 'abc', 'expected_input': 'abc', 'weight': 0.0, @@ -409,12 +518,15 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest): 'test_case_data': self.test_case_data, } + # When grader = Grader(self.in_dir) result = grader.evaluate(kwargs) + # Then self.assertTrue(result.get('success')) def test_cpp_incorrect_answer(self): + # Given user_answer = dedent(""" #include<iostream> using namespace std; @@ -432,15 +544,18 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest): 'test_case_data': self.test_case_data, } + # When grader = Grader(self.in_dir) result = grader.evaluate(kwargs) + # Then lines_of_error = len(result.get('error')[0].splitlines()) self.assertFalse(result.get('success')) self.assert_correct_output("Incorrect", result.get('error')) self.assertTrue(lines_of_error > 1) def test_cpp_error(self): + # Given user_answer = dedent(""" #include<iostream> using namespace std; @@ -458,13 +573,16 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest): 'test_case_data': self.test_case_data, } + # When grader = Grader(self.in_dir) result = grader.evaluate(kwargs) + # Then self.assertFalse(result.get("success")) self.assert_correct_output("Compilation Error", result.get("error")) def test_cpp_infinite_loop(self): + # Given user_answer = dedent(""" #include<iostream> using namespace std; @@ -482,13 +600,16 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest): 'test_case_data': self.test_case_data, } + # When grader = Grader(self.in_dir) result = grader.evaluate(kwargs) + # Then self.assertFalse(result.get("success")) self.assert_correct_output(self.timeout_msg, result.get("error")) def test_cpp_only_stdout(self): + # Given self.test_case_data = [{'expected_output': '11', 'expected_input': '', 'weight': 0.0, @@ -511,9 +632,11 @@ class CppStdIOEvaluationTestCases(EvaluatorBaseTest): 'test_case_data': self.test_case_data, } + # When grader = Grader(self.in_dir) result = grader.evaluate(kwargs) + # Then self.assertTrue(result.get('success')) if __name__ == '__main__': diff --git a/yaksh/evaluator_tests/test_java_evaluation.py b/yaksh/evaluator_tests/test_java_evaluation.py index bfba38f..b53d8aa 100644 --- a/yaksh/evaluator_tests/test_java_evaluation.py +++ b/yaksh/evaluator_tests/test_java_evaluation.py @@ -15,11 +15,47 @@ from yaksh.evaluator_tests.test_python_evaluation import 
EvaluatorBaseTest class JavaAssertionEvaluationTestCases(EvaluatorBaseTest): def setUp(self): - with open('/tmp/test.txt', 'wb') as f: + self.f_path = os.path.join(tempfile.gettempdir(), "test.txt") + with open(self.f_path, 'wb') as f: f.write('2'.encode('ascii')) tmp_in_dir_path = tempfile.mkdtemp() + self.tc_data = dedent(""" + class main + { + public static <E> void check(E expect, E result) + { + if(result.equals(expect)) + { + System.out.println("Correct:Output expected "+expect+" and got "+result); + } + else + { + System.out.println("Incorrect:Output expected "+expect+" but got "+result); + System.exit(1); + } + } + public static void main(String arg[]) + { + Test t = new Test(); + int result, input, output; + input = 0; output = 0; + result = t.square_num(input); + System.out.println("Input submitted to the function: "+input); + check(output, result); + input = 5; output = 25; + result = t.square_num(input); + System.out.println("Input submitted to the function: "+input); + check(output, result); + input = 6; output = 36; + result = t.square_num(input); + System.out.println("Input submitted to the function: "+input); + check(output, result); + } + } + """) + self.test_case_data = [ - {"test_case": "java_files/main_square.java", + {"test_case": self.tc_data, "test_case_type": "standardtestcase", "weight": 0.0 } @@ -34,10 +70,11 @@ class JavaAssertionEvaluationTestCases(EvaluatorBaseTest): def tearDown(self): gd.SERVER_TIMEOUT = 4 - os.remove('/tmp/test.txt') + os.remove(self.f_path) shutil.rmtree(self.in_dir) def test_correct_answer(self): + # Given user_answer = "class Test {\n\tint square_num(int a) {\n\treturn a*a;\n\t}\n}" kwargs = { 'metadata': { @@ -49,12 +86,15 @@ class JavaAssertionEvaluationTestCases(EvaluatorBaseTest): 'test_case_data': self.test_case_data, } + # When grader = Grader(self.in_dir) result = grader.evaluate(kwargs) + # Then self.assertTrue(result.get('success')) def test_incorrect_answer(self): + # Given user_answer = "class Test {\n\tint square_num(int a) {\n\treturn a;\n\t}\n}" kwargs = { 'metadata': { @@ -66,9 +106,11 @@ class JavaAssertionEvaluationTestCases(EvaluatorBaseTest): 'test_case_data': self.test_case_data, } + # When grader = Grader(self.in_dir) result = grader.evaluate(kwargs) + # Then self.assertFalse(result.get('success')) lines_of_error = len(result.get('error')[0].splitlines()) self.assertFalse(result.get('success')) @@ -76,6 +118,7 @@ class JavaAssertionEvaluationTestCases(EvaluatorBaseTest): self.assertTrue(lines_of_error > 1) def test_error(self): + # Given user_answer = "class Test {\n\tint square_num(int a) {\n\treturn a*a" kwargs = { 'metadata': { @@ -87,13 +130,16 @@ class JavaAssertionEvaluationTestCases(EvaluatorBaseTest): 'test_case_data': self.test_case_data, } + # When grader = Grader(self.in_dir) result = grader.evaluate(kwargs) + # Then self.assertFalse(result.get("success")) self.assert_correct_output("Error", result.get("error")) def test_infinite_loop(self): + # Given user_answer = "class Test {\n\tint square_num(int a) {\n\t\twhile(0==0){\n\t\t}\n\t}\n}" kwargs = { 'metadata': { @@ -105,16 +151,47 @@ class JavaAssertionEvaluationTestCases(EvaluatorBaseTest): 'test_case_data': self.test_case_data, } + # When grader = Grader(self.in_dir) result = grader.evaluate(kwargs) + # Then self.assertFalse(result.get("success")) self.assert_correct_output(self.timeout_msg, result.get("error")) def test_file_based_assert(self): - self.file_paths = [("/tmp/test.txt", False)] + # Given + self.file_paths = [(self.f_path, False)] + 
self.tc_data = dedent(""" + class main + { + public static <E> void check(E expect, E result) + { + if(result.equals(expect)) + { + System.out.println("Correct:Output expected "+expect+" and got "+result); + } + else + { + System.out.println("Incorrect:Output expected "+expect+" but got "+result); + System.exit(1); + } + } + public static void main(String arg[]) + { + String result = ""; + Test t = new Test(); + try{ + result = t.readFile();} + catch(Exception e){ + System.out.print(e); + } + check("2", result); + } + } + """) self.test_case_data = [ - {"test_case": "java_files/read_file.java", + {"test_case": self.tc_data, "test_case_type": "standardtestcase", "weight": 0.0 } @@ -147,14 +224,17 @@ class JavaAssertionEvaluationTestCases(EvaluatorBaseTest): 'test_case_data': self.test_case_data, } + # When grader = Grader(self.in_dir) result = grader.evaluate(kwargs) + # Then self.assertTrue(result.get("success")) class JavaStdIOEvaluationTestCases(EvaluatorBaseTest): def setUp(self): - with open('/tmp/test.txt', 'wb') as f: + self.f_path = os.path.join(tempfile.gettempdir(), "test.txt") + with open(self.f_path, 'wb') as f: f.write('2'.encode('ascii')) tmp_in_dir_path = tempfile.mkdtemp() self.in_dir = tmp_in_dir_path @@ -171,10 +251,11 @@ class JavaStdIOEvaluationTestCases(EvaluatorBaseTest): def tearDown(self): gd.SERVER_TIMEOUT = 4 - os.remove('/tmp/test.txt') + os.remove(self.f_path) shutil.rmtree(self.in_dir) def test_correct_answer(self): + # Given user_answer = dedent(""" import java.util.Scanner; class Test @@ -194,12 +275,15 @@ class JavaStdIOEvaluationTestCases(EvaluatorBaseTest): 'test_case_data': self.test_case_data, } + # When grader = Grader(self.in_dir) result = grader.evaluate(kwargs) + # Then self.assertTrue(result.get('success')) def test_array_input(self): + # Given self.test_case_data = [{'expected_output': '561', 'expected_input': '5\n6\n1', 'test_case_type': 'stdiobasedtestcase', @@ -225,12 +309,15 @@ class JavaStdIOEvaluationTestCases(EvaluatorBaseTest): 'test_case_data': self.test_case_data, } + # When grader = Grader(self.in_dir) result = grader.evaluate(kwargs) + # Then self.assertTrue(result.get('success')) def test_incorrect_answer(self): + # Given user_answer = dedent(""" import java.util.Scanner; class Test @@ -250,15 +337,18 @@ class JavaStdIOEvaluationTestCases(EvaluatorBaseTest): 'test_case_data': self.test_case_data, } + # When grader = Grader(self.in_dir) result = grader.evaluate(kwargs) + # Then lines_of_error = len(result.get('error')[0].splitlines()) self.assertFalse(result.get('success')) self.assert_correct_output("Incorrect", result.get('error')) self.assertTrue(lines_of_error > 1) def test_error(self): + # Given user_answer = dedent(""" class Test { @@ -274,13 +364,16 @@ class JavaStdIOEvaluationTestCases(EvaluatorBaseTest): 'test_case_data': self.test_case_data, } + # When grader = Grader(self.in_dir) result = grader.evaluate(kwargs) + # Then self.assertFalse(result.get("success")) self.assertTrue("Compilation Error" in '\n'.join(result.get("error"))) def test_infinite_loop(self): + # Given user_answer = dedent(""" class Test {public static void main(String[] args){ @@ -298,13 +391,16 @@ class JavaStdIOEvaluationTestCases(EvaluatorBaseTest): 'test_case_data': self.test_case_data, } + # When grader = Grader(self.in_dir) result = grader.evaluate(kwargs) + # Then self.assertFalse(result.get("success")) self.assert_correct_output(self.timeout_msg, result.get("error")) def test_only_stdout(self): + # Given self.test_case_data = [{'expected_output': 
'11', 'expected_input': '', 'test_case_type': 'stdiobasedtestcase', @@ -327,12 +423,15 @@ class JavaStdIOEvaluationTestCases(EvaluatorBaseTest): 'test_case_data': self.test_case_data, } + # When grader = Grader(self.in_dir) result = grader.evaluate(kwargs) + # Then self.assertTrue(result.get('success')) def test_string_input(self): + # Given self.test_case_data = [{'expected_output': 'HelloWorld', 'expected_input': 'Hello\nWorld', 'test_case_type': 'stdiobasedtestcase', @@ -357,13 +456,16 @@ class JavaStdIOEvaluationTestCases(EvaluatorBaseTest): 'test_case_data': self.test_case_data, } + # When grader = Grader(self.in_dir) result = grader.evaluate(kwargs) + # Then self.assertTrue(result.get('success')) def test_file_based_stdout(self): - self.file_paths = [("/tmp/test.txt", False)] + # Given + self.file_paths = [(self.f_path, False)] self.test_case_data = [{'expected_output': '2', 'expected_input': '', 'test_case_type': 'stdiobasedtestcase', @@ -397,9 +499,11 @@ class JavaStdIOEvaluationTestCases(EvaluatorBaseTest): 'test_case_data': self.test_case_data, } + # When grader = Grader(self.in_dir) result = grader.evaluate(kwargs) + # Then self.assertTrue(result.get("success")) diff --git a/yaksh/evaluator_tests/test_scilab_evaluation.py b/yaksh/evaluator_tests/test_scilab_evaluation.py index 938d0e5..5a452a3 100644 --- a/yaksh/evaluator_tests/test_scilab_evaluation.py +++ b/yaksh/evaluator_tests/test_scilab_evaluation.py @@ -3,7 +3,7 @@ import unittest import os import shutil import tempfile - +from textwrap import dedent from yaksh import grader as gd from yaksh.grader import Grader from yaksh.scilab_code_evaluator import ScilabCodeEvaluator @@ -13,7 +13,38 @@ from yaksh.evaluator_tests.test_python_evaluation import EvaluatorBaseTest class ScilabEvaluationTestCases(EvaluatorBaseTest): def setUp(self): tmp_in_dir_path = tempfile.mkdtemp() - self.test_case_data = [{"test_case": "scilab_files/test_add.sce", + self.tc_data = dedent(""" + mode(-1) + exec("function.sci",-1); + i = 0 + p = add(3,5); + correct = (p == 8); + if correct then + i=i+1 + end + disp("Input submitted 3 and 5") + disp("Expected output 8 got " + string(p)) + p = add(22,-20); + correct = (p==2); + if correct then + i=i+1 + end + disp("Input submitted 22 and -20") + disp("Expected output 2 got " + string(p)) + p =add(91,0); + correct = (p==91); + if correct then + i=i+1 + end + disp("Input submitted 91 and 0") + disp("Expected output 91 got " + string(p)) + if i==3 then + exit(5); + else + exit(3); + end + """) + self.test_case_data = [{"test_case": self.tc_data, "test_case_type": "standardtestcase", "weight": 0.0 }] diff --git a/yaksh/forms.py b/yaksh/forms.py index 6fbaf5d..1d18d29 100644 --- a/yaksh/forms.py +++ b/yaksh/forms.py @@ -9,6 +9,7 @@ from taggit.managers import TaggableManager from taggit.forms import TagField from django.forms.models import inlineformset_factory from django.db.models import Q +from textwrap import dedent try: from string import letters except ImportError: @@ -165,6 +166,58 @@ class QuizForm(forms.ModelForm): self.fields['prerequisite'].required = False self.fields['course'] = forms.ModelChoiceField( queryset=Course.objects.filter(id=course_id), empty_label=None) + self.fields["instructions"].initial = dedent("""\ + <p> + This examination system has been + developed with the intention of + making you learn programming and + be assessed in an interactive and + fun manner. 
+ You will be presented with a + series of programming questions + and problems that you will answer + online and get immediate + feedback for. + </p> + <p> + Here are some important + instructions and rules that you + should understand carefully.</p> + <ul> + <li>For any programming questions, + you can submit solutions as many + times as you want without a + penalty. You may skip questions + and solve them later.</li> + <li> You <strong>may</strong> + use your computer's Python/IPython + shell or an editor to solve the + problem and cut/paste the + solution to the web interface. + </li> + <li> <strong>You are not allowed + to use any internet resources, + i.e. no google etc.</strong> + </li> + <li> Do not copy or share the + questions or answers with anyone + until the exam is complete + <strong>for everyone</strong>. + </li> + <li> <strong>All</strong> your + attempts at the questions are + logged. Do not try to outsmart + and break the testing system. + If you do, we know who you are + and we will expel you from the + course. You have been warned. + </li> + </ul> + <p> + We hope you enjoy taking this + exam !!! + </p> + """) class Meta: model = Quiz diff --git a/yaksh/grader.py b/yaksh/grader.py index 0c057c2..086abb7 100644 --- a/yaksh/grader.py +++ b/yaksh/grader.py @@ -100,10 +100,10 @@ class Grader(object): self.setup() test_case_instances = self.get_evaluator_objects(kwargs) with change_dir(self.in_dir): - success, error, mark = self.safe_evaluate(test_case_instances) + success, error, weight = self.safe_evaluate(test_case_instances) self.teardown() - result = {'success': success, 'error': error, 'weight': mark} + result = {'success': success, 'error': error, 'weight': weight} return result # Private Protocol ########## @@ -144,7 +144,7 @@ class Grader(object): test_case_instance.compile_code() test_case_success, err, mark_fraction = test_case_instance.check_code() if test_case_success: - weight += mark_fraction + weight += mark_fraction * test_case_instance.weight else: error.append(err) test_case_success_status[idx] = test_case_success diff --git a/yaksh/java_code_evaluator.py b/yaksh/java_code_evaluator.py index 91e5840..e6dc628 100644 --- a/yaksh/java_code_evaluator.py +++ b/yaksh/java_code_evaluator.py @@ -26,18 +26,20 @@ class JavaCodeEvaluator(BaseEvaluator): self.user_answer = metadata.get('user_answer') self.file_paths = metadata.get('file_paths') self.partial_grading = metadata.get('partial_grading') - # Set test case data values self.test_case = test_case_data.get('test_case') self.weight = test_case_data.get('weight') def teardown(self): # Delete the created file. 
- os.remove(self.submit_code_path) + if os.path.exists(self.submit_code_path): + os.remove(self.submit_code_path) if os.path.exists(self.user_output_path): os.remove(self.user_output_path) if os.path.exists(self.ref_output_path): os.remove(self.ref_output_path) + if os.path.exists(self.test_code_path): + os.remove(self.test_code_path) if self.files: delete_files(self.files) @@ -57,10 +59,14 @@ class JavaCodeEvaluator(BaseEvaluator): if self.compiled_user_answer and self.compiled_test_code: return None else: + # create student code and moderator code file self.submit_code_path = self.create_submit_code_file('Test.java') - ref_code_path = self.test_case - clean_ref_code_path, clean_test_case_path = \ - self._set_test_code_file_path(ref_code_path) + self.test_code_path = self.create_submit_code_file('main.java') + self.write_to_submit_code_file(self.submit_code_path, + self.user_answer + ) + self.write_to_submit_code_file(self.test_code_path, self.test_case) + clean_ref_code_path = self.test_code_path if self.file_paths: self.files = copy_files(self.file_paths) if not isfile(clean_ref_code_path): @@ -71,9 +77,6 @@ class JavaCodeEvaluator(BaseEvaluator): return False, msg user_code_directory = os.getcwd() + '/' - self.write_to_submit_code_file(self.submit_code_path, - self.user_answer - ) ref_file_name = (clean_ref_code_path.split('/')[-1]).split('.')[0] self.user_output_path = self.set_file_paths(user_code_directory, 'Test' @@ -144,7 +147,7 @@ class JavaCodeEvaluator(BaseEvaluator): proc, stdout, stderr = ret if proc.returncode == 0: success, err = True, None - mark_fraction = float(seelf.weight) if self.partial_grading else 0.0 + mark_fraction = 1.0 if self.partial_grading else 0.0 else: err = stdout + "\n" + stderr else: diff --git a/yaksh/java_files/main_array_sum.java b/yaksh/java_files/main_array_sum.java deleted file mode 100644 index 5eae299..0000000 --- a/yaksh/java_files/main_array_sum.java +++ /dev/null @@ -1,36 +0,0 @@ -class main_array_sum -{ - public static <E> void check(E expect, E result) - { - if(result.equals(expect)) - { - System.out.println("Correct:\nOutput expected "+expect+" and got "+result); - } - else - { - System.out.println("Incorrect:\nOutput expected "+expect+" but got "+result); - System.exit(1); - } - } - public static void main(String arg[]) - { - int result; - Test t = new Test(); - int x[] = {0,0,0,0,0}; - result = t.array_sum(x); - System.out.println("Input submitted to the function: {0,0,0,0,0}"); - check(0, result); - int a[] = {1,2,3,4,5}; - result = t.array_sum(a); - System.out.println("Input submitted to the function: {1,2,3,4,5}"); - check(15, result); - int b[] = {1,2,3,0,0}; - result = t.array_sum(b); - System.out.println("Input submitted to the function: {1,2,3,0,0}"); - check(6, result); - int c[] = {1,1,1,1,1}; - result = t.array_sum(c); - System.out.println("Input submitted to the function: {1,1,1,1,1}"); - check(5, result); - } -} diff --git a/yaksh/java_files/main_fact.java b/yaksh/java_files/main_fact.java deleted file mode 100644 index 325dab6..0000000 --- a/yaksh/java_files/main_fact.java +++ /dev/null @@ -1,29 +0,0 @@ -class main_fact -{ - public static <E> void check(E expect, E result) - { - if(result.equals(expect)) - { - System.out.println("Correct:\nOutput expected "+expect+" and got "+result); - } - else - { - System.out.println("Incorrect:\nOutput expected "+expect+" but got "+result); - System.exit(1); - } - } - public static void main(String arg[]) - { - Test t = new Test(); - int result; - result = t.factorial(0); - 
System.out.println("Input submitted to the function: 0"); - check(1, result); - result = t.factorial(3); - System.out.println("Input submitted to the function: 3"); - check(6, result); - result = t.factorial(4); - System.out.println("Input submitted to the function: 4"); - check(24, result); - } -} diff --git a/yaksh/java_files/main_great.java b/yaksh/java_files/main_great.java deleted file mode 100644 index 4bfcb1f..0000000 --- a/yaksh/java_files/main_great.java +++ /dev/null @@ -1,39 +0,0 @@ -class main_great -{ - public static <E> void check(E expect, E result) - { - if(result.equals(expect)) - { - System.out.println("Correct:\nOutput expected "+expect+" and got "+result); - } - else - { - System.out.println("Incorrect:\nOutput expected "+expect+" but got "+result); - System.exit(1); - } - } - public static void main(String arg[]) - { - Test t = new Test(); - int result; - result = t.greatest(1, 3, 4); - System.out.println("Input submitted to the function: 1, 3, 4"); - check(4, result); - result = t.greatest(5, 10, 3); - System.out.println("Input submitted to the function: 5, 10, 3"); - check(10, result); - result = t.greatest(6, 1, 4); - System.out.println("Input submitted to the function: 6, 1, 4"); - check(6, result); - result = t.greatest(6, 11, 14); - System.out.println("Input submitted to the function: 6, 11, 14"); - check(14, result); - result = t.greatest(3, 31, 4); - System.out.println("Input submitted to the function: 3, 31, 4"); - check(31, result); - result = t.greatest(26, 13, 3); - System.out.println("Input submitted to the function: 26, 13, 3"); - check(26, result); - - } -} diff --git a/yaksh/java_files/main_hello_name.java b/yaksh/java_files/main_hello_name.java deleted file mode 100644 index 84bb282..0000000 --- a/yaksh/java_files/main_hello_name.java +++ /dev/null @@ -1,29 +0,0 @@ -class main_hello_name -{ - public static <E> void check(E expect, E result) - { - if(result.equals(expect)) - { - System.out.println("Correct:\nOutput expected "+expect+" and got "+result); - } - else - { - System.out.println("Incorrect:\nOutput expected "+expect+" but got "+result); - System.exit(1); - } - } - public static void main(String arg[]) - { - Test t = new Test(); - String result; - result = t.hello_name("Raj"); - System.out.println("Input submitted to the function: 'Raj'"); - check("hello Raj", result); - result = t.hello_name("Pratham"); - System.out.println("Input submitted to the function: 'Pratham'"); - check("hello Pratham", result); - result = t.hello_name("Ram"); - System.out.println("Input submitted to the function: 'Ram'"); - check("hello Ram", result); - } -} diff --git a/yaksh/java_files/main_lastDigit.java b/yaksh/java_files/main_lastDigit.java deleted file mode 100644 index 05439e2..0000000 --- a/yaksh/java_files/main_lastDigit.java +++ /dev/null @@ -1,36 +0,0 @@ -class main_lastDigit -{ - public static <E> void check(E expect, E result) - { - if(result.equals(expect)) - { - System.out.println("Correct:\nOutput expected "+expect+" and got "+result+"\n"); - } - else - { - System.out.println("Incorrect:\nOutput expected "+expect+" but got "+result+"\n"); - System.exit(1); - } - } - public static void main(String arg[]) - { - Test t = new Test(); - boolean result; - result= t.lastDigit(12, 2, 13); - System.out.println("Input submitted to the function: 12, 2, 13"); - check(true, result); - result = t.lastDigit(11, 52, 32); - System.out.println("Input submitted to the function: 11, 52, 32"); - check(true, result); - result = t.lastDigit(6, 34, 22); - 
System.out.println("Input submitted to the function: 6, 34, 22"); - check(false, result); - result = t.lastDigit(6, 46, 26); - System.out.println("Input submitted to the function: 63"); - check(true, result); - result = t.lastDigit(91, 90, 92); - System.out.println("Input submitted to the function: 91"); - check(false, result); - - } -} diff --git a/yaksh/java_files/main_moreThan30.java b/yaksh/java_files/main_moreThan30.java deleted file mode 100644 index 7da31cb..0000000 --- a/yaksh/java_files/main_moreThan30.java +++ /dev/null @@ -1,36 +0,0 @@ -class main_moreThan30 -{ - public static <E> void check(E expect, E result) - { - if(result.equals(expect)) - { - System.out.println("Correct:\nOutput expected "+expect+" and got "+result+"\n"); - } - else - { - System.out.println("Incorrect:\nOutput expected "+expect+" but got "+result+"\n"); - System.exit(1); - } - } - public static void main(String arg[]) - { - Test t = new Test(); - boolean result; - result= t.moreThan30(30); - System.out.println("Input submitted to the function: 30"); - check(false, result); - result = t.moreThan30(151); - System.out.println("Input submitted to the function: 151"); - check(true, result); - result = t.moreThan30(66); - System.out.println("Input submitted to the function: 66"); - check(false, result); - result = t.moreThan30(63); - System.out.println("Input submitted to the function: 63"); - check(true, result); - result = t.moreThan30(91); - System.out.println("Input submitted to the function: 91"); - check(true, result); - - } -} diff --git a/yaksh/java_files/main_palindrome.java b/yaksh/java_files/main_palindrome.java deleted file mode 100644 index c0745f9..0000000 --- a/yaksh/java_files/main_palindrome.java +++ /dev/null @@ -1,29 +0,0 @@ -class main_palindrome -{ - public static <E> void check(E expect, E result) - { - if(result.equals(expect)) - { - System.out.println("Correct:\nOutput expected "+expect+" and got "+result+"\n"); - } - else - { - System.out.println("Incorrect:\nOutput expected "+expect+" but got "+result+"\n"); - System.exit(1); - } - } - public static void main(String arg[]) - { - Test t = new Test(); - boolean result; - result= t.palindrome(123); - System.out.println("Input submitted to the function: 123"); - check(false, result); - result = t.palindrome(151); - System.out.println("Input submitted to the function: 151"); - check(true, result); - result = t.palindrome(23432); - System.out.println("Input submitted to the function: 23432"); - check(true, result); - } -} diff --git a/yaksh/java_files/main_square.java b/yaksh/java_files/main_square.java deleted file mode 100644 index 5cb8c35..0000000 --- a/yaksh/java_files/main_square.java +++ /dev/null @@ -1,32 +0,0 @@ -class main_square -{ - public static <E> void check(E expect, E result) - { - if(result.equals(expect)) - { - System.out.println("Correct:\nOutput expected "+expect+" and got "+result); - } - else - { - System.out.println("Incorrect:\nOutput expected "+expect+" but got "+result); - System.exit(1); - } - } - public static void main(String arg[]) - { - Test t = new Test(); - int result, input, output; - input = 0; output = 0; - result = t.square_num(input); - System.out.println("Input submitted to the function: "+input); - check(output, result); - input = 5; output = 25; - result = t.square_num(input); - System.out.println("Input submitted to the function: "+input); - check(output, result); - input = 6; output = 36; - result = t.square_num(input); - System.out.println("Input submitted to the function: "+input); - check(output, 
result); - } -} diff --git a/yaksh/java_files/read_file.java b/yaksh/java_files/read_file.java deleted file mode 100644 index 21a5836..0000000 --- a/yaksh/java_files/read_file.java +++ /dev/null @@ -1,26 +0,0 @@ -class read_file -{ - public static <E> void check(E expect, E result) - { - if(result.equals(expect)) - { - System.out.println("Correct:\nOutput expected "+expect+" and got "+result); - } - else - { - System.out.println("Incorrect:\nOutput expected "+expect+" but got "+result); - System.exit(1); - } - } - public static void main(String arg[]) - { - String result = ""; - Test t = new Test(); - try{ - result = t.readFile();} - catch(Exception e){ - System.out.print(e); - } - check("2", result); - } -} diff --git a/yaksh/java_stdio_evaluator.py b/yaksh/java_stdio_evaluator.py index a854847..48f265d 100644 --- a/yaksh/java_stdio_evaluator.py +++ b/yaksh/java_stdio_evaluator.py @@ -85,5 +85,5 @@ class JavaStdIOEvaluator(StdIOEvaluator): err = err + "\n" + e except: err = err + "\n" + stdnt_stderr - mark_fraction = float(self.weight) if self.partial_grading and success else 0.0 + mark_fraction = 1.0 if self.partial_grading and success else 0.0 return success, err, mark_fraction diff --git a/yaksh/management/commands/add_group.py b/yaksh/management/commands/add_group.py index 03ef103..624ff3c 100644 --- a/yaksh/management/commands/add_group.py +++ b/yaksh/management/commands/add_group.py @@ -15,7 +15,7 @@ class Command(BaseCommand): help = 'Adds the moderator group' def handle(self, *args, **options): - app = 'yaksh' + app_label = 'yaksh' group = Group(name='moderator') try: group.save() @@ -23,11 +23,10 @@ class Command(BaseCommand): raise CommandError("The group already exits") else: # Get the models for the given app - content_types = ContentType.objects.filter(app_label=app) + content_types = ContentType.objects.filter(app_label=app_label) # Get list of permissions for the models - permission_list = Permission.objects.filter(content_type=content_types) - for permission in permission_list: - group.permissions.add(permission) - group.save() + permission_list = Permission.objects.filter(content_type__in=content_types) + group.permissions.add(*permission_list) + group.save() self.stdout.write('Moderator group added successfully') diff --git a/yaksh/models.py b/yaksh/models.py index 0d8e7fe..d2a85f6 100644 --- a/yaksh/models.py +++ b/yaksh/models.py @@ -81,10 +81,6 @@ def has_profile(user): def get_upload_dir(instance, filename): return "question_%s/%s" % (instance.question.id, filename) -def get_quiz_instructions_info(): - file_path = os.path.join(os.getcwd(), "Quiz_instructions.txt") - with open(file_path, 'r') as file: - return file.read() ############################################################################### class CourseManager(models.Manager): @@ -317,17 +313,14 @@ class Question(models.Model): def get_test_cases(self, **kwargs): tc_list = [] - for tc in self.testcase_set.all(): - test_case_type = tc.type + for tc in self.testcase_set.values_list("type", flat=True).distinct(): test_case_ctype = ContentType.objects.get(app_label="yaksh", - model=test_case_type - ) - test_case = test_case_ctype.get_object_for_this_type( + model=tc) + test_case = test_case_ctype.get_all_objects_for_this_type( question=self, **kwargs ) - tc_list.append(test_case) - + tc_list.extend(test_case) return tc_list def get_test_case(self, **kwargs): @@ -553,7 +546,7 @@ class Quiz(models.Model): is_trial = models.BooleanField(default=False) instructions = models.TextField('Instructions for Students', - 
default=get_quiz_instructions_info) + default=None, blank=True, null=True) view_answerpaper = models.BooleanField('Allow student to view their answer\ paper', default=False) @@ -1154,19 +1147,22 @@ class TestCase(models.Model): class StandardTestCase(TestCase): test_case = models.TextField() weight = models.FloatField(default=1.0) + test_case_args = models.TextField(help_text="<b>Command Line arguments for bash only</b>", + blank=True) def get_field_value(self): return {"test_case_type": "standardtestcase", "test_case": self.test_case, - "weight": self.weight} + "weight": self.weight, + "test_case_args": self.test_case_args} def __str__(self): return u'Standard TestCase | Test Case: {0}'.format(self.test_case) class StdIOBasedTestCase(TestCase): - expected_input = models.CharField(max_length=100, blank=True) - expected_output = models.CharField(max_length=100) + expected_input = models.TextField(max_length=100, blank=True) + expected_output = models.TextField(max_length=100) weight = models.IntegerField(default=1.0) def get_field_value(self): diff --git a/yaksh/python_assertion_evaluator.py b/yaksh/python_assertion_evaluator.py index 749a6ec..ae86c46 100644 --- a/yaksh/python_assertion_evaluator.py +++ b/yaksh/python_assertion_evaluator.py @@ -91,6 +91,6 @@ class PythonAssertionEvaluator(BaseEvaluator): else: success = True err = None - mark_fraction = float(self.weight) if self.partial_grading else 0.0 + mark_fraction = 1.0 if self.partial_grading else 0.0 del tb return success, err, mark_fraction diff --git a/yaksh/scilab_code_evaluator.py b/yaksh/scilab_code_evaluator.py index bf16c84..f5c81b5 100644 --- a/yaksh/scilab_code_evaluator.py +++ b/yaksh/scilab_code_evaluator.py @@ -16,7 +16,8 @@ class ScilabCodeEvaluator(BaseEvaluator): """Tests the Scilab code obtained from Code Server""" def __init__(self, metadata, test_case_data): self.files = [] - + self.submit_code_path = "" + self.test_code_path = "" # Set metadata values self.user_answer = metadata.get('user_answer') self.file_paths = metadata.get('file_paths') @@ -28,23 +29,27 @@ class ScilabCodeEvaluator(BaseEvaluator): def teardown(self): # Delete the created file. 
- os.remove(self.submit_code_path) + if os.path.exists(self.submit_code_path): + os.remove(self.submit_code_path) + if os.path.exists(self.test_code_path): + os.remove(self.test_code_path) if self.files: delete_files(self.files) def check_code(self): self.submit_code_path = self.create_submit_code_file('function.sci') + self.test_code_path = self.create_submit_code_file('main.sci') if self.file_paths: self.files = copy_files(self.file_paths) ref_code_path = self.test_case - clean_ref_path, clean_test_case_path = \ - self._set_test_code_file_path(ref_code_path) + clean_ref_path, clean_test_case_path = self.test_code_path, None self.user_answer, terminate_commands = \ self._remove_scilab_exit(self.user_answer.lstrip()) success = False - test_case_weight = 0.0 + mark_fraction = 0.0 self.write_to_submit_code_file(self.submit_code_path, self.user_answer) + self.write_to_submit_code_file(self.test_code_path, self.test_case) # Throw message if there are commmands that terminates scilab add_err = "" if terminate_commands: @@ -70,13 +75,13 @@ class ScilabCodeEvaluator(BaseEvaluator): stdout = self._strip_output(stdout) if proc.returncode == 5: success, err = True, None - test_case_weight = float(self.weight) if self.partial_grading else 0.0 + mark_fraction = 1.0 if self.partial_grading else 0.0 else: err = add_err + stdout else: err = add_err + stderr - return success, err, test_case_weight + return success, err, mark_fraction def _remove_scilab_exit(self, string): """ diff --git a/yaksh/scilab_files/test_add.sce b/yaksh/scilab_files/test_add.sce deleted file mode 100644 index a317cdb..0000000 --- a/yaksh/scilab_files/test_add.sce +++ /dev/null @@ -1,29 +0,0 @@ -mode(-1) -exec("function.sci",-1); -i = 0 -p = add(3,5); -correct = (p == 8); -if correct then - i=i+1 -end -disp("Input submitted 3 and 5") -disp("Expected output 8 got " + string(p)) -p = add(22,-20); -correct = (p==2); -if correct then - i=i+1 -end -disp("Input submitted 22 and -20") -disp("Expected output 2 got " + string(p)) -p =add(91,0); -correct = (p==91); -if correct then - i=i+1 -end -disp("Input submitted 91 and 0") -disp("Expected output 91 got " + string(p)) -if i==3 then - exit(5); -else - exit(3); -end diff --git a/yaksh/templates/yaksh/add_question.html b/yaksh/templates/yaksh/add_question.html index 57e5e78..77a7b3a 100644 --- a/yaksh/templates/yaksh/add_question.html +++ b/yaksh/templates/yaksh/add_question.html @@ -34,8 +34,9 @@ <input type="checkbox" name="extract" value="{{file.id}}" >{% if file.extract %} dont extract{% else %} extract{% endif %}</input> <input type="checkbox" name="hide" value="{{file.id}}" >{% if file.hide %} show{% else %} - hide{% endif %}</input><br> + hide{% endif %}</input> <a href="{{file.file.url}}">{{ file.file.name }}</a> + <br> {% endfor %}{% endif %} </table></center> {% for formset in formsets %} diff --git a/yaksh/templates/yaksh/grade_user.html b/yaksh/templates/yaksh/grade_user.html index 6fb8187..ec8c244 100644 --- a/yaksh/templates/yaksh/grade_user.html +++ b/yaksh/templates/yaksh/grade_user.html @@ -130,7 +130,7 @@ Status : <b style="color: green;"> Passed </b><br/> {% if question.type == "mcq" or question.type == "mcc" %} <h5> <u>Choices:</u></h5> {% for testcase in question.get_test_cases %} - <br/><strong>{{ forloop.counter }}. {{ testcase.options }}</strong> + <br/><strong>{{ forloop.counter }}. 
{{ testcase.options|safe }}</strong> {% endfor %} {% else %} <h5> <u>Test cases: </u></h5> diff --git a/yaksh/templates/yaksh/monitor.html b/yaksh/templates/yaksh/monitor.html index 7a3297b..0ad6401 100644 --- a/yaksh/templates/yaksh/monitor.html +++ b/yaksh/templates/yaksh/monitor.html @@ -1,4 +1,5 @@ {% extends "manage.html" %} +{% load custom_filters %} {% block pagetitle %} Quiz results {% endblock pagetitle %} @@ -49,6 +50,14 @@ $(document).ready(function() {% if papers %} <p>Number of papers: {{ papers|length }} </p> +{% completed papers as completed_papers %} + {# template tag used to get the count of completed papers #} + <p>Papers completed: <b> {{ completed_papers }} </b></p> + +{% inprogress papers as inprogress_papers %} + {# template tag used to get the count of inprogress papers #} + <p>Papers in progress:<b> {{ inprogress_papers }} </b></p> + <p><a href="{{URL_ROOT}}/exam/manage/statistics/question/{{papers.0.question_paper.id}}">Question Statisitics</a></p> <p><a href="{{URL_ROOT}}/exam/manage/monitor/download_csv/{{papers.0.question_paper.id}}">Download CSV</a></p> <table id="result-table" class="tablesorter table"> diff --git a/yaksh/templates/yaksh/question.html b/yaksh/templates/yaksh/question.html index 0279f0d..ba64b63 100644 --- a/yaksh/templates/yaksh/question.html +++ b/yaksh/templates/yaksh/question.html @@ -140,9 +140,9 @@ function call_skip(url) <div class="panel-heading"> <h4><u> {{ question.summary }} {% if question.type == "mcq" %} - (MCQ) + (Multiple Choice Questions) {% elif question.type == "mcc" %} - (MCC) + (Multiple Correct Choices) {% elif question.type == "code" %} (PROGRAMMING) {% elif question.type == "upload" %} @@ -173,7 +173,7 @@ function call_skip(url) </p> {% endif %} {% for test_case in test_cases %} - <input name="answer" type="radio" value="{{ test_case.options }}" />{{ test_case.options }} <br/> + <input name="answer" type="radio" value="{{ test_case.options }}" />{{ test_case.options|safe }} <br/> {% endfor %} {% endif %} {% if question.type == "mcc" %} @@ -189,7 +189,7 @@ function call_skip(url) </p> {% endif %} {% for test_case in test_cases %} - <input name="answer" type="checkbox" value="{{ test_case.options }}"> {{ test_case.options }} + <input name="answer" type="checkbox" value="{{ test_case.options }}"> {{ test_case.options|safe }} <br> {% endfor %} {% endif %} diff --git a/yaksh/templates/yaksh/user_data.html b/yaksh/templates/yaksh/user_data.html index 856433d..9c11dd9 100644 --- a/yaksh/templates/yaksh/user_data.html +++ b/yaksh/templates/yaksh/user_data.html @@ -66,7 +66,7 @@ User IP address: {{ paper.user_ip }} {% if question.type == "mcq" or question.type == "mcc" %} <h5> <u>Choices:</u></h5> {% for testcase in question.get_test_cases %} - <br/><strong>{{ forloop.counter }}. {{ testcase.options }}</strong> + <br/><strong>{{ forloop.counter }}. {{ testcase.options|safe }}</strong> {% endfor %} {% else %} <h5> <u>Test cases: </u></h5> diff --git a/yaksh/templates/yaksh/view_answerpaper.html b/yaksh/templates/yaksh/view_answerpaper.html index 9dfbda0..8dec5b3 100644 --- a/yaksh/templates/yaksh/view_answerpaper.html +++ b/yaksh/templates/yaksh/view_answerpaper.html @@ -42,7 +42,7 @@ {% if question.type == "mcq" or question.type == "mcc" %} <h5> <u>Choices:</u></h5> {% for testcase in question.get_test_cases %} - <br/><strong>{{ forloop.counter }}. {{ testcase.options }}</strong> + <br/><strong>{{ forloop.counter }}. 
{{ testcase.options|safe }}</strong> {% endfor %} {%endif%} diff --git a/yaksh/templatetags/custom_filters.py b/yaksh/templatetags/custom_filters.py index 9d7b939..f610cc6 100644 --- a/yaksh/templatetags/custom_filters.py +++ b/yaksh/templatetags/custom_filters.py @@ -10,4 +10,12 @@ def escape_quotes(value): escape_single_quotes = value.replace("'", "\\'") escape_single_and_double_quotes = escape_single_quotes.replace('"', '\\"') - return escape_single_and_double_quotes
\ No newline at end of file + return escape_single_and_double_quotes + +@register.assignment_tag(name="completed") +def completed(answerpaper): + return answerpaper.filter(status="completed").count() + +@register.assignment_tag(name="inprogress") +def inprogress(answerpaper): + return answerpaper.filter(status="inprogress").count() diff --git a/yaksh/test_models.py b/yaksh/test_models.py index 6764dd0..e95528b 100644 --- a/yaksh/test_models.py +++ b/yaksh/test_models.py @@ -145,6 +145,7 @@ class QuestionTestCases(unittest.TestCase): self.user_answer = "demo_answer" self.test_case_upload_data = [{"test_case": "assert fact(3)==6", "test_case_type": "standardtestcase", + "test_case_args": "", "weight": 1.0 }] questions_data = [{"snippet": "def fact()", "active": True, @@ -895,6 +896,7 @@ class TestCaseTestCases(unittest.TestCase): }, 'test_case_data': [{'test_case': 'assert myfunc(12, 13) == 15', 'test_case_type': 'standardtestcase', + 'test_case_args': "", 'weight': 1.0 }] } diff --git a/yaksh/test_views.py b/yaksh/test_views.py index 2419591..e052441 100644 --- a/yaksh/test_views.py +++ b/yaksh/test_views.py @@ -146,9 +146,6 @@ class TestAddQuiz(TestCase): self.mod_group = Group.objects.create(name='moderator') tzone = pytz.timezone('UTC') - file_path = os.path.join(os.getcwd(), "Quiz_instructions.txt") - with open(file_path, 'r') as file: - self.file_data = file.read() # Create Moderator with profile self.user_plaintext_pass = 'demo' self.user = User.objects.create_user( @@ -187,7 +184,7 @@ class TestAddQuiz(TestCase): self.pre_req_quiz = Quiz.objects.create( start_date_time=datetime(2014, 2, 1, 5, 8, 15, 0, tzone), end_date_time=datetime(2015, 10, 9, 10, 8, 15, 0, tzone), - duration=30, active=True, instructions=self.file_data, + duration=30, active=True, instructions="Demo Instructions", attempts_allowed=-1, time_between_attempts=0, description='pre requisite quiz', pass_criteria=40, language='Python', prerequisite=None, @@ -197,7 +194,7 @@ class TestAddQuiz(TestCase): self.quiz = Quiz.objects.create( start_date_time=datetime(2014, 10, 9, 10, 8, 15, 0, tzone), end_date_time=datetime(2015, 10, 9, 10, 8, 15, 0, tzone), - duration=30, active=True, instructions=self.file_data, + duration=30, active=True, instructions="Demo Instructions", attempts_allowed=-1, time_between_attempts=0, description='demo quiz', pass_criteria=40, language='Python', prerequisite=self.pre_req_quiz, @@ -274,7 +271,7 @@ class TestAddQuiz(TestCase): 'description': 'updated demo quiz', 'pass_criteria': 40, 'language': 'java', - 'instructions': self.file_data, + 'instructions': "Demo Instructions", 'prerequisite': self.pre_req_quiz.id, 'course': self.course.id } @@ -321,7 +318,7 @@ class TestAddQuiz(TestCase): 'description': 'new demo quiz', 'pass_criteria': 50, 'language': 'python', - 'instructions': self.file_data, + 'instructions': "Demo Instructions", 'prerequisite': self.pre_req_quiz.id, 'course': self.course.id } |
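
For reference, the mark aggregation touched by several hunks in this merge (grader.py, java_code_evaluator.py, java_stdio_evaluator.py, python_assertion_evaluator.py, scilab_code_evaluator.py) now works in two steps: each evaluator returns a bare mark fraction (1.0 when the test case passes and partial grading is enabled, otherwise 0.0), and Grader.safe_evaluate multiplies that fraction by the test case's own weight before summing. The sketch below illustrates that flow under simplified assumptions; FakeEvaluator and aggregate_weight are hypothetical stand-ins, not classes from the project.

    # Minimal sketch of the weighting scheme after this merge.
    # FakeEvaluator and aggregate_weight are hypothetical stand-ins for a
    # BaseEvaluator subclass and the summing loop in Grader.safe_evaluate.

    class FakeEvaluator(object):
        def __init__(self, weight, passes, partial_grading=True):
            self.weight = weight
            self._passes = passes
            self.partial_grading = partial_grading

        def check_code(self):
            success = self._passes
            err = None if success else "test case failed"
            # As in the merged evaluators, the returned fraction no longer
            # carries the weight; it is just 1.0 or 0.0.
            mark_fraction = 1.0 if self.partial_grading and success else 0.0
            return success, err, mark_fraction


    def aggregate_weight(test_case_instances):
        weight = 0.0
        errors = []
        for tc in test_case_instances:
            success, err, mark_fraction = tc.check_code()
            if success:
                # The weight is applied exactly once, here in the grader.
                weight += mark_fraction * tc.weight
            else:
                errors.append(err)
        return len(errors) == 0, errors, weight


    if __name__ == "__main__":
        cases = [FakeEvaluator(2.0, True),
                 FakeEvaluator(1.0, True),
                 FakeEvaluator(1.0, False)]
        print(aggregate_weight(cases))  # -> (False, ['test case failed'], 3.0)

With this split, an evaluator that kept returning float(self.weight) as its fraction would double-count the weight once the grader started multiplying by test_case_instance.weight, which is why the evaluators in this merge return 1.0 instead.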