summaryrefslogtreecommitdiff
path: root/testapp/exam
diff options
context:
space:
mode:
authorPrabhu Ramachandran2015-05-12 20:20:43 +0530
committerPrabhu Ramachandran2015-05-12 20:20:43 +0530
commita022e0145ec8fb1622d58c2e2281c016b1d45b01 (patch)
tree1c0c3f2e8605d6f36405c57cbe5de9a895a47958 /testapp/exam
parentcd9f2542d09db0e4a352dd410f626f27e23c37e4 (diff)
parent5b23647de575fd90552807260a4b8e0a96ab6afe (diff)
downloadonline_test-a022e0145ec8fb1622d58c2e2281c016b1d45b01.tar.gz
online_test-a022e0145ec8fb1622d58c2e2281c016b1d45b01.tar.bz2
online_test-a022e0145ec8fb1622d58c2e2281c016b1d45b01.zip
Merge pull request #41 from ankitjavalkar/code-server-redesign-mymaster2
Code server redesign
Diffstat (limited to 'testapp/exam')
-rw-r--r--testapp/exam/admin.py3
-rw-r--r--testapp/exam/bash_code_evaluator.py122
-rw-r--r--testapp/exam/bash_files/sample.args2
-rwxr-xr-xtestapp/exam/bash_files/sample.sh2
-rwxr-xr-xtestapp/exam/c_cpp_files/main.cpp32
-rwxr-xr-xtestapp/exam/c_cpp_files/main2.c30
-rwxr-xr-xtestapp/exam/c_cpp_files/main_array_check.cpp34
-rwxr-xr-xtestapp/exam/c_cpp_files/main_array_check_all.cpp34
-rwxr-xr-xtestapp/exam/c_cpp_files/main_array_sum.cpp34
-rwxr-xr-xtestapp/exam/c_cpp_files/main_blackJack.cpp41
-rwxr-xr-xtestapp/exam/c_cpp_files/main_check_digit.cpp32
-rwxr-xr-xtestapp/exam/c_cpp_files/main_count667.cpp42
-rwxr-xr-xtestapp/exam/c_cpp_files/main_count7.cpp42
-rwxr-xr-xtestapp/exam/c_cpp_files/main_fact.cpp32
-rwxr-xr-xtestapp/exam/c_cpp_files/main_greatest.cpp44
-rwxr-xr-xtestapp/exam/c_cpp_files/main_hello_name.c29
-rwxr-xr-xtestapp/exam/c_cpp_files/main_lessThan9.cpp38
-rwxr-xr-xtestapp/exam/c_cpp_files/main_mean.cpp38
-rwxr-xr-xtestapp/exam/c_cpp_files/main_palindrome.cpp32
-rwxr-xr-xtestapp/exam/c_cpp_files/main_roundTo10.cpp41
-rwxr-xr-xtestapp/exam/c_cpp_files/main_specialSum.cpp41
-rwxr-xr-xtestapp/exam/c_cpp_files/main_within.cpp38
-rw-r--r--testapp/exam/code_evaluator.py206
-rwxr-xr-xtestapp/exam/code_server.py752
-rw-r--r--testapp/exam/cpp_code_evaluator.py125
-rw-r--r--testapp/exam/forms.py46
-rw-r--r--testapp/exam/java_code_evaluator.py127
-rw-r--r--testapp/exam/java_files/main_array_sum.java36
-rw-r--r--testapp/exam/java_files/main_fact.java29
-rw-r--r--testapp/exam/java_files/main_great.java39
-rw-r--r--testapp/exam/java_files/main_hello_name.java29
-rw-r--r--testapp/exam/java_files/main_lastDigit.java36
-rw-r--r--testapp/exam/java_files/main_moreThan30.java36
-rw-r--r--testapp/exam/java_files/main_palindrome.java29
-rw-r--r--testapp/exam/java_files/main_square.java32
-rw-r--r--testapp/exam/language_registry.py36
-rw-r--r--testapp/exam/models.py63
-rw-r--r--testapp/exam/python_code_evaluator.py61
-rw-r--r--testapp/exam/scilab_code_evaluator.py95
-rw-r--r--testapp/exam/scilab_files/test_add.sce29
-rw-r--r--testapp/exam/settings.py8
-rw-r--r--testapp/exam/static/exam/js/add_question.js9
-rw-r--r--testapp/exam/templates/exam/add_question.html21
-rw-r--r--testapp/exam/templates/exam/edit_question.html36
-rw-r--r--testapp/exam/tests.py55
-rw-r--r--testapp/exam/views.py186
-rw-r--r--testapp/exam/xmlrpc_clients.py29
47 files changed, 2085 insertions, 848 deletions
diff --git a/testapp/exam/admin.py b/testapp/exam/admin.py
index 060859a..86a10af 100644
--- a/testapp/exam/admin.py
+++ b/testapp/exam/admin.py
@@ -1,5 +1,6 @@
-from testapp.exam.models import Question, Quiz
+from testapp.exam.models import Question, Quiz, TestCase
from django.contrib import admin
admin.site.register(Question)
+admin.site.register(TestCase)
admin.site.register(Quiz)
diff --git a/testapp/exam/bash_code_evaluator.py b/testapp/exam/bash_code_evaluator.py
new file mode 100644
index 0000000..23c0ae5
--- /dev/null
+++ b/testapp/exam/bash_code_evaluator.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python
+import traceback
+import pwd
+import os
+from os.path import join, isfile
+import subprocess
+import importlib
+
+# local imports
+from code_evaluator import CodeEvaluator
+
+
+class BashCodeEvaluator(CodeEvaluator):
+ """Tests the Bash code obtained from Code Server"""
+ def __init__(self, test_case_data, test, language, user_answer,
+ ref_code_path=None, in_dir=None):
+ super(BashCodeEvaluator, self).__init__(test_case_data, test, language, user_answer,
+ ref_code_path, in_dir)
+ self.submit_path = self.create_submit_code_file('submit.sh')
+ self.test_case_args = self._setup()
+
+ # Private Protocol ##########
+ def _setup(self):
+ super(BashCodeEvaluator, self)._setup()
+
+ self._set_file_as_executable(self.submit_path)
+ get_ref_path, get_test_case_path = self.ref_code_path.strip().split(',')
+ get_ref_path = get_ref_path.strip()
+ get_test_case_path = get_test_case_path.strip()
+ ref_path, test_case_path = self._set_test_code_file_path(get_ref_path,
+ get_test_case_path)
+
+ return ref_path, self.submit_path, test_case_path
+
+ def _teardown(self):
+ # Delete the created file.
+ super(BashCodeEvaluator, self)._teardown()
+ os.remove(self.submit_path)
+
+ def _check_code(self, ref_path, submit_path,
+ test_case_path=None):
+ """ Function validates student script using instructor script as
+ reference. Test cases can optionally be provided. The first argument
+ ref_path, is the path to instructor script, it is assumed to
+ have executable permission. The second argument submit_path, is
+ the path to the student script, it is assumed to have executable
+ permission. The Third optional argument is the path to test the
+ scripts. Each line in this file is a test case and each test case is
+ passed to the script as standard arguments.
+
+ Returns
+ --------
+
+ returns (True, "Correct answer") : If the student script passes all
+ test cases/have same output, when compared to the instructor script
+
+ returns (False, error_msg): If the student script fails a single
+ test/have dissimilar output, when compared to the instructor script.
+
+ Returns (False, error_msg): If mandatory arguments are not files or if
+ the required permissions are not given to the file(s).
+
+ """
+ if not isfile(ref_path):
+ return False, "No file at %s or Incorrect path" % ref_path
+ if not isfile(submit_path):
+ return False, "No file at %s or Incorrect path" % submit_path
+ if not os.access(ref_path, os.X_OK):
+ return False, "Script %s is not executable" % ref_path
+ if not os.access(submit_path, os.X_OK):
+ return False, "Script %s is not executable" % submit_path
+
+ success = False
+
+ if test_case_path is None or "":
+ ret = self.run_command(ref_path, stdin=None,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ proc, inst_stdout, inst_stderr = ret
+ ret = self.run_command(submit_path, stdin=None,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ proc, stdnt_stdout, stdnt_stderr = ret
+ if inst_stdout == stdnt_stdout:
+ return True, "Correct answer"
+ else:
+ err = "Error: expected %s, got %s" % (inst_stderr,
+ stdnt_stderr)
+ return False, err
+ else:
+ if not isfile(test_case_path):
+ return False, "No test case at %s" % test_case_path
+ if not os.access(ref_path, os.R_OK):
+ return False, "Test script %s, not readable" % test_case_path
+ # valid_answer is True, so that we can stop once a test case fails
+ valid_answer = True
+ # loop_count has to be greater than or equal to one.
+ # Useful for caching things like empty test files,etc.
+ loop_count = 0
+ test_cases = open(test_case_path).readlines()
+ num_lines = len(test_cases)
+ for test_case in test_cases:
+ loop_count += 1
+ if valid_answer:
+ args = [ref_path] + [x for x in test_case.split()]
+ ret = self.run_command(args, stdin=None,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ proc, inst_stdout, inst_stderr = ret
+ args = [submit_path]+[x for x in test_case.split()]
+ ret = self.run_command(args, stdin=None,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ proc, stdnt_stdout, stdnt_stderr = ret
+ valid_answer = inst_stdout == stdnt_stdout
+ if valid_answer and (num_lines == loop_count):
+ return True, "Correct answer"
+ else:
+ err = "Error:expected %s, got %s" % (inst_stdout+inst_stderr,
+ stdnt_stdout+stdnt_stderr)
+ return False, err
+
diff --git a/testapp/exam/bash_files/sample.args b/testapp/exam/bash_files/sample.args
new file mode 100644
index 0000000..4d9f00d
--- /dev/null
+++ b/testapp/exam/bash_files/sample.args
@@ -0,0 +1,2 @@
+1 2
+2 1
diff --git a/testapp/exam/bash_files/sample.sh b/testapp/exam/bash_files/sample.sh
new file mode 100755
index 0000000..e935cb3
--- /dev/null
+++ b/testapp/exam/bash_files/sample.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+[[ $# -eq 2 ]] && echo $(( $1 + $2 )) && exit $(( $1 + $2 ))
diff --git a/testapp/exam/c_cpp_files/main.cpp b/testapp/exam/c_cpp_files/main.cpp
new file mode 100755
index 0000000..ebe1f08
--- /dev/null
+++ b/testapp/exam/c_cpp_files/main.cpp
@@ -0,0 +1,32 @@
+#include <stdio.h>
+#include <stdlib.h>
+
+extern int add(int, int);
+
+template <class T>
+
+void check(T expect, T result)
+{
+ if (expect == result)
+ {
+ printf("\nCorrect:\n Expected %d got %d \n",expect,result);
+ }
+ else
+ {
+ printf("\nIncorrect:\n Expected %d got %d \n",expect,result);
+ exit (1);
+ }
+}
+
+int main(void)
+{
+ int result;
+ result = add(0,0);
+ printf("Input submitted to the function: 0, 0");
+ check(0, result);
+ result = add(2,3);
+ printf("Input submitted to the function: 2 3");
+ check(5,result);
+ printf("All Correct\n");
+ return 0;
+}
diff --git a/testapp/exam/c_cpp_files/main2.c b/testapp/exam/c_cpp_files/main2.c
new file mode 100755
index 0000000..ccd1768
--- /dev/null
+++ b/testapp/exam/c_cpp_files/main2.c
@@ -0,0 +1,30 @@
+#include <stdio.h>
+#include <stdlib.h>
+
+extern int add(int, int, int);
+
+template <class T>
+void check(T expect,T result)
+{
+ if (expect == result)
+ {
+ printf("\nCorrect:\n Expected %d got %d \n",expect,result);
+ }
+ else
+ {
+ printf("\nIncorrect:\n Expected %d got %d \n",expect,result);
+ exit (0);
+ }
+}
+
+int main(void)
+{
+ int result;
+ result = add(0,0,0);
+ printf("Input submitted to the function: 0, 0, 0");
+ check(0, result);
+ result = add(2,3,3);
+ printf("Input submitted to the function: 2, 3, 3");
+ check(8,result);
+ printf("All Correct\n");
+}
diff --git a/testapp/exam/c_cpp_files/main_array_check.cpp b/testapp/exam/c_cpp_files/main_array_check.cpp
new file mode 100755
index 0000000..ea34fdd
--- /dev/null
+++ b/testapp/exam/c_cpp_files/main_array_check.cpp
@@ -0,0 +1,34 @@
+#include <stdio.h>
+#include <stdlib.h>
+
+extern bool array_check(int [], int);
+
+template <class T>
+
+void check(T expect,T result)
+{
+ if (expect == result)
+ {
+ printf("\nCorrect:\n Expected %d got %d \n",expect,result);
+ }
+ else
+ {
+ printf("\nIncorrect:\n Expected %d got %d \n",expect,result);
+ exit (1);
+ }
+}
+
+int main(void)
+{
+ bool result;
+ int a[] = {1,2,3,0,0};
+ result = array_check(a, 2);
+ printf("Input submitted to the function: {1, 2, 3, 0, 0} and index 2");
+ check(false, result);
+ int b[] = {1,2,3,4,5};
+ result = array_check(b, 3);
+ printf("Input submitted to the function: {1, 2, 3, 4, 5} and index 3");
+ check(true, result);
+ printf("All Correct\n");
+ return 0;
+}
diff --git a/testapp/exam/c_cpp_files/main_array_check_all.cpp b/testapp/exam/c_cpp_files/main_array_check_all.cpp
new file mode 100755
index 0000000..140578e
--- /dev/null
+++ b/testapp/exam/c_cpp_files/main_array_check_all.cpp
@@ -0,0 +1,34 @@
+#include <stdio.h>
+#include <stdlib.h>
+
+extern bool array_check_all(int []);
+
+template <class T>
+
+void check(T expect,T result)
+{
+ if (expect == result)
+ {
+ printf("\nCorrect:\n Expected %d got %d \n",expect,result);
+ }
+ else
+ {
+ printf("\nIncorrect:\n Expected %d got %d \n",expect,result);
+ exit (1);
+ }
+}
+
+int main(void)
+{
+ bool result;
+ int a[] = {1,2,3,2,8};
+ result = array_check_all(a);
+ printf("Input submitted to the function: {1, 2, 3, 2, 8}");
+ check(false, result);
+ int b[] = {4,2,32,4,56};
+ result = array_check_all(b);
+ printf("Input submitted to the function: {4, 2, 32, 4, 56}");
+ check(true, result);
+ printf("All Correct\n");
+ return 0;
+}
diff --git a/testapp/exam/c_cpp_files/main_array_sum.cpp b/testapp/exam/c_cpp_files/main_array_sum.cpp
new file mode 100755
index 0000000..55b2ebf
--- /dev/null
+++ b/testapp/exam/c_cpp_files/main_array_sum.cpp
@@ -0,0 +1,34 @@
+#include <stdio.h>
+#include <stdlib.h>
+
+extern int array_sum(int []);
+
+template <class T>
+
+void check(T expect,T result)
+{
+ if (expect == result)
+ {
+ printf("\nCorrect:\n Expected %d got %d \n",expect,result);
+ }
+ else
+ {
+ printf("\nIncorrect:\n Expected %d got %d \n",expect,result);
+ exit (1);
+ }
+}
+
+int main(void)
+{
+ int result;
+ int a[] = {1,2,3,0,0};
+ result = array_sum(a);
+ printf("Input submitted to the function: {1, 2, 3, 0, 0}");
+ check(6, result);
+ int b[] = {1,2,3,4,5};
+ result = array_sum(b);
+ printf("Input submitted to the function: {1, 2, 3, 4, 5}");
+ check(15,result);
+ printf("All Correct\n");
+ return 0;
+}
diff --git a/testapp/exam/c_cpp_files/main_blackJack.cpp b/testapp/exam/c_cpp_files/main_blackJack.cpp
new file mode 100755
index 0000000..cc54e78
--- /dev/null
+++ b/testapp/exam/c_cpp_files/main_blackJack.cpp
@@ -0,0 +1,41 @@
+#include <stdio.h>
+#include <stdlib.h>
+
+extern int blackJack(int, int);
+
+template <class T>
+
+void check(T expect, T result)
+{
+ if (expect == result)
+ {
+ printf("\nCorrect:\n Expected %d got %d \n",expect,result);
+ }
+ else
+ {
+ printf("\nIncorrect:\n Expected %d got %d \n",expect,result);
+ exit (1);
+ }
+}
+
+int main(void)
+{
+ int result;
+ result = blackJack(11, 12);
+ printf("Input submitted to the function: 11, 12");
+ check(12, result);
+ result = blackJack(15, 19);
+ printf("Input submitted to the function: 15, 19");
+ check(19, result);
+ result = blackJack(10, 21);
+ printf("Input submitted to the function: 10, 21");
+ check(21, result);
+ result = blackJack(31, 22);
+ printf("Input submitted to the function: 31, 22");
+ check(0, result);
+ result = blackJack(91, 61);
+ printf("Input submitted to the function: 91, 61");
+ check(0, result);
+ printf("All Correct\n");
+ return 0;
+}
diff --git a/testapp/exam/c_cpp_files/main_check_digit.cpp b/testapp/exam/c_cpp_files/main_check_digit.cpp
new file mode 100755
index 0000000..d3bf3d6
--- /dev/null
+++ b/testapp/exam/c_cpp_files/main_check_digit.cpp
@@ -0,0 +1,32 @@
+#include <stdio.h>
+#include <stdlib.h>
+
+extern bool check_digit(int, int);
+
+template <class T>
+
+void check(T expect, T result)
+{
+ if (expect == result)
+ {
+ printf("\nCorrect:\n Expected %d got %d \n",expect,result);
+ }
+ else
+ {
+ printf("\nIncorrect:\n Expected %d got %d \n",expect,result);
+ exit (1);
+ }
+}
+
+int main(void)
+{
+ bool result;
+ result = check_digit(12, 23);
+ printf("Input submitted to the function: 12, 23");
+ check(true, result);
+ result = check_digit(22, 11);
+ printf("Input submitted to the function: 121");
+ check(false, result);
+ printf("All Correct\n");
+ return 0;
+}
diff --git a/testapp/exam/c_cpp_files/main_count667.cpp b/testapp/exam/c_cpp_files/main_count667.cpp
new file mode 100755
index 0000000..f146e8c
--- /dev/null
+++ b/testapp/exam/c_cpp_files/main_count667.cpp
@@ -0,0 +1,42 @@
+#include <stdio.h>
+#include <stdlib.h>
+
+extern int count667(int[]);
+
+template <class T>
+
+void check(T expect, T result)
+{
+ if (expect == result)
+ {
+ printf("\nCorrect:\n Expected %d got %d \n",expect,result);
+ }
+ else
+ {
+ printf("\nIncorrect:\n Expected %d got %d \n",expect,result);
+ exit (1);
+ }
+}
+
+int main(void)
+{
+ int result;
+ int arr[5] = {2,6,4,5,6};
+ result = count667(arr);
+ printf("Input submitted to the function: [2, 6, 4, 5,6]");
+ check(0, result);
+ int arr2[5] = {6,6,2,17,9};
+ result = count667(arr2);
+ printf("Input submitted to the function: [6, 6, 2, 17, 9]");
+ check(1, result);
+ int arr3[5] = {6,6,6,7,1};
+ result = count667(arr3);
+ printf("Input submitted to the function: [6, 6, 7, 2, 1]");
+ check(3, result);
+ int arr4[5] = {6,7,7,6,6};
+ result = count667(arr4);
+ printf("Input submitted to the function: [6, 7, 7, 6, 6]");
+ check(2, result);
+ printf("All Correct\n");
+ return 0;
+}
diff --git a/testapp/exam/c_cpp_files/main_count7.cpp b/testapp/exam/c_cpp_files/main_count7.cpp
new file mode 100755
index 0000000..982e930
--- /dev/null
+++ b/testapp/exam/c_cpp_files/main_count7.cpp
@@ -0,0 +1,42 @@
+#include <stdio.h>
+#include <stdlib.h>
+
+extern int count7(int[]);
+
+template <class T>
+
+void check(T expect, T result)
+{
+ if (expect == result)
+ {
+ printf("\nCorrect:\n Expected %d got %d \n",expect,result);
+ }
+ else
+ {
+ printf("\nIncorrect:\n Expected %d got %d \n",expect,result);
+ exit (1);
+ }
+}
+
+int main(void)
+{
+ int result;
+ int arr[4] = {2,3,4,5};
+ result = count7(arr);
+ printf("Input submitted to the function: [2, 3, 4, 5]");
+ check(0, result);
+ int arr2[4] = {1,2,17,9};
+ result = count7(arr2);
+ printf("Input submitted to the function: [1, 2, 17, 9]");
+ check(0, result);
+ int arr3[4] = {7,9,2,1};
+ result = count7(arr3);
+ printf("Input submitted to the function: [7, 9, 2, 1]");
+ check(1, result);
+ int arr4[4] = {1,7,7,7};
+ result = count7(arr4);
+ printf("Input submitted to the function: [1, 7, 7, 7]");
+ check(3, result);
+ printf("All Correct\n");
+ return 0;
+}
diff --git a/testapp/exam/c_cpp_files/main_fact.cpp b/testapp/exam/c_cpp_files/main_fact.cpp
new file mode 100755
index 0000000..a4ff230
--- /dev/null
+++ b/testapp/exam/c_cpp_files/main_fact.cpp
@@ -0,0 +1,32 @@
+#include <stdio.h>
+#include <stdlib.h>
+
+extern int factorial(int);
+
+template <class T>
+
+void check(T expect, T result)
+{
+ if (expect == result)
+ {
+ printf("\nCorrect:\n Expected %d got %d \n",expect,result);
+ }
+ else
+ {
+ printf("\nIncorrect:\n Expected %d got %d \n",expect,result);
+ exit (1);
+ }
+}
+
+int main(void)
+{
+ int result;
+ result = factorial(0);
+ printf("Input submitted to the function: 0");
+ check(1, result);
+ result = factorial(3);
+ printf("Input submitted to the function: 3");
+ check(6, result);
+ printf("All Correct\n");
+ return 0;
+}
diff --git a/testapp/exam/c_cpp_files/main_greatest.cpp b/testapp/exam/c_cpp_files/main_greatest.cpp
new file mode 100755
index 0000000..6d0a7c2
--- /dev/null
+++ b/testapp/exam/c_cpp_files/main_greatest.cpp
@@ -0,0 +1,44 @@
+#include <stdio.h>
+#include <stdlib.h>
+
+extern int greatest(int, int, int);
+
+template <class T>
+
+void check(T expect, T result)
+{
+ if (expect == result)
+ {
+ printf("\nCorrect:\n Expected %d got %d \n",expect,result);
+ }
+ else
+ {
+ printf("\nIncorrect:\n Expected %d got %d \n",expect,result);
+ exit (1);
+ }
+}
+
+int main(void)
+{
+ int result;
+ result = greatest(1, 2, 3);
+ printf("Input submitted to the function: 1, 2, 3");
+ check(3, result);
+ result = greatest(5, 9, 2);
+ printf("Input submitted to the function: 5, 9, 2");
+ check(9, result);
+ result = greatest(7, 2, 4);
+ printf("Input submitted to the function: 7, 2, 4");
+ check(7, result);
+ result = greatest(11, 2, 45);
+ printf("Input submitted to the function: 11, 2, 45");
+ check(45, result);
+ result = greatest(2, 7, 0);
+ printf("Input submitted to the function: 2, 7, 0");
+ check(7, result);
+ result = greatest(9, 6, 5);
+ printf("Input submitted to the function: 9, 6, 5");
+ check(9, result);
+ printf("All Correct\n");
+ return 0;
+}
diff --git a/testapp/exam/c_cpp_files/main_hello_name.c b/testapp/exam/c_cpp_files/main_hello_name.c
new file mode 100755
index 0000000..71b83a2
--- /dev/null
+++ b/testapp/exam/c_cpp_files/main_hello_name.c
@@ -0,0 +1,29 @@
+#include <stdio.h>
+#include <stdlib.h>
+
+
+void check(char expect[], char result[])
+{
+ if (expect == result)
+ {
+ printf("Correct:expected %s got %s \n",expect,result);
+ }
+ else
+ {
+ printf("ERROR:expected %s got %s \n",expect,result);
+ exit (0);
+ }
+}
+
+int main(void)
+{
+ char result[20];
+ char A[20]=" pratham";
+ char B[20]=" sir";
+ result[20] = message(A);
+ printf("%s",result);
+ check("hello pratham", result);
+ result[20] = message(B);
+ check("hello sir",result);
+ printf("All Correct\n");
+}
diff --git a/testapp/exam/c_cpp_files/main_lessThan9.cpp b/testapp/exam/c_cpp_files/main_lessThan9.cpp
new file mode 100755
index 0000000..722b4bb
--- /dev/null
+++ b/testapp/exam/c_cpp_files/main_lessThan9.cpp
@@ -0,0 +1,38 @@
+#include <stdio.h>
+#include <stdlib.h>
+
+extern bool lessThan9(int);
+
+template <class T>
+
+void check(T expect, T result)
+{
+ if (expect == result)
+ {
+ printf("\nCorrect:\n Expected %d got %d \n",expect,result);
+ }
+ else
+ {
+ printf("\nIncorrect:\n Expected %d got %d \n",expect,result);
+ exit (1);
+ }
+}
+
+int main(void)
+{
+ bool result;
+ result = lessThan9(10);
+ printf("Input submitted to the function: 10");
+ check(false, result);
+ result = lessThan9(17);
+ printf("Input submitted to the function: 17");
+ check(true, result);
+ result = lessThan9(16);
+ printf("Input submitted to the function: 16");
+ check(true, result);
+ result = lessThan9(15);
+ printf("Input submitted to the function: 15");
+ check(false, result);
+ printf("All Correct\n");
+ return 0;
+}
diff --git a/testapp/exam/c_cpp_files/main_mean.cpp b/testapp/exam/c_cpp_files/main_mean.cpp
new file mode 100755
index 0000000..21a4b1a
--- /dev/null
+++ b/testapp/exam/c_cpp_files/main_mean.cpp
@@ -0,0 +1,38 @@
+#include <stdio.h>
+#include <stdlib.h>
+
+extern bool mean(int, int , int);
+
+template <class T>
+
+void check(T expect, T result)
+{
+ if (expect == result)
+ {
+ printf("\nCorrect:\n Expected %d got %d \n",expect,result);
+ }
+ else
+ {
+ printf("\nIncorrect:\n Expected %d got %d \n",expect,result);
+ exit (1);
+ }
+}
+
+int main(void)
+{
+ bool result;
+ result = mean(11, 11, 11);
+ printf("Input submitted to the function: 11, 121, 11");
+ check(true, result);
+ result = mean(16, 12, 9);
+ printf("Input submitted to the function: 16, 144, 9");
+ check(true, result);
+ result = mean(19, 221, 9);
+ printf("Input submitted to the function: 19, 221, 9");
+ check(false, result);
+ result = mean(34, 12, 3);
+ printf("Input submitted to the function: 11, 121, 11");
+ check(false, result);
+ printf("All Correct\n");
+ return 0;
+}
diff --git a/testapp/exam/c_cpp_files/main_palindrome.cpp b/testapp/exam/c_cpp_files/main_palindrome.cpp
new file mode 100755
index 0000000..0e66928
--- /dev/null
+++ b/testapp/exam/c_cpp_files/main_palindrome.cpp
@@ -0,0 +1,32 @@
+#include <stdio.h>
+#include <stdlib.h>
+
+extern bool palindrome(int);
+
+template <class T>
+
+void check(T expect, T result)
+{
+ if (expect == result)
+ {
+ printf("\nCorrect:\n Expected %d got %d \n",expect,result);
+ }
+ else
+ {
+ printf("\nIncorrect:\n Expected %d got %d \n",expect,result);
+ exit (1);
+ }
+}
+
+int main(void)
+{
+ bool result;
+ result = palindrome(123);
+ printf("Input submitted to the function: 123");
+ check(false, result);
+ result = palindrome(121);
+ printf("Input submitted to the function: 121");
+ check(true, result);
+ printf("All Correct\n");
+ return 0;
+}
diff --git a/testapp/exam/c_cpp_files/main_roundTo10.cpp b/testapp/exam/c_cpp_files/main_roundTo10.cpp
new file mode 100755
index 0000000..12c961d
--- /dev/null
+++ b/testapp/exam/c_cpp_files/main_roundTo10.cpp
@@ -0,0 +1,41 @@
+#include <stdio.h>
+#include <stdlib.h>
+
+extern int roundTo10(int,int,int);
+
+template <class T>
+
+void check(T expect, T result)
+{
+ if (expect == result)
+ {
+ printf("\nCorrect:\n Expected %d got %d \n",expect,result);
+ }
+ else
+ {
+ printf("\nIncorrect:\n Expected %d got %d \n",expect,result);
+ exit (1);
+ }
+}
+
+int main(void)
+{
+ int result;
+ result = roundTo10(10, 22, 39);
+ printf("Input submitted to the function: 10, 22, 39");
+ check(70, result);
+ result = roundTo10(45, 42, 39);
+ printf("Input submitted to the function: 45, 42, 39");
+ check(130, result);
+ result = roundTo10(7, 3, 9);
+ printf("Input submitted to the function: 7, 3, 9");
+ check(20, result);
+ result = roundTo10(1, 2, 3);
+ printf("Input submitted to the function: 1, 2, 3");
+ check(0, result);
+ result = roundTo10(30, 40, 50);
+ printf("Input submitted to the function: 30, 40, 50");
+ check(120, result);
+ printf("All Correct\n");
+ return 0;
+}
diff --git a/testapp/exam/c_cpp_files/main_specialSum.cpp b/testapp/exam/c_cpp_files/main_specialSum.cpp
new file mode 100755
index 0000000..d614536
--- /dev/null
+++ b/testapp/exam/c_cpp_files/main_specialSum.cpp
@@ -0,0 +1,41 @@
+#include <stdio.h>
+#include <stdlib.h>
+
+extern int specialSum(int,int,int);
+
+template <class T>
+
+void check(T expect, T result)
+{
+ if (expect == result)
+ {
+ printf("\nCorrect:\n Expected %d got %d \n",expect,result);
+ }
+ else
+ {
+ printf("\nIncorrect:\n Expected %d got %d \n",expect,result);
+ exit (1);
+ }
+}
+
+int main(void)
+{
+ int result;
+ result = specialSum(10, 2, 9);
+ printf("Input submitted to the function: 10, 2, 9");
+ check(21, result);
+ result = specialSum(1, 21, 9);
+ printf("Input submitted to the function: 1, 21, 9");
+ check(1, result);
+ result = specialSum(21, 2, 3);
+ printf("Input submitted to the function: 21, 2, 3");
+ check(0, result);
+ result = specialSum(10, 2, 21);
+ printf("Input submitted to the function: 10, 2, 21");
+ check(12, result);
+ result = specialSum(10, 2, 6);
+ printf("Input submitted to the function: 10, 2, 6");
+ check(18, result);
+ printf("All Correct\n");
+ return 0;
+}
diff --git a/testapp/exam/c_cpp_files/main_within.cpp b/testapp/exam/c_cpp_files/main_within.cpp
new file mode 100755
index 0000000..50f9ad0
--- /dev/null
+++ b/testapp/exam/c_cpp_files/main_within.cpp
@@ -0,0 +1,38 @@
+#include <stdio.h>
+#include <stdlib.h>
+
+extern bool within(int, int, int);
+
+template <class T>
+
+void check(T expect, T result)
+{
+ if (expect == result)
+ {
+ printf("\nCorrect:\n Expected %d got %d \n",expect,result);
+ }
+ else
+ {
+ printf("\nIncorrect:\n Expected %d got %d \n",expect,result);
+ exit (1);
+ }
+}
+
+int main(void)
+{
+ bool result;
+ result = within(12, 3, 20);
+ printf("Input submitted to the function: 12, 3, 20");
+ check(true, result);
+ result = within(12, 13, 20);
+ printf("Input submitted to the function: 12, 13, 20");
+ check(false, result);
+ result = within(29, 13, 120);
+ printf("Input submitted to the function: 29, 13, 120");
+ check(true, result);
+ result = within(12, 12, 20);
+ printf("Input submitted to the function: 12, 3, 20");
+ check(false, result);
+ printf("All Correct\n");
+ return 0;
+}
diff --git a/testapp/exam/code_evaluator.py b/testapp/exam/code_evaluator.py
new file mode 100644
index 0000000..2a57257
--- /dev/null
+++ b/testapp/exam/code_evaluator.py
@@ -0,0 +1,206 @@
+import sys
+from SimpleXMLRPCServer import SimpleXMLRPCServer
+import pwd
+import os
+import stat
+from os.path import isdir, dirname, abspath, join, isfile
+import signal
+from multiprocessing import Process, Queue
+import subprocess
+import re
+import json
+# Local imports.
+from settings import SERVER_PORTS, SERVER_TIMEOUT, SERVER_POOL_PORT
+
+
+MY_DIR = abspath(dirname(__file__))
+
+
+# Raised when the code times-out.
+# c.f. http://pguides.net/python/timeout-a-function
+class TimeoutException(Exception):
+ pass
+
+
+def timeout_handler(signum, frame):
+ """A handler for the ALARM signal."""
+ raise TimeoutException('Code took too long to run.')
+
+
+def create_signal_handler():
+ """Add a new signal handler for the execution of this code."""
+ prev_handler = signal.signal(signal.SIGALRM, timeout_handler)
+ signal.alarm(SERVER_TIMEOUT)
+ return prev_handler
+
+
+def set_original_signal_handler(old_handler=None):
+ """Set back any original signal handler."""
+ if old_handler is not None:
+ signal.signal(signal.SIGALRM, old_handler)
+ return
+ else:
+ raise Exception("Signal Handler: object cannot be NoneType")
+
+
+def delete_signal_handler():
+ signal.alarm(0)
+ return
+
+
+class CodeEvaluator(object):
+ """Tests the code obtained from Code Server"""
+ def __init__(self, test_case_data, test, language, user_answer,
+ ref_code_path=None, in_dir=None):
+ msg = 'Code took more than %s seconds to run. You probably '\
+ 'have an infinite loop in your code.' % SERVER_TIMEOUT
+ self.timeout_msg = msg
+ self.test_case_data = test_case_data
+ self.language = language.lower()
+ self.user_answer = user_answer
+ self.ref_code_path = ref_code_path
+ self.test = test
+ self.in_dir = in_dir
+ self.test_case_args = None
+
+ # Public Protocol ##########
+ @classmethod
+ def from_json(cls, language, json_data, in_dir):
+ json_data = json.loads(json_data)
+ test_case_data = json_data.get("test_case_data")
+ user_answer = json_data.get("user_answer")
+ ref_code_path = json_data.get("ref_code_path")
+ test = json_data.get("test")
+
+ instance = cls(test_case_data, test, language, user_answer, ref_code_path,
+ in_dir)
+ return instance
+
+ def evaluate(self):
+ """Evaluates given code with the test cases based on
+ given arguments in test_case_data.
+
+ The ref_code_path is a path to the reference code.
+ The reference code will call the function submitted by the student.
+ The reference code will check for the expected output.
+
+ If the path's start with a "/" then we assume they are absolute paths.
+ If not, we assume they are relative paths w.r.t. the location of this
+ code_server script.
+
+ If the optional `in_dir` keyword argument is supplied it changes the
+ directory to that directory (it does not change it back to the original
+ when done).
+
+ Returns
+ -------
+
+ A tuple: (success, error message).
+ """
+
+ self._setup()
+ success, err = self._evaluate(self.test_case_args)
+ self._teardown()
+
+ result = {'success': success, 'error': err}
+ return result
+
+ # Private Protocol ##########
+ def _setup(self):
+ self._change_dir(self.in_dir)
+
+ def _evaluate(self, args):
+ # Add a new signal handler for the execution of this code.
+ prev_handler = create_signal_handler()
+ success = False
+ args = args or []
+
+ # Do whatever testing needed.
+ try:
+ success, err = self._check_code(*args)
+
+ except TimeoutException:
+ err = self.timeout_msg
+ except:
+ _type, value = sys.exc_info()[:2]
+ err = "Error: {0}".format(repr(value))
+ finally:
+ # Set back any original signal handler.
+ set_original_signal_handler(prev_handler)
+
+ return success, err
+
+ def _teardown(self):
+ # Cancel the signal
+ delete_signal_handler()
+
+ def _check_code(self):
+ raise NotImplementedError("check_code method not implemented")
+
+ def create_submit_code_file(self, file_name):
+ """ Write the code (`answer`) to a file and set the file path"""
+ submit_f = open(file_name, 'w')
+ submit_f.write(self.user_answer.lstrip())
+ submit_f.close()
+ submit_path = abspath(submit_f.name)
+
+ return submit_path
+
+ def _set_file_as_executable(self, fname):
+ os.chmod(fname, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
+ | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP
+ | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)
+
+ def _set_test_code_file_path(self, ref_path=None, test_case_path=None):
+ if ref_path and not ref_path.startswith('/'):
+ ref_path = join(MY_DIR, ref_path)
+
+ if test_case_path and not test_case_path.startswith('/'):
+ test_case_path = join(MY_DIR, test_case_path)
+
+ return ref_path, test_case_path
+
+ def _run_command(self, cmd_args, *args, **kw):
+ """Run a command in a subprocess while blocking, the process is killed
+ if it takes more than 2 seconds to run. Return the Popen object, the
+ stdout and stderr.
+ """
+ try:
+ proc = subprocess.Popen(cmd_args, *args, **kw)
+ stdout, stderr = proc.communicate()
+ except TimeoutException:
+ # Runaway code, so kill it.
+ proc.kill()
+ # Re-raise exception.
+ raise
+ return proc, stdout, stderr
+
+ def _compile_command(self, cmd, *args, **kw):
+ """Compiles C/C++/java code and returns errors if any.
+ Run a command in a subprocess while blocking, the process is killed
+ if it takes more than 2 seconds to run. Return the Popen object, the
+ stderr.
+ """
+ try:
+ proc_compile = subprocess.Popen(cmd, shell=True, stdin=None,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ out, err = proc_compile.communicate()
+ except TimeoutException:
+ # Runaway code, so kill it.
+ proc_compile.kill()
+ # Re-raise exception.
+ raise
+ return proc_compile, err
+
+ def _change_dir(self, in_dir):
+ if in_dir is not None and isdir(in_dir):
+ os.chdir(in_dir)
+
+ def _remove_null_substitute_char(self, string):
+ """Returns a string without any null and substitute characters"""
+ stripped = ""
+ for c in string:
+ if ord(c) is not 26 and ord(c) is not 0:
+ stripped = stripped + c
+ return ''.join(stripped)
diff --git a/testapp/exam/code_server.py b/testapp/exam/code_server.py
index 792197d..580379f 100755
--- a/testapp/exam/code_server.py
+++ b/testapp/exam/code_server.py
@@ -19,7 +19,6 @@ settings.py:SERVER_POOL_PORT. This port exposes a `get_server_port` function
that returns an available server.
"""
import sys
-import traceback
from SimpleXMLRPCServer import SimpleXMLRPCServer
import pwd
import os
@@ -29,12 +28,16 @@ import signal
from multiprocessing import Process, Queue
import subprocess
import re
+import json
# Local imports.
from settings import SERVER_PORTS, SERVER_TIMEOUT, SERVER_POOL_PORT
+from language_registry import set_registry
+
MY_DIR = abspath(dirname(__file__))
+# Private Protocol ##########
def run_as_nobody():
"""Runs the current process as nobody."""
# Set the effective uid and to that of nobody.
@@ -43,17 +46,6 @@ def run_as_nobody():
os.seteuid(nobody.pw_uid)
-# Raised when the code times-out.
-# c.f. http://pguides.net/python/timeout-a-function
-class TimeoutException(Exception):
- pass
-
-
-def timeout_handler(signum, frame):
- """A handler for the ALARM signal."""
- raise TimeoutException('Code took too long to run.')
-
-
###############################################################################
# `CodeServer` class.
###############################################################################
@@ -64,736 +56,38 @@ class CodeServer(object):
def __init__(self, port, queue):
self.port = port
self.queue = queue
- msg = 'Code took more than %s seconds to run. You probably '\
- 'have an infinite loop in your code.' % SERVER_TIMEOUT
- self.timeout_msg = msg
-
- def run_python_code(self, answer, test_code, in_dir=None):
- """Tests given Python function (`answer`) with the `test_code`
- supplied. If the optional `in_dir` keyword argument is supplied
- it changes the directory to that directory (it does not change
- it back to the original when done). This function also timesout
- when the function takes more than SERVER_TIMEOUT seconds to run
- to prevent runaway code.
- Returns
- -------
-
- A tuple: (success, error message).
-
- """
- if in_dir is not None and isdir(in_dir):
- os.chdir(in_dir)
-
- # Add a new signal handler for the execution of this code.
- old_handler = signal.signal(signal.SIGALRM, timeout_handler)
- signal.alarm(SERVER_TIMEOUT)
-
- success = False
- tb = None
- try:
- submitted = compile(answer, '<string>', mode='exec')
- g = {}
- exec submitted in g
- _tests = compile(test_code, '<string>', mode='exec')
- exec _tests in g
- except TimeoutException:
- err = self.timeout_msg
- except AssertionError:
- type, value, tb = sys.exc_info()
- info = traceback.extract_tb(tb)
- fname, lineno, func, text = info[-1]
- text = str(test_code).splitlines()[lineno-1]
- err = "{0} {1} in: {2}".format(type.__name__, str(value), text)
- except:
- type, value = sys.exc_info()[:2]
- err = "Error: {0}".format(repr(value))
- else:
- success = True
- err = 'Correct answer'
- finally:
- del tb
- # Set back any original signal handler.
- signal.signal(signal.SIGALRM, old_handler)
-
- # Cancel the signal if any, see signal.alarm documentation.
- signal.alarm(0)
-
- # Put us back into the server pool queue since we are free now.
- self.queue.put(self.port)
-
- return success, err
-
- def run_bash_code(self, answer, test_code, in_dir=None):
- """Tests given Bash code (`answer`) with the `test_code` supplied.
-
- The testcode should typically contain two lines, the first is a path to
- the reference script we are to compare against. The second is a path
- to the arguments to be supplied to the reference and submitted script.
- The output of these will be compared for correctness.
-
- If the path's start with a "/" then we assume they are absolute paths.
- If not, we assume they are relative paths w.r.t. the location of this
- code_server script.
-
- If the optional `in_dir` keyword argument is supplied it changes the
- directory to that directory (it does not change it back to the original
- when done).
-
- Returns
- -------
-
- A tuple: (success, error message).
-
- """
- if in_dir is not None and isdir(in_dir):
- os.chdir(in_dir)
-
- def _set_exec(fname):
- os.chmod(fname, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
- | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP
- | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)
- submit_f = open('submit.sh', 'w')
- submit_f.write(answer.lstrip())
- submit_f.close()
- submit_path = abspath(submit_f.name)
- _set_exec(submit_path)
-
- ref_path, test_case_path = test_code.strip().splitlines()
- if not ref_path.startswith('/'):
- ref_path = join(MY_DIR, ref_path)
- if not test_case_path.startswith('/'):
- test_case_path = join(MY_DIR, test_case_path)
-
- # Add a new signal handler for the execution of this code.
- old_handler = signal.signal(signal.SIGALRM, timeout_handler)
- signal.alarm(SERVER_TIMEOUT)
-
- # Do whatever testing needed.
- success = False
- try:
- success, err = self.check_bash_script(ref_path, submit_path,
- test_case_path)
- except TimeoutException:
- err = self.timeout_msg
- except:
- type, value = sys.exc_info()[:2]
- err = "Error: {0}".format(repr(value))
- finally:
- # Set back any original signal handler.
- signal.signal(signal.SIGALRM, old_handler)
-
- # Delete the created file.
- os.remove(submit_path)
-
- # Cancel the signal if any, see signal.alarm documentation.
- signal.alarm(0)
-
- # Put us back into the server pool queue since we are free now.
- self.queue.put(self.port)
-
- return success, err
-
- def _run_command(self, cmd_args, *args, **kw):
- """Run a command in a subprocess while blocking, the process is killed
- if it takes more than 2 seconds to run. Return the Popen object, the
- stdout and stderr.
- """
- try:
- proc = subprocess.Popen(cmd_args, *args, **kw)
- stdout, stderr = proc.communicate()
- except TimeoutException:
- # Runaway code, so kill it.
- proc.kill()
- # Re-raise exception.
- raise
- return proc, stdout, stderr
-
- def check_bash_script(self, ref_script_path, submit_script_path,
- test_case_path=None):
- """ Function validates student script using instructor script as
- reference. Test cases can optionally be provided. The first argument
- ref_script_path, is the path to instructor script, it is assumed to
- have executable permission. The second argument submit_script_path, is
- the path to the student script, it is assumed to have executable
- permission. The Third optional argument is the path to test the
- scripts. Each line in this file is a test case and each test case is
- passed to the script as standard arguments.
-
- Returns
- --------
-
- returns (True, "Correct answer") : If the student script passes all
- test cases/have same output, when compared to the instructor script
-
- returns (False, error_msg): If the student script fails a single
- test/have dissimilar output, when compared to the instructor script.
-
- Returns (False, error_msg): If mandatory arguments are not files or if
- the required permissions are not given to the file(s).
-
- """
- if not isfile(ref_script_path):
- return False, "No file at %s" % ref_script_path
- if not isfile(submit_script_path):
- return False, 'No file at %s' % submit_script_path
- if not os.access(ref_script_path, os.X_OK):
- return False, 'Script %s is not executable' % ref_script_path
- if not os.access(submit_script_path, os.X_OK):
- return False, 'Script %s is not executable' % submit_script_path
-
- if test_case_path is None:
- ret = self._run_command(ref_script_path, stdin=None,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- proc, inst_stdout, inst_stderr = ret
- ret = self._run_command(submit_script_path, stdin=None,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- proc, stdnt_stdout, stdnt_stderr = ret
- if inst_stdout == stdnt_stdout:
- return True, 'Correct answer'
- else:
- err = "Error: expected %s, got %s" % (inst_stderr,
- stdnt_stderr)
- return False, err
- else:
- if not isfile(test_case_path):
- return False, "No test case at %s" % test_case_path
- if not os.access(ref_script_path, os.R_OK):
- return False, "Test script %s, not readable" % test_case_path
- valid_answer = True # We initially make it one, so that we can
- # stop once a test case fails
- loop_count = 0 # Loop count has to be greater than or
- # equal to one.
- # Useful for caching things like empty
- # test files,etc.
- test_cases = open(test_case_path).readlines()
- num_lines = len(test_cases)
- for test_case in test_cases:
- loop_count += 1
- if valid_answer:
- args = [ref_script_path] + [x for x in test_case.split()]
- ret = self._run_command(args, stdin=None,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- proc, inst_stdout, inst_stderr = ret
- args = [submit_script_path]+[x for x in test_case.split()]
- ret = self._run_command(args, stdin=None,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- proc, stdnt_stdout, stdnt_stderr = ret
- valid_answer = inst_stdout == stdnt_stdout
- if valid_answer and (num_lines == loop_count):
- return True, "Correct answer"
- else:
- err = "Error:expected %s, got %s" % (inst_stdout+inst_stderr,
- stdnt_stdout+stdnt_stderr)
- return False, err
-
- def run_c_code(self, answer, test_code, in_dir=None):
- """Tests given C code (`answer`) with the `test_code` supplied.
-
- The testcode is a path to the reference code.
- The reference code will call the function submitted by the student.
- The reference code will check for the expected output.
-
- If the path's start with a "/" then we assume they are absolute paths.
- If not, we assume they are relative paths w.r.t. the location of this
- code_server script.
-
- If the optional `in_dir` keyword argument is supplied it changes the
- directory to that directory (it does not change it back to the original
- when done).
-
- Returns
- -------
-
- A tuple: (success, error message).
-
- """
- if in_dir is not None and isdir(in_dir):
- os.chdir(in_dir)
-
- # File extension must be .c
- submit_f = open('submit.c', 'w')
- submit_f.write(answer.lstrip())
- submit_f.close()
- submit_path = abspath(submit_f.name)
-
- ref_path = test_code.strip()
- if not ref_path.startswith('/'):
- ref_path = join(MY_DIR, ref_path)
-
- # Add a new signal handler for the execution of this code.
- old_handler = signal.signal(signal.SIGALRM, timeout_handler)
- signal.alarm(SERVER_TIMEOUT)
-
- # Do whatever testing needed.
- success = False
- try:
- success, err = self._check_c_cpp_code(ref_path, submit_path)
- except TimeoutException:
- err = self.timeout_msg
- except:
- type, value = sys.exc_info()[:2]
- err = "Error: {0}".format(repr(value))
- finally:
- # Set back any original signal handler.
- signal.signal(signal.SIGALRM, old_handler)
-
- # Delete the created file.
- os.remove(submit_path)
-
- # Cancel the signal if any, see signal.alarm documentation.
- signal.alarm(0)
-
- # Put us back into the server pool queue since we are free now.
- self.queue.put(self.port)
-
- return success, err
-
- def _compile_command(self, cmd, *args, **kw):
- """Compiles C/C++/java code and returns errors if any.
- Run a command in a subprocess while blocking, the process is killed
- if it takes more than 2 seconds to run. Return the Popen object, the
- stderr.
- """
- try:
- proc_compile = subprocess.Popen(cmd, shell=True, stdin=None,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- out, err = proc_compile.communicate()
- except TimeoutException:
- # Runaway code, so kill it.
- proc_compile.kill()
- # Re-raise exception.
- raise
- return proc_compile, err
-
- def _check_c_cpp_code(self, ref_code_path, submit_code_path):
- """ Function validates student code using instructor code as
- reference.The first argument ref_code_path, is the path to
- instructor code, it is assumed to have executable permission.
- The second argument submit_code_path, is the path to the student
- code, it is assumed to have executable permission.
-
- Returns
- --------
-
- returns (True, "Correct answer") : If the student function returns
- expected output when called by reference code.
-
- returns (False, error_msg): If the student function fails to return
- expected output when called by reference code.
-
- Returns (False, error_msg): If mandatory arguments are not files or
- if the required permissions are not given to the file(s).
-
- """
- if not isfile(ref_code_path):
- return False, "No file at %s" % ref_code_path
- if not isfile(submit_code_path):
- return False, 'No file at %s' % submit_code_path
-
- success = False
- output_path = os.getcwd() + '/output'
- compile_command = "g++ %s -c -o %s" % (submit_code_path, output_path)
- ret = self._compile_command(compile_command)
- proc, stdnt_stderr = ret
-
- # Only if compilation is successful, the program is executed
- # And tested with testcases
- if stdnt_stderr == '':
- executable = os.getcwd() + '/executable'
- compile_main = "g++ %s %s -o %s" % (ref_code_path, output_path,
- executable)
- ret = self._compile_command(compile_main)
- proc, main_err = ret
- if main_err == '':
- args = [executable]
- ret = self._run_command(args, stdin=None,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- proc, stdout, stderr = ret
- if proc.returncode == 0:
- success, err = True, "Correct answer"
- else:
- err = stdout + "\n" + stderr
- os.remove(executable)
- else:
- err = "Error:"
- try:
- error_lines = main_err.splitlines()
- for e in error_lines:
- err = err + "\n" + e.split(":", 1)[1]
- except:
- err = err + "\n" + main_err
- os.remove(output_path)
- else:
- err = "Compilation Error:"
- try:
- error_lines = stdnt_stderr.splitlines()
- for e in error_lines:
- if ':' in e:
- err = err + "\n" + e.split(":", 1)[1]
- else:
- err = err + "\n" + e
- except:
- err = err + "\n" + stdnt_stderr
- return success, err
-
- def run_cplus_code(self, answer, test_code, in_dir=None):
- """Tests given C++ code (`answer`) with the `test_code` supplied.
-
- The testcode is a path to the reference code.
- The reference code will call the function submitted by the student.
- The reference code will check for the expected output.
-
- If the path's start with a "/" then we assume they are absolute paths.
- If not, we assume they are relative paths w.r.t. the location of this
- code_server script.
-
- If the optional `in_dir` keyword argument is supplied it changes the
- directory to that directory (it does not change it back to the original
- when done).
-
- Returns
- -------
-
- A tuple: (success, error message).
-
- """
- if in_dir is not None and isdir(in_dir):
- os.chdir(in_dir)
-
- # The file extension must be .cpp
- submit_f = open('submitstd.cpp', 'w')
- submit_f.write(answer.lstrip())
- submit_f.close()
- submit_path = abspath(submit_f.name)
-
- ref_path = test_code.strip()
- if not ref_path.startswith('/'):
- ref_path = join(MY_DIR, ref_path)
-
- # Add a new signal handler for the execution of this code.
- old_handler = signal.signal(signal.SIGALRM, timeout_handler)
- signal.alarm(SERVER_TIMEOUT)
-
- # Do whatever testing needed.
- success = False
- try:
- success, err = self._check_c_cpp_code(ref_path, submit_path)
- except TimeoutException:
- err = self.timeout_msg
- except:
- type, value = sys.exc_info()[:2]
- err = "Error: {0}".format(repr(value))
- finally:
- # Set back any original signal handler.
- signal.signal(signal.SIGALRM, old_handler)
-
- # Delete the created file.
- os.remove(submit_path)
-
- # Cancel the signal if any, see signal.alarm documentation.
- signal.alarm(0)
-
- # Put us back into the server pool queue since we are free now.
- self.queue.put(self.port)
-
- return success, err
-
- def run_java_code(self, answer, test_code, in_dir=None):
- """Tests given java code (`answer`) with the `test_code` supplied.
-
- The testcode is a path to the reference code.
- The reference code will call the function submitted by the student.
- The reference code will check for the expected output.
-
- If the path's start with a "/" then we assume they are absolute paths.
- If not, we assume they are relative paths w.r.t. the location of this
- code_server script.
-
- If the optional `in_dir` keyword argument is supplied it changes the
- directory to that directory (it does not change it back to the original
- when done).
-
- Returns
- -------
-
- A tuple: (success, error message).
-
- """
- if in_dir is not None and isdir(in_dir):
- os.chdir(in_dir)
-
- # The file extension must be .java
- # The class name and file name must be same in java
- submit_f = open('Test.java', 'w')
- submit_f.write(answer.lstrip())
- submit_f.close()
- submit_path = abspath(submit_f.name)
-
- ref_path = test_code.strip()
- if not ref_path.startswith('/'):
- ref_path = join(MY_DIR, ref_path)
-
- # Add a new signal handler for the execution of this code.
- old_handler = signal.signal(signal.SIGALRM, timeout_handler)
- signal.alarm(SERVER_TIMEOUT)
-
- # Do whatever testing needed.
- success = False
- try:
- success, err = self._check_java_code(ref_path, submit_path)
- except TimeoutException:
- err = self.timeout_msg
- except:
- type, value = sys.exc_info()[:2]
- err = "Error: {0}".format(repr(value))
- finally:
- # Set back any original signal handler.
- signal.signal(signal.SIGALRM, old_handler)
-
- # Delete the created file.
- os.remove(submit_path)
-
- # Cancel the signal if any, see signal.alarm documentation.
- signal.alarm(0)
-
- # Put us back into the server pool queue since we are free now.
- self.queue.put(self.port)
-
- return success, err
-
- def _check_java_code(self, ref_code_path, submit_code_path):
- """ Function validates student code using instructor code as
- reference.The first argument ref_code_path, is the path to
- instructor code, it is assumed to have executable permission.
- The second argument submit_code_path, is the path to the student
- code, it is assumed to have executable permission.
-
- Returns
- --------
-
- returns (True, "Correct answer") : If the student function returns
- expected output when called by reference code.
-
- returns (False, error_msg): If the student function fails to return
- expected output when called by reference code.
-
- Returns (False, error_msg): If mandatory arguments are not files or
- if the required permissions are not given to the file(s).
-
- """
- if not isfile(ref_code_path):
- return False, "No file at %s" % ref_code_path
- if not isfile(submit_code_path):
- return False, 'No file at %s' % submit_code_path
-
- success = False
- compile_command = "javac %s" % (submit_code_path)
- ret = self._compile_command(compile_command)
- proc, stdnt_stderr = ret
- stdnt_stderr = self._remove_null_substitute_char(stdnt_stderr)
-
- # Only if compilation is successful, the program is executed
- # And tested with testcases
- if stdnt_stderr == '':
- student_directory = os.getcwd() + '/'
- student_file_name = "Test"
- compile_main = "javac %s -classpath %s -d %s" % (ref_code_path,
- student_directory,
- student_directory)
- ret = self._compile_command(compile_main)
- proc, main_err = ret
- main_err = self._remove_null_substitute_char(main_err)
-
- if main_err == '':
- main_file_name = (ref_code_path.split('/')[-1]).split('.')[0]
- run_command = "java -cp %s %s" % (student_directory,
- main_file_name)
- ret = self._run_command(run_command,
- stdin=None,
- shell=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- proc, stdout, stderr = ret
- if proc.returncode == 0:
- success, err = True, "Correct answer"
- else:
- err = stdout + "\n" + stderr
- success = False
- os.remove("%s%s.class" % (student_directory, main_file_name))
- else:
- err = "Error:\n"
- try:
- error_lines = main_err.splitlines()
- for e in error_lines:
- if ':' in e:
- err = err + "\n" + e.split(":", 1)[1]
- else:
- err = err + "\n" + e
- except:
- err = err + "\n" + main_err
- os.remove("%s%s.class" % (student_directory, student_file_name))
- else:
- err = "Compilation Error:\n"
- try:
- error_lines = stdnt_stderr.splitlines()
- for e in error_lines:
- if ':' in e:
- err = err + "\n" + e.split(":", 1)[1]
- else:
- err = err + "\n" + e
- except:
- err = err + "\n" + stdnt_stderr
- return success, err
-
- def _remove_null_substitute_char(self, string):
- """Returns a string without any null and substitute characters"""
- stripped = ""
- for c in string:
- if ord(c) is not 26 and ord(c) is not 0:
- stripped = stripped + c
- return ''.join(stripped)
-
- def run_scilab_code(self, answer, test_code, in_dir=None):
- """Tests given Scilab function (`answer`) with the `test_code`
- supplied. If the optional `in_dir` keyword argument is supplied
- it changes the directory to that directory (it does not change
- it back to the original when done). This function also timesout
- when the function takes more than SERVER_TIMEOUT seconds to run
- to prevent runaway code.
-
- The testcode is a path to the reference code.
- The reference code will call the function submitted by the student.
- The reference code will check for the expected output.
-
- If the path's start with a "/" then we assume they are absolute paths.
- If not, we assume they are relative paths w.r.t. the location of this
- code_server script.
-
- Returns
- -------
-
- A tuple: (success, error message).
+ # Public Protocol ##########
+ def check_code(self, language, json_data, in_dir=None):
+ """Calls relevant EvaluateCode class based on language to check the
+ answer code
"""
- if in_dir is not None and isdir(in_dir):
- os.chdir(in_dir)
-
- # Removes all the commands that terminates scilab
- answer,i = self._remove_scilab_exit(answer.lstrip())
-
- # Throw message if there are commmands that terminates scilab
- add_err=""
- if i > 0:
- add_err = "Please do not use exit, quit and abort commands in your\
- code.\n Otherwise your code will not be evaluated\
- correctly.\n"
-
- # The file extension should be .sci
- submit_f = open('function.sci','w')
- submit_f.write(answer)
- submit_f.close()
- submit_path = abspath(submit_f.name)
-
- ref_path = test_code.strip()
- if not ref_path.startswith('/'):
- ref_path = join(MY_DIR, ref_path)
-
- # Add a new signal handler for the execution of this code.
- old_handler = signal.signal(signal.SIGALRM, timeout_handler)
- signal.alarm(SERVER_TIMEOUT)
-
- # Do whatever testing needed.
- success = False
- try:
- cmd = 'printf "lines(0)\nexec(\'{0}\',2);\nquit();"'.format(ref_path)
- cmd += ' | timeout 8 scilab-cli -nb'
- ret = self._run_command(cmd,
- shell=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- proc, stdout, stderr = ret
-
- # Get only the error.
- stderr = self._get_error(stdout)
- if stderr is None:
- # Clean output
- stdout = self._strip_output(stdout)
- if proc.returncode == 5:
- success, err = True, "Correct answer"
- else:
- err = add_err + stdout
- else:
- err = add_err + stderr
- except TimeoutException:
- err = self.timeout_msg
- except:
- type, value = sys.exc_info()[:2]
- err = "Error: {0}".format(repr(value))
- finally:
- # Set back any original signal handler.
- signal.signal(signal.SIGALRM, old_handler)
-
- # Delete the created file.
- os.remove(submit_path)
-
- # Cancel the signal if any, see signal.alarm documentation.
- signal.alarm(0)
+ code_evaluator = self._create_evaluator_instance(language, json_data,
+ in_dir)
+ result = code_evaluator.evaluate()
# Put us back into the server pool queue since we are free now.
self.queue.put(self.port)
- return success, err
-
- def _remove_scilab_exit(self, string):
- """
- Removes exit, quit and abort from the scilab code
- """
- new_string = ""
- i=0
- for line in string.splitlines():
- new_line = re.sub(r"exit.*$","",line)
- new_line = re.sub(r"quit.*$","",new_line)
- new_line = re.sub(r"abort.*$","",new_line)
- if line != new_line:
- i=i+1
- new_string = new_string +'\n'+ new_line
- return new_string, i
-
- def _get_error(self, string):
- """
- Fetches only the error from the string.
- Returns None if no error.
- """
- obj = re.search("!.+\n.+",string);
- if obj:
- return obj.group()
- return None
-
- def _strip_output(self, out):
- """
- Cleans whitespace from the output
- """
- strip_out = "Message"
- for l in out.split('\n'):
- if l.strip():
- strip_out = strip_out+"\n"+l.strip()
- return strip_out
+ return json.dumps(result)
def run(self):
- """Run XMLRPC server, serving our methods.
- """
+ """Run XMLRPC server, serving our methods."""
server = SimpleXMLRPCServer(("localhost", self.port))
self.server = server
server.register_instance(self)
self.queue.put(self.port)
server.serve_forever()
+ # Private Protocol ##########
+ def _create_evaluator_instance(self, language, json_data, in_dir):
+ """Create instance of relevant EvaluateCode class based on language"""
+ set_registry()
+ registry = get_registry()
+ cls = registry.get_class(language)
+ instance = cls.from_json(language, json_data, in_dir)
+ return instance
+
###############################################################################
# `ServerPool` class.
@@ -825,6 +119,8 @@ class ServerPool(object):
p.start()
self.servers = servers
+ # Public Protocol ##########
+
def get_server_port(self):
"""Get available server port from ones in the pool. This will block
till it gets an available server.
diff --git a/testapp/exam/cpp_code_evaluator.py b/testapp/exam/cpp_code_evaluator.py
new file mode 100644
index 0000000..15e2b13
--- /dev/null
+++ b/testapp/exam/cpp_code_evaluator.py
@@ -0,0 +1,125 @@
+#!/usr/bin/env python
+import traceback
+import pwd
+import os
+from os.path import join, isfile
+import subprocess
+import importlib
+
+# local imports
+from code_evaluator import CodeEvaluator
+
+
+class CppCodeEvaluator(CodeEvaluator):
+    """Tests the C/C++ code obtained from Code Server"""
+ def __init__(self, test_case_data, test, language, user_answer,
+ ref_code_path=None, in_dir=None):
+ super(CppCodeEvaluator, self).__init__(test_case_data, test, language,
+ user_answer, ref_code_path,
+ in_dir)
+ self.submit_path = self.create_submit_code_file('submit.c')
+ self.test_case_args = self._setup()
+
+ # Private Protocol ##########
+ def _setup(self):
+ super(CppCodeEvaluator, self)._setup()
+
+ get_ref_path = self.ref_code_path
+ ref_path, test_case_path = self._set_test_code_file_path(get_ref_path)
+
+ # Set file paths
+ c_user_output_path = os.getcwd() + '/output'
+ c_ref_output_path = os.getcwd() + '/executable'
+
+ # Set command variables
+ compile_command = 'g++ {0} -c -o {1}'.format(self.submit_path,
+ c_user_output_path)
+ compile_main = 'g++ {0} {1} -o {2}'.format(ref_path,
+ c_user_output_path,
+ c_ref_output_path)
+ run_command_args = [c_ref_output_path]
+ remove_user_output = c_user_output_path
+ remove_ref_output = c_ref_output_path
+
+ return (ref_path, self.submit_path, compile_command, compile_main,
+ run_command_args, remove_user_output, remove_ref_output)
+
+ def _teardown(self):
+ # Delete the created file.
+ super(CppCodeEvaluator, self)._teardown()
+ os.remove(self.submit_path)
+
+ def _check_code(self, ref_code_path, submit_code_path, compile_command,
+ compile_main, run_command_args, remove_user_output,
+ remove_ref_output):
+ """ Function validates student code using instructor code as
+        reference. The first argument ref_code_path, is the path to
+ instructor code, it is assumed to have executable permission.
+ The second argument submit_code_path, is the path to the student
+ code, it is assumed to have executable permission.
+
+ Returns
+ --------
+
+ returns (True, "Correct answer") : If the student function returns
+ expected output when called by reference code.
+
+ returns (False, error_msg): If the student function fails to return
+ expected output when called by reference code.
+
+ Returns (False, error_msg): If mandatory arguments are not files or
+ if the required permissions are not given to the file(s).
+
+ """
+ if not isfile(ref_code_path):
+ return False, "No file at %s or Incorrect path" % ref_code_path
+ if not isfile(submit_code_path):
+ return False, 'No file at %s or Incorrect path' % submit_code_path
+
+ success = False
+ ret = self._compile_command(compile_command)
+ proc, stdnt_stderr = ret
+ stdnt_stderr = self._remove_null_substitute_char(stdnt_stderr)
+
+ # Only if compilation is successful, the program is executed
+ # And tested with testcases
+ if stdnt_stderr == '':
+ ret = self._compile_command(compile_main)
+ proc, main_err = ret
+ main_err = self._remove_null_substitute_char(main_err)
+
+ if main_err == '':
+ ret = self._run_command(run_command_args, stdin=None,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ proc, stdout, stderr = ret
+ if proc.returncode == 0:
+ success, err = True, "Correct answer"
+ else:
+ err = stdout + "\n" + stderr
+ os.remove(remove_ref_output)
+ else:
+ err = "Error:"
+ try:
+ error_lines = main_err.splitlines()
+ for e in error_lines:
+ if ':' in e:
+ err = err + "\n" + e.split(":", 1)[1]
+ else:
+ err = err + "\n" + e
+ except:
+ err = err + "\n" + main_err
+ os.remove(remove_user_output)
+ else:
+ err = "Compilation Error:"
+ try:
+ error_lines = stdnt_stderr.splitlines()
+ for e in error_lines:
+ if ':' in e:
+ err = err + "\n" + e.split(":", 1)[1]
+ else:
+ err = err + "\n" + e
+ except:
+ err = err + "\n" + stdnt_stderr
+
+ return success, err
diff --git a/testapp/exam/forms.py b/testapp/exam/forms.py
index 1f12a3b..93584a6 100644
--- a/testapp/exam/forms.py
+++ b/testapp/exam/forms.py
@@ -1,5 +1,5 @@
from django import forms
-from testapp.exam.models import Profile, Quiz, Question
+from exam.models import Profile, Quiz, Question, TestCase
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
@@ -8,6 +8,7 @@ from taggit.forms import TagField
from taggit_autocomplete_modified.managers import TaggableManagerAutocomplete
from taggit_autocomplete_modified.widgets import TagAutocomplete
from taggit_autocomplete_modified import settings
+from django.forms.models import inlineformset_factory
from string import letters, punctuation, digits
import datetime
@@ -177,7 +178,7 @@ class QuizForm(forms.Form):
new_quiz.save()
-class QuestionForm(forms.Form):
+class QuestionForm(forms.ModelForm):
"""Creates a form to add or edit a Question.
It has the related fields and functions required."""
@@ -186,8 +187,8 @@ class QuestionForm(forms.Form):
description = forms.CharField(widget=forms.Textarea\
(attrs={'cols': 40, 'rows': 1}))
points = forms.FloatField()
- test = forms.CharField(widget=forms.Textarea\
- (attrs={'cols': 40, 'rows': 1}))
+ solution = forms.CharField(widget=forms.Textarea\
+ (attrs={'cols': 40, 'rows': 1}), required=False)
options = forms.CharField(widget=forms.Textarea\
(attrs={'cols': 40, 'rows': 1}), required=False)
language = forms.CharField(max_length=20, widget=forms.Select\
@@ -198,29 +199,37 @@ class QuestionForm(forms.Form):
tags = TagField(widget=TagAutocomplete(), required=False)
snippet = forms.CharField(widget=forms.Textarea\
(attrs={'cols': 40, 'rows': 1}), required=False)
-
- def save(self):
- summary = self.cleaned_data["summary"]
- description = self.cleaned_data["description"]
- points = self.cleaned_data['points']
- test = self.cleaned_data["test"]
- options = self.cleaned_data['options']
- language = self.cleaned_data['language']
- type = self.cleaned_data["type"]
- active = self.cleaned_data["active"]
- snippet = self.cleaned_data["snippet"]
+ ref_code_path = forms.CharField(widget=forms.Textarea\
+ (attrs={'cols': 40, 'rows': 1}), required=False)
+
+ def save(self, commit=True):
+ summary = self.cleaned_data.get("summary")
+ description = self.cleaned_data.get("description")
+ points = self.cleaned_data.get("points")
+ options = self.cleaned_data.get("options")
+ language = self.cleaned_data.get("language")
+ type = self.cleaned_data.get("type")
+ active = self.cleaned_data.get("active")
+ snippet = self.cleaned_data.get("snippet")
new_question = Question()
new_question.summary = summary
new_question.description = description
new_question.points = points
- new_question.test = test
+ # new_question.test = test
new_question.options = options
new_question.language = language
new_question.type = type
new_question.active = active
new_question.snippet = snippet
- new_question.save()
+ new_question = super(QuestionForm, self).save(commit=False)
+ if commit:
+ new_question.save()
+
+ return new_question
+
+ class Meta:
+ model = Question
class RandomQuestionForm(forms.Form):
@@ -229,3 +238,6 @@ class RandomQuestionForm(forms.Form):
marks = forms.CharField(max_length=8, widget=forms.Select\
(choices=(('select', 'Select Marks'),)))
shuffle_questions = forms.BooleanField(required=False)
+
+TestCaseFormSet = inlineformset_factory(Question, TestCase,\
+ can_order=False, can_delete=False, extra=1)
diff --git a/testapp/exam/java_code_evaluator.py b/testapp/exam/java_code_evaluator.py
new file mode 100644
index 0000000..08ae208
--- /dev/null
+++ b/testapp/exam/java_code_evaluator.py
@@ -0,0 +1,127 @@
+#!/usr/bin/env python
+import traceback
+import pwd
+import os
+from os.path import join, isfile
+import subprocess
+import importlib
+
+# local imports
+from code_evaluator import CodeEvaluator
+
+
+class JavaCodeEvaluator(CodeEvaluator):
+ """Tests the Java code obtained from Code Server"""
+ def __init__(self, test_case_data, test, language, user_answer,
+ ref_code_path=None, in_dir=None):
+ super(JavaCodeEvaluator, self).__init__(test_case_data, test,
+ language, user_answer,
+ ref_code_path, in_dir)
+ self.submit_path = self.create_submit_code_file('Test.java')
+ self.test_case_args = self._setup()
+
+ # Private Protocol ##########
+ def _setup(self):
+ super(JavaCodeEvaluator, self)._setup()
+
+ ref_path, test_case_path = self._set_test_code_file_path(self.ref_code_path)
+
+ # Set file paths
+ java_student_directory = os.getcwd() + '/'
+ java_ref_file_name = (ref_path.split('/')[-1]).split('.')[0]
+
+ # Set command variables
+        compile_command = 'javac {0}'.format(self.submit_path)
+ compile_main = ('javac {0} -classpath '
+ '{1} -d {2}').format(ref_path,
+ java_student_directory,
+ java_student_directory)
+ run_command_args = "java -cp {0} {1}".format(java_student_directory,
+ java_ref_file_name)
+ remove_user_output = "{0}{1}.class".format(java_student_directory,
+ 'Test')
+ remove_ref_output = "{0}{1}.class".format(java_student_directory,
+ java_ref_file_name)
+
+ return (ref_path, self.submit_path, compile_command, compile_main,
+ run_command_args, remove_user_output, remove_ref_output)
+
+ def _teardown(self):
+ # Delete the created file.
+ super(JavaCodeEvaluator, self)._teardown()
+ os.remove(self.submit_path)
+
+ def _check_code(self, ref_code_path, submit_code_path, compile_command,
+ compile_main, run_command_args, remove_user_output,
+ remove_ref_output):
+ """ Function validates student code using instructor code as
+ reference.The first argument ref_code_path, is the path to
+ instructor code, it is assumed to have executable permission.
+ The second argument submit_code_path, is the path to the student
+ code, it is assumed to have executable permission.
+
+ Returns
+ --------
+
+ returns (True, "Correct answer") : If the student function returns
+ expected output when called by reference code.
+
+ returns (False, error_msg): If the student function fails to return
+ expected output when called by reference code.
+
+ Returns (False, error_msg): If mandatory arguments are not files or
+ if the required permissions are not given to the file(s).
+
+ """
+ if not isfile(ref_code_path):
+ return False, "No file at %s or Incorrect path" % ref_code_path
+ if not isfile(submit_code_path):
+ return False, 'No file at %s or Incorrect path' % submit_code_path
+
+ success = False
+ ret = self._compile_command(compile_command)
+ proc, stdnt_stderr = ret
+ stdnt_stderr = self._remove_null_substitute_char(stdnt_stderr)
+
+ # Only if compilation is successful, the program is executed
+ # And tested with testcases
+ if stdnt_stderr == '':
+ ret = self._compile_command(compile_main)
+ proc, main_err = ret
+ main_err = self._remove_null_substitute_char(main_err)
+
+ if main_err == '':
+ ret = self._run_command(run_command_args, stdin=None,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ proc, stdout, stderr = ret
+ if proc.returncode == 0:
+ success, err = True, "Correct answer"
+ else:
+ err = stdout + "\n" + stderr
+ os.remove(remove_ref_output)
+ else:
+ err = "Error:"
+ try:
+ error_lines = main_err.splitlines()
+ for e in error_lines:
+ if ':' in e:
+ err = err + "\n" + e.split(":", 1)[1]
+ else:
+ err = err + "\n" + e
+ except:
+ err = err + "\n" + main_err
+ os.remove(remove_user_output)
+ else:
+ err = "Compilation Error:"
+ try:
+ error_lines = stdnt_stderr.splitlines()
+ for e in error_lines:
+ if ':' in e:
+ err = err + "\n" + e.split(":", 1)[1]
+ else:
+ err = err + "\n" + e
+ except:
+ err = err + "\n" + stdnt_stderr
+
+ return success, err
diff --git a/testapp/exam/java_files/main_array_sum.java b/testapp/exam/java_files/main_array_sum.java
new file mode 100644
index 0000000..5eae299
--- /dev/null
+++ b/testapp/exam/java_files/main_array_sum.java
@@ -0,0 +1,36 @@
+class main_array_sum
+{
+ public static <E> void check(E expect, E result)
+ {
+ if(result.equals(expect))
+ {
+ System.out.println("Correct:\nOutput expected "+expect+" and got "+result);
+ }
+ else
+ {
+ System.out.println("Incorrect:\nOutput expected "+expect+" but got "+result);
+ System.exit(1);
+ }
+ }
+ public static void main(String arg[])
+ {
+ int result;
+ Test t = new Test();
+ int x[] = {0,0,0,0,0};
+ result = t.array_sum(x);
+ System.out.println("Input submitted to the function: {0,0,0,0,0}");
+ check(0, result);
+ int a[] = {1,2,3,4,5};
+ result = t.array_sum(a);
+ System.out.println("Input submitted to the function: {1,2,3,4,5}");
+ check(15, result);
+ int b[] = {1,2,3,0,0};
+ result = t.array_sum(b);
+ System.out.println("Input submitted to the function: {1,2,3,0,0}");
+ check(6, result);
+ int c[] = {1,1,1,1,1};
+ result = t.array_sum(c);
+ System.out.println("Input submitted to the function: {1,1,1,1,1}");
+ check(5, result);
+ }
+}
diff --git a/testapp/exam/java_files/main_fact.java b/testapp/exam/java_files/main_fact.java
new file mode 100644
index 0000000..325dab6
--- /dev/null
+++ b/testapp/exam/java_files/main_fact.java
@@ -0,0 +1,29 @@
+class main_fact
+{
+ public static <E> void check(E expect, E result)
+ {
+ if(result.equals(expect))
+ {
+ System.out.println("Correct:\nOutput expected "+expect+" and got "+result);
+ }
+ else
+ {
+ System.out.println("Incorrect:\nOutput expected "+expect+" but got "+result);
+ System.exit(1);
+ }
+ }
+ public static void main(String arg[])
+ {
+ Test t = new Test();
+ int result;
+ result = t.factorial(0);
+ System.out.println("Input submitted to the function: 0");
+ check(1, result);
+ result = t.factorial(3);
+ System.out.println("Input submitted to the function: 3");
+ check(6, result);
+ result = t.factorial(4);
+ System.out.println("Input submitted to the function: 4");
+ check(24, result);
+ }
+}
diff --git a/testapp/exam/java_files/main_great.java b/testapp/exam/java_files/main_great.java
new file mode 100644
index 0000000..4bfcb1f
--- /dev/null
+++ b/testapp/exam/java_files/main_great.java
@@ -0,0 +1,39 @@
+class main_great
+{
+ public static <E> void check(E expect, E result)
+ {
+ if(result.equals(expect))
+ {
+ System.out.println("Correct:\nOutput expected "+expect+" and got "+result);
+ }
+ else
+ {
+ System.out.println("Incorrect:\nOutput expected "+expect+" but got "+result);
+ System.exit(1);
+ }
+ }
+ public static void main(String arg[])
+ {
+ Test t = new Test();
+ int result;
+ result = t.greatest(1, 3, 4);
+ System.out.println("Input submitted to the function: 1, 3, 4");
+ check(4, result);
+ result = t.greatest(5, 10, 3);
+ System.out.println("Input submitted to the function: 5, 10, 3");
+ check(10, result);
+ result = t.greatest(6, 1, 4);
+ System.out.println("Input submitted to the function: 6, 1, 4");
+ check(6, result);
+ result = t.greatest(6, 11, 14);
+ System.out.println("Input submitted to the function: 6, 11, 14");
+ check(14, result);
+ result = t.greatest(3, 31, 4);
+ System.out.println("Input submitted to the function: 3, 31, 4");
+ check(31, result);
+ result = t.greatest(26, 13, 3);
+ System.out.println("Input submitted to the function: 26, 13, 3");
+ check(26, result);
+
+ }
+}
diff --git a/testapp/exam/java_files/main_hello_name.java b/testapp/exam/java_files/main_hello_name.java
new file mode 100644
index 0000000..84bb282
--- /dev/null
+++ b/testapp/exam/java_files/main_hello_name.java
@@ -0,0 +1,29 @@
+class main_hello_name
+{
+ public static <E> void check(E expect, E result)
+ {
+ if(result.equals(expect))
+ {
+ System.out.println("Correct:\nOutput expected "+expect+" and got "+result);
+ }
+ else
+ {
+ System.out.println("Incorrect:\nOutput expected "+expect+" but got "+result);
+ System.exit(1);
+ }
+ }
+ public static void main(String arg[])
+ {
+ Test t = new Test();
+ String result;
+ result = t.hello_name("Raj");
+ System.out.println("Input submitted to the function: 'Raj'");
+ check("hello Raj", result);
+ result = t.hello_name("Pratham");
+ System.out.println("Input submitted to the function: 'Pratham'");
+ check("hello Pratham", result);
+ result = t.hello_name("Ram");
+ System.out.println("Input submitted to the function: 'Ram'");
+ check("hello Ram", result);
+ }
+}
diff --git a/testapp/exam/java_files/main_lastDigit.java b/testapp/exam/java_files/main_lastDigit.java
new file mode 100644
index 0000000..05439e2
--- /dev/null
+++ b/testapp/exam/java_files/main_lastDigit.java
@@ -0,0 +1,36 @@
+class main_lastDigit
+{
+ public static <E> void check(E expect, E result)
+ {
+ if(result.equals(expect))
+ {
+ System.out.println("Correct:\nOutput expected "+expect+" and got "+result+"\n");
+ }
+ else
+ {
+ System.out.println("Incorrect:\nOutput expected "+expect+" but got "+result+"\n");
+ System.exit(1);
+ }
+ }
+ public static void main(String arg[])
+ {
+ Test t = new Test();
+ boolean result;
+ result= t.lastDigit(12, 2, 13);
+ System.out.println("Input submitted to the function: 12, 2, 13");
+ check(true, result);
+ result = t.lastDigit(11, 52, 32);
+ System.out.println("Input submitted to the function: 11, 52, 32");
+ check(true, result);
+ result = t.lastDigit(6, 34, 22);
+ System.out.println("Input submitted to the function: 6, 34, 22");
+ check(false, result);
+ result = t.lastDigit(6, 46, 26);
+        System.out.println("Input submitted to the function: 6, 46, 26");
+ check(true, result);
+ result = t.lastDigit(91, 90, 92);
+        System.out.println("Input submitted to the function: 91, 90, 92");
+ check(false, result);
+
+ }
+}
diff --git a/testapp/exam/java_files/main_moreThan30.java b/testapp/exam/java_files/main_moreThan30.java
new file mode 100644
index 0000000..7da31cb
--- /dev/null
+++ b/testapp/exam/java_files/main_moreThan30.java
@@ -0,0 +1,36 @@
+class main_moreThan30
+{
+ public static <E> void check(E expect, E result)
+ {
+ if(result.equals(expect))
+ {
+ System.out.println("Correct:\nOutput expected "+expect+" and got "+result+"\n");
+ }
+ else
+ {
+ System.out.println("Incorrect:\nOutput expected "+expect+" but got "+result+"\n");
+ System.exit(1);
+ }
+ }
+ public static void main(String arg[])
+ {
+ Test t = new Test();
+ boolean result;
+ result= t.moreThan30(30);
+ System.out.println("Input submitted to the function: 30");
+ check(false, result);
+ result = t.moreThan30(151);
+ System.out.println("Input submitted to the function: 151");
+ check(true, result);
+ result = t.moreThan30(66);
+ System.out.println("Input submitted to the function: 66");
+ check(false, result);
+ result = t.moreThan30(63);
+ System.out.println("Input submitted to the function: 63");
+ check(true, result);
+ result = t.moreThan30(91);
+ System.out.println("Input submitted to the function: 91");
+ check(true, result);
+
+ }
+}
diff --git a/testapp/exam/java_files/main_palindrome.java b/testapp/exam/java_files/main_palindrome.java
new file mode 100644
index 0000000..c0745f9
--- /dev/null
+++ b/testapp/exam/java_files/main_palindrome.java
@@ -0,0 +1,29 @@
+class main_palindrome
+{
+ public static <E> void check(E expect, E result)
+ {
+ if(result.equals(expect))
+ {
+ System.out.println("Correct:\nOutput expected "+expect+" and got "+result+"\n");
+ }
+ else
+ {
+ System.out.println("Incorrect:\nOutput expected "+expect+" but got "+result+"\n");
+ System.exit(1);
+ }
+ }
+ public static void main(String arg[])
+ {
+ Test t = new Test();
+ boolean result;
+ result= t.palindrome(123);
+ System.out.println("Input submitted to the function: 123");
+ check(false, result);
+ result = t.palindrome(151);
+ System.out.println("Input submitted to the function: 151");
+ check(true, result);
+ result = t.palindrome(23432);
+ System.out.println("Input submitted to the function: 23432");
+ check(true, result);
+ }
+}
diff --git a/testapp/exam/java_files/main_square.java b/testapp/exam/java_files/main_square.java
new file mode 100644
index 0000000..5cb8c35
--- /dev/null
+++ b/testapp/exam/java_files/main_square.java
@@ -0,0 +1,32 @@
+class main_square
+{
+ public static <E> void check(E expect, E result)
+ {
+ if(result.equals(expect))
+ {
+ System.out.println("Correct:\nOutput expected "+expect+" and got "+result);
+ }
+ else
+ {
+ System.out.println("Incorrect:\nOutput expected "+expect+" but got "+result);
+ System.exit(1);
+ }
+ }
+ public static void main(String arg[])
+ {
+ Test t = new Test();
+ int result, input, output;
+ input = 0; output = 0;
+ result = t.square_num(input);
+ System.out.println("Input submitted to the function: "+input);
+ check(output, result);
+ input = 5; output = 25;
+ result = t.square_num(input);
+ System.out.println("Input submitted to the function: "+input);
+ check(output, result);
+ input = 6; output = 36;
+ result = t.square_num(input);
+ System.out.println("Input submitted to the function: "+input);
+ check(output, result);
+ }
+}
diff --git a/testapp/exam/language_registry.py b/testapp/exam/language_registry.py
new file mode 100644
index 0000000..76a23d7
--- /dev/null
+++ b/testapp/exam/language_registry.py
@@ -0,0 +1,36 @@
+from settings import code_evaluators
+import importlib
+
+registry = None
+
+def set_registry():
+ global registry
+ registry = _LanguageRegistry()
+
+def get_registry():
+ return registry
+
+class _LanguageRegistry(object):
+ def __init__(self):
+ self._register = {}
+ for language, module in code_evaluators.iteritems():
+ self._register[language] = None
+
+ # Public Protocol ##########
+ def get_class(self, language):
+ """ Get the code evaluator class for the given language """
+ if not self._register.get(language):
+ self._register[language] = code_evaluators.get(language)
+
+ cls = self._register[language]
+ module_name, class_name = cls.rsplit(".", 1)
+ # load the module, will raise ImportError if module cannot be loaded
+ get_module = importlib.import_module(module_name)
+ # get the class, will raise AttributeError if class cannot be found
+ get_class = getattr(get_module, class_name)
+ return get_class
+
+ def register(self, language, class_name):
+ """ Register a new code evaluator class for language"""
+ self._register[language] = class_name
+
diff --git a/testapp/exam/models.py b/testapp/exam/models.py
index 72fb51b..c5043dc 100644
--- a/testapp/exam/models.py
+++ b/testapp/exam/models.py
@@ -1,4 +1,5 @@
import datetime
+import json
from random import sample, shuffle
from django.db import models
from django.contrib.auth.models import User
@@ -19,8 +20,8 @@ class Profile(models.Model):
languages = (
("python", "Python"),
("bash", "Bash"),
- ("C", "C Language"),
- ("C++", "C++ Language"),
+ ("c", "C Language"),
+ ("cpp", "C++ Language"),
("java", "Java Language"),
("scilab", "Scilab"),
)
@@ -59,9 +60,13 @@ class Question(models.Model):
# Number of points for the question.
points = models.FloatField(default=1.0)
- # Test cases for the question in the form of code that is run.
+ # Answer for MCQs.
test = models.TextField(blank=True)
+    # Test cases file paths (comma separated for reference code path and test case code path)
+ # Applicable for CPP, C, Java and Scilab
+ ref_code_path = models.TextField(blank=True)
+
# Any multiple choice options. Place one option per line.
options = models.TextField(blank=True)
@@ -82,6 +87,41 @@ class Question(models.Model):
# Tags for the Question.
tags = TaggableManager()
+ def consolidate_answer_data(self, test_cases, user_answer):
+ test_case_data_dict = []
+ question_info_dict = {}
+
+ for test_case in test_cases:
+ kw_args_dict = {}
+ pos_args_list = []
+
+ test_case_data = {}
+ test_case_data['test_id'] = test_case.id
+ test_case_data['func_name'] = test_case.func_name
+ test_case_data['expected_answer'] = test_case.expected_answer
+
+ if test_case.kw_args:
+ for args in test_case.kw_args.split(","):
+ arg_name, arg_value = args.split("=")
+ kw_args_dict[arg_name.strip()] = arg_value.strip()
+
+ if test_case.pos_args:
+ for args in test_case.pos_args.split(","):
+ pos_args_list.append(args.strip())
+
+ test_case_data['kw_args'] = kw_args_dict
+ test_case_data['pos_args'] = pos_args_list
+ test_case_data_dict.append(test_case_data)
+
+ # question_info_dict['language'] = self.language
+ question_info_dict['id'] = self.id
+ question_info_dict['user_answer'] = user_answer
+ question_info_dict['test_parameter'] = test_case_data_dict
+ question_info_dict['ref_code_path'] = self.ref_code_path
+ question_info_dict['test'] = self.test
+
+ return json.dumps(question_info_dict)
+
def __unicode__(self):
return self.summary
@@ -396,3 +436,20 @@ class AssignmentUpload(models.Model):
user = models.ForeignKey(Profile)
assignmentQuestion = models.ForeignKey(Question)
assignmentFile = models.FileField(upload_to=get_assignment_dir)
+
+
+################################################################################
+class TestCase(models.Model):
+ question = models.ForeignKey(Question, blank=True, null = True)
+
+ # Test case function name
+ func_name = models.CharField(blank=True, null = True, max_length=200)
+
+ # Test case Keyword arguments in dict form
+ kw_args = models.TextField(blank=True, null = True)
+
+ # Test case Positional arguments in list form
+ pos_args = models.TextField(blank=True, null = True)
+
+ # Test case Expected answer in list form
+ expected_answer = models.TextField(blank=True, null = True)
diff --git a/testapp/exam/python_code_evaluator.py b/testapp/exam/python_code_evaluator.py
new file mode 100644
index 0000000..0c473cf
--- /dev/null
+++ b/testapp/exam/python_code_evaluator.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+import sys
+import traceback
+import os
+from os.path import join
+import importlib
+
+# local imports
+from code_evaluator import CodeEvaluator
+
+
+class PythonCodeEvaluator(CodeEvaluator):
+ """Tests the Python code obtained from Code Server"""
+ # Private Protocol ##########
+ def _check_code(self):
+ success = False
+
+ try:
+ tb = None
+ test_code = self._create_test_case()
+ submitted = compile(self.user_answer, '<string>', mode='exec')
+ g = {}
+ exec submitted in g
+ _tests = compile(test_code, '<string>', mode='exec')
+ exec _tests in g
+ except AssertionError:
+ type, value, tb = sys.exc_info()
+ info = traceback.extract_tb(tb)
+ fname, lineno, func, text = info[-1]
+ text = str(test_code).splitlines()[lineno-1]
+ err = "{0} {1} in: {2}".format(type.__name__, str(value), text)
+ else:
+ success = True
+ err = 'Correct answer'
+
+ del tb
+ return success, err
+
+ def _create_test_case(self):
+ """
+ Create assert based test cases in python
+ """
+ test_code = ""
+ if self.test:
+ return self.test
+ elif self.test_case_data:
+ for test_case in self.test_case_data:
+ pos_args = ", ".join(str(i) for i in test_case.get('pos_args')) \
+ if test_case.get('pos_args') else ""
+ kw_args = ", ".join(str(k+"="+a) for k, a
+ in test_case.get('kw_args').iteritems()) \
+ if test_case.get('kw_args') else ""
+ args = pos_args + ", " + kw_args if pos_args and kw_args \
+ else pos_args or kw_args
+ function_name = test_case.get('func_name')
+ expected_answer = test_case.get('expected_answer')
+
+ tcode = "assert {0}({1}) == {2}".format(function_name, args,
+ expected_answer)
+ test_code += tcode + "\n"
+ return test_code
diff --git a/testapp/exam/scilab_code_evaluator.py b/testapp/exam/scilab_code_evaluator.py
new file mode 100644
index 0000000..53640cc
--- /dev/null
+++ b/testapp/exam/scilab_code_evaluator.py
@@ -0,0 +1,95 @@
+#!/usr/bin/env python
+import traceback
+import os
+from os.path import join, isfile
+import subprocess
+import re
+import importlib
+
+# local imports
+from code_evaluator import CodeEvaluator
+
+
+class ScilabCodeEvaluator(CodeEvaluator):
+ """Tests the Scilab code obtained from Code Server"""
+ def __init__(self, test_case_data, test, language, user_answer,
+ ref_code_path=None, in_dir=None):
+ super(ScilabCodeEvaluator, self).__init__(test_case_data, test,
+ language, user_answer,
+ ref_code_path, in_dir)
+ self.submit_path = self.create_submit_code_file('function.sci')
+ self.test_case_args = self._setup()
+
+ # Private Protocol ##########
+ def _setup(self):
+ super(ScilabCodeEvaluator, self)._setup()
+
+ ref_path, test_case_path = self._set_test_code_file_path(self.ref_code_path)
+
+ return ref_path, # Return as a tuple
+
+ def _teardown(self):
+ # Delete the created file.
+ super(ScilabCodeEvaluator, self)._teardown()
+ os.remove(self.submit_path)
+
+ def _check_code(self, ref_path):
+ success = False
+
+ cmd = 'printf "lines(0)\nexec(\'{0}\',2);\nquit();"'.format(ref_path)
+ cmd += ' | timeout 8 scilab-cli -nb'
+ ret = self._run_command(cmd,
+ shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ proc, stdout, stderr = ret
+
+ # Get only the error.
+ stderr = self._get_error(stdout)
+ if stderr is None:
+ # Clean output
+ stdout = self._strip_output(stdout)
+ if proc.returncode == 5:
+ success, err = True, "Correct answer"
+ else:
+                err = stdout
+        else:
+            err = stderr
+
+ return success, err
+
+ def _remove_scilab_exit(self, string):
+ """
+ Removes exit, quit and abort from the scilab code
+ """
+ new_string = ""
+ i = 0
+ for line in string.splitlines():
+ new_line = re.sub(r"exit.*$", "", line)
+ new_line = re.sub(r"quit.*$", "", new_line)
+ new_line = re.sub(r"abort.*$", "", new_line)
+ if line != new_line:
+ i = i + 1
+ new_string = new_string + '\n' + new_line
+ return new_string, i
+
+ def _get_error(self, string):
+ """
+ Fetches only the error from the string.
+ Returns None if no error.
+ """
+ obj = re.search("!.+\n.+", string)
+ if obj:
+ return obj.group()
+ return None
+
+ def _strip_output(self, out):
+ """
+ Cleans whitespace from the output
+ """
+ strip_out = "Message"
+ for l in out.split('\n'):
+ if l.strip():
+ strip_out = strip_out+"\n"+l.strip()
+ return strip_out
+
diff --git a/testapp/exam/scilab_files/test_add.sce b/testapp/exam/scilab_files/test_add.sce
new file mode 100644
index 0000000..a317cdb
--- /dev/null
+++ b/testapp/exam/scilab_files/test_add.sce
@@ -0,0 +1,29 @@
+mode(-1)
+exec("function.sci",-1);
+i = 0
+p = add(3,5);
+correct = (p == 8);
+if correct then
+ i=i+1
+end
+disp("Input submitted 3 and 5")
+disp("Expected output 8 got " + string(p))
+p = add(22,-20);
+correct = (p==2);
+if correct then
+ i=i+1
+end
+disp("Input submitted 22 and -20")
+disp("Expected output 2 got " + string(p))
+p =add(91,0);
+correct = (p==91);
+if correct then
+ i=i+1
+end
+disp("Input submitted 91 and 0")
+disp("Expected output 91 got " + string(p))
+if i==3 then
+ exit(5);
+else
+ exit(3);
+end
diff --git a/testapp/exam/settings.py b/testapp/exam/settings.py
index 682516f..93f90a9 100644
--- a/testapp/exam/settings.py
+++ b/testapp/exam/settings.py
@@ -18,3 +18,11 @@ SERVER_TIMEOUT = 2
# reason set this to the root you have to serve at. In the above example
# host.org/foo/exam set URL_ROOT='/foo'
URL_ROOT = ''
+
+code_evaluators = {"python": "python_code_evaluator.PythonCodeEvaluator",
+                   "c": "cpp_code_evaluator.CppCodeEvaluator",
+                   "cpp": "cpp_code_evaluator.CppCodeEvaluator",
+                   "java": "java_code_evaluator.JavaCodeEvaluator",
+                   "bash": "bash_code_evaluator.BashCodeEvaluator",
+                   "scilab": "scilab_code_evaluator.ScilabCodeEvaluator",
+                   }
diff --git a/testapp/exam/static/exam/js/add_question.js b/testapp/exam/static/exam/js/add_question.js
index 267cdb2..946c139 100644
--- a/testapp/exam/static/exam/js/add_question.js
+++ b/testapp/exam/static/exam/js/add_question.js
@@ -153,8 +153,7 @@ function textareaformat()
if(value == 'mcq' || value == 'mcc')
{
document.getElementById('id_options').style.visibility='visible';
- document.getElementById('label_option').innerHTML="Options :"
-
+ document.getElementById('label_option').innerHTML="Options :";
}
else
{
@@ -168,7 +167,6 @@ function textareaformat()
{
document.getElementById('id_options').style.visibility='visible';
document.getElementById('label_option').innerHTML="Options :"
-
}
else
{
@@ -189,8 +187,9 @@ function autosubmit()
if(type.value == 'select')
{
type.style.border = 'solid red';
- return false;
- }
+ return false;
+ }
+
if (type.value == 'mcq' || type.value == 'mcc')
{
diff --git a/testapp/exam/templates/exam/add_question.html b/testapp/exam/templates/exam/add_question.html
index b0b22b1..43f09e1 100644
--- a/testapp/exam/templates/exam/add_question.html
+++ b/testapp/exam/templates/exam/add_question.html
@@ -27,14 +27,25 @@
<tr><td>Points:<td><button class="btn-mini" type="button" onClick="increase(frm);">+</button>{{ form.points }}<button class="btn-mini" type="button" onClick="decrease(frm);">-</button>{{ form.points.errors }}
<tr><td><strong>Rendered: </strong><td><p id='my'></p>
<tr><td>Description: <td>{{ form.description}} {{form.description.errors}}
- <tr><td>Test: <td>{{ form.test }}{{form.test.errors}}
<tr><td>Snippet: <td>{{ form.snippet }}{{ form.snippet.errors }}</td></tD></td></tr>
<tr><td>Tags: <td>{{ form.tags }}
- <tr><td id='label_option'>Options: <td>{{ form.options }} {{form.options.errors}}
-
-
+ <tr><td id='label_option'>Options: <td>{{ form.options }} {{form.options.errors}}
+ <tr><td id='label_solution'>Test: <td>{{ form.solution }} {{form.solution.errors}}
+ <tr><td id='label_ref_code_path'>Reference Code Path: <td>{{ form.ref_code_path }} {{form.ref_code_path.errors}}
+
+ <form method="post" action="">
+ {% if formset%}
+ {{ formset.management_form }}
+ {% for form in formset %}
+ {{ form }}
+ {% endfor %}
+ {% endif %}
+ </form>
</table></center>
- <center><button class="btn" type="submit" name="savequestion">Save</button>
+ <center><button class="btn" type="submit" name="add_test">Add Test Case</button>
+ <button class="btn" type="submit" name="delete_test">Remove Test Case</button>
+ </center><br>
+ <center><button class="btn" type="submit" name="save_question">Save</button>
<button class="btn" type="button" name="button" onClick='location.replace("{{URL_ROOT}}/exam/manage/questions/");'>Cancel</button> </center>
</form>
{% endblock %}
diff --git a/testapp/exam/templates/exam/edit_question.html b/testapp/exam/templates/exam/edit_question.html
index b28cc3e..6deca4a 100644
--- a/testapp/exam/templates/exam/edit_question.html
+++ b/testapp/exam/templates/exam/edit_question.html
@@ -21,29 +21,37 @@
<table>
- {% for form in forms %}
+ {% for question, test in data_list %}
- <tr><td height=10><a id='a{{forloop.counter}}' onClick="data('contentDiv{{forloop.counter}}','myContent{{forloop.counter}}','a{{forloop.counter}}','{{form.summary.value}}');" style='cursor:pointer;'>{{form.summary.value}}</a>
-
+ <tr><td height=10><a id='a{{forloop.counter}}' onClick="data('contentDiv{{forloop.counter}}','myContent{{forloop.counter}}','a{{forloop.counter}}','{{question.summary.value}}');" style='cursor:pointer;'>{{question.summary.value}}</a>
+
<div id="contentDiv{{forloop.counter}}" style="display:none;">
<div id="myContent{{forloop.counter}}" style="display: none;">
-
- <center><table class=span1>
- <tr><td><b>Summary:</b> <td>{{ form.summary }}{{ form.summary.errors }}
- <tr><td><b> Language: </b><td> {{form.language}}{{form.language.errors}}
- <tr><td><b> Active: </b><td>&nbsp; {{ form.active }}{{form.active.errors}} &nbsp; Type: &nbsp;{{ form.type }}{{form.type.errors}}
- <tr><td><b>Points:<td><button class="btn-mini" name={{forloop.counter}} type="button" onClick="increase(frm,{{forloop.counter}});">+</button>{{ form.points }}<button class="btn-mini" type="button" onClick="decrease(frm,{{forloop.counter}});">-</button>{{ form.points.errors }}
+
+ <center><table class=span1>
+ <tr><td><b>Summary:</b> <td>{{ question.summary }}{{ question.summary.errors }}
+ <tr><td><b> Language: </b><td> {{question.language}}{{question.language.errors}}
+ <tr><td><b> Active: </b><td>&nbsp; {{ question.active }}{{question.active.errors}} &nbsp; Type: &nbsp;{{ question.type }}{{question.type.errors}}
+ <tr><td><b>Points:<td><button class="btn-mini" name={{forloop.counter}} type="button" onClick="increase(frm,{{forloop.counter}});">+</button>{{ question.points }}<button class="btn-mini" type="button" onClick="decrease(frm,{{forloop.counter}});">-</button>{{ question.points.errors }}
<tr><td><strong>Rendered: </strong><td><p id='my{{forloop.counter}}'></p>
- <tr><td><b>Description: <td>{{ form.description }} {{form.description.errors}}
- <tr><td><b>Test: <td>{{ form.test }}{{form.test.errors}}
- <tr><td><b>Snippet: <td>{{ form.snippet }}{{ form.snippet.errors }}</td></b></td></tr>
- <tr><td><b>Tags: </b><td>{{ form.tags }}
- <tr><td id='label_option{{forloop.counter}}'><b>Options:<td>{{ form.options }} {{form.options.errors}} {{form.options.helptext}}
+ <tr><td><b>Description: <td>{{ question.description }}
+ {{question.description.errors}} <tr><td><b>Test: <td>
+ {{ question.test }}{{question.test.errors}}
+ <tr><td><b>Snippet: <td>{{ question.snippet }}{{ question.snippet.errors }}
+ </td></b></td></tr>
+ <tr><td><b>Tags: </b><td>{{ question.tags }}
+ <tr><td id='label_option{{forloop.counter}}'><b>Options:<td>{{ question.options }}
+ {{question.options.errors}} {{question.options.helptext}}
+ </table></center>
+ <center><table class=span1>
+ {{ test }}
</table></center>
</div>
</div>
{% endfor %}
</table></center>
+
+
{% for i in data %}
<input type=hidden name='questions' value="{{ i }}" />
{% endfor %}
diff --git a/testapp/exam/tests.py b/testapp/exam/tests.py
index d76e4f8..ff48c25 100644
--- a/testapp/exam/tests.py
+++ b/testapp/exam/tests.py
@@ -1,8 +1,7 @@
from django.utils import unittest
from exam.models import User, Profile, Question, Quiz, QuestionPaper,\
- QuestionSet, AnswerPaper, Answer
-import datetime
-
+ QuestionSet, AnswerPaper, Answer, TestCase
+import datetime, json
def setUpModule():
# create user profile
@@ -51,12 +50,31 @@ class ProfileTestCases(unittest.TestCase):
class QuestionTestCases(unittest.TestCase):
def setUp(self):
# Single question details
+ # self.question = Question(summary='Demo question', language='Python',
+ # type='Code', active=True,
+ # description='Write a function', points=1.0,
+ # test='Test Cases', snippet='def myfunc()')
self.question = Question(summary='Demo question', language='Python',
type='Code', active=True,
description='Write a function', points=1.0,
- test='Test Cases', snippet='def myfunc()')
+ snippet='def myfunc()')
self.question.save()
self.question.tags.add('python', 'function')
+ self.testcase = TestCase(question=self.question,
+ func_name='def myfunc', kw_args='a=10,b=11',
+ pos_args='12,13', expected_answer='15')
+ answer_data = {"user_answer": "demo_answer",
+ "test_parameter": [{"func_name": "def myfunc",
+ "expected_answer": "15",
+ "test_id": self.testcase.id,
+ "pos_args": ["12", "13"],
+ "kw_args": {"a": "10",
+ "b": "11"}
+ }],
+ "id": self.question.id,
+ "language": "Python"}
+ self.answer_data_json = json.dumps(answer_data)
+ self.user_answer = "demo_answer"
def test_question(self):
""" Test question """
@@ -67,13 +85,40 @@ class QuestionTestCases(unittest.TestCase):
self.assertEqual(self.question.description, 'Write a function')
self.assertEqual(self.question.points, 1.0)
self.assertTrue(self.question.active)
- self.assertEqual(self.question.test, 'Test Cases')
self.assertEqual(self.question.snippet, 'def myfunc()')
tag_list = []
for tag in self.question.tags.all():
tag_list.append(tag.name)
self.assertEqual(tag_list, ['python', 'function'])
+ def test_consolidate_answer_data(self):
+ """ Test consolidate_answer_data function """
+ result = self.question.consolidate_answer_data([self.testcase],
+ self.user_answer)
+ self.assertEqual(result, self.answer_data_json)
+
+
+
+###############################################################################
+class TestCaseTestCases(unittest.TestCase):
+ def setUp(self):
+ self.question = Question(summary='Demo question', language='Python',
+ type='Code', active=True,
+ description='Write a function', points=1.0,
+ snippet='def myfunc()')
+ self.question.save()
+ self.testcase = TestCase(question=self.question,
+ func_name='def myfunc', kw_args='a=10,b=11',
+ pos_args='12,13', expected_answer='15')
+
+ def test_testcase(self):
+ """ Test question """
+ self.assertEqual(self.testcase.question, self.question)
+ self.assertEqual(self.testcase.func_name, 'def myfunc')
+ self.assertEqual(self.testcase.kw_args, 'a=10,b=11')
+ self.assertEqual(self.testcase.pos_args, '12,13')
+ self.assertEqual(self.testcase.expected_answer, '15')
+
###############################################################################
class QuizTestCases(unittest.TestCase):
diff --git a/testapp/exam/views.py b/testapp/exam/views.py
index 11aca06..5b7baac 100644
--- a/testapp/exam/views.py
+++ b/testapp/exam/views.py
@@ -14,12 +14,13 @@ from django.db.models import Sum
from django.views.decorators.csrf import csrf_exempt
from taggit.models import Tag
from itertools import chain
+import json
# Local imports.
from testapp.exam.models import Quiz, Question, QuestionPaper, QuestionSet
-from testapp.exam.models import Profile, Answer, AnswerPaper, User
+from testapp.exam.models import Profile, Answer, AnswerPaper, User, TestCase
from testapp.exam.forms import UserRegisterForm, UserLoginForm, QuizForm,\
- QuestionForm, RandomQuestionForm
-from testapp.exam.xmlrpc_clients import code_server
+ QuestionForm, RandomQuestionForm, TestCaseFormSet
+from exam.xmlrpc_clients import code_server
from settings import URL_ROOT
from testapp.exam.models import AssignmentUpload
@@ -281,16 +282,14 @@ def edit_quiz(request):
def edit_question(request):
- """Edit the list of questions seleted by the user for editing."""
+ """Edit the list of questions selected by the user for editing."""
user = request.user
if not user.is_authenticated() or not is_moderator(user):
raise Http404('You are not allowed to view this page!')
-
question_list = request.POST.getlist('questions')
summary = request.POST.getlist('summary')
description = request.POST.getlist('description')
points = request.POST.getlist('points')
- test = request.POST.getlist('test')
options = request.POST.getlist('options')
type = request.POST.getlist('type')
active = request.POST.getlist('active')
@@ -298,14 +297,21 @@ def edit_question(request):
snippet = request.POST.getlist('snippet')
for j, question_id in enumerate(question_list):
question = Question.objects.get(id=question_id)
+ test_case_formset = TestCaseFormSet(request.POST, prefix='test', instance=question)
+ if test_case_formset.is_valid():
+ test_case_instance = test_case_formset.save(commit=False)
+ for i in test_case_instance:
+ i.save()
+
question.summary = summary[j]
question.description = description[j]
question.points = points[j]
- question.test = test[j]
question.options = options[j]
question.active = active[j]
question.language = language[j]
question.snippet = snippet[j]
+ question.ref_code_path = ref_code_path[j]
+ question.test = test[j]
question.type = type[j]
question.save()
return my_redirect("/exam/manage/questions")
@@ -314,6 +320,16 @@ def edit_question(request):
def add_question(request, question_id=None):
"""To add a new question in the database.
Create a new question and store it."""
+
+ def add_or_delete_test_form(post_request, instance):
+ request_copy = post_request.copy()
+ if 'add_test' in post_request:
+ request_copy['test-TOTAL_FORMS'] = int(request_copy['test-TOTAL_FORMS']) + 1
+ elif 'delete_test' in post_request:
+ request_copy['test-TOTAL_FORMS'] = int(request_copy['test-TOTAL_FORMS']) - 1
+ test_case_formset = TestCaseFormSet(request_copy, prefix='test', instance=instance)
+ return test_case_formset
+
user = request.user
ci = RequestContext(request)
if not user.is_authenticated() or not is_moderator(user):
@@ -321,44 +337,88 @@ def add_question(request, question_id=None):
if request.method == "POST":
form = QuestionForm(request.POST)
if form.is_valid():
- data = form.cleaned_data
if question_id is None:
- form.save()
- question = Question.objects.order_by("-id")[0]
- tags = form['tags'].data.split(',')
- for i in range(0, len(tags)-1):
- tag = tags[i].strip()
- question.tags.add(tag)
- return my_redirect("/exam/manage/questions")
+ test_case_formset = add_or_delete_test_form(request.POST, form.save(commit=False))
+ if 'save_question' in request.POST:
+ qtn = form.save(commit=False)
+ test_case_formset = TestCaseFormSet(request.POST, prefix='test', instance=qtn)
+ form.save()
+ question = Question.objects.order_by("-id")[0]
+ tags = form['tags'].data.split(',')
+ for i in range(0, len(tags)-1):
+ tag = tags[i].strip()
+ question.tags.add(tag)
+ if test_case_formset.is_valid():
+ test_case_formset.save()
+ else:
+ return my_render_to_response('exam/add_question.html',
+ {'form': form,
+ 'formset': test_case_formset},
+ context_instance=ci)
+
+ return my_redirect("/exam/manage/questions")
+
+ return my_render_to_response('exam/add_question.html',
+ {'form': form,
+ 'formset': test_case_formset},
+ context_instance=ci)
+
else:
d = Question.objects.get(id=question_id)
- d.summary = form['summary'].data
- d.description = form['description'].data
- d.points = form['points'].data
- d.test = form['test'].data
- d.options = form['options'].data
- d.type = form['type'].data
- d.active = form['active'].data
- d.language = form['language'].data
- d.snippet = form['snippet'].data
- d.save()
- question = Question.objects.get(id=question_id)
- for tag in question.tags.all():
- question.tags.remove(tag)
- tags = form['tags'].data.split(',')
- for i in range(0, len(tags)-1):
- tag = tags[i].strip()
- question.tags.add(tag)
- return my_redirect("/exam/manage/questions")
+ test_case_formset = add_or_delete_test_form(request.POST, d)
+ if 'save_question' in request.POST:
+ d.summary = form['summary'].data
+ d.description = form['description'].data
+ d.points = form['points'].data
+ d.options = form['options'].data
+ d.type = form['type'].data
+ d.active = form['active'].data
+ d.language = form['language'].data
+ d.snippet = form['snippet'].data
+ d.ref_code_path = form['ref_code_path'].data
+ d.test = form['test'].data
+ d.save()
+ question = Question.objects.get(id=question_id)
+ for tag in question.tags.all():
+ question.tags.remove(tag)
+ tags = form['tags'].data.split(',')
+ for i in range(0, len(tags)-1):
+ tag = tags[i].strip()
+ question.tags.add(tag)
+
+ test_case_formset = TestCaseFormSet(request.POST, prefix='test', instance=question)
+ if test_case_formset.is_valid():
+ test_case_instance = test_case_formset.save(commit=False)
+ for i in test_case_instance:
+ i.save()
+ else:
+ return my_render_to_response('exam/add_question.html',
+ {'form': form,
+ 'formset': test_case_formset},
+ context_instance=ci)
+
+
+ return my_redirect("/exam/manage/questions")
+ return my_render_to_response('exam/add_question.html',
+ {'form': form,
+ 'formset': test_case_formset},
+ context_instance=ci)
+
else:
+ test_case_formset = add_or_delete_test_form(request.POST, form.save(commit=False))
return my_render_to_response('exam/add_question.html',
- {'form': form},
+ {'form': form,
+ 'formset': test_case_formset},
context_instance=ci)
else:
+ form = QuestionForm()
+ test_case_formset = TestCaseFormSet(prefix='test', instance=Question())
if question_id is None:
form = QuestionForm()
+ test_case_formset = TestCaseFormSet(prefix='test', instance=Question())
return my_render_to_response('exam/add_question.html',
- {'form': form},
+ {'form': form,
+ 'formset': test_case_formset},
context_instance=ci)
else:
d = Question.objects.get(id=question_id)
@@ -366,12 +426,13 @@ def add_question(request, question_id=None):
form.initial['summary'] = d.summary
form.initial['description'] = d.description
form.initial['points'] = d.points
- form.initial['test'] = d.test
form.initial['options'] = d.options
form.initial['type'] = d.type
form.initial['active'] = d.active
form.initial['language'] = d.language
form.initial['snippet'] = d.snippet
+ form.initial['ref_code_path'] = d.ref_code_path
+ form.initial['test'] = d.test
form_tags = d.tags.all()
form_tags_split = form_tags.values('name')
initial_tags = ""
@@ -380,8 +441,13 @@ def add_question(request, question_id=None):
if (initial_tags == ","):
initial_tags = ""
form.initial['tags'] = initial_tags
+
+ test_case_formset = TestCaseFormSet(prefix='test',
+ instance=d)
+
return my_render_to_response('exam/add_question.html',
- {'form': form},
+ {'form': form,
+ 'formset': test_case_formset},
context_instance=ci)
@@ -848,6 +914,10 @@ def check(request, q_id, attempt_num=None, questionpaper_id=None):
if not user.is_authenticated() or paper.end_time < datetime.datetime.now():
return my_redirect('/exam/login/')
question = get_object_or_404(Question, pk=q_id)
+ q_paper = QuestionPaper.objects.get(id=questionpaper_id)
+ paper = AnswerPaper.objects.get(user=request.user, question_paper=q_paper)
+ test_cases = TestCase.objects.filter(question=question)
+
snippet_code = request.POST.get('snippet')
user_code = request.POST.get('answer')
skip = request.POST.get('skip', None)
@@ -876,7 +946,8 @@ def check(request, q_id, attempt_num=None, questionpaper_id=None):
assign.save()
user_answer = 'ASSIGNMENT UPLOADED'
else:
- user_answer = snippet_code + "\n" + user_code
+ user_code = request.POST.get('answer')
+ user_answer = snippet_code + "\n" + user_code if snippet_code else user_code
new_answer = Answer(question=question, answer=user_answer,
correct=False)
@@ -887,18 +958,20 @@ def check(request, q_id, attempt_num=None, questionpaper_id=None):
# questions, we obtain the results via XML-RPC with the code executed
# safely in a separate process (the code_server.py) running as nobody.
if not question.type == 'upload':
- correct, success, err_msg = validate_answer(user, user_answer, question)
+ json_data = question.consolidate_answer_data(test_cases, user_answer) \
+ if question.type == 'code' else None
+ correct, result = validate_answer(user, user_answer, question, json_data)
if correct:
new_answer.correct = correct
new_answer.marks = question.points
- new_answer.error = err_msg
+ new_answer.error = result.get('error')
success_msg = True
else:
- new_answer.error = err_msg
+ new_answer.error = result.get('error')
new_answer.save()
time_left = paper.time_left()
- if not success: # Should only happen for non-mcq questions.
+ if not result.get('success'): # Should only happen for non-mcq questions.
if time_left == 0:
reason = 'Your time is up!'
return complete(request, reason, attempt_num, questionpaper_id)
@@ -913,8 +986,7 @@ def check(request, q_id, attempt_num=None, questionpaper_id=None):
if old_answer:
old_answer[0].answer = user_code
old_answer[0].save()
- context = {'question': question, 'questions': questions,
- 'error_message': err_msg,
+ context = {'question': question, 'error_message': result.get('error'),
'paper': paper, 'last_attempt': user_code,
'quiz_name': paper.question_paper.quiz.description,
'time_left': time_left, 'to_attempt': to_attempt,
@@ -933,7 +1005,7 @@ def check(request, q_id, attempt_num=None, questionpaper_id=None):
questionpaper_id, success_msg)
-def validate_answer(user, user_answer, question):
+def validate_answer(user, user_answer, question, json_data=None):
"""
Checks whether the answer submitted by the user is right or wrong.
If right then returns correct = True, success and
@@ -942,9 +1014,9 @@ def validate_answer(user, user_answer, question):
only one attempt are allowed for them.
For code questions success is True only if the answer is correct.
"""
- success = True
+
+ result = {'success': True, 'error': 'Incorrect answer'}
correct = False
- message = 'Incorrect answer'
if user_answer is not None:
if question.type == 'mcq':
@@ -958,11 +1030,12 @@ def validate_answer(user, user_answer, question):
message = 'Correct answer'
elif question.type == 'code':
user_dir = get_user_dir(user)
- success, message = code_server.run_code(user_answer, question.test,
- user_dir, question.language)
- if success:
+ json_result = code_server.run_code(question.language, json_data, user_dir)
+ result = json.loads(json_result)
+ if result.get('success'):
correct = True
- return correct, success, message
+
+ return correct, result
def quit(request, attempt_num=None, questionpaper_id=None):
@@ -1167,18 +1240,20 @@ def show_all_questions(request):
data = request.POST.getlist('question')
forms = []
+ formsets = []
for j in data:
d = Question.objects.get(id=j)
form = QuestionForm()
form.initial['summary'] = d.summary
form.initial['description'] = d.description
form.initial['points'] = d.points
- form.initial['test'] = d.test
form.initial['options'] = d.options
form.initial['type'] = d.type
form.initial['active'] = d.active
form.initial['language'] = d.language
form.initial['snippet'] = d.snippet
+ form.initial['ref_code_path'] = d.ref_code_path
+ form.initial['test'] = d.test
form_tags = d.tags.all()
form_tags_split = form_tags.values('name')
initial_tags = ""
@@ -1188,8 +1263,13 @@ def show_all_questions(request):
initial_tags = ""
form.initial['tags'] = initial_tags
forms.append(form)
+ test_case_formset = TestCaseFormSet(prefix='test', instance=d)
+ formsets.append(test_case_formset)
+ data_list = zip(forms, formsets)
+
return my_render_to_response('exam/edit_question.html',
- {'forms': forms, 'data': data},
+ {'data': data,
+ 'data_list': data_list},
context_instance=ci)
else:
questions = Question.objects.all()
diff --git a/testapp/exam/xmlrpc_clients.py b/testapp/exam/xmlrpc_clients.py
index 14ebf27..8f5642e 100644
--- a/testapp/exam/xmlrpc_clients.py
+++ b/testapp/exam/xmlrpc_clients.py
@@ -21,15 +21,8 @@ class CodeServerProxy(object):
def __init__(self):
pool_url = 'http://localhost:%d' % (SERVER_POOL_PORT)
self.pool_server = ServerProxy(pool_url)
- self.methods = {"python": 'run_python_code',
- "bash": 'run_bash_code',
- "C": "run_c_code",
- "C++": "run_cplus_code",
- "java": "run_java_code",
- "scilab": "run_scilab_code",
- }
- def run_code(self, answer, test_code, user_dir, language):
+ def run_code(self, language, json_data, user_dir):
"""Tests given code (`answer`) with the `test_code` supplied. If the
optional `in_dir` keyword argument is supplied it changes the directory
to that directory (it does not change it back to the original when
@@ -38,26 +31,28 @@ class CodeServerProxy(object):
Parameters
----------
- answer : str
- The user's answer for the question.
+ json_data contains;
+ user_answer : str
+ The user's answer for the question.
test_code : str
The test code to check the user code with.
- user_dir : str (directory)
- The directory to run the tests inside.
language : str
The programming language to use.
+ user_dir : str (directory)
+ The directory to run the tests inside.
+
+
Returns
-------
- A tuple: (success, error message).
+ A json string of a dict: {success: success, err: error message}.
"""
- method_name = self.methods[language]
+
try:
server = self._get_server()
- method = getattr(server, method_name)
- result = method(answer, test_code, user_dir)
+ result = server.check_code(language, json_data, user_dir)
except ConnectionError:
- result = [False, 'Unable to connect to any code servers!']
+ result = json.dumps({'success': False, 'error': 'Unable to connect to any code servers!'})
return result
def _get_server(self):