author     Prabhu Ramachandran  2015-05-12 20:20:43 +0530
committer  Prabhu Ramachandran  2015-05-12 20:20:43 +0530
commit     a022e0145ec8fb1622d58c2e2281c016b1d45b01 (patch)
tree       1c0c3f2e8605d6f36405c57cbe5de9a895a47958
parent     cd9f2542d09db0e4a352dd410f626f27e23c37e4 (diff)
parent     5b23647de575fd90552807260a4b8e0a96ab6afe (diff)
download   online_test-a022e0145ec8fb1622d58c2e2281c016b1d45b01.tar.gz
           online_test-a022e0145ec8fb1622d58c2e2281c016b1d45b01.tar.bz2
           online_test-a022e0145ec8fb1622d58c2e2281c016b1d45b01.zip
Merge pull request #41 from ankitjavalkar/code-server-redesign-mymaster2
Code server redesign
-rw-r--r--  testapp/docs/sample_questions.py  84
-rw-r--r--  testapp/docs/sample_questions.xml  43
-rw-r--r--  testapp/exam/admin.py  3
-rw-r--r--  testapp/exam/bash_code_evaluator.py  122
-rw-r--r--  testapp/exam/bash_files/sample.args (renamed from testapp/docs/sample.args)  0
-rwxr-xr-x  testapp/exam/bash_files/sample.sh (renamed from testapp/docs/sample.sh)  0
-rwxr-xr-x  testapp/exam/c_cpp_files/main.cpp (renamed from testapp/c_cpp_files/main.cpp)  0
-rwxr-xr-x  testapp/exam/c_cpp_files/main2.c (renamed from testapp/c_cpp_files/main2.c)  0
-rwxr-xr-x  testapp/exam/c_cpp_files/main_array_check.cpp (renamed from testapp/c_cpp_files/main_array_check.cpp)  0
-rwxr-xr-x  testapp/exam/c_cpp_files/main_array_check_all.cpp (renamed from testapp/c_cpp_files/main_array_check_all.cpp)  0
-rwxr-xr-x  testapp/exam/c_cpp_files/main_array_sum.cpp (renamed from testapp/c_cpp_files/main_array_sum.cpp)  0
-rwxr-xr-x  testapp/exam/c_cpp_files/main_blackJack.cpp (renamed from testapp/c_cpp_files/main_blackJack.cpp)  0
-rwxr-xr-x  testapp/exam/c_cpp_files/main_check_digit.cpp (renamed from testapp/c_cpp_files/main_check_digit.cpp)  0
-rwxr-xr-x  testapp/exam/c_cpp_files/main_count667.cpp (renamed from testapp/c_cpp_files/main_count667.cpp)  0
-rwxr-xr-x  testapp/exam/c_cpp_files/main_count7.cpp (renamed from testapp/c_cpp_files/main_count7.cpp)  0
-rwxr-xr-x  testapp/exam/c_cpp_files/main_fact.cpp (renamed from testapp/c_cpp_files/main_fact.cpp)  0
-rwxr-xr-x  testapp/exam/c_cpp_files/main_greatest.cpp (renamed from testapp/c_cpp_files/main_greatest.cpp)  0
-rwxr-xr-x  testapp/exam/c_cpp_files/main_hello_name.c (renamed from testapp/c_cpp_files/main_hello_name.c)  0
-rwxr-xr-x  testapp/exam/c_cpp_files/main_lessThan9.cpp (renamed from testapp/c_cpp_files/main_lessThan9.cpp)  0
-rwxr-xr-x  testapp/exam/c_cpp_files/main_mean.cpp (renamed from testapp/c_cpp_files/main_mean.cpp)  0
-rwxr-xr-x  testapp/exam/c_cpp_files/main_palindrome.cpp (renamed from testapp/c_cpp_files/main_palindrome.cpp)  0
-rwxr-xr-x  testapp/exam/c_cpp_files/main_roundTo10.cpp (renamed from testapp/c_cpp_files/main_roundTo10.cpp)  0
-rwxr-xr-x  testapp/exam/c_cpp_files/main_specialSum.cpp (renamed from testapp/c_cpp_files/main_specialSum.cpp)  0
-rwxr-xr-x  testapp/exam/c_cpp_files/main_within.cpp (renamed from testapp/c_cpp_files/main_within.cpp)  0
-rw-r--r--  testapp/exam/code_evaluator.py  206
-rwxr-xr-x  testapp/exam/code_server.py  752
-rw-r--r--  testapp/exam/cpp_code_evaluator.py  125
-rw-r--r--  testapp/exam/forms.py  46
-rw-r--r--  testapp/exam/java_code_evaluator.py  127
-rw-r--r--  testapp/exam/java_files/main_array_sum.java (renamed from testapp/java_files/main_array_sum.java)  0
-rw-r--r--  testapp/exam/java_files/main_fact.java (renamed from testapp/java_files/main_fact.java)  0
-rw-r--r--  testapp/exam/java_files/main_great.java (renamed from testapp/java_files/main_great.java)  0
-rw-r--r--  testapp/exam/java_files/main_hello_name.java (renamed from testapp/java_files/main_hello_name.java)  0
-rw-r--r--  testapp/exam/java_files/main_lastDigit.java (renamed from testapp/java_files/main_lastDigit.java)  0
-rw-r--r--  testapp/exam/java_files/main_moreThan30.java (renamed from testapp/java_files/main_moreThan30.java)  0
-rw-r--r--  testapp/exam/java_files/main_palindrome.java (renamed from testapp/java_files/main_palindrome.java)  0
-rw-r--r--  testapp/exam/java_files/main_square.java (renamed from testapp/java_files/main_square.java)  0
-rw-r--r--  testapp/exam/language_registry.py  36
-rw-r--r--  testapp/exam/models.py  63
-rw-r--r--  testapp/exam/python_code_evaluator.py  61
-rw-r--r--  testapp/exam/scilab_code_evaluator.py  95
-rw-r--r--  testapp/exam/scilab_files/test_add.sce (renamed from testapp/scilab_files/test_add.sce)  0
-rw-r--r--  testapp/exam/settings.py  8
-rw-r--r--  testapp/exam/static/exam/js/add_question.js  9
-rw-r--r--  testapp/exam/templates/exam/add_question.html  21
-rw-r--r--  testapp/exam/templates/exam/edit_question.html  36
-rw-r--r--  testapp/exam/tests.py  55
-rw-r--r--  testapp/exam/views.py  186
-rw-r--r--  testapp/exam/xmlrpc_clients.py  29
-rw-r--r--  testapp/test_server.py  302
-rw-r--r--  testapp/tests/__init__.py  0
-rw-r--r--  testapp/tests/test_bash_evaluation.py  41
-rw-r--r--  testapp/tests/test_c_cpp_evaluation.py  77
-rw-r--r--  testapp/tests/test_code_evaluation.py  24
-rw-r--r--  testapp/tests/test_java_evaluation.py  41
-rw-r--r--  testapp/tests/test_python_evaluation.py  54
-rw-r--r--  testapp/tests/test_scilab_evaluation.py  39
57 files changed, 1408 insertions, 1277 deletions
diff --git a/testapp/docs/sample_questions.py b/testapp/docs/sample_questions.py
deleted file mode 100644
index 60f32cb..0000000
--- a/testapp/docs/sample_questions.py
+++ /dev/null
@@ -1,84 +0,0 @@
-from datetime import date
-
-questions = [
-[Question(
- summary='Factorial',
- points=2,
- language='python',
- type='code',
- description='''
-Write a function called <code>fact</code> which takes a single integer argument
-(say <code>n</code>) and returns the factorial of the number.
-For example:<br/>
-<code>fact(3) -> 6</code>
-''',
- test='''
-assert fact(0) == 1
-assert fact(5) == 120
-''',
- snippet="def fact(num):"
- ),
-#Add tags here as a list of string.
-['Python','function','factorial'],
-],
-
-[Question(
- summary='Simple function',
- points=1,
- language='python',
- type='code',
- description='''Create a simple function called <code>sqr</code> which takes a single
-argument and returns the square of the argument. For example: <br/>
-<code>sqr(3) -> 9</code>.''',
- test='''
-import math
-assert sqr(3) == 9
-assert abs(sqr(math.sqrt(2)) - 2.0) < 1e-14
- ''',
- snippet="def sqr(num):"
- ),
-#Add tags here as a list of string.
-['Python','function'],
-],
-
-[Question(
- summary='Bash addition',
- points=2,
- language='bash',
- type='code',
- description='''Write a shell script which takes two arguments on the
- command line and prints the sum of the two on the output.''',
- test='''\
-docs/sample.sh
-docs/sample.args
-''',
- snippet="#!/bin/bash"
- ),
-#Add tags here as a list of string.
-[''],
-],
-
-[Question(
- summary='Size of integer in Python',
- points=0.5,
- language='python',
- type='mcq',
- description='''What is the largest integer value that can be represented
-in Python?''',
- options='''No Limit
-2**32
-2**32 - 1
-None of the above
-''',
- test = "No Limit"
- ),
-#Add tags here as a list of string.
-['mcq'],
-],
-
-] #list of questions ends here
-
-quiz = Quiz(start_date=date.today(),
- duration=10,
- description='Basic Python Quiz 1'
- )
diff --git a/testapp/docs/sample_questions.xml b/testapp/docs/sample_questions.xml
deleted file mode 100644
index 53c76f8..0000000
--- a/testapp/docs/sample_questions.xml
+++ /dev/null
@@ -1,43 +0,0 @@
-<question_bank>
-
-<question>
-<summary>
-Factorial
-</summary>
-<description>
-Write a function called "fact" which takes a single integer argument (say "n")
-and returns the factorial of the number.
-For example fact(3) -> 6
-</description>
-<points>2</points>
-<type>python</type>
-<test>
-assert fact(0) == 1
-assert fact(5) == 120
-</test>
-<options>
-</options>
-</question>
-
-<question>
-<summary>
-Simple function
-</summary>
-<description>
-Create a simple function called "sqr" which takes a single argument and
-returns the square of the argument
-For example sqr(3) -> 9.
-</description>
-<points>1</points>
-<type>python</type>
-<test>
-import math
-assert sqr(3) == 9
-assert abs(sqr(math.sqrt(2)) - 2.0) &lt; 1e-14
-</test>
-<options>
-</options>
-</question>
-
-
-</question_bank>
diff --git a/testapp/exam/admin.py b/testapp/exam/admin.py
index 060859a..86a10af 100644
--- a/testapp/exam/admin.py
+++ b/testapp/exam/admin.py
@@ -1,5 +1,6 @@
-from testapp.exam.models import Question, Quiz
+from testapp.exam.models import Question, Quiz, TestCase
from django.contrib import admin
admin.site.register(Question)
+admin.site.register(TestCase)
admin.site.register(Quiz)
diff --git a/testapp/exam/bash_code_evaluator.py b/testapp/exam/bash_code_evaluator.py
new file mode 100644
index 0000000..23c0ae5
--- /dev/null
+++ b/testapp/exam/bash_code_evaluator.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python
+import traceback
+import pwd
+import os
+from os.path import join, isfile
+import subprocess
+import importlib
+
+# local imports
+from code_evaluator import CodeEvaluator
+
+
+class BashCodeEvaluator(CodeEvaluator):
+ """Tests the Bash code obtained from Code Server"""
+ def __init__(self, test_case_data, test, language, user_answer,
+ ref_code_path=None, in_dir=None):
+ super(BashCodeEvaluator, self).__init__(test_case_data, test, language, user_answer,
+ ref_code_path, in_dir)
+ self.submit_path = self.create_submit_code_file('submit.sh')
+ self.test_case_args = self._setup()
+
+ # Private Protocol ##########
+ def _setup(self):
+ super(BashCodeEvaluator, self)._setup()
+
+ self._set_file_as_executable(self.submit_path)
+ get_ref_path, get_test_case_path = self.ref_code_path.strip().split(',')
+ get_ref_path = get_ref_path.strip()
+ get_test_case_path = get_test_case_path.strip()
+ ref_path, test_case_path = self._set_test_code_file_path(get_ref_path,
+ get_test_case_path)
+
+ return ref_path, self.submit_path, test_case_path
+
+ def _teardown(self):
+ # Delete the created file.
+ super(BashCodeEvaluator, self)._teardown()
+ os.remove(self.submit_path)
+
+ def _check_code(self, ref_path, submit_path,
+ test_case_path=None):
+        """ Function validates the student script using the instructor
+        script as reference. Test cases can optionally be provided. The
+        first argument, ref_path, is the path to the instructor script;
+        it is assumed to have executable permission. The second argument,
+        submit_path, is the path to the student script; it is also assumed
+        to have executable permission. The third, optional, argument is
+        the path to the test cases for the scripts. Each line in this file
+        is a test case, and each test case is passed to the script as
+        command-line arguments.
+
+ Returns
+ --------
+
+ returns (True, "Correct answer") : If the student script passes all
+ test cases/have same output, when compared to the instructor script
+
+ returns (False, error_msg): If the student script fails a single
+ test/have dissimilar output, when compared to the instructor script.
+
+ Returns (False, error_msg): If mandatory arguments are not files or if
+ the required permissions are not given to the file(s).
+
+ """
+ if not isfile(ref_path):
+ return False, "No file at %s or Incorrect path" % ref_path
+ if not isfile(submit_path):
+ return False, "No file at %s or Incorrect path" % submit_path
+ if not os.access(ref_path, os.X_OK):
+ return False, "Script %s is not executable" % ref_path
+ if not os.access(submit_path, os.X_OK):
+ return False, "Script %s is not executable" % submit_path
+
+ success = False
+
+        if not test_case_path:
+            ret = self._run_command(ref_path, stdin=None,
+                                    stdout=subprocess.PIPE,
+                                    stderr=subprocess.PIPE)
+ proc, inst_stdout, inst_stderr = ret
+            ret = self._run_command(submit_path, stdin=None,
+                                    stdout=subprocess.PIPE,
+                                    stderr=subprocess.PIPE)
+ proc, stdnt_stdout, stdnt_stderr = ret
+ if inst_stdout == stdnt_stdout:
+ return True, "Correct answer"
+ else:
+ err = "Error: expected %s, got %s" % (inst_stderr,
+ stdnt_stderr)
+ return False, err
+ else:
+ if not isfile(test_case_path):
+ return False, "No test case at %s" % test_case_path
+            if not os.access(test_case_path, os.R_OK):
+ return False, "Test script %s, not readable" % test_case_path
+ # valid_answer is True, so that we can stop once a test case fails
+ valid_answer = True
+ # loop_count has to be greater than or equal to one.
+ # Useful for caching things like empty test files,etc.
+ loop_count = 0
+ test_cases = open(test_case_path).readlines()
+ num_lines = len(test_cases)
+ for test_case in test_cases:
+ loop_count += 1
+ if valid_answer:
+ args = [ref_path] + [x for x in test_case.split()]
+                    ret = self._run_command(args, stdin=None,
+                                            stdout=subprocess.PIPE,
+                                            stderr=subprocess.PIPE)
+ proc, inst_stdout, inst_stderr = ret
+ args = [submit_path]+[x for x in test_case.split()]
+                    ret = self._run_command(args, stdin=None,
+                                            stdout=subprocess.PIPE,
+                                            stderr=subprocess.PIPE)
+ proc, stdnt_stdout, stdnt_stderr = ret
+ valid_answer = inst_stdout == stdnt_stdout
+ if valid_answer and (num_lines == loop_count):
+ return True, "Correct answer"
+ else:
+                err = "Error: expected %s, got %s" % (inst_stdout+inst_stderr,
+                                                      stdnt_stdout+stdnt_stderr)
+ return False, err
+
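BashCodeEvaluator above decides correctness purely by output comparison: the
reference script and the submitted script are run with identical arguments,
one test case per line of the args file, and their standard output must
match. A minimal standalone sketch of that idea in Python 2 (as used in this
tree); the file names and the outputs_match helper are illustrative, not
part of the module:

    import subprocess

    def outputs_match(ref_script, submit_script, args):
        # Run both scripts with the same arguments and capture stdout.
        run = lambda path: subprocess.Popen(
            [path] + args, stdout=subprocess.PIPE,
            stderr=subprocess.PIPE).communicate()[0]
        return run(ref_script) == run(submit_script)

    # One test case per line, split into arguments, exactly as
    # _check_code does above.
    for case in open('sample.args'):
        print(outputs_match('./sample.sh', './submit.sh', case.split()))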
diff --git a/testapp/docs/sample.args b/testapp/exam/bash_files/sample.args
index 4d9f00d..4d9f00d 100644
--- a/testapp/docs/sample.args
+++ b/testapp/exam/bash_files/sample.args
diff --git a/testapp/docs/sample.sh b/testapp/exam/bash_files/sample.sh
index e935cb3..e935cb3 100755
--- a/testapp/docs/sample.sh
+++ b/testapp/exam/bash_files/sample.sh
diff --git a/testapp/c_cpp_files/main.cpp b/testapp/exam/c_cpp_files/main.cpp
index ebe1f08..ebe1f08 100755
--- a/testapp/c_cpp_files/main.cpp
+++ b/testapp/exam/c_cpp_files/main.cpp
diff --git a/testapp/c_cpp_files/main2.c b/testapp/exam/c_cpp_files/main2.c
index ccd1768..ccd1768 100755
--- a/testapp/c_cpp_files/main2.c
+++ b/testapp/exam/c_cpp_files/main2.c
diff --git a/testapp/c_cpp_files/main_array_check.cpp b/testapp/exam/c_cpp_files/main_array_check.cpp
index ea34fdd..ea34fdd 100755
--- a/testapp/c_cpp_files/main_array_check.cpp
+++ b/testapp/exam/c_cpp_files/main_array_check.cpp
diff --git a/testapp/c_cpp_files/main_array_check_all.cpp b/testapp/exam/c_cpp_files/main_array_check_all.cpp
index 140578e..140578e 100755
--- a/testapp/c_cpp_files/main_array_check_all.cpp
+++ b/testapp/exam/c_cpp_files/main_array_check_all.cpp
diff --git a/testapp/c_cpp_files/main_array_sum.cpp b/testapp/exam/c_cpp_files/main_array_sum.cpp
index 55b2ebf..55b2ebf 100755
--- a/testapp/c_cpp_files/main_array_sum.cpp
+++ b/testapp/exam/c_cpp_files/main_array_sum.cpp
diff --git a/testapp/c_cpp_files/main_blackJack.cpp b/testapp/exam/c_cpp_files/main_blackJack.cpp
index cc54e78..cc54e78 100755
--- a/testapp/c_cpp_files/main_blackJack.cpp
+++ b/testapp/exam/c_cpp_files/main_blackJack.cpp
diff --git a/testapp/c_cpp_files/main_check_digit.cpp b/testapp/exam/c_cpp_files/main_check_digit.cpp
index d3bf3d6..d3bf3d6 100755
--- a/testapp/c_cpp_files/main_check_digit.cpp
+++ b/testapp/exam/c_cpp_files/main_check_digit.cpp
diff --git a/testapp/c_cpp_files/main_count667.cpp b/testapp/exam/c_cpp_files/main_count667.cpp
index f146e8c..f146e8c 100755
--- a/testapp/c_cpp_files/main_count667.cpp
+++ b/testapp/exam/c_cpp_files/main_count667.cpp
diff --git a/testapp/c_cpp_files/main_count7.cpp b/testapp/exam/c_cpp_files/main_count7.cpp
index 982e930..982e930 100755
--- a/testapp/c_cpp_files/main_count7.cpp
+++ b/testapp/exam/c_cpp_files/main_count7.cpp
diff --git a/testapp/c_cpp_files/main_fact.cpp b/testapp/exam/c_cpp_files/main_fact.cpp
index a4ff230..a4ff230 100755
--- a/testapp/c_cpp_files/main_fact.cpp
+++ b/testapp/exam/c_cpp_files/main_fact.cpp
diff --git a/testapp/c_cpp_files/main_greatest.cpp b/testapp/exam/c_cpp_files/main_greatest.cpp
index 6d0a7c2..6d0a7c2 100755
--- a/testapp/c_cpp_files/main_greatest.cpp
+++ b/testapp/exam/c_cpp_files/main_greatest.cpp
diff --git a/testapp/c_cpp_files/main_hello_name.c b/testapp/exam/c_cpp_files/main_hello_name.c
index 71b83a2..71b83a2 100755
--- a/testapp/c_cpp_files/main_hello_name.c
+++ b/testapp/exam/c_cpp_files/main_hello_name.c
diff --git a/testapp/c_cpp_files/main_lessThan9.cpp b/testapp/exam/c_cpp_files/main_lessThan9.cpp
index 722b4bb..722b4bb 100755
--- a/testapp/c_cpp_files/main_lessThan9.cpp
+++ b/testapp/exam/c_cpp_files/main_lessThan9.cpp
diff --git a/testapp/c_cpp_files/main_mean.cpp b/testapp/exam/c_cpp_files/main_mean.cpp
index 21a4b1a..21a4b1a 100755
--- a/testapp/c_cpp_files/main_mean.cpp
+++ b/testapp/exam/c_cpp_files/main_mean.cpp
diff --git a/testapp/c_cpp_files/main_palindrome.cpp b/testapp/exam/c_cpp_files/main_palindrome.cpp
index 0e66928..0e66928 100755
--- a/testapp/c_cpp_files/main_palindrome.cpp
+++ b/testapp/exam/c_cpp_files/main_palindrome.cpp
diff --git a/testapp/c_cpp_files/main_roundTo10.cpp b/testapp/exam/c_cpp_files/main_roundTo10.cpp
index 12c961d..12c961d 100755
--- a/testapp/c_cpp_files/main_roundTo10.cpp
+++ b/testapp/exam/c_cpp_files/main_roundTo10.cpp
diff --git a/testapp/c_cpp_files/main_specialSum.cpp b/testapp/exam/c_cpp_files/main_specialSum.cpp
index d614536..d614536 100755
--- a/testapp/c_cpp_files/main_specialSum.cpp
+++ b/testapp/exam/c_cpp_files/main_specialSum.cpp
diff --git a/testapp/c_cpp_files/main_within.cpp b/testapp/exam/c_cpp_files/main_within.cpp
index 50f9ad0..50f9ad0 100755
--- a/testapp/c_cpp_files/main_within.cpp
+++ b/testapp/exam/c_cpp_files/main_within.cpp
diff --git a/testapp/exam/code_evaluator.py b/testapp/exam/code_evaluator.py
new file mode 100644
index 0000000..2a57257
--- /dev/null
+++ b/testapp/exam/code_evaluator.py
@@ -0,0 +1,206 @@
+import sys
+from SimpleXMLRPCServer import SimpleXMLRPCServer
+import pwd
+import os
+import stat
+from os.path import isdir, dirname, abspath, join, isfile
+import signal
+from multiprocessing import Process, Queue
+import subprocess
+import re
+import json
+# Local imports.
+from settings import SERVER_PORTS, SERVER_TIMEOUT, SERVER_POOL_PORT
+
+
+MY_DIR = abspath(dirname(__file__))
+
+
+# Raised when the code times-out.
+# c.f. http://pguides.net/python/timeout-a-function
+class TimeoutException(Exception):
+ pass
+
+
+def timeout_handler(signum, frame):
+ """A handler for the ALARM signal."""
+ raise TimeoutException('Code took too long to run.')
+
+
+def create_signal_handler():
+ """Add a new signal handler for the execution of this code."""
+ prev_handler = signal.signal(signal.SIGALRM, timeout_handler)
+ signal.alarm(SERVER_TIMEOUT)
+ return prev_handler
+
+
+def set_original_signal_handler(old_handler=None):
+ """Set back any original signal handler."""
+ if old_handler is not None:
+ signal.signal(signal.SIGALRM, old_handler)
+ return
+ else:
+ raise Exception("Signal Handler: object cannot be NoneType")
+
+
+def delete_signal_handler():
+ signal.alarm(0)
+ return
+
+
+class CodeEvaluator(object):
+ """Tests the code obtained from Code Server"""
+ def __init__(self, test_case_data, test, language, user_answer,
+ ref_code_path=None, in_dir=None):
+ msg = 'Code took more than %s seconds to run. You probably '\
+ 'have an infinite loop in your code.' % SERVER_TIMEOUT
+ self.timeout_msg = msg
+ self.test_case_data = test_case_data
+ self.language = language.lower()
+ self.user_answer = user_answer
+ self.ref_code_path = ref_code_path
+ self.test = test
+ self.in_dir = in_dir
+ self.test_case_args = None
+
+ # Public Protocol ##########
+ @classmethod
+ def from_json(cls, language, json_data, in_dir):
+ json_data = json.loads(json_data)
+ test_case_data = json_data.get("test_case_data")
+ user_answer = json_data.get("user_answer")
+ ref_code_path = json_data.get("ref_code_path")
+ test = json_data.get("test")
+
+ instance = cls(test_case_data, test, language, user_answer, ref_code_path,
+ in_dir)
+ return instance
+
+ def evaluate(self):
+ """Evaluates given code with the test cases based on
+ given arguments in test_case_data.
+
+ The ref_code_path is a path to the reference code.
+ The reference code will call the function submitted by the student.
+ The reference code will check for the expected output.
+
+        If the paths start with a "/" then we assume they are absolute paths.
+ If not, we assume they are relative paths w.r.t. the location of this
+ code_server script.
+
+ If the optional `in_dir` keyword argument is supplied it changes the
+ directory to that directory (it does not change it back to the original
+ when done).
+
+ Returns
+ -------
+
+ A tuple: (success, error message).
+ """
+
+ self._setup()
+ success, err = self._evaluate(self.test_case_args)
+ self._teardown()
+
+ result = {'success': success, 'error': err}
+ return result
+
+ # Private Protocol ##########
+ def _setup(self):
+ self._change_dir(self.in_dir)
+
+ def _evaluate(self, args):
+ # Add a new signal handler for the execution of this code.
+ prev_handler = create_signal_handler()
+ success = False
+ args = args or []
+
+ # Do whatever testing needed.
+ try:
+ success, err = self._check_code(*args)
+
+ except TimeoutException:
+ err = self.timeout_msg
+ except:
+ _type, value = sys.exc_info()[:2]
+ err = "Error: {0}".format(repr(value))
+ finally:
+ # Set back any original signal handler.
+ set_original_signal_handler(prev_handler)
+
+ return success, err
+
+ def _teardown(self):
+ # Cancel the signal
+ delete_signal_handler()
+
+ def _check_code(self):
+ raise NotImplementedError("check_code method not implemented")
+
+ def create_submit_code_file(self, file_name):
+        """Write the user answer to a file and return its absolute path."""
+ submit_f = open(file_name, 'w')
+ submit_f.write(self.user_answer.lstrip())
+ submit_f.close()
+ submit_path = abspath(submit_f.name)
+
+ return submit_path
+
+ def _set_file_as_executable(self, fname):
+ os.chmod(fname, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
+ | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP
+ | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)
+
+ def _set_test_code_file_path(self, ref_path=None, test_case_path=None):
+ if ref_path and not ref_path.startswith('/'):
+ ref_path = join(MY_DIR, ref_path)
+
+ if test_case_path and not test_case_path.startswith('/'):
+ test_case_path = join(MY_DIR, test_case_path)
+
+ return ref_path, test_case_path
+
+ def _run_command(self, cmd_args, *args, **kw):
+ """Run a command in a subprocess while blocking, the process is killed
+ if it takes more than 2 seconds to run. Return the Popen object, the
+ stdout and stderr.
+ """
+ try:
+ proc = subprocess.Popen(cmd_args, *args, **kw)
+ stdout, stderr = proc.communicate()
+ except TimeoutException:
+ # Runaway code, so kill it.
+ proc.kill()
+ # Re-raise exception.
+ raise
+ return proc, stdout, stderr
+
+ def _compile_command(self, cmd, *args, **kw):
+ """Compiles C/C++/java code and returns errors if any.
+ Run a command in a subprocess while blocking, the process is killed
+ if it takes more than 2 seconds to run. Return the Popen object, the
+ stderr.
+ """
+ try:
+ proc_compile = subprocess.Popen(cmd, shell=True, stdin=None,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ out, err = proc_compile.communicate()
+ except TimeoutException:
+ # Runaway code, so kill it.
+ proc_compile.kill()
+ # Re-raise exception.
+ raise
+ return proc_compile, err
+
+ def _change_dir(self, in_dir):
+ if in_dir is not None and isdir(in_dir):
+ os.chdir(in_dir)
+
+ def _remove_null_substitute_char(self, string):
+ """Returns a string without any null and substitute characters"""
+ stripped = ""
+ for c in string:
+ if ord(c) is not 26 and ord(c) is not 0:
+ stripped = stripped + c
+ return ''.join(stripped)
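The heart of CodeEvaluator is the timeout machinery: _evaluate installs a
SIGALRM handler before calling _check_code and restores the previous handler
afterwards, so a runaway submission raises TimeoutException after
SERVER_TIMEOUT seconds. A minimal sketch of the same pattern in isolation,
assuming a POSIX system; run_with_timeout is an illustrative name, not part
of the module:

    import signal

    class TimeoutException(Exception):
        pass

    def timeout_handler(signum, frame):
        raise TimeoutException('Code took too long to run.')

    def run_with_timeout(func, seconds=2):
        old_handler = signal.signal(signal.SIGALRM, timeout_handler)
        signal.alarm(seconds)                           # arm the alarm
        try:
            return func()
        finally:
            signal.alarm(0)                             # cancel the alarm
            signal.signal(signal.SIGALRM, old_handler)  # restore handler

    print(run_with_timeout(lambda: sum(range(1000))))   # completes in time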
diff --git a/testapp/exam/code_server.py b/testapp/exam/code_server.py
index 792197d..580379f 100755
--- a/testapp/exam/code_server.py
+++ b/testapp/exam/code_server.py
@@ -19,7 +19,6 @@ settings.py:SERVER_POOL_PORT. This port exposes a `get_server_port` function
that returns an available server.
"""
import sys
-import traceback
from SimpleXMLRPCServer import SimpleXMLRPCServer
import pwd
import os
@@ -29,12 +28,16 @@ import signal
from multiprocessing import Process, Queue
import subprocess
import re
+import json
# Local imports.
from settings import SERVER_PORTS, SERVER_TIMEOUT, SERVER_POOL_PORT
+from language_registry import set_registry, get_registry
+
MY_DIR = abspath(dirname(__file__))
+# Private Protocol ##########
def run_as_nobody():
"""Runs the current process as nobody."""
# Set the effective uid and to that of nobody.
@@ -43,17 +46,6 @@ def run_as_nobody():
os.seteuid(nobody.pw_uid)
-# Raised when the code times-out.
-# c.f. http://pguides.net/python/timeout-a-function
-class TimeoutException(Exception):
- pass
-
-
-def timeout_handler(signum, frame):
- """A handler for the ALARM signal."""
- raise TimeoutException('Code took too long to run.')
-
-
###############################################################################
# `CodeServer` class.
###############################################################################
@@ -64,736 +56,38 @@ class CodeServer(object):
def __init__(self, port, queue):
self.port = port
self.queue = queue
- msg = 'Code took more than %s seconds to run. You probably '\
- 'have an infinite loop in your code.' % SERVER_TIMEOUT
- self.timeout_msg = msg
-
- def run_python_code(self, answer, test_code, in_dir=None):
- """Tests given Python function (`answer`) with the `test_code`
- supplied. If the optional `in_dir` keyword argument is supplied
- it changes the directory to that directory (it does not change
- it back to the original when done). This function also timesout
- when the function takes more than SERVER_TIMEOUT seconds to run
- to prevent runaway code.
- Returns
- -------
-
- A tuple: (success, error message).
-
- """
- if in_dir is not None and isdir(in_dir):
- os.chdir(in_dir)
-
- # Add a new signal handler for the execution of this code.
- old_handler = signal.signal(signal.SIGALRM, timeout_handler)
- signal.alarm(SERVER_TIMEOUT)
-
- success = False
- tb = None
- try:
- submitted = compile(answer, '<string>', mode='exec')
- g = {}
- exec submitted in g
- _tests = compile(test_code, '<string>', mode='exec')
- exec _tests in g
- except TimeoutException:
- err = self.timeout_msg
- except AssertionError:
- type, value, tb = sys.exc_info()
- info = traceback.extract_tb(tb)
- fname, lineno, func, text = info[-1]
- text = str(test_code).splitlines()[lineno-1]
- err = "{0} {1} in: {2}".format(type.__name__, str(value), text)
- except:
- type, value = sys.exc_info()[:2]
- err = "Error: {0}".format(repr(value))
- else:
- success = True
- err = 'Correct answer'
- finally:
- del tb
- # Set back any original signal handler.
- signal.signal(signal.SIGALRM, old_handler)
-
- # Cancel the signal if any, see signal.alarm documentation.
- signal.alarm(0)
-
- # Put us back into the server pool queue since we are free now.
- self.queue.put(self.port)
-
- return success, err
-
- def run_bash_code(self, answer, test_code, in_dir=None):
- """Tests given Bash code (`answer`) with the `test_code` supplied.
-
- The testcode should typically contain two lines, the first is a path to
- the reference script we are to compare against. The second is a path
- to the arguments to be supplied to the reference and submitted script.
- The output of these will be compared for correctness.
-
- If the path's start with a "/" then we assume they are absolute paths.
- If not, we assume they are relative paths w.r.t. the location of this
- code_server script.
-
- If the optional `in_dir` keyword argument is supplied it changes the
- directory to that directory (it does not change it back to the original
- when done).
-
- Returns
- -------
-
- A tuple: (success, error message).
-
- """
- if in_dir is not None and isdir(in_dir):
- os.chdir(in_dir)
-
- def _set_exec(fname):
- os.chmod(fname, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
- | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP
- | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)
- submit_f = open('submit.sh', 'w')
- submit_f.write(answer.lstrip())
- submit_f.close()
- submit_path = abspath(submit_f.name)
- _set_exec(submit_path)
-
- ref_path, test_case_path = test_code.strip().splitlines()
- if not ref_path.startswith('/'):
- ref_path = join(MY_DIR, ref_path)
- if not test_case_path.startswith('/'):
- test_case_path = join(MY_DIR, test_case_path)
-
- # Add a new signal handler for the execution of this code.
- old_handler = signal.signal(signal.SIGALRM, timeout_handler)
- signal.alarm(SERVER_TIMEOUT)
-
- # Do whatever testing needed.
- success = False
- try:
- success, err = self.check_bash_script(ref_path, submit_path,
- test_case_path)
- except TimeoutException:
- err = self.timeout_msg
- except:
- type, value = sys.exc_info()[:2]
- err = "Error: {0}".format(repr(value))
- finally:
- # Set back any original signal handler.
- signal.signal(signal.SIGALRM, old_handler)
-
- # Delete the created file.
- os.remove(submit_path)
-
- # Cancel the signal if any, see signal.alarm documentation.
- signal.alarm(0)
-
- # Put us back into the server pool queue since we are free now.
- self.queue.put(self.port)
-
- return success, err
-
- def _run_command(self, cmd_args, *args, **kw):
- """Run a command in a subprocess while blocking, the process is killed
- if it takes more than 2 seconds to run. Return the Popen object, the
- stdout and stderr.
- """
- try:
- proc = subprocess.Popen(cmd_args, *args, **kw)
- stdout, stderr = proc.communicate()
- except TimeoutException:
- # Runaway code, so kill it.
- proc.kill()
- # Re-raise exception.
- raise
- return proc, stdout, stderr
-
- def check_bash_script(self, ref_script_path, submit_script_path,
- test_case_path=None):
- """ Function validates student script using instructor script as
- reference. Test cases can optionally be provided. The first argument
- ref_script_path, is the path to instructor script, it is assumed to
- have executable permission. The second argument submit_script_path, is
- the path to the student script, it is assumed to have executable
- permission. The Third optional argument is the path to test the
- scripts. Each line in this file is a test case and each test case is
- passed to the script as standard arguments.
-
- Returns
- --------
-
- returns (True, "Correct answer") : If the student script passes all
- test cases/have same output, when compared to the instructor script
-
- returns (False, error_msg): If the student script fails a single
- test/have dissimilar output, when compared to the instructor script.
-
- Returns (False, error_msg): If mandatory arguments are not files or if
- the required permissions are not given to the file(s).
-
- """
- if not isfile(ref_script_path):
- return False, "No file at %s" % ref_script_path
- if not isfile(submit_script_path):
- return False, 'No file at %s' % submit_script_path
- if not os.access(ref_script_path, os.X_OK):
- return False, 'Script %s is not executable' % ref_script_path
- if not os.access(submit_script_path, os.X_OK):
- return False, 'Script %s is not executable' % submit_script_path
-
- if test_case_path is None:
- ret = self._run_command(ref_script_path, stdin=None,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- proc, inst_stdout, inst_stderr = ret
- ret = self._run_command(submit_script_path, stdin=None,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- proc, stdnt_stdout, stdnt_stderr = ret
- if inst_stdout == stdnt_stdout:
- return True, 'Correct answer'
- else:
- err = "Error: expected %s, got %s" % (inst_stderr,
- stdnt_stderr)
- return False, err
- else:
- if not isfile(test_case_path):
- return False, "No test case at %s" % test_case_path
- if not os.access(ref_script_path, os.R_OK):
- return False, "Test script %s, not readable" % test_case_path
- valid_answer = True # We initially make it one, so that we can
- # stop once a test case fails
- loop_count = 0 # Loop count has to be greater than or
- # equal to one.
- # Useful for caching things like empty
- # test files,etc.
- test_cases = open(test_case_path).readlines()
- num_lines = len(test_cases)
- for test_case in test_cases:
- loop_count += 1
- if valid_answer:
- args = [ref_script_path] + [x for x in test_case.split()]
- ret = self._run_command(args, stdin=None,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- proc, inst_stdout, inst_stderr = ret
- args = [submit_script_path]+[x for x in test_case.split()]
- ret = self._run_command(args, stdin=None,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- proc, stdnt_stdout, stdnt_stderr = ret
- valid_answer = inst_stdout == stdnt_stdout
- if valid_answer and (num_lines == loop_count):
- return True, "Correct answer"
- else:
- err = "Error:expected %s, got %s" % (inst_stdout+inst_stderr,
- stdnt_stdout+stdnt_stderr)
- return False, err
-
- def run_c_code(self, answer, test_code, in_dir=None):
- """Tests given C code (`answer`) with the `test_code` supplied.
-
- The testcode is a path to the reference code.
- The reference code will call the function submitted by the student.
- The reference code will check for the expected output.
-
- If the path's start with a "/" then we assume they are absolute paths.
- If not, we assume they are relative paths w.r.t. the location of this
- code_server script.
-
- If the optional `in_dir` keyword argument is supplied it changes the
- directory to that directory (it does not change it back to the original
- when done).
-
- Returns
- -------
-
- A tuple: (success, error message).
-
- """
- if in_dir is not None and isdir(in_dir):
- os.chdir(in_dir)
-
- # File extension must be .c
- submit_f = open('submit.c', 'w')
- submit_f.write(answer.lstrip())
- submit_f.close()
- submit_path = abspath(submit_f.name)
-
- ref_path = test_code.strip()
- if not ref_path.startswith('/'):
- ref_path = join(MY_DIR, ref_path)
-
- # Add a new signal handler for the execution of this code.
- old_handler = signal.signal(signal.SIGALRM, timeout_handler)
- signal.alarm(SERVER_TIMEOUT)
-
- # Do whatever testing needed.
- success = False
- try:
- success, err = self._check_c_cpp_code(ref_path, submit_path)
- except TimeoutException:
- err = self.timeout_msg
- except:
- type, value = sys.exc_info()[:2]
- err = "Error: {0}".format(repr(value))
- finally:
- # Set back any original signal handler.
- signal.signal(signal.SIGALRM, old_handler)
-
- # Delete the created file.
- os.remove(submit_path)
-
- # Cancel the signal if any, see signal.alarm documentation.
- signal.alarm(0)
-
- # Put us back into the server pool queue since we are free now.
- self.queue.put(self.port)
-
- return success, err
-
- def _compile_command(self, cmd, *args, **kw):
- """Compiles C/C++/java code and returns errors if any.
- Run a command in a subprocess while blocking, the process is killed
- if it takes more than 2 seconds to run. Return the Popen object, the
- stderr.
- """
- try:
- proc_compile = subprocess.Popen(cmd, shell=True, stdin=None,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- out, err = proc_compile.communicate()
- except TimeoutException:
- # Runaway code, so kill it.
- proc_compile.kill()
- # Re-raise exception.
- raise
- return proc_compile, err
-
- def _check_c_cpp_code(self, ref_code_path, submit_code_path):
- """ Function validates student code using instructor code as
- reference.The first argument ref_code_path, is the path to
- instructor code, it is assumed to have executable permission.
- The second argument submit_code_path, is the path to the student
- code, it is assumed to have executable permission.
-
- Returns
- --------
-
- returns (True, "Correct answer") : If the student function returns
- expected output when called by reference code.
-
- returns (False, error_msg): If the student function fails to return
- expected output when called by reference code.
-
- Returns (False, error_msg): If mandatory arguments are not files or
- if the required permissions are not given to the file(s).
-
- """
- if not isfile(ref_code_path):
- return False, "No file at %s" % ref_code_path
- if not isfile(submit_code_path):
- return False, 'No file at %s' % submit_code_path
-
- success = False
- output_path = os.getcwd() + '/output'
- compile_command = "g++ %s -c -o %s" % (submit_code_path, output_path)
- ret = self._compile_command(compile_command)
- proc, stdnt_stderr = ret
-
- # Only if compilation is successful, the program is executed
- # And tested with testcases
- if stdnt_stderr == '':
- executable = os.getcwd() + '/executable'
- compile_main = "g++ %s %s -o %s" % (ref_code_path, output_path,
- executable)
- ret = self._compile_command(compile_main)
- proc, main_err = ret
- if main_err == '':
- args = [executable]
- ret = self._run_command(args, stdin=None,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- proc, stdout, stderr = ret
- if proc.returncode == 0:
- success, err = True, "Correct answer"
- else:
- err = stdout + "\n" + stderr
- os.remove(executable)
- else:
- err = "Error:"
- try:
- error_lines = main_err.splitlines()
- for e in error_lines:
- err = err + "\n" + e.split(":", 1)[1]
- except:
- err = err + "\n" + main_err
- os.remove(output_path)
- else:
- err = "Compilation Error:"
- try:
- error_lines = stdnt_stderr.splitlines()
- for e in error_lines:
- if ':' in e:
- err = err + "\n" + e.split(":", 1)[1]
- else:
- err = err + "\n" + e
- except:
- err = err + "\n" + stdnt_stderr
- return success, err
-
- def run_cplus_code(self, answer, test_code, in_dir=None):
- """Tests given C++ code (`answer`) with the `test_code` supplied.
-
- The testcode is a path to the reference code.
- The reference code will call the function submitted by the student.
- The reference code will check for the expected output.
-
- If the path's start with a "/" then we assume they are absolute paths.
- If not, we assume they are relative paths w.r.t. the location of this
- code_server script.
-
- If the optional `in_dir` keyword argument is supplied it changes the
- directory to that directory (it does not change it back to the original
- when done).
-
- Returns
- -------
-
- A tuple: (success, error message).
-
- """
- if in_dir is not None and isdir(in_dir):
- os.chdir(in_dir)
-
- # The file extension must be .cpp
- submit_f = open('submitstd.cpp', 'w')
- submit_f.write(answer.lstrip())
- submit_f.close()
- submit_path = abspath(submit_f.name)
-
- ref_path = test_code.strip()
- if not ref_path.startswith('/'):
- ref_path = join(MY_DIR, ref_path)
-
- # Add a new signal handler for the execution of this code.
- old_handler = signal.signal(signal.SIGALRM, timeout_handler)
- signal.alarm(SERVER_TIMEOUT)
-
- # Do whatever testing needed.
- success = False
- try:
- success, err = self._check_c_cpp_code(ref_path, submit_path)
- except TimeoutException:
- err = self.timeout_msg
- except:
- type, value = sys.exc_info()[:2]
- err = "Error: {0}".format(repr(value))
- finally:
- # Set back any original signal handler.
- signal.signal(signal.SIGALRM, old_handler)
-
- # Delete the created file.
- os.remove(submit_path)
-
- # Cancel the signal if any, see signal.alarm documentation.
- signal.alarm(0)
-
- # Put us back into the server pool queue since we are free now.
- self.queue.put(self.port)
-
- return success, err
-
- def run_java_code(self, answer, test_code, in_dir=None):
- """Tests given java code (`answer`) with the `test_code` supplied.
-
- The testcode is a path to the reference code.
- The reference code will call the function submitted by the student.
- The reference code will check for the expected output.
-
- If the path's start with a "/" then we assume they are absolute paths.
- If not, we assume they are relative paths w.r.t. the location of this
- code_server script.
-
- If the optional `in_dir` keyword argument is supplied it changes the
- directory to that directory (it does not change it back to the original
- when done).
-
- Returns
- -------
-
- A tuple: (success, error message).
-
- """
- if in_dir is not None and isdir(in_dir):
- os.chdir(in_dir)
-
- # The file extension must be .java
- # The class name and file name must be same in java
- submit_f = open('Test.java', 'w')
- submit_f.write(answer.lstrip())
- submit_f.close()
- submit_path = abspath(submit_f.name)
-
- ref_path = test_code.strip()
- if not ref_path.startswith('/'):
- ref_path = join(MY_DIR, ref_path)
-
- # Add a new signal handler for the execution of this code.
- old_handler = signal.signal(signal.SIGALRM, timeout_handler)
- signal.alarm(SERVER_TIMEOUT)
-
- # Do whatever testing needed.
- success = False
- try:
- success, err = self._check_java_code(ref_path, submit_path)
- except TimeoutException:
- err = self.timeout_msg
- except:
- type, value = sys.exc_info()[:2]
- err = "Error: {0}".format(repr(value))
- finally:
- # Set back any original signal handler.
- signal.signal(signal.SIGALRM, old_handler)
-
- # Delete the created file.
- os.remove(submit_path)
-
- # Cancel the signal if any, see signal.alarm documentation.
- signal.alarm(0)
-
- # Put us back into the server pool queue since we are free now.
- self.queue.put(self.port)
-
- return success, err
-
- def _check_java_code(self, ref_code_path, submit_code_path):
- """ Function validates student code using instructor code as
- reference.The first argument ref_code_path, is the path to
- instructor code, it is assumed to have executable permission.
- The second argument submit_code_path, is the path to the student
- code, it is assumed to have executable permission.
-
- Returns
- --------
-
- returns (True, "Correct answer") : If the student function returns
- expected output when called by reference code.
-
- returns (False, error_msg): If the student function fails to return
- expected output when called by reference code.
-
- Returns (False, error_msg): If mandatory arguments are not files or
- if the required permissions are not given to the file(s).
-
- """
- if not isfile(ref_code_path):
- return False, "No file at %s" % ref_code_path
- if not isfile(submit_code_path):
- return False, 'No file at %s' % submit_code_path
-
- success = False
- compile_command = "javac %s" % (submit_code_path)
- ret = self._compile_command(compile_command)
- proc, stdnt_stderr = ret
- stdnt_stderr = self._remove_null_substitute_char(stdnt_stderr)
-
- # Only if compilation is successful, the program is executed
- # And tested with testcases
- if stdnt_stderr == '':
- student_directory = os.getcwd() + '/'
- student_file_name = "Test"
- compile_main = "javac %s -classpath %s -d %s" % (ref_code_path,
- student_directory,
- student_directory)
- ret = self._compile_command(compile_main)
- proc, main_err = ret
- main_err = self._remove_null_substitute_char(main_err)
-
- if main_err == '':
- main_file_name = (ref_code_path.split('/')[-1]).split('.')[0]
- run_command = "java -cp %s %s" % (student_directory,
- main_file_name)
- ret = self._run_command(run_command,
- stdin=None,
- shell=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- proc, stdout, stderr = ret
- if proc.returncode == 0:
- success, err = True, "Correct answer"
- else:
- err = stdout + "\n" + stderr
- success = False
- os.remove("%s%s.class" % (student_directory, main_file_name))
- else:
- err = "Error:\n"
- try:
- error_lines = main_err.splitlines()
- for e in error_lines:
- if ':' in e:
- err = err + "\n" + e.split(":", 1)[1]
- else:
- err = err + "\n" + e
- except:
- err = err + "\n" + main_err
- os.remove("%s%s.class" % (student_directory, student_file_name))
- else:
- err = "Compilation Error:\n"
- try:
- error_lines = stdnt_stderr.splitlines()
- for e in error_lines:
- if ':' in e:
- err = err + "\n" + e.split(":", 1)[1]
- else:
- err = err + "\n" + e
- except:
- err = err + "\n" + stdnt_stderr
- return success, err
-
- def _remove_null_substitute_char(self, string):
- """Returns a string without any null and substitute characters"""
- stripped = ""
- for c in string:
- if ord(c) is not 26 and ord(c) is not 0:
- stripped = stripped + c
- return ''.join(stripped)
-
- def run_scilab_code(self, answer, test_code, in_dir=None):
- """Tests given Scilab function (`answer`) with the `test_code`
- supplied. If the optional `in_dir` keyword argument is supplied
- it changes the directory to that directory (it does not change
- it back to the original when done). This function also timesout
- when the function takes more than SERVER_TIMEOUT seconds to run
- to prevent runaway code.
-
- The testcode is a path to the reference code.
- The reference code will call the function submitted by the student.
- The reference code will check for the expected output.
-
- If the path's start with a "/" then we assume they are absolute paths.
- If not, we assume they are relative paths w.r.t. the location of this
- code_server script.
-
- Returns
- -------
-
- A tuple: (success, error message).
+ # Public Protocol ##########
+ def check_code(self, language, json_data, in_dir=None):
+ """Calls relevant EvaluateCode class based on language to check the
+ answer code
"""
- if in_dir is not None and isdir(in_dir):
- os.chdir(in_dir)
-
- # Removes all the commands that terminates scilab
- answer,i = self._remove_scilab_exit(answer.lstrip())
-
- # Throw message if there are commmands that terminates scilab
- add_err=""
- if i > 0:
- add_err = "Please do not use exit, quit and abort commands in your\
- code.\n Otherwise your code will not be evaluated\
- correctly.\n"
-
- # The file extension should be .sci
- submit_f = open('function.sci','w')
- submit_f.write(answer)
- submit_f.close()
- submit_path = abspath(submit_f.name)
-
- ref_path = test_code.strip()
- if not ref_path.startswith('/'):
- ref_path = join(MY_DIR, ref_path)
-
- # Add a new signal handler for the execution of this code.
- old_handler = signal.signal(signal.SIGALRM, timeout_handler)
- signal.alarm(SERVER_TIMEOUT)
-
- # Do whatever testing needed.
- success = False
- try:
- cmd = 'printf "lines(0)\nexec(\'{0}\',2);\nquit();"'.format(ref_path)
- cmd += ' | timeout 8 scilab-cli -nb'
- ret = self._run_command(cmd,
- shell=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- proc, stdout, stderr = ret
-
- # Get only the error.
- stderr = self._get_error(stdout)
- if stderr is None:
- # Clean output
- stdout = self._strip_output(stdout)
- if proc.returncode == 5:
- success, err = True, "Correct answer"
- else:
- err = add_err + stdout
- else:
- err = add_err + stderr
- except TimeoutException:
- err = self.timeout_msg
- except:
- type, value = sys.exc_info()[:2]
- err = "Error: {0}".format(repr(value))
- finally:
- # Set back any original signal handler.
- signal.signal(signal.SIGALRM, old_handler)
-
- # Delete the created file.
- os.remove(submit_path)
-
- # Cancel the signal if any, see signal.alarm documentation.
- signal.alarm(0)
+ code_evaluator = self._create_evaluator_instance(language, json_data,
+ in_dir)
+ result = code_evaluator.evaluate()
# Put us back into the server pool queue since we are free now.
self.queue.put(self.port)
- return success, err
-
- def _remove_scilab_exit(self, string):
- """
- Removes exit, quit and abort from the scilab code
- """
- new_string = ""
- i=0
- for line in string.splitlines():
- new_line = re.sub(r"exit.*$","",line)
- new_line = re.sub(r"quit.*$","",new_line)
- new_line = re.sub(r"abort.*$","",new_line)
- if line != new_line:
- i=i+1
- new_string = new_string +'\n'+ new_line
- return new_string, i
-
- def _get_error(self, string):
- """
- Fetches only the error from the string.
- Returns None if no error.
- """
- obj = re.search("!.+\n.+",string);
- if obj:
- return obj.group()
- return None
-
- def _strip_output(self, out):
- """
- Cleans whitespace from the output
- """
- strip_out = "Message"
- for l in out.split('\n'):
- if l.strip():
- strip_out = strip_out+"\n"+l.strip()
- return strip_out
+ return json.dumps(result)
def run(self):
- """Run XMLRPC server, serving our methods.
- """
+ """Run XMLRPC server, serving our methods."""
server = SimpleXMLRPCServer(("localhost", self.port))
self.server = server
server.register_instance(self)
self.queue.put(self.port)
server.serve_forever()
+ # Private Protocol ##########
+ def _create_evaluator_instance(self, language, json_data, in_dir):
+ """Create instance of relevant EvaluateCode class based on language"""
+ set_registry()
+ registry = get_registry()
+ cls = registry.get_class(language)
+ instance = cls.from_json(language, json_data, in_dir)
+ return instance
+
###############################################################################
# `ServerPool` class.
@@ -825,6 +119,8 @@ class ServerPool(object):
p.start()
self.servers = servers
+ # Public Protocol ##########
+
def get_server_port(self):
"""Get available server port from ones in the pool. This will block
till it gets an available server.
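After the redesign the server no longer has one run_*_code method per
language: a client obtains a free port from the pool, then calls the single
check_code method with a JSON payload whose keys mirror
CodeEvaluator.from_json. A hedged sketch of such a client, assuming the pool
and servers from settings.py are running on localhost (xmlrpclib is the
Python 2 module; the payload values are illustrative):

    import json
    import xmlrpclib
    from settings import SERVER_POOL_PORT

    pool = xmlrpclib.ServerProxy('http://localhost:%d' % SERVER_POOL_PORT)
    port = pool.get_server_port()        # blocks until a server is free
    server = xmlrpclib.ServerProxy('http://localhost:%d' % port)

    payload = json.dumps({'user_answer': 'def sqr(n):\n    return n * n',
                          'test': 'assert sqr(3) == 9',
                          'test_case_data': None,
                          'ref_code_path': None})
    result = json.loads(server.check_code('python', payload, None))
    print(result)                        # {'success': ..., 'error': ...}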
diff --git a/testapp/exam/cpp_code_evaluator.py b/testapp/exam/cpp_code_evaluator.py
new file mode 100644
index 0000000..15e2b13
--- /dev/null
+++ b/testapp/exam/cpp_code_evaluator.py
@@ -0,0 +1,125 @@
+#!/usr/bin/env python
+import traceback
+import pwd
+import os
+from os.path import join, isfile
+import subprocess
+import importlib
+
+# local imports
+from code_evaluator import CodeEvaluator
+
+
+class CppCodeEvaluator(CodeEvaluator):
+ """Tests the C code obtained from Code Server"""
+ def __init__(self, test_case_data, test, language, user_answer,
+ ref_code_path=None, in_dir=None):
+ super(CppCodeEvaluator, self).__init__(test_case_data, test, language,
+ user_answer, ref_code_path,
+ in_dir)
+ self.submit_path = self.create_submit_code_file('submit.c')
+ self.test_case_args = self._setup()
+
+ # Private Protocol ##########
+ def _setup(self):
+ super(CppCodeEvaluator, self)._setup()
+
+ get_ref_path = self.ref_code_path
+ ref_path, test_case_path = self._set_test_code_file_path(get_ref_path)
+
+ # Set file paths
+ c_user_output_path = os.getcwd() + '/output'
+ c_ref_output_path = os.getcwd() + '/executable'
+
+ # Set command variables
+ compile_command = 'g++ {0} -c -o {1}'.format(self.submit_path,
+ c_user_output_path)
+ compile_main = 'g++ {0} {1} -o {2}'.format(ref_path,
+ c_user_output_path,
+ c_ref_output_path)
+ run_command_args = [c_ref_output_path]
+ remove_user_output = c_user_output_path
+ remove_ref_output = c_ref_output_path
+
+ return (ref_path, self.submit_path, compile_command, compile_main,
+ run_command_args, remove_user_output, remove_ref_output)
+
+ def _teardown(self):
+ # Delete the created file.
+ super(CppCodeEvaluator, self)._teardown()
+ os.remove(self.submit_path)
+
+ def _check_code(self, ref_code_path, submit_code_path, compile_command,
+ compile_main, run_command_args, remove_user_output,
+ remove_ref_output):
+        """ Function validates the student code using the instructor
+        code as reference. The first argument, ref_code_path, is the
+        path to the instructor code; it is assumed to have executable
+        permission. The second argument, submit_code_path, is the path
+        to the student code; it is also assumed to have executable
+        permission.
+
+ Returns
+ --------
+
+ returns (True, "Correct answer") : If the student function returns
+ expected output when called by reference code.
+
+ returns (False, error_msg): If the student function fails to return
+ expected output when called by reference code.
+
+ Returns (False, error_msg): If mandatory arguments are not files or
+ if the required permissions are not given to the file(s).
+
+ """
+ if not isfile(ref_code_path):
+ return False, "No file at %s or Incorrect path" % ref_code_path
+ if not isfile(submit_code_path):
+ return False, 'No file at %s or Incorrect path' % submit_code_path
+
+ success = False
+ ret = self._compile_command(compile_command)
+ proc, stdnt_stderr = ret
+ stdnt_stderr = self._remove_null_substitute_char(stdnt_stderr)
+
+ # Only if compilation is successful, the program is executed
+ # And tested with testcases
+ if stdnt_stderr == '':
+ ret = self._compile_command(compile_main)
+ proc, main_err = ret
+ main_err = self._remove_null_substitute_char(main_err)
+
+ if main_err == '':
+ ret = self._run_command(run_command_args, stdin=None,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ proc, stdout, stderr = ret
+ if proc.returncode == 0:
+ success, err = True, "Correct answer"
+ else:
+ err = stdout + "\n" + stderr
+ os.remove(remove_ref_output)
+ else:
+ err = "Error:"
+ try:
+ error_lines = main_err.splitlines()
+ for e in error_lines:
+ if ':' in e:
+ err = err + "\n" + e.split(":", 1)[1]
+ else:
+ err = err + "\n" + e
+ except:
+ err = err + "\n" + main_err
+ os.remove(remove_user_output)
+ else:
+ err = "Compilation Error:"
+ try:
+ error_lines = stdnt_stderr.splitlines()
+ for e in error_lines:
+ if ':' in e:
+ err = err + "\n" + e.split(":", 1)[1]
+ else:
+ err = err + "\n" + e
+ except:
+ err = err + "\n" + stdnt_stderr
+
+ return success, err
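The C/C++ check is a two-stage build: the submission is compiled on its own
to an object file, and only if that succeeds is it linked against the
instructor's main and executed, with the process return code deciding
success. A minimal sketch of those stages outside the class, assuming g++ on
the PATH; the file names are illustrative:

    import subprocess

    def compile_cmd(cmd):
        # Mirror _compile_command above: run via the shell, capture stderr.
        proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate()
        return proc, err

    # Stage 1: compile the student's code by itself.
    proc, err = compile_cmd('g++ submit.c -c -o output')
    if err == '':
        # Stage 2: link the instructor's main against the object file.
        proc, err = compile_cmd('g++ main.cpp output -o executable')
        if err == '':
            run = subprocess.Popen(['./executable'], stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
            stdout, stderr = run.communicate()
            print('Correct answer' if run.returncode == 0 else stderr)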
diff --git a/testapp/exam/forms.py b/testapp/exam/forms.py
index 1f12a3b..93584a6 100644
--- a/testapp/exam/forms.py
+++ b/testapp/exam/forms.py
@@ -1,5 +1,5 @@
from django import forms
-from testapp.exam.models import Profile, Quiz, Question
+from exam.models import Profile, Quiz, Question, TestCase
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
@@ -8,6 +8,7 @@ from taggit.forms import TagField
from taggit_autocomplete_modified.managers import TaggableManagerAutocomplete
from taggit_autocomplete_modified.widgets import TagAutocomplete
from taggit_autocomplete_modified import settings
+from django.forms.models import inlineformset_factory
from string import letters, punctuation, digits
import datetime
@@ -177,7 +178,7 @@ class QuizForm(forms.Form):
new_quiz.save()
-class QuestionForm(forms.Form):
+class QuestionForm(forms.ModelForm):
"""Creates a form to add or edit a Question.
It has the related fields and functions required."""
@@ -186,8 +187,8 @@ class QuestionForm(forms.Form):
description = forms.CharField(widget=forms.Textarea\
(attrs={'cols': 40, 'rows': 1}))
points = forms.FloatField()
- test = forms.CharField(widget=forms.Textarea\
- (attrs={'cols': 40, 'rows': 1}))
+ solution = forms.CharField(widget=forms.Textarea\
+ (attrs={'cols': 40, 'rows': 1}), required=False)
options = forms.CharField(widget=forms.Textarea\
(attrs={'cols': 40, 'rows': 1}), required=False)
language = forms.CharField(max_length=20, widget=forms.Select\
@@ -198,29 +199,37 @@ class QuestionForm(forms.Form):
tags = TagField(widget=TagAutocomplete(), required=False)
snippet = forms.CharField(widget=forms.Textarea\
(attrs={'cols': 40, 'rows': 1}), required=False)
-
- def save(self):
- summary = self.cleaned_data["summary"]
- description = self.cleaned_data["description"]
- points = self.cleaned_data['points']
- test = self.cleaned_data["test"]
- options = self.cleaned_data['options']
- language = self.cleaned_data['language']
- type = self.cleaned_data["type"]
- active = self.cleaned_data["active"]
- snippet = self.cleaned_data["snippet"]
+ ref_code_path = forms.CharField(widget=forms.Textarea\
+ (attrs={'cols': 40, 'rows': 1}), required=False)
+
+ def save(self, commit=True):
+ summary = self.cleaned_data.get("summary")
+ description = self.cleaned_data.get("description")
+ points = self.cleaned_data.get("points")
+ options = self.cleaned_data.get("options")
+ language = self.cleaned_data.get("language")
+ type = self.cleaned_data.get("type")
+ active = self.cleaned_data.get("active")
+ snippet = self.cleaned_data.get("snippet")
new_question = Question()
new_question.summary = summary
new_question.description = description
new_question.points = points
- new_question.test = test
+ # new_question.test = test
new_question.options = options
new_question.language = language
new_question.type = type
new_question.active = active
new_question.snippet = snippet
- new_question.save()
+ new_question = super(QuestionForm, self).save(commit=False)
+ if commit:
+ new_question.save()
+
+ return new_question
+
+ class Meta:
+ model = Question
class RandomQuestionForm(forms.Form):
@@ -229,3 +238,6 @@ class RandomQuestionForm(forms.Form):
marks = forms.CharField(max_length=8, widget=forms.Select\
(choices=(('select', 'Select Marks'),)))
shuffle_questions = forms.BooleanField(required=False)
+
+TestCaseFormSet = inlineformset_factory(Question, TestCase,\
+ can_order=False, can_delete=False, extra=1)
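TestCaseFormSet binds TestCase rows inline to their parent Question, which
lets the add/edit views accept a question and its test cases in a single
request. A hedged sketch of the usual inline-formset flow in a view; the
view body, redirect target, and template name are illustrative, not the
project's actual code:

    from django.shortcuts import render, redirect
    from exam.forms import QuestionForm, TestCaseFormSet

    def add_question(request):
        form = QuestionForm(request.POST or None)
        formset = TestCaseFormSet(request.POST or None)
        if form.is_valid() and formset.is_valid():
            question = form.save()
            formset.instance = question  # attach the rows to the new parent
            formset.save()
            return redirect('/exam/manage/')
        return render(request, 'exam/add_question.html',
                      {'form': form, 'formset': formset})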
diff --git a/testapp/exam/java_code_evaluator.py b/testapp/exam/java_code_evaluator.py
new file mode 100644
index 0000000..08ae208
--- /dev/null
+++ b/testapp/exam/java_code_evaluator.py
@@ -0,0 +1,127 @@
+#!/usr/bin/env python
+import traceback
+import pwd
+import os
+from os.path import join, isfile
+import subprocess
+import importlib
+
+# local imports
+from code_evaluator import CodeEvaluator
+
+
+class JavaCodeEvaluator(CodeEvaluator):
+ """Tests the Java code obtained from Code Server"""
+ def __init__(self, test_case_data, test, language, user_answer,
+ ref_code_path=None, in_dir=None):
+ super(JavaCodeEvaluator, self).__init__(test_case_data, test,
+ language, user_answer,
+ ref_code_path, in_dir)
+ self.submit_path = self.create_submit_code_file('Test.java')
+ self.test_case_args = self._setup()
+
+ # Private Protocol ##########
+ def _setup(self):
+ super(JavaCodeEvaluator, self)._setup()
+
+ ref_path, test_case_path = self._set_test_code_file_path(self.ref_code_path)
+
+ # Set file paths
+ java_student_directory = os.getcwd() + '/'
+ java_ref_file_name = (ref_path.split('/')[-1]).split('.')[0]
+
+ # Set command variables
+        compile_command = 'javac {0}'.format(self.submit_path)
+ compile_main = ('javac {0} -classpath '
+ '{1} -d {2}').format(ref_path,
+ java_student_directory,
+ java_student_directory)
+ run_command_args = "java -cp {0} {1}".format(java_student_directory,
+ java_ref_file_name)
+ remove_user_output = "{0}{1}.class".format(java_student_directory,
+ 'Test')
+ remove_ref_output = "{0}{1}.class".format(java_student_directory,
+ java_ref_file_name)
+
+ return (ref_path, self.submit_path, compile_command, compile_main,
+ run_command_args, remove_user_output, remove_ref_output)
+
+ def _teardown(self):
+ # Delete the created file.
+ super(JavaCodeEvaluator, self)._teardown()
+ os.remove(self.submit_path)
+
+ def _check_code(self, ref_code_path, submit_code_path, compile_command,
+ compile_main, run_command_args, remove_user_output,
+ remove_ref_output):
+ """ Function validates student code using instructor code as
+ reference.The first argument ref_code_path, is the path to
+ instructor code, it is assumed to have executable permission.
+ The second argument submit_code_path, is the path to the student
+ code, it is assumed to have executable permission.
+
+        Returns
+        -------
+
+        (True, "Correct answer"): if the student function returns the
+        expected output when called by the reference code.
+
+        (False, error_msg): if the student function fails to return the
+        expected output when called by the reference code.
+
+        (False, error_msg): if the mandatory arguments are not files or
+        if the required permissions are not given to the file(s).
+
+ """
+ if not isfile(ref_code_path):
+ return False, "No file at %s or Incorrect path" % ref_code_path
+ if not isfile(submit_code_path):
+ return False, 'No file at %s or Incorrect path' % submit_code_path
+
+ success = False
+ ret = self._compile_command(compile_command)
+ proc, stdnt_stderr = ret
+ stdnt_stderr = self._remove_null_substitute_char(stdnt_stderr)
+
+ # Only if compilation is successful, the program is executed
+ # And tested with testcases
+ if stdnt_stderr == '':
+ ret = self._compile_command(compile_main)
+ proc, main_err = ret
+ main_err = self._remove_null_substitute_char(main_err)
+
+ if main_err == '':
+ ret = self._run_command(run_command_args, stdin=None,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ proc, stdout, stderr = ret
+ if proc.returncode == 0:
+ success, err = True, "Correct answer"
+ else:
+ err = stdout + "\n" + stderr
+ os.remove(remove_ref_output)
+ else:
+ err = "Error:"
+ try:
+ error_lines = main_err.splitlines()
+ for e in error_lines:
+ if ':' in e:
+ err = err + "\n" + e.split(":", 1)[1]
+ else:
+ err = err + "\n" + e
+ except:
+ err = err + "\n" + main_err
+ os.remove(remove_user_output)
+ else:
+ err = "Compilation Error:"
+ try:
+ error_lines = stdnt_stderr.splitlines()
+ for e in error_lines:
+ if ':' in e:
+ err = err + "\n" + e.split(":", 1)[1]
+ else:
+ err = err + "\n" + e
+ except:
+ err = err + "\n" + stdnt_stderr
+
+ return success, err
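
For a concrete picture of the compile/run pipeline above, these are the command strings _setup assembles when the reference file is java_files/main_square.java and the working directory is /tmp/foo/ (both paths illustrative):

    compile_command = 'javac /tmp/foo/Test.java'          # student code
    compile_main = ('javac java_files/main_square.java '
                    '-classpath /tmp/foo/ -d /tmp/foo/')  # instructor main
    run_command_args = 'java -cp /tmp/foo/ main_square'
    remove_user_output = '/tmp/foo/Test.class'            # removed in cleanup
    remove_ref_output = '/tmp/foo/main_square.class'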
diff --git a/testapp/java_files/main_array_sum.java b/testapp/exam/java_files/main_array_sum.java
index 5eae299..5eae299 100644
--- a/testapp/java_files/main_array_sum.java
+++ b/testapp/exam/java_files/main_array_sum.java
diff --git a/testapp/java_files/main_fact.java b/testapp/exam/java_files/main_fact.java
index 325dab6..325dab6 100644
--- a/testapp/java_files/main_fact.java
+++ b/testapp/exam/java_files/main_fact.java
diff --git a/testapp/java_files/main_great.java b/testapp/exam/java_files/main_great.java
index 4bfcb1f..4bfcb1f 100644
--- a/testapp/java_files/main_great.java
+++ b/testapp/exam/java_files/main_great.java
diff --git a/testapp/java_files/main_hello_name.java b/testapp/exam/java_files/main_hello_name.java
index 84bb282..84bb282 100644
--- a/testapp/java_files/main_hello_name.java
+++ b/testapp/exam/java_files/main_hello_name.java
diff --git a/testapp/java_files/main_lastDigit.java b/testapp/exam/java_files/main_lastDigit.java
index 05439e2..05439e2 100644
--- a/testapp/java_files/main_lastDigit.java
+++ b/testapp/exam/java_files/main_lastDigit.java
diff --git a/testapp/java_files/main_moreThan30.java b/testapp/exam/java_files/main_moreThan30.java
index 7da31cb..7da31cb 100644
--- a/testapp/java_files/main_moreThan30.java
+++ b/testapp/exam/java_files/main_moreThan30.java
diff --git a/testapp/java_files/main_palindrome.java b/testapp/exam/java_files/main_palindrome.java
index c0745f9..c0745f9 100644
--- a/testapp/java_files/main_palindrome.java
+++ b/testapp/exam/java_files/main_palindrome.java
diff --git a/testapp/java_files/main_square.java b/testapp/exam/java_files/main_square.java
index 5cb8c35..5cb8c35 100644
--- a/testapp/java_files/main_square.java
+++ b/testapp/exam/java_files/main_square.java
diff --git a/testapp/exam/language_registry.py b/testapp/exam/language_registry.py
new file mode 100644
index 0000000..76a23d7
--- /dev/null
+++ b/testapp/exam/language_registry.py
@@ -0,0 +1,36 @@
+from settings import code_evaluators
+import importlib
+
+registry = None
+
+def set_registry():
+ global registry
+ registry = _LanguageRegistry()
+
+def get_registry():
+ return registry
+
+class _LanguageRegistry(object):
+ def __init__(self):
+ self._register = {}
+        for language in code_evaluators:
+            self._register[language] = None
+
+ # Public Protocol ##########
+ def get_class(self, language):
+ """ Get the code evaluator class for the given language """
+ if not self._register.get(language):
+ self._register[language] = code_evaluators.get(language)
+
+ cls = self._register[language]
+ module_name, class_name = cls.rsplit(".", 1)
+ # load the module, will raise ImportError if module cannot be loaded
+ get_module = importlib.import_module(module_name)
+ # get the class, will raise AttributeError if class cannot be found
+ get_class = getattr(get_module, class_name)
+ return get_class
+
+ def register(self, language, class_name):
+ """ Register a new code evaluator class for language"""
+ self._register[language] = class_name
+
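A short usage sketch of the registry above, assuming the code_evaluators mapping added to settings.py later in this diff:

    from language_registry import set_registry, get_registry

    set_registry()                      # create the module-level registry
    registry = get_registry()
    cls = registry.get_class('python')  # resolves to PythonCodeEvaluator
    # a custom evaluator can be swapped in at runtime by dotted path
    # (module and class names hypothetical):
    registry.register('python', 'my_module.MyEvaluator')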
diff --git a/testapp/exam/models.py b/testapp/exam/models.py
index 72fb51b..c5043dc 100644
--- a/testapp/exam/models.py
+++ b/testapp/exam/models.py
@@ -1,4 +1,5 @@
import datetime
+import json
from random import sample, shuffle
from django.db import models
from django.contrib.auth.models import User
@@ -19,8 +20,8 @@ class Profile(models.Model):
languages = (
("python", "Python"),
("bash", "Bash"),
- ("C", "C Language"),
- ("C++", "C++ Language"),
+ ("c", "C Language"),
+ ("cpp", "C++ Language"),
("java", "Java Language"),
("scilab", "Scilab"),
)
@@ -59,9 +60,13 @@ class Question(models.Model):
# Number of points for the question.
points = models.FloatField(default=1.0)
- # Test cases for the question in the form of code that is run.
+ # Answer for MCQs.
test = models.TextField(blank=True)
+    # Test case file paths (comma separated: reference code path and test case code path).
+ # Applicable for CPP, C, Java and Scilab
+ ref_code_path = models.TextField(blank=True)
+
# Any multiple choice options. Place one option per line.
options = models.TextField(blank=True)
@@ -82,6 +87,41 @@ class Question(models.Model):
# Tags for the Question.
tags = TaggableManager()
+ def consolidate_answer_data(self, test_cases, user_answer):
+ test_case_data_dict = []
+ question_info_dict = {}
+
+ for test_case in test_cases:
+ kw_args_dict = {}
+ pos_args_list = []
+
+ test_case_data = {}
+ test_case_data['test_id'] = test_case.id
+ test_case_data['func_name'] = test_case.func_name
+ test_case_data['expected_answer'] = test_case.expected_answer
+
+ if test_case.kw_args:
+ for args in test_case.kw_args.split(","):
+ arg_name, arg_value = args.split("=")
+ kw_args_dict[arg_name.strip()] = arg_value.strip()
+
+ if test_case.pos_args:
+ for args in test_case.pos_args.split(","):
+ pos_args_list.append(args.strip())
+
+ test_case_data['kw_args'] = kw_args_dict
+ test_case_data['pos_args'] = pos_args_list
+ test_case_data_dict.append(test_case_data)
+
+ # question_info_dict['language'] = self.language
+ question_info_dict['id'] = self.id
+ question_info_dict['user_answer'] = user_answer
+ question_info_dict['test_parameter'] = test_case_data_dict
+ question_info_dict['ref_code_path'] = self.ref_code_path
+ question_info_dict['test'] = self.test
+
+ return json.dumps(question_info_dict)
+
def __unicode__(self):
return self.summary
@@ -396,3 +436,20 @@ class AssignmentUpload(models.Model):
user = models.ForeignKey(Profile)
assignmentQuestion = models.ForeignKey(Question)
assignmentFile = models.FileField(upload_to=get_assignment_dir)
+
+
+################################################################################
+class TestCase(models.Model):
+    question = models.ForeignKey(Question, blank=True, null=True)
+
+    # Test case function name
+    func_name = models.CharField(blank=True, null=True, max_length=200)
+
+    # Test case keyword arguments in dict form
+    kw_args = models.TextField(blank=True, null=True)
+
+    # Test case positional arguments in list form
+    pos_args = models.TextField(blank=True, null=True)
+
+    # Test case expected answer in list form
+    expected_answer = models.TextField(blank=True, null=True)
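
To illustrate consolidate_answer_data: for a question whose ref_code_path and test fields are empty, a TestCase row with func_name='add', pos_args='3, 2', kw_args='x=1' and expected_answer='6' serializes to JSON of roughly this shape (ids and user answer illustrative):

    {"id": 42,
     "user_answer": "def add(a, b, x=0):\n    return a + b + x",
     "test_parameter": [{"test_id": 7,
                         "func_name": "add",
                         "expected_answer": "6",
                         "kw_args": {"x": "1"},
                         "pos_args": ["3", "2"]}],
     "ref_code_path": "",
     "test": ""}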
diff --git a/testapp/exam/python_code_evaluator.py b/testapp/exam/python_code_evaluator.py
new file mode 100644
index 0000000..0c473cf
--- /dev/null
+++ b/testapp/exam/python_code_evaluator.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+import sys
+import traceback
+import os
+from os.path import join
+import importlib
+
+# local imports
+from code_evaluator import CodeEvaluator
+
+
+class PythonCodeEvaluator(CodeEvaluator):
+ """Tests the Python code obtained from Code Server"""
+ # Private Protocol ##########
+ def _check_code(self):
+ success = False
+
+ try:
+ tb = None
+ test_code = self._create_test_case()
+ submitted = compile(self.user_answer, '<string>', mode='exec')
+ g = {}
+ exec submitted in g
+ _tests = compile(test_code, '<string>', mode='exec')
+ exec _tests in g
+ except AssertionError:
+ type, value, tb = sys.exc_info()
+ info = traceback.extract_tb(tb)
+ fname, lineno, func, text = info[-1]
+ text = str(test_code).splitlines()[lineno-1]
+ err = "{0} {1} in: {2}".format(type.__name__, str(value), text)
+ else:
+ success = True
+ err = 'Correct answer'
+
+ del tb
+ return success, err
+
+ def _create_test_case(self):
+ """
+        Create assert-based test cases in Python
+ """
+ test_code = ""
+ if self.test:
+ return self.test
+ elif self.test_case_data:
+ for test_case in self.test_case_data:
+ pos_args = ", ".join(str(i) for i in test_case.get('pos_args')) \
+ if test_case.get('pos_args') else ""
+ kw_args = ", ".join(str(k+"="+a) for k, a
+ in test_case.get('kw_args').iteritems()) \
+ if test_case.get('kw_args') else ""
+ args = pos_args + ", " + kw_args if pos_args and kw_args \
+ else pos_args or kw_args
+ function_name = test_case.get('func_name')
+ expected_answer = test_case.get('expected_answer')
+
+ tcode = "assert {0}({1}) == {2}".format(function_name, args,
+ expected_answer)
+ test_code += tcode + "\n"
+ return test_code
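
For the test_case_data used in the unit tests further down (func_name 'add', pos_args ['3', '2'], empty kw_args, expected_answer '5'), _create_test_case emits one assert per test case:

    assert add(3, 2) == 5
    # with kw_args {'b': '2'} instead of the second positional argument,
    # it would emit:
    assert add(3, b=2) == 5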
diff --git a/testapp/exam/scilab_code_evaluator.py b/testapp/exam/scilab_code_evaluator.py
new file mode 100644
index 0000000..53640cc
--- /dev/null
+++ b/testapp/exam/scilab_code_evaluator.py
@@ -0,0 +1,95 @@
+#!/usr/bin/env python
+import traceback
+import os
+from os.path import join, isfile
+import subprocess
+import re
+import importlib
+
+# local imports
+from code_evaluator import CodeEvaluator
+
+
+class ScilabCodeEvaluator(CodeEvaluator):
+ """Tests the Scilab code obtained from Code Server"""
+ def __init__(self, test_case_data, test, language, user_answer,
+ ref_code_path=None, in_dir=None):
+ super(ScilabCodeEvaluator, self).__init__(test_case_data, test,
+ language, user_answer,
+ ref_code_path, in_dir)
+ self.submit_path = self.create_submit_code_file('function.sci')
+ self.test_case_args = self._setup()
+
+ # Private Protocol ##########
+ def _setup(self):
+ super(ScilabCodeEvaluator, self)._setup()
+
+ ref_path, test_case_path = self._set_test_code_file_path(self.ref_code_path)
+
+ return ref_path, # Return as a tuple
+
+ def _teardown(self):
+ # Delete the created file.
+ super(ScilabCodeEvaluator, self)._teardown()
+ os.remove(self.submit_path)
+
+ def _check_code(self, ref_path):
+        success = False
+        add_err = ""  # extra text prepended to the error output
+
+ cmd = 'printf "lines(0)\nexec(\'{0}\',2);\nquit();"'.format(ref_path)
+ cmd += ' | timeout 8 scilab-cli -nb'
+ ret = self._run_command(cmd,
+ shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ proc, stdout, stderr = ret
+
+ # Get only the error.
+ stderr = self._get_error(stdout)
+ if stderr is None:
+ # Clean output
+ stdout = self._strip_output(stdout)
+ if proc.returncode == 5:
+ success, err = True, "Correct answer"
+ else:
+ err = add_err + stdout
+ else:
+ err = add_err + stderr
+
+ return success, err
+
+ def _remove_scilab_exit(self, string):
+ """
+ Removes exit, quit and abort from the scilab code
+ """
+ new_string = ""
+ i = 0
+ for line in string.splitlines():
+ new_line = re.sub(r"exit.*$", "", line)
+ new_line = re.sub(r"quit.*$", "", new_line)
+ new_line = re.sub(r"abort.*$", "", new_line)
+ if line != new_line:
+ i = i + 1
+ new_string = new_string + '\n' + new_line
+ return new_string, i
+
+ def _get_error(self, string):
+ """
+ Fetches only the error from the string.
+ Returns None if no error.
+ """
+ obj = re.search("!.+\n.+", string)
+ if obj:
+ return obj.group()
+ return None
+
+ def _strip_output(self, out):
+ """
+ Cleans whitespace from the output
+ """
+ strip_out = "Message"
+ for l in out.split('\n'):
+ if l.strip():
+ strip_out = strip_out+"\n"+l.strip()
+ return strip_out
+
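A quick illustration of the two helpers above (the error banner text is only illustrative of the "!..." format that _get_error matches):

    out = '!--error 4\nUndefined variable: dis'
    # re.search("!.+\n.+", out) matches, so _get_error returns both lines
    clean = ' ans  =\n    5.\n'
    # _strip_output(clean) -> 'Message\nans  =\n5.'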
diff --git a/testapp/scilab_files/test_add.sce b/testapp/exam/scilab_files/test_add.sce
index a317cdb..a317cdb 100644
--- a/testapp/scilab_files/test_add.sce
+++ b/testapp/exam/scilab_files/test_add.sce
diff --git a/testapp/exam/settings.py b/testapp/exam/settings.py
index 682516f..93f90a9 100644
--- a/testapp/exam/settings.py
+++ b/testapp/exam/settings.py
@@ -18,3 +18,11 @@ SERVER_TIMEOUT = 2
# reason set this to the root you have to serve at. In the above example
# host.org/foo/exam set URL_ROOT='/foo'
URL_ROOT = ''
+
+code_evaluators = {"python": "python_code_evaluator.PythonCodeEvaluator",
+ "c": "c_cpp_code_evaluator.CCPPCodeEvaluator",
+ "cpp": "c_cpp_code_evaluator.CCPPCodeEvaluator",
+ "java": "java_evaluator.JavaCodeEvaluator",
+ "bash": "bash_evaluator.BashCodeEvaluator",
+ "scilab": "scilab_evaluator.ScilabCodeEvaluator",
+ }
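
With this mapping in place, wiring up an additional language needs only a new evaluator module plus an entry here; a hedged sketch with hypothetical names:

    code_evaluators["ruby"] = "ruby_code_evaluator.RubyCodeEvaluator"
    # language_registry then resolves the dotted path lazily:
    #   module_name, class_name = path.rsplit(".", 1)
    #   cls = getattr(importlib.import_module(module_name), class_name)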
diff --git a/testapp/exam/static/exam/js/add_question.js b/testapp/exam/static/exam/js/add_question.js
index 267cdb2..946c139 100644
--- a/testapp/exam/static/exam/js/add_question.js
+++ b/testapp/exam/static/exam/js/add_question.js
@@ -153,8 +153,7 @@ function textareaformat()
if(value == 'mcq' || value == 'mcc')
{
document.getElementById('id_options').style.visibility='visible';
- document.getElementById('label_option').innerHTML="Options :"
-
+ document.getElementById('label_option').innerHTML="Options :";
}
else
{
@@ -168,7 +167,6 @@ function textareaformat()
{
document.getElementById('id_options').style.visibility='visible';
document.getElementById('label_option').innerHTML="Options :"
-
}
else
{
@@ -189,8 +187,9 @@ function autosubmit()
if(type.value == 'select')
{
type.style.border = 'solid red';
- return false;
- }
+ return false;
+ }
+
if (type.value == 'mcq' || type.value == 'mcc')
{
diff --git a/testapp/exam/templates/exam/add_question.html b/testapp/exam/templates/exam/add_question.html
index b0b22b1..43f09e1 100644
--- a/testapp/exam/templates/exam/add_question.html
+++ b/testapp/exam/templates/exam/add_question.html
@@ -27,14 +27,25 @@
<tr><td>Points:<td><button class="btn-mini" type="button" onClick="increase(frm);">+</button>{{ form.points }}<button class="btn-mini" type="button" onClick="decrease(frm);">-</button>{{ form.points.errors }}
<tr><td><strong>Rendered: </strong><td><p id='my'></p>
<tr><td>Description: <td>{{ form.description}} {{form.description.errors}}
- <tr><td>Test: <td>{{ form.test }}{{form.test.errors}}
<tr><td>Snippet: <td>{{ form.snippet }}{{ form.snippet.errors }}</td></tD></td></tr>
<tr><td>Tags: <td>{{ form.tags }}
- <tr><td id='label_option'>Options: <td>{{ form.options }} {{form.options.errors}}
-
-
+ <tr><td id='label_option'>Options: <td>{{ form.options }} {{form.options.errors}}
+ <tr><td id='label_solution'>Test: <td>{{ form.solution }} {{form.solution.errors}}
+ <tr><td id='label_ref_code_path'>Reference Code Path: <td>{{ form.ref_code_path }} {{form.ref_code_path.errors}}
+
+ <form method="post" action="">
+ {% if formset%}
+ {{ formset.management_form }}
+ {% for form in formset %}
+ {{ form }}
+ {% endfor %}
+ {% endif %}
+ </form>
</table></center>
- <center><button class="btn" type="submit" name="savequestion">Save</button>
+ <center><button class="btn" type="submit" name="add_test">Add Test Case</button>
+ <button class="btn" type="submit" name="delete_test">Remove Test Case</button>
+ </center><br>
+ <center><button class="btn" type="submit" name="save_question">Save</button>
<button class="btn" type="button" name="button" onClick='location.replace("{{URL_ROOT}}/exam/manage/questions/");'>Cancel</button> </center>
</form>
{% endblock %}
diff --git a/testapp/exam/templates/exam/edit_question.html b/testapp/exam/templates/exam/edit_question.html
index b28cc3e..6deca4a 100644
--- a/testapp/exam/templates/exam/edit_question.html
+++ b/testapp/exam/templates/exam/edit_question.html
@@ -21,29 +21,37 @@
<table>
- {% for form in forms %}
+ {% for question, test in data_list %}
- <tr><td height=10><a id='a{{forloop.counter}}' onClick="data('contentDiv{{forloop.counter}}','myContent{{forloop.counter}}','a{{forloop.counter}}','{{form.summary.value}}');" style='cursor:pointer;'>{{form.summary.value}}</a>
-
+ <tr><td height=10><a id='a{{forloop.counter}}' onClick="data('contentDiv{{forloop.counter}}','myContent{{forloop.counter}}','a{{forloop.counter}}','{{question.summary.value}}');" style='cursor:pointer;'>{{question.summary.value}}</a>
+
<div id="contentDiv{{forloop.counter}}" style="display:none;">
<div id="myContent{{forloop.counter}}" style="display: none;">
-
- <center><table class=span1>
- <tr><td><b>Summary:</b> <td>{{ form.summary }}{{ form.summary.errors }}
- <tr><td><b> Language: </b><td> {{form.language}}{{form.language.errors}}
- <tr><td><b> Active: </b><td>&nbsp; {{ form.active }}{{form.active.errors}} &nbsp; Type: &nbsp;{{ form.type }}{{form.type.errors}}
- <tr><td><b>Points:<td><button class="btn-mini" name={{forloop.counter}} type="button" onClick="increase(frm,{{forloop.counter}});">+</button>{{ form.points }}<button class="btn-mini" type="button" onClick="decrease(frm,{{forloop.counter}});">-</button>{{ form.points.errors }}
+
+ <center><table class=span1>
+ <tr><td><b>Summary:</b> <td>{{ question.summary }}{{ question.summary.errors }}
+ <tr><td><b> Language: </b><td> {{question.language}}{{question.language.errors}}
+ <tr><td><b> Active: </b><td>&nbsp; {{ question.active }}{{question.active.errors}} &nbsp; Type: &nbsp;{{ question.type }}{{question.type.errors}}
+ <tr><td><b>Points:<td><button class="btn-mini" name={{forloop.counter}} type="button" onClick="increase(frm,{{forloop.counter}});">+</button>{{ question.points }}<button class="btn-mini" type="button" onClick="decrease(frm,{{forloop.counter}});">-</button>{{ question.points.errors }}
<tr><td><strong>Rendered: </strong><td><p id='my{{forloop.counter}}'></p>
- <tr><td><b>Description: <td>{{ form.description }} {{form.description.errors}}
- <tr><td><b>Test: <td>{{ form.test }}{{form.test.errors}}
- <tr><td><b>Snippet: <td>{{ form.snippet }}{{ form.snippet.errors }}</td></b></td></tr>
- <tr><td><b>Tags: </b><td>{{ form.tags }}
- <tr><td id='label_option{{forloop.counter}}'><b>Options:<td>{{ form.options }} {{form.options.errors}} {{form.options.helptext}}
+ <tr><td><b>Description: <td>{{ question.description }}
+ {{question.description.errors}} <tr><td><b>Test: <td>
+	    {{ question.solution }}{{question.solution.errors}}
+	    <tr><td><b>Reference Code Path: <td>{{ question.ref_code_path }}{{question.ref_code_path.errors}}
+ <tr><td><b>Snippet: <td>{{ question.snippet }}{{ question.snippet.errors }}
+ </td></b></td></tr>
+ <tr><td><b>Tags: </b><td>{{ question.tags }}
+ <tr><td id='label_option{{forloop.counter}}'><b>Options:<td>{{ question.options }}
+ {{question.options.errors}} {{question.options.helptext}}
+ </table></center>
+ <center><table class=span1>
+ {{ test }}
</table></center>
</div>
</div>
{% endfor %}
</table></center>
+
+
{% for i in data %}
<input type=hidden name='questions' value="{{ i }}" />
{% endfor %}
diff --git a/testapp/exam/tests.py b/testapp/exam/tests.py
index d76e4f8..ff48c25 100644
--- a/testapp/exam/tests.py
+++ b/testapp/exam/tests.py
@@ -1,8 +1,7 @@
from django.utils import unittest
from exam.models import User, Profile, Question, Quiz, QuestionPaper,\
- QuestionSet, AnswerPaper, Answer
-import datetime
-
+ QuestionSet, AnswerPaper, Answer, TestCase
+import datetime
+import json
def setUpModule():
# create user profile
@@ -51,12 +50,31 @@ class ProfileTestCases(unittest.TestCase):
class QuestionTestCases(unittest.TestCase):
def setUp(self):
# Single question details
+ # self.question = Question(summary='Demo question', language='Python',
+ # type='Code', active=True,
+ # description='Write a function', points=1.0,
+ # test='Test Cases', snippet='def myfunc()')
self.question = Question(summary='Demo question', language='Python',
type='Code', active=True,
description='Write a function', points=1.0,
- test='Test Cases', snippet='def myfunc()')
+ snippet='def myfunc()')
self.question.save()
self.question.tags.add('python', 'function')
+ self.testcase = TestCase(question=self.question,
+ func_name='def myfunc', kw_args='a=10,b=11',
+ pos_args='12,13', expected_answer='15')
+ answer_data = {"user_answer": "demo_answer",
+ "test_parameter": [{"func_name": "def myfunc",
+ "expected_answer": "15",
+ "test_id": self.testcase.id,
+ "pos_args": ["12", "13"],
+ "kw_args": {"a": "10",
+ "b": "11"}
+ }],
+ "id": self.question.id,
+ "language": "Python"}
+ self.answer_data_json = json.dumps(answer_data)
+ self.user_answer = "demo_answer"
def test_question(self):
""" Test question """
@@ -67,13 +85,40 @@ class QuestionTestCases(unittest.TestCase):
self.assertEqual(self.question.description, 'Write a function')
self.assertEqual(self.question.points, 1.0)
self.assertTrue(self.question.active)
- self.assertEqual(self.question.test, 'Test Cases')
self.assertEqual(self.question.snippet, 'def myfunc()')
tag_list = []
for tag in self.question.tags.all():
tag_list.append(tag.name)
self.assertEqual(tag_list, ['python', 'function'])
+ def test_consolidate_answer_data(self):
+ """ Test consolidate_answer_data function """
+ result = self.question.consolidate_answer_data([self.testcase],
+ self.user_answer)
+ self.assertEqual(result, self.answer_data_json)
+
+
+
+###############################################################################
+class TestCaseTestCases(unittest.TestCase):
+ def setUp(self):
+ self.question = Question(summary='Demo question', language='Python',
+ type='Code', active=True,
+ description='Write a function', points=1.0,
+ snippet='def myfunc()')
+ self.question.save()
+ self.testcase = TestCase(question=self.question,
+ func_name='def myfunc', kw_args='a=10,b=11',
+ pos_args='12,13', expected_answer='15')
+
+ def test_testcase(self):
+ """ Test question """
+ self.assertEqual(self.testcase.question, self.question)
+ self.assertEqual(self.testcase.func_name, 'def myfunc')
+ self.assertEqual(self.testcase.kw_args, 'a=10,b=11')
+ self.assertEqual(self.testcase.pos_args, '12,13')
+ self.assertEqual(self.testcase.expected_answer, '15')
+
###############################################################################
class QuizTestCases(unittest.TestCase):
diff --git a/testapp/exam/views.py b/testapp/exam/views.py
index 11aca06..5b7baac 100644
--- a/testapp/exam/views.py
+++ b/testapp/exam/views.py
@@ -14,12 +14,13 @@ from django.db.models import Sum
from django.views.decorators.csrf import csrf_exempt
from taggit.models import Tag
from itertools import chain
+import json
# Local imports.
from testapp.exam.models import Quiz, Question, QuestionPaper, QuestionSet
-from testapp.exam.models import Profile, Answer, AnswerPaper, User
+from testapp.exam.models import Profile, Answer, AnswerPaper, User, TestCase
from testapp.exam.forms import UserRegisterForm, UserLoginForm, QuizForm,\
- QuestionForm, RandomQuestionForm
-from testapp.exam.xmlrpc_clients import code_server
+ QuestionForm, RandomQuestionForm, TestCaseFormSet
+from exam.xmlrpc_clients import code_server
from settings import URL_ROOT
from testapp.exam.models import AssignmentUpload
@@ -281,16 +282,14 @@ def edit_quiz(request):
def edit_question(request):
- """Edit the list of questions seleted by the user for editing."""
+ """Edit the list of questions selected by the user for editing."""
user = request.user
if not user.is_authenticated() or not is_moderator(user):
raise Http404('You are not allowed to view this page!')
-
question_list = request.POST.getlist('questions')
summary = request.POST.getlist('summary')
description = request.POST.getlist('description')
points = request.POST.getlist('points')
- test = request.POST.getlist('test')
options = request.POST.getlist('options')
type = request.POST.getlist('type')
active = request.POST.getlist('active')
@@ -298,14 +297,21 @@ def edit_question(request):
    snippet = request.POST.getlist('snippet')
+    ref_code_path = request.POST.getlist('ref_code_path')
+    solution = request.POST.getlist('solution')
for j, question_id in enumerate(question_list):
question = Question.objects.get(id=question_id)
+ test_case_formset = TestCaseFormSet(request.POST, prefix='test', instance=question)
+ if test_case_formset.is_valid():
+ test_case_instance = test_case_formset.save(commit=False)
+ for i in test_case_instance:
+ i.save()
+
question.summary = summary[j]
question.description = description[j]
question.points = points[j]
- question.test = test[j]
question.options = options[j]
question.active = active[j]
question.language = language[j]
question.snippet = snippet[j]
+        question.ref_code_path = ref_code_path[j]
+        question.test = solution[j]
question.type = type[j]
question.save()
return my_redirect("/exam/manage/questions")
@@ -314,6 +320,16 @@ def edit_question(request):
def add_question(request, question_id=None):
"""To add a new question in the database.
Create a new question and store it."""
+
+ def add_or_delete_test_form(post_request, instance):
+ request_copy = post_request.copy()
+ if 'add_test' in post_request:
+ request_copy['test-TOTAL_FORMS'] = int(request_copy['test-TOTAL_FORMS']) + 1
+ elif 'delete_test' in post_request:
+ request_copy['test-TOTAL_FORMS'] = int(request_copy['test-TOTAL_FORMS']) - 1
+ test_case_formset = TestCaseFormSet(request_copy, prefix='test', instance=instance)
+ return test_case_formset
+
user = request.user
ci = RequestContext(request)
if not user.is_authenticated() or not is_moderator(user):
@@ -321,44 +337,88 @@ def add_question(request, question_id=None):
if request.method == "POST":
form = QuestionForm(request.POST)
if form.is_valid():
- data = form.cleaned_data
if question_id is None:
- form.save()
- question = Question.objects.order_by("-id")[0]
- tags = form['tags'].data.split(',')
- for i in range(0, len(tags)-1):
- tag = tags[i].strip()
- question.tags.add(tag)
- return my_redirect("/exam/manage/questions")
+ test_case_formset = add_or_delete_test_form(request.POST, form.save(commit=False))
+ if 'save_question' in request.POST:
+ qtn = form.save(commit=False)
+ test_case_formset = TestCaseFormSet(request.POST, prefix='test', instance=qtn)
+ form.save()
+ question = Question.objects.order_by("-id")[0]
+ tags = form['tags'].data.split(',')
+ for i in range(0, len(tags)-1):
+ tag = tags[i].strip()
+ question.tags.add(tag)
+ if test_case_formset.is_valid():
+ test_case_formset.save()
+ else:
+ return my_render_to_response('exam/add_question.html',
+ {'form': form,
+ 'formset': test_case_formset},
+ context_instance=ci)
+
+ return my_redirect("/exam/manage/questions")
+
+ return my_render_to_response('exam/add_question.html',
+ {'form': form,
+ 'formset': test_case_formset},
+ context_instance=ci)
+
else:
d = Question.objects.get(id=question_id)
- d.summary = form['summary'].data
- d.description = form['description'].data
- d.points = form['points'].data
- d.test = form['test'].data
- d.options = form['options'].data
- d.type = form['type'].data
- d.active = form['active'].data
- d.language = form['language'].data
- d.snippet = form['snippet'].data
- d.save()
- question = Question.objects.get(id=question_id)
- for tag in question.tags.all():
- question.tags.remove(tag)
- tags = form['tags'].data.split(',')
- for i in range(0, len(tags)-1):
- tag = tags[i].strip()
- question.tags.add(tag)
- return my_redirect("/exam/manage/questions")
+ test_case_formset = add_or_delete_test_form(request.POST, d)
+ if 'save_question' in request.POST:
+ d.summary = form['summary'].data
+ d.description = form['description'].data
+ d.points = form['points'].data
+ d.options = form['options'].data
+ d.type = form['type'].data
+ d.active = form['active'].data
+ d.language = form['language'].data
+ d.snippet = form['snippet'].data
+ d.ref_code_path = form['ref_code_path'].data
+                d.test = form['solution'].data
+ d.save()
+ question = Question.objects.get(id=question_id)
+ for tag in question.tags.all():
+ question.tags.remove(tag)
+ tags = form['tags'].data.split(',')
+ for i in range(0, len(tags)-1):
+ tag = tags[i].strip()
+ question.tags.add(tag)
+
+ test_case_formset = TestCaseFormSet(request.POST, prefix='test', instance=question)
+ if test_case_formset.is_valid():
+ test_case_instance = test_case_formset.save(commit=False)
+ for i in test_case_instance:
+ i.save()
+ else:
+ return my_render_to_response('exam/add_question.html',
+ {'form': form,
+ 'formset': test_case_formset},
+ context_instance=ci)
+
+
+ return my_redirect("/exam/manage/questions")
+ return my_render_to_response('exam/add_question.html',
+ {'form': form,
+ 'formset': test_case_formset},
+ context_instance=ci)
+
else:
+ test_case_formset = add_or_delete_test_form(request.POST, form.save(commit=False))
return my_render_to_response('exam/add_question.html',
- {'form': form},
+ {'form': form,
+ 'formset': test_case_formset},
context_instance=ci)
else:
+ form = QuestionForm()
+ test_case_formset = TestCaseFormSet(prefix='test', instance=Question())
if question_id is None:
form = QuestionForm()
+ test_case_formset = TestCaseFormSet(prefix='test', instance=Question())
return my_render_to_response('exam/add_question.html',
- {'form': form},
+ {'form': form,
+ 'formset': test_case_formset},
context_instance=ci)
else:
d = Question.objects.get(id=question_id)
@@ -366,12 +426,13 @@ def add_question(request, question_id=None):
form.initial['summary'] = d.summary
form.initial['description'] = d.description
form.initial['points'] = d.points
- form.initial['test'] = d.test
form.initial['options'] = d.options
form.initial['type'] = d.type
form.initial['active'] = d.active
form.initial['language'] = d.language
form.initial['snippet'] = d.snippet
+ form.initial['ref_code_path'] = d.ref_code_path
+        form.initial['solution'] = d.test
form_tags = d.tags.all()
form_tags_split = form_tags.values('name')
initial_tags = ""
@@ -380,8 +441,13 @@ def add_question(request, question_id=None):
if (initial_tags == ","):
initial_tags = ""
form.initial['tags'] = initial_tags
+
+ test_case_formset = TestCaseFormSet(prefix='test',
+ instance=d)
+
return my_render_to_response('exam/add_question.html',
- {'form': form},
+ {'form': form,
+ 'formset': test_case_formset},
context_instance=ci)
@@ -848,6 +914,10 @@ def check(request, q_id, attempt_num=None, questionpaper_id=None):
if not user.is_authenticated() or paper.end_time < datetime.datetime.now():
return my_redirect('/exam/login/')
question = get_object_or_404(Question, pk=q_id)
+ q_paper = QuestionPaper.objects.get(id=questionpaper_id)
+ paper = AnswerPaper.objects.get(user=request.user, question_paper=q_paper)
+ test_cases = TestCase.objects.filter(question=question)
+
snippet_code = request.POST.get('snippet')
user_code = request.POST.get('answer')
skip = request.POST.get('skip', None)
@@ -876,7 +946,8 @@ def check(request, q_id, attempt_num=None, questionpaper_id=None):
assign.save()
user_answer = 'ASSIGNMENT UPLOADED'
else:
- user_answer = snippet_code + "\n" + user_code
+ user_code = request.POST.get('answer')
+ user_answer = snippet_code + "\n" + user_code if snippet_code else user_code
new_answer = Answer(question=question, answer=user_answer,
correct=False)
@@ -887,18 +958,20 @@ def check(request, q_id, attempt_num=None, questionpaper_id=None):
# questions, we obtain the results via XML-RPC with the code executed
# safely in a separate process (the code_server.py) running as nobody.
if not question.type == 'upload':
- correct, success, err_msg = validate_answer(user, user_answer, question)
+ json_data = question.consolidate_answer_data(test_cases, user_answer) \
+ if question.type == 'code' else None
+ correct, result = validate_answer(user, user_answer, question, json_data)
if correct:
new_answer.correct = correct
new_answer.marks = question.points
- new_answer.error = err_msg
+ new_answer.error = result.get('error')
success_msg = True
else:
- new_answer.error = err_msg
+ new_answer.error = result.get('error')
new_answer.save()
time_left = paper.time_left()
- if not success: # Should only happen for non-mcq questions.
+ if not result.get('success'): # Should only happen for non-mcq questions.
if time_left == 0:
reason = 'Your time is up!'
return complete(request, reason, attempt_num, questionpaper_id)
@@ -913,8 +986,7 @@ def check(request, q_id, attempt_num=None, questionpaper_id=None):
if old_answer:
old_answer[0].answer = user_code
old_answer[0].save()
- context = {'question': question, 'questions': questions,
- 'error_message': err_msg,
+ context = {'question': question, 'error_message': result.get('error'),
'paper': paper, 'last_attempt': user_code,
'quiz_name': paper.question_paper.quiz.description,
'time_left': time_left, 'to_attempt': to_attempt,
@@ -933,7 +1005,7 @@ def check(request, q_id, attempt_num=None, questionpaper_id=None):
questionpaper_id, success_msg)
-def validate_answer(user, user_answer, question):
+def validate_answer(user, user_answer, question, json_data=None):
"""
Checks whether the answer submitted by the user is right or wrong.
If right then returns correct = True, success and
@@ -942,9 +1014,9 @@ def validate_answer(user, user_answer, question):
only one attempt are allowed for them.
For code questions success is True only if the answer is correct.
"""
- success = True
+
+ result = {'success': True, 'error': 'Incorrect answer'}
correct = False
- message = 'Incorrect answer'
if user_answer is not None:
if question.type == 'mcq':
@@ -958,11 +1030,12 @@ def validate_answer(user, user_answer, question):
message = 'Correct answer'
elif question.type == 'code':
user_dir = get_user_dir(user)
- success, message = code_server.run_code(user_answer, question.test,
- user_dir, question.language)
- if success:
+ json_result = code_server.run_code(question.language, json_data, user_dir)
+ result = json.loads(json_result)
+ if result.get('success'):
correct = True
- return correct, success, message
+
+ return correct, result
def quit(request, attempt_num=None, questionpaper_id=None):
@@ -1167,18 +1240,20 @@ def show_all_questions(request):
data = request.POST.getlist('question')
forms = []
+ formsets = []
for j in data:
d = Question.objects.get(id=j)
form = QuestionForm()
form.initial['summary'] = d.summary
form.initial['description'] = d.description
form.initial['points'] = d.points
- form.initial['test'] = d.test
form.initial['options'] = d.options
form.initial['type'] = d.type
form.initial['active'] = d.active
form.initial['language'] = d.language
form.initial['snippet'] = d.snippet
+ form.initial['ref_code_path'] = d.ref_code_path
+            form.initial['solution'] = d.test
form_tags = d.tags.all()
form_tags_split = form_tags.values('name')
initial_tags = ""
@@ -1188,8 +1263,13 @@ def show_all_questions(request):
initial_tags = ""
form.initial['tags'] = initial_tags
forms.append(form)
+ test_case_formset = TestCaseFormSet(prefix='test', instance=d)
+ formsets.append(test_case_formset)
+ data_list = zip(forms, formsets)
+
return my_render_to_response('exam/edit_question.html',
- {'forms': forms, 'data': data},
+ {'data': data,
+ 'data_list': data_list},
context_instance=ci)
else:
questions = Question.objects.all()
diff --git a/testapp/exam/xmlrpc_clients.py b/testapp/exam/xmlrpc_clients.py
index 14ebf27..8f5642e 100644
--- a/testapp/exam/xmlrpc_clients.py
+++ b/testapp/exam/xmlrpc_clients.py
@@ -21,15 +21,8 @@ class CodeServerProxy(object):
def __init__(self):
pool_url = 'http://localhost:%d' % (SERVER_POOL_PORT)
self.pool_server = ServerProxy(pool_url)
- self.methods = {"python": 'run_python_code',
- "bash": 'run_bash_code',
- "C": "run_c_code",
- "C++": "run_cplus_code",
- "java": "run_java_code",
- "scilab": "run_scilab_code",
- }
- def run_code(self, answer, test_code, user_dir, language):
+ def run_code(self, language, json_data, user_dir):
"""Tests given code (`answer`) with the `test_code` supplied. If the
optional `in_dir` keyword argument is supplied it changes the directory
to that directory (it does not change it back to the original when
@@ -38,26 +31,28 @@ class CodeServerProxy(object):
Parameters
----------
- answer : str
- The user's answer for the question.
+        json_data contains:
+ user_answer : str
+ The user's answer for the question.
test_code : str
The test code to check the user code with.
- user_dir : str (directory)
- The directory to run the tests inside.
language : str
The programming language to use.
+ user_dir : str (directory)
+ The directory to run the tests inside.
+
+
Returns
-------
- A tuple: (success, error message).
+        A JSON string of a dict: {'success': bool, 'error': error message}.
"""
- method_name = self.methods[language]
+
try:
server = self._get_server()
- method = getattr(server, method_name)
- result = method(answer, test_code, user_dir)
+ result = server.check_code(language, json_data, user_dir)
except ConnectionError:
- result = [False, 'Unable to connect to any code servers!']
+ result = json.dumps({'success': False, 'error': 'Unable to connect to any code servers!'})
return result
def _get_server(self):
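
Client-side, the round trip through this proxy looks like the following (the user directory is illustrative; json_data is the string produced by Question.consolidate_answer_data):

    import json
    from exam.xmlrpc_clients import code_server

    # json_data = question.consolidate_answer_data(test_cases, user_answer)
    json_result = code_server.run_code('python', json_data, '/tmp/user1')
    result = json.loads(json_result)
    if result.get('success'):
        pass                       # answer accepted ("Correct answer")
    else:
        print result.get('error')  # compilation/assertion/timeout message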
diff --git a/testapp/test_server.py b/testapp/test_server.py
deleted file mode 100644
index d22a022..0000000
--- a/testapp/test_server.py
+++ /dev/null
@@ -1,302 +0,0 @@
-"""Simple test suite for the code server. Running this requires that one start
-up the code server as::
-
- $ sudo ./code_server.py
-
-"""
-from exam.xmlrpc_clients import code_server
-
-
-def check_result(result, check='correct answer'):
- if check != 'correct answer':
- assert result[0] == False
- else:
- assert result[0] == True
- if "unable to connect" in result[1].lower():
- assert result[0], result[1]
- assert check in result[1].lower(), result[1]
-
-def test_python():
- """Test if server runs Python code as expected."""
- src = 'while True: pass'
- result = code_server.run_code(src, '', '/tmp', language="python")
- check_result(result, 'more than ')
- src = 'x = 1'
- result = code_server.run_code(src, 'assert x == 1', '/tmp',
- language="python")
- check_result(result, 'correct answer')
-
- result = code_server.run_code(src, 'assert x == 0', '/tmp',
- language="python")
- check_result(result, 'assertionerror')
-
- src = 'abracadabra'
- result = code_server.run_code(src, 'assert x == 0', '/tmp',
- language="python")
- check_result(result, 'nameerror')
-
-
-def test_c():
- """Test if server runs c code as expected."""
- src = """
- #include<stdiol.h>
- int ad(int a, int b)
- {return a+b;}
- """
- result = code_server.run_code(src, 'c_cpp_files/main.cpp',
- '/tmp', language="C")
- check_result(result, 'error')
-
- src = """
- int add(int a, int b)
- {return a+b}
- """
- result = code_server.run_code(src, 'c_cpp_files/main.cpp',
- '/tmp', language="C")
- check_result(result, 'compilation error')
-
- src = """
- int add(int a, int b)
- {while(1>0){}
- return a+b;}
- """
- result = code_server.run_code(src, 'c_cpp_files/main.cpp',
- '/tmp', language="C")
- check_result(result, 'more than')
-
- src = """
- int add(int a, int b)
- {return a+b;}
- """
- result = code_server.run_code(src, 'c_cpp_files/main.cpp',
- '/tmp', language="C")
- check_result(result, 'correct answer')
-
- src = """
- #include<stdio.h>
- int add(int a, int b)
- {printf("All Correct");}
- """
- result = code_server.run_code(src, 'c_cpp_files/main.cpp',
- '/tmp', language="C")
- check_result(result, 'incorrect')
-
-
-def test_cpp():
- """Test if server runs c code as expected."""
- src = """
- int add(int a, int b)
- {
- return a+b
- }
- """
- result = code_server.run_code(src, 'c_cpp_files/main.cpp',
- '/tmp', language="C++")
- check_result(result, 'error')
-
- src = """
- int add(int a, int b)
- {
- return a+b;
- }
- """
- result = code_server.run_code(src, 'c_cpp_files/main.cpp',
- '/tmp', language="C++")
- check_result(result, 'correct answer')
-
- src = """
- int dd(int a, int b)
- {
- return a+b;
- }
- """
- result = code_server.run_code(src, 'c_cpp_files/main.cpp',
- '/tmp', language="C++")
- check_result(result, 'error')
-
- src = """
- int add(int a, int b)
- {
- while(0==0)
- {}
- return a+b;
- }
- """
- result = code_server.run_code(src, 'c_cpp_files/main.cpp',
- '/tmp', language="C++")
- check_result(result, 'more than')
-
-
-def test_java():
- """Test if server runs java code as expected."""
- src = """
- class Test
- {
- int square_num(int a)
- {
- return a*a;
- }
- }
- """
- result = code_server.run_code(src, 'java_files/main_square.java',
- '/tmp', language="java")
- check_result(result, 'correct answer')
-
- src = """
- class Test
- {
- int square_num(int a)
- {
- return b*b;
- }
- }
- """
- result = code_server.run_code(src, 'java_files/main_square.java',
- '/tmp', language="java")
- check_result(result, 'error')
-
- src = """
- class Test
- {
- int square_nu(int a)
- {
- return a*a;
- }
- }
- """
- result = code_server.run_code(src, 'java_files/main_square.java',
- '/tmp', language="java")
- check_result(result, 'error')
-
- src = """
- class Test
- {
- int square_num(int a)
- {
- while(0==0)
- {}
- }
- }
- """
- result = code_server.run_code(src, 'java_files/main_square.java',
- '/tmp', language="java")
- check_result(result, 'more than')
-
- src = """
- class Test
- {
- int square_num(int a)
- {
- return a+b
- }
- }
- """
- result = code_server.run_code(src, 'java_files/main_square.java',
- '/tmp', language="java")
- check_result(result, 'error')
-
- src = """
- class Test
- {
- int square_num(int a)
- {
- return a+b
- """
- result = code_server.run_code(src, 'java_files/main_square.java',
- '/tmp', language="java")
- check_result(result, 'error')
-
-def test_scilab():
- """Test if server runs scilab code as expected."""
- src = """
- funcprot(0)
-function[c]=add(a,b)
- c=a+b;
-endfunction
- """
- result = code_server.run_code(src, 'scilab_files/test_add.sce',
- '/tmp', language="scilab")
- check_result(result, 'correct answer')
-
- src = """
- funcprot(0)
-function[c]=add(a,b)
- c=a-b;
-endfunction
- """
- result = code_server.run_code(src, 'scilab_files/test_add.sce',
- '/tmp', language="scilab")
- check_result(result, 'correct answer')
-
- src = """
- funcprot(0)
-function[c]=add(a,b)
- c=a+b;
-dis(
-endfunction
- """
- result = code_server.run_code(src, 'scilab_files/test_add.sce',
- '/tmp', language="scilab")
- check_result(result, 'error')
-
- src = """
- funcprot(0)
-function[c]=add(a,b)
- c=a
- while(1==1)
- end
-endfunction
- """
- result = code_server.run_code(src, 'scilab_files/test_add.sce',
- '/tmp', language="scilab")
- check_result(result, 'error')
-
-def test_bash():
- """Test if server runs Bash code as expected."""
- src = """
-#!/bin/bash
- [[ $# -eq 2 ]] && echo $(( $1 + $2 )) && exit $(( $1 + $2 ))
- """
- result = code_server.run_code(src, 'docs/sample.sh\ndocs/sample.args',
- '/tmp', language="bash")
- check_result(result)
-
- src = """
-#!/bin/bash
- [[ $# -eq 2 ]] && echo $(( $1 - $2 )) && exit $(( $1 - $2 ))
- """
- result = code_server.run_code(src, 'docs/sample.sh\ndocs/sample.args',
- '/tmp', language="bash")
- check_result(result, 'error')
-
- src = """\
-#!/bin/bash
- while [ 1 ] ; do echo "" > /dev/null ; done
- """
- result = code_server.run_code(src, 'docs/sample.sh\ndocs/sample.args',
- '/tmp', language="bash")
- check_result(result, 'more than ')
-
- src = '''
-#!/bin/bash
- while [ 1 ] ; do echo "" > /dev/null
- '''
- result = code_server.run_code(src, 'docs/sample.sh\ndocs/sample.args',
- '/tmp', language="bash")
- check_result(result, 'error')
-
- src = '''# Enter your code here.
-#!/bin/bash
- while [ 1 ] ; do echo "" > /dev/null
- '''
- result = code_server.run_code(src, 'docs/sample.sh\ndocs/sample.args',
- '/tmp', language="bash")
- check_result(result, 'oserror')
-
-if __name__ == '__main__':
- test_python()
- test_bash()
- test_c()
- test_cpp()
- test_java()
- test_scilab()
diff --git a/testapp/tests/__init__.py b/testapp/tests/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/testapp/tests/__init__.py
diff --git a/testapp/tests/test_bash_evaluation.py b/testapp/tests/test_bash_evaluation.py
new file mode 100644
index 0000000..3ae3b0b
--- /dev/null
+++ b/testapp/tests/test_bash_evaluation.py
@@ -0,0 +1,41 @@
+import unittest
+import os
+from exam.bash_code_evaluator import BashCodeEvaluator
+from exam.settings import SERVER_TIMEOUT
+
+class BashEvaluationTestCases(unittest.TestCase):
+ def setUp(self):
+ self.language = "bash"
+ self.ref_code_path = "bash_files/sample.sh,bash_files/sample.args"
+ self.in_dir = "/tmp"
+ self.test_case_data = []
+ self.timeout_msg = ("Code took more than {0} seconds to run. "
+ "You probably have an infinite loop in your code.").format(SERVER_TIMEOUT)
+ self.test = None
+
+ def test_correct_answer(self):
+ user_answer = "#!/bin/bash\n[[ $# -eq 2 ]] && echo $(( $1 + $2 )) && exit $(( $1 + $2 ))"
+ get_class = BashCodeEvaluator(self.test_case_data, self.test, self.language, user_answer, self.ref_code_path, self.in_dir)
+ result = get_class.evaluate()
+
+ self.assertTrue(result.get("success"))
+ self.assertEqual(result.get("error"), "Correct answer")
+
+ def test_error(self):
+ user_answer = "#!/bin/bash\n[[ $# -eq 2 ]] && echo $(( $1 - $2 )) && exit $(( $1 - $2 ))"
+ get_class = BashCodeEvaluator(self.test_case_data, self.test, self.language, user_answer, self.ref_code_path, self.in_dir)
+ result = get_class.evaluate()
+
+ self.assertFalse(result.get("success"))
+ self.assertTrue("Error" in result.get("error"))
+
+ def test_infinite_loop(self):
+ user_answer = "#!/bin/bash\nwhile [ 1 ] ; do echo "" > /dev/null ; done"
+ get_class = BashCodeEvaluator(self.test_case_data, self.test, self.language, user_answer, self.ref_code_path, self.in_dir)
+ result = get_class.evaluate()
+
+ self.assertFalse(result.get("success"))
+        self.assertEqual(result.get("error"), self.timeout_msg)
+
+if __name__ == '__main__':
+ unittest.main() \ No newline at end of file
diff --git a/testapp/tests/test_c_cpp_evaluation.py b/testapp/tests/test_c_cpp_evaluation.py
new file mode 100644
index 0000000..b820963
--- /dev/null
+++ b/testapp/tests/test_c_cpp_evaluation.py
@@ -0,0 +1,77 @@
+import unittest
+import os
+from exam.cpp_code_evaluator import CppCodeEvaluator
+from exam.settings import SERVER_TIMEOUT
+
+class CEvaluationTestCases(unittest.TestCase):
+ def setUp(self):
+ self.language = "C"
+ self.ref_code_path = "c_cpp_files/main.cpp"
+ self.in_dir = "/tmp"
+ self.test_case_data = []
+ self.timeout_msg = ("Code took more than {0} seconds to run. "
+ "You probably have an infinite loop in your code.").format(SERVER_TIMEOUT)
+ self.test = None
+
+ def test_correct_answer(self):
+ user_answer = "int add(int a, int b)\n{return a+b;}"
+ get_class = CppCodeEvaluator(self.test_case_data, self.test, self.language, user_answer, self.ref_code_path, self.in_dir)
+ result = get_class.evaluate()
+
+ self.assertTrue(result.get("success"))
+ self.assertEqual(result.get("error"), "Correct answer")
+
+ def test_compilation_error(self):
+ user_answer = "int add(int a, int b)\n{return a+b}"
+ get_class = CppCodeEvaluator(self.test_case_data, self.test, self.language, user_answer, self.ref_code_path, self.in_dir)
+ result = get_class.evaluate()
+
+ self.assertFalse(result.get("success"))
+ self.assertTrue("Compilation Error" in result.get("error"))
+
+ def test_infinite_loop(self):
+ user_answer = "int add(int a, int b)\n{while(1>0){}}"
+ get_class = CppCodeEvaluator(self.test_case_data, self.test, self.language, user_answer, self.ref_code_path, self.in_dir)
+ result = get_class.evaluate()
+
+ self.assertFalse(result.get("success"))
+        self.assertEqual(result.get("error"), self.timeout_msg)
+
+
+###############################################################################
+class CppEvaluationTestCases(unittest.TestCase):
+ def setUp(self):
+ self.language = "CPP"
+ self.ref_code_path = "c_cpp_files/main.cpp"
+ self.in_dir = "/tmp"
+ self.test_case_data = []
+ self.timeout_msg = ("Code took more than {0} seconds to run. "
+ "You probably have an infinite loop in your code.").format(SERVER_TIMEOUT)
+ self.test = None
+
+ def test_correct_answer(self):
+ user_answer = "int add(int a, int b)\n{return a+b;}"
+ get_class = CppCodeEvaluator(self.test_case_data, self.test, self.language, user_answer, self.ref_code_path, self.in_dir)
+ result = get_class.evaluate()
+
+ self.assertTrue(result.get("success"))
+ self.assertEqual(result.get("error"), "Correct answer")
+
+ def test_compilation_error(self):
+ user_answer = "int add(int a, int b)\n{return a+b}"
+ get_class = CppCodeEvaluator(self.test_case_data, self.test, self.language, user_answer, self.ref_code_path, self.in_dir)
+ result = get_class.evaluate()
+
+ self.assertFalse(result.get("success"))
+ self.assertTrue("Compilation Error" in result.get("error"))
+
+ def test_infinite_loop(self):
+ user_answer = "int add(int a, int b)\n{while(1>0){}}"
+ get_class = CppCodeEvaluator(self.test_case_data, self.test, self.language, user_answer, self.ref_code_path, self.in_dir)
+ result = get_class.evaluate()
+
+ self.assertFalse(result.get("success"))
+        self.assertEqual(result.get("error"), self.timeout_msg)
+
+if __name__ == '__main__':
+ unittest.main() \ No newline at end of file
diff --git a/testapp/tests/test_code_evaluation.py b/testapp/tests/test_code_evaluation.py
new file mode 100644
index 0000000..fa2b1fa
--- /dev/null
+++ b/testapp/tests/test_code_evaluation.py
@@ -0,0 +1,24 @@
+import unittest
+import os
+from exam import python_code_evaluator
+from exam.language_registry import _LanguageRegistry, set_registry, get_registry
+from exam.settings import SERVER_TIMEOUT
+
+
+class RegistryTestCase(unittest.TestCase):
+ def setUp(self):
+ set_registry()
+ self.registry_object = get_registry()
+ self.language_registry = _LanguageRegistry()
+
+ def test_set_register(self):
+ class_name = getattr(python_code_evaluator, 'PythonCodeEvaluator')
+ self.registry_object.register("python", "exam.python_code_evaluator.PythonCodeEvaluator")
+        self.assertEqual(self.registry_object.get_class("python"), class_name)
+
+ def tearDown(self):
+ self.registry_object = None
+
+
+if __name__ == '__main__':
+ unittest.main() \ No newline at end of file
diff --git a/testapp/tests/test_java_evaluation.py b/testapp/tests/test_java_evaluation.py
new file mode 100644
index 0000000..d86d7b3
--- /dev/null
+++ b/testapp/tests/test_java_evaluation.py
@@ -0,0 +1,41 @@
+import unittest
+import os
+from exam.java_code_evaluator import JavaCodeEvaluator
+from exam.settings import SERVER_TIMEOUT
+
+class JavaEvaluationTestCases(unittest.TestCase):
+ def setUp(self):
+ self.language = "java"
+ self.ref_code_path = "java_files/main_square.java"
+ self.in_dir = "/tmp"
+ self.test_case_data = []
+ self.timeout_msg = ("Code took more than {0} seconds to run. "
+ "You probably have an infinite loop in your code.").format(SERVER_TIMEOUT)
+ self.test = None
+
+ def test_correct_answer(self):
+ user_answer = "class Test {\n\tint square_num(int a) {\n\treturn a*a;\n\t}\n}"
+ get_class = JavaCodeEvaluator(self.test_case_data, self.test, self.language, user_answer, self.ref_code_path, self.in_dir)
+ result = get_class.evaluate()
+
+ self.assertTrue(result.get("success"))
+ self.assertEqual(result.get("error"), "Correct answer")
+
+ def test_error(self):
+ user_answer = "class Test {\n\tint square_num(int a) {\n\treturn a*a"
+ get_class = JavaCodeEvaluator(self.test_case_data, self.test, self.language, user_answer, self.ref_code_path, self.in_dir)
+ result = get_class.evaluate()
+
+ self.assertFalse(result.get("success"))
+ self.assertTrue("Error" in result.get("error"))
+
+ def test_infinite_loop(self):
+ user_answer = "class Test {\n\tint square_num(int a) {\n\t\twhile(0==0){\n\t\t}\n\t}\n}"
+ get_class = JavaCodeEvaluator(self.test_case_data, self.test, self.language, user_answer, self.ref_code_path, self.in_dir)
+ result = get_class.evaluate()
+
+ self.assertFalse(result.get("success"))
+        self.assertEqual(result.get("error"), self.timeout_msg)
+
+if __name__ == '__main__':
+ unittest.main() \ No newline at end of file
diff --git a/testapp/tests/test_python_evaluation.py b/testapp/tests/test_python_evaluation.py
new file mode 100644
index 0000000..57e111c
--- /dev/null
+++ b/testapp/tests/test_python_evaluation.py
@@ -0,0 +1,54 @@
+
+import unittest
+import os
+from exam.python_code_evaluator import PythonCodeEvaluator
+from exam.settings import SERVER_TIMEOUT
+
+class PythonEvaluationTestCases(unittest.TestCase):
+ def setUp(self):
+ self.language = "Python"
+ self.test = None
+ self.test_case_data = [{"func_name": "add",
+ "expected_answer": "5",
+ "test_id": u'null',
+ "pos_args": ["3", "2"],
+ "kw_args": {}
+ }]
+ self.timeout_msg = ("Code took more than {0} seconds to run. "
+ "You probably have an infinite loop in your code.").format(SERVER_TIMEOUT)
+
+ def test_correct_answer(self):
+ user_answer = "def add(a, b):\n\treturn a + b"""
+ get_class = PythonCodeEvaluator(self.test_case_data, self.test, self.language, user_answer, ref_code_path=None, in_dir=None)
+ result = get_class.evaluate()
+ self.assertTrue(result.get("success"))
+ self.assertEqual(result.get("error"), "Correct answer")
+
+ def test_incorrect_answer(self):
+ user_answer = "def add(a, b):\n\treturn a - b"
+ test_case_data = [{"func_name": "add",
+ "expected_answer": "5",
+ "test_id": u'null',
+ "pos_args": ["3", "2"],
+ "kw_args": {}
+ }]
+ get_class = PythonCodeEvaluator(self.test_case_data, self.test, self.language, user_answer, ref_code_path=None, in_dir=None)
+ result = get_class.evaluate()
+ self.assertFalse(result.get("success"))
+ self.assertEqual(result.get("error"), "AssertionError in: assert add(3, 2) == 5")
+
+ def test_infinite_loop(self):
+ user_answer = "def add(a, b):\n\twhile True:\n\t\tpass"""
+ test_case_data = [{"func_name": "add",
+ "expected_answer": "5",
+ "test_id": u'null',
+ "pos_args": ["3", "2"],
+ "kw_args": {}
+ }]
+ get_class = PythonCodeEvaluator(self.test_case_data, self.test, self.language, user_answer, ref_code_path=None, in_dir=None)
+ result = get_class.evaluate()
+ self.assertFalse(result.get("success"))
+        self.assertEqual(result.get("error"), self.timeout_msg)
+
+if __name__ == '__main__':
+ unittest.main() \ No newline at end of file
diff --git a/testapp/tests/test_scilab_evaluation.py b/testapp/tests/test_scilab_evaluation.py
new file mode 100644
index 0000000..072fff6
--- /dev/null
+++ b/testapp/tests/test_scilab_evaluation.py
@@ -0,0 +1,39 @@
+import unittest
+import os
+from exam.scilab_code_evaluator import ScilabCodeEvaluator
+from exam.settings import SERVER_TIMEOUT
+
+class ScilabEvaluationTestCases(unittest.TestCase):
+ def setUp(self):
+ self.language = "scilab"
+ self.ref_code_path = "scilab_files/test_add.sce"
+ self.in_dir = "/tmp"
+ self.test_case_data = []
+ self.test = None
+
+ def test_correct_answer(self):
+ user_answer = "funcprot(0)\nfunction[c]=add(a,b)\n\tc=a+b;\nendfunction"
+ get_class = ScilabCodeEvaluator(self.test_case_data, self.test, self.language, user_answer, self.ref_code_path, self.in_dir)
+ result = get_class.evaluate()
+
+ self.assertTrue(result.get("success"))
+ self.assertEqual(result.get("error"), "Correct answer")
+
+ def test_correct_answer_2(self):
+ user_answer = "funcprot(0)\nfunction[c]=add(a,b)\n\tc=a-b;\nendfunction"
+ get_class = ScilabCodeEvaluator(self.test_case_data, self.test, self.language, user_answer, self.ref_code_path, self.in_dir)
+ result = get_class.evaluate()
+
+ self.assertTrue(result.get("success"))
+ self.assertEqual(result.get("error"), "Correct answer")
+
+ def test_error(self):
+ user_answer = "funcprot(0)\nfunction[c]=add(a,b)\n\t c=a+b;\ndis(\tendfunction"
+ get_class = ScilabCodeEvaluator(self.test_case_data, self.test, self.language, user_answer, self.ref_code_path, self.in_dir)
+ result = get_class.evaluate()
+
+ self.assertFalse(result.get("success"))
+ self.assertTrue("Error" in result.get("error"))
+
+if __name__ == '__main__':
+ unittest.main() \ No newline at end of file