author     ankitjavalkar  2016-05-09 13:00:04 +0530
committer  ankitjavalkar  2016-05-10 11:54:34 +0530
commit     c384c60c6d7fb5d30f3f929c518e0b41e084c4c4 (patch)
tree       d5b937e90bc7d3051b9c9128c4e1560b09db1c2c /yaksh
parent     d953f6f9e62671eeb5d6ea6498475167301dfe91 (diff)
- Adhere to 80 columns
- Add docstrings
- Fix further tests
Diffstat (limited to 'yaksh')
-rw-r--r--  yaksh/admin.py                                     4
-rw-r--r--  yaksh/bash_code_evaluator.py                      68
-rw-r--r--  yaksh/code_evaluator.py                            5
-rwxr-xr-x  yaksh/code_server.py                               7
-rw-r--r--  yaksh/cpp_code_evaluator.py                      148
-rw-r--r--  yaksh/evaluator_tests/test_bash_evaluation.py     17
-rw-r--r--  yaksh/evaluator_tests/test_c_cpp_evaluation.py     7
-rw-r--r--  yaksh/evaluator_tests/test_code_evaluation.py     35
-rw-r--r--  yaksh/evaluator_tests/test_java_evaluation.py      7
-rw-r--r--  yaksh/evaluator_tests/test_python_evaluation.py  107
-rw-r--r--  yaksh/evaluator_tests/test_scilab_evaluation.py   15
-rw-r--r--  yaksh/java_code_evaluator.py                      47
-rw-r--r--  yaksh/language_registry.py                         1
-rw-r--r--  yaksh/models.py                                   52
-rw-r--r--  yaksh/python_stdout_evaluator.py                   4
-rw-r--r--  yaksh/scilab_code_evaluator.py                    20
-rw-r--r--  yaksh/settings.py                                 18
-rw-r--r--  yaksh/tests.py                                   155
18 files changed, 463 insertions, 254 deletions
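Most of the hunks below only re-wrap long lines so they stay under 80 columns. A minimal, standalone sketch of the three wrapping patterns the commit leans on (implicit string-literal concatenation, one-keyword-argument-per-line calls, and backslash continuation); the values and the command are illustrative only:

import subprocess

SERVER_TIMEOUT = 2

# 1. Adjacent string literals inside parentheses are concatenated,
#    so a long message can be split across several short lines.
timeout_msg = ("Code took more than {0} seconds to run. "
               "You probably have an infinite loop in"
               " your code.").format(SERVER_TIMEOUT)

# 2. A long call is wrapped with one keyword argument per line.
proc = subprocess.Popen(["echo", "hello"],
                        stdin=None,
                        stdout=subprocess.PIPE,
                        stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()

# 3. A long assignment can be continued with a trailing backslash.
wrapped_message = \
    timeout_msg

print(wrapped_message)
print(stdout)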
diff --git a/yaksh/admin.py b/yaksh/admin.py
index d223cd4..c31b99b 100644
--- a/yaksh/admin.py
+++ b/yaksh/admin.py
@@ -1,5 +1,5 @@
-from yaksh.models import Question, Quiz, TestCase,\
- StandardTestCase, StdoutBasedTestCase
+from yaksh.models import Question, Quiz
+from yaksh.models import TestCase, StandardTestCase, StdoutBasedTestCase
from django.contrib import admin
admin.site.register(Question)
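The import above is split into two statements to fit in 80 columns. A standalone sketch of the two usual ways to wrap a long import, using os.path as a stand-in module so the snippet runs anywhere:

# Two shorter statements, as in the hunk above ...
from os.path import isfile, isdir
from os.path import join, basename, dirname

# ... or one parenthesized import; both avoid the trailing-backslash
# continuation that the old code used (PEP 8 allows either form).
from os.path import (isfile, isdir, join,
                     basename, dirname)

print(isfile(__file__))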
diff --git a/yaksh/bash_code_evaluator.py b/yaksh/bash_code_evaluator.py
index 12177f9..a0af0e2 100644
--- a/yaksh/bash_code_evaluator.py
+++ b/yaksh/bash_code_evaluator.py
@@ -50,41 +50,52 @@ class BashCodeEvaluator(CodeEvaluator):
get_ref_path, get_test_case_path = ref_code_path.strip().split(',')
get_ref_path = get_ref_path.strip()
get_test_case_path = get_test_case_path.strip()
- clean_ref_code_path, clean_test_case_path = self._set_test_code_file_path(get_ref_path,
- get_test_case_path)
+ clean_ref_code_path, clean_test_case_path = \
+ self._set_test_code_file_path(get_ref_path, get_test_case_path)
if not isfile(clean_ref_code_path):
- return False, "No file at %s or Incorrect path" % clean_ref_code_path
+ msg = "No file at %s or Incorrect path" % clean_ref_code_path
+ return False, msg
if not isfile(self.submit_code_path):
- return False, "No file at %s or Incorrect path" % self.submit_code_path
+ msg = "No file at %s or Incorrect path" % self.submit_code_path
+ return False, msg
if not os.access(clean_ref_code_path, os.X_OK):
- return False, "Script %s is not executable" % clean_ref_code_path
+ msg = "Script %s is not executable" % clean_ref_code_path
+ return False, msg
if not os.access(self.submit_code_path, os.X_OK):
- return False, "Script %s is not executable" % self.submit_code_path
+ msg = "Script %s is not executable" % self.submit_code_path
+ return False, msg
success = False
self.write_to_submit_code_file(self.submit_code_path, user_answer)
if clean_test_case_path is None or "":
- ret = self._run_command(clean_ref_code_path, stdin=None,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
+ ret = self._run_command(clean_ref_code_path,
+ stdin=None,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE
+ )
proc, inst_stdout, inst_stderr = ret
- ret = self._run_command(self.submit_code_path, stdin=None,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
+ ret = self._run_command(self.submit_code_path,
+ stdin=None,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE
+ )
proc, stdnt_stdout, stdnt_stderr = ret
if inst_stdout == stdnt_stdout:
return True, "Correct answer"
else:
err = "Error: expected %s, got %s" % (inst_stderr,
- stdnt_stderr)
+ stdnt_stderr
+ )
return False, err
else:
if not isfile(clean_test_case_path):
- return False, "No test case at %s" % clean_test_case_path
+ msg = "No test case at %s" % clean_test_case_path
+ return False, msg
if not os.access(clean_ref_code_path, os.R_OK):
- return False, "Test script %s, not readable" % clean_test_case_path
+ msg = "Test script %s, not readable" % clean_test_case_path
+ return False, msg
# valid_answer is True, so that we can stop once a test case fails
valid_answer = True
# loop_count has to be greater than or equal to one.
@@ -95,20 +106,27 @@ class BashCodeEvaluator(CodeEvaluator):
for test_case in test_cases:
loop_count += 1
if valid_answer:
- args = [clean_ref_code_path] + [x for x in test_case.split()]
- ret = self._run_command(args, stdin=None,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
+ args = [clean_ref_code_path] + \
+ [x for x in test_case.split()]
+ ret = self._run_command(args,
+ stdin=None,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE
+ )
proc, inst_stdout, inst_stderr = ret
- args = [self.submit_code_path]+[x for x in test_case.split()]
- ret = self._run_command(args, stdin=None,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
+ args = [self.submit_code_path] + \
+ [x for x in test_case.split()]
+ ret = self._run_command(args,
+ stdin=None,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
proc, stdnt_stdout, stdnt_stderr = ret
valid_answer = inst_stdout == stdnt_stdout
if valid_answer and (num_lines == loop_count):
return True, "Correct answer"
else:
- err = "Error:expected %s, got %s" % (inst_stdout+inst_stderr,
- stdnt_stdout+stdnt_stderr)
+ err = ("Error:expected"
+ " %s, got %s").format(inst_stdout+inst_stderr,
+ stdnt_stdout+stdnt_stderr
+ )
return False, err
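The rewritten error message at the end of this hunk keeps %s placeholders while switching to str.format(). For reference, a standalone sketch of how the two interpolation styles pair with their placeholders (the outputs are made up):

inst_output = "3\n"
stdnt_output = "4\n"

# %-style placeholders are filled by the % operator ...
err_percent = ("Error: expected"
               " %s, got %s") % (inst_output, stdnt_output)

# ... while str.format() fills {}-style placeholders; calling
# .format() on a string that only contains %s leaves it unchanged.
err_format = ("Error: expected"
              " {0}, got {1}").format(inst_output, stdnt_output)

assert err_percent == err_format
print(err_percent)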
diff --git a/yaksh/code_evaluator.py b/yaksh/code_evaluator.py
index 1c11c00..aab99eb 100644
--- a/yaksh/code_evaluator.py
+++ b/yaksh/code_evaluator.py
@@ -89,6 +89,11 @@ class CodeEvaluator(object):
self._change_dir(self.in_dir)
def safe_evaluate(self, user_answer, test_case_data):
+ """
+ Handles code evaluation along with compilation, signal handling
+ and Exception handling
+ """
+
# Add a new signal handler for the execution of this code.
prev_handler = create_signal_handler()
success = False
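The docstring added to safe_evaluate mentions signal handling around the evaluation. Below is a standalone sketch of a SIGALRM-based time limit, the usual mechanism for this kind of server-side timeout; it is an assumption made for illustration, not the project's exact code, and it is Unix-only:

import signal

SERVER_TIMEOUT = 2

class TimeoutException(Exception):
    pass

def timeout_handler(signum, frame):
    # Fires when the alarm expires while the evaluated code is running.
    raise TimeoutException("Code took more than %s seconds to run. "
                           "You probably have an infinite loop in"
                           " your code." % SERVER_TIMEOUT)

def safe_evaluate(func):
    # Install the handler and arm the alarm; always restore both.
    prev_handler = signal.signal(signal.SIGALRM, timeout_handler)
    signal.alarm(SERVER_TIMEOUT)
    try:
        return True, func()
    except TimeoutException as exc:
        return False, str(exc)
    finally:
        signal.alarm(0)
        signal.signal(signal.SIGALRM, prev_handler)

def loop_forever():
    while True:
        pass

if __name__ == '__main__':
    print(safe_evaluate(lambda: 1 + 1))   # (True, 2)
    print(safe_evaluate(loop_forever))    # (False, timeout message)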
diff --git a/yaksh/code_server.py b/yaksh/code_server.py
index 2397582..2d8567e 100755
--- a/yaksh/code_server.py
+++ b/yaksh/code_server.py
@@ -62,8 +62,11 @@ class CodeServer(object):
"""Calls relevant EvaluateCode class based on language to check the
answer code
"""
- code_evaluator = create_evaluator_instance(language, test_case_type, json_data,
- in_dir)
+ code_evaluator = create_evaluator_instance(language,
+ test_case_type,
+ json_data,
+ in_dir
+ )
data = unpack_json(json_data)
result = code_evaluator.evaluate(**data)
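The wrapped call above picks an evaluator instance by language and test case type. A standalone sketch of that two-level lookup; the dictionary shape mirrors settings.code_evaluators, but the class here is a placeholder so the snippet runs on its own:

class FakeEvaluator(object):
    """Placeholder standing in for e.g. PythonAssertionEvaluator."""
    def evaluate(self, user_answer, test_case_data):
        return {'success': True, 'error': 'Correct answer'}

# language -> test case type -> evaluator class
code_evaluators = {
    "python": {"standardtestcase": FakeEvaluator},
}

def create_evaluator_instance(language, test_case_type):
    # Two-level lookup, then instantiate.
    evaluator_class = code_evaluators[language][test_case_type]
    return evaluator_class()

evaluator = create_evaluator_instance("python", "standardtestcase")
print(evaluator.evaluate("def add(a, b): return a + b",
                         [{"test_case": "assert(add(1,2)==3)"}]))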
diff --git a/yaksh/cpp_code_evaluator.py b/yaksh/cpp_code_evaluator.py
index becf371..b869442 100644
--- a/yaksh/cpp_code_evaluator.py
+++ b/yaksh/cpp_code_evaluator.py
@@ -43,91 +43,103 @@ class CppCodeEvaluator(CodeEvaluator):
return None
else:
ref_code_path = test_case
- clean_ref_code_path, clean_test_case_path = self._set_test_code_file_path(ref_code_path)
+ clean_ref_code_path, clean_test_case_path = \
+ self._set_test_code_file_path(ref_code_path)
if not isfile(clean_ref_code_path):
- return False, "No file at %s or Incorrect path" % clean_ref_code_path
+ msg = "No file at %s or Incorrect path" % clean_ref_code_path
+ return False, msg
if not isfile(self.submit_code_path):
- return False, 'No file at %s or Incorrect path' % self.submit_code_path
+ msg = "No file at %s or Incorrect path" % self.submit_code_path
+ return False, msg
self.write_to_submit_code_file(self.submit_code_path, user_answer)
self.user_output_path, self.ref_output_path = self.set_file_paths()
- self.compile_command, self.compile_main = self.get_commands(clean_ref_code_path, self.user_output_path, self.ref_output_path)
- self.compiled_user_answer = self._run_command(self.compile_command,
- shell=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
-
- self.compiled_test_code = self._run_command(self.compile_main,
- shell=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
-
+ self.compile_command, self.compile_main = self.get_commands(
+ clean_ref_code_path,
+ self.user_output_path,
+ self.ref_output_path
+ )
+ self.compiled_user_answer = self._run_command(
+ self.compile_command,
+ shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE
+ )
+
+ self.compiled_test_code = self._run_command(
+ self.compile_main,
+ shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE
+ )
return self.compiled_user_answer, self.compiled_test_code
def check_code(self, user_answer, test_case):
- """ Function validates student code using instructor code as
- reference.The first argument ref_code_path, is the path to
- instructor code, it is assumed to have executable permission.
- The second argument submit_code_path, is the path to the student
- code, it is assumed to have executable permission.
-
- Returns
- --------
-
- returns (True, "Correct answer") : If the student function returns
- expected output when called by reference code.
-
- returns (False, error_msg): If the student function fails to return
- expected output when called by reference code.
-
- Returns (False, error_msg): If mandatory arguments are not files or
- if the required permissions are not given to the file(s).
-
- """
- success = False
- proc, stdnt_out, stdnt_stderr = self.compiled_user_answer
- stdnt_stderr = self._remove_null_substitute_char(stdnt_stderr)
-
- # Only if compilation is successful, the program is executed
- # And tested with testcases
- if stdnt_stderr == '':
- proc, main_out, main_err = self.compiled_test_code
- main_err = self._remove_null_substitute_char(main_err)
-
- if main_err == '':
- ret = self._run_command([self.ref_output_path], stdin=None,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- proc, stdout, stderr = ret
- if proc.returncode == 0:
- success, err = True, "Correct answer"
- else:
- err = stdout + "\n" + stderr
- os.remove(self.ref_output_path)
+ """ Function validates student code using instructor code as
+ reference.The first argument ref_code_path, is the path to
+ instructor code, it is assumed to have executable permission.
+ The second argument submit_code_path, is the path to the student
+ code, it is assumed to have executable permission.
+
+ Returns
+ --------
+
+ returns (True, "Correct answer") : If the student function returns
+ expected output when called by reference code.
+
+ returns (False, error_msg): If the student function fails to return
+ expected output when called by reference code.
+
+ Returns (False, error_msg): If mandatory arguments are not files or
+ if the required permissions are not given to the file(s).
+ """
+
+ success = False
+ proc, stdnt_out, stdnt_stderr = self.compiled_user_answer
+ stdnt_stderr = self._remove_null_substitute_char(stdnt_stderr)
+
+ # Only if compilation is successful, the program is executed
+ # And tested with testcases
+ if stdnt_stderr == '':
+ proc, main_out, main_err = self.compiled_test_code
+ main_err = self._remove_null_substitute_char(main_err)
+
+ if main_err == '':
+ ret = self._run_command([self.ref_output_path],
+ stdin=None,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE
+ )
+ proc, stdout, stderr = ret
+ if proc.returncode == 0:
+ success, err = True, "Correct answer"
else:
- err = "Error:"
- try:
- error_lines = main_err.splitlines()
- for e in error_lines:
- if ':' in e:
- err = err + "\n" + e.split(":", 1)[1]
- else:
- err = err + "\n" + e
- except:
- err = err + "\n" + main_err
- os.remove(self.user_output_path)
+ err = stdout + "\n" + stderr
+ os.remove(self.ref_output_path)
else:
- err = "Compilation Error:"
+ err = "Error:"
try:
- error_lines = stdnt_stderr.splitlines()
+ error_lines = main_err.splitlines()
for e in error_lines:
if ':' in e:
err = err + "\n" + e.split(":", 1)[1]
else:
err = err + "\n" + e
except:
- err = err + "\n" + stdnt_stderr
+ err = err + "\n" + main_err
+ os.remove(self.user_output_path)
+ else:
+ err = "Compilation Error:"
+ try:
+ error_lines = stdnt_stderr.splitlines()
+ for e in error_lines:
+ if ':' in e:
+ err = err + "\n" + e.split(":", 1)[1]
+ else:
+ err = err + "\n" + e
+ except:
+ err = err + "\n" + stdnt_stderr
- return success, err
+ return success, err
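The re-indented error handling above keeps only the text after the first ':' of every compiler diagnostic, so temporary file paths do not leak into the student's feedback. A standalone sketch of that transformation on a made-up gcc message:

main_err = ("/tmp/submit.c: In function 'add':\n"
            "/tmp/submit.c:3:5: error: expected ';' before 'return'")

err = "Error:"
try:
    for line in main_err.splitlines():
        if ':' in line:
            # Drop everything up to the first colon (the file path).
            err = err + "\n" + line.split(":", 1)[1]
        else:
            err = err + "\n" + line
except Exception:
    err = err + "\n" + main_err

print(err)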
diff --git a/yaksh/evaluator_tests/test_bash_evaluation.py b/yaksh/evaluator_tests/test_bash_evaluation.py
index 7c58c43..4ff3e0a 100644
--- a/yaksh/evaluator_tests/test_bash_evaluation.py
+++ b/yaksh/evaluator_tests/test_bash_evaluation.py
@@ -5,13 +5,18 @@ from yaksh.settings import SERVER_TIMEOUT
class BashEvaluationTestCases(unittest.TestCase):
def setUp(self):
- self.test_case_data = [{"test_case": "bash_files/sample.sh,bash_files/sample.args"}]
+ self.test_case_data = [
+ {"test_case": "bash_files/sample.sh,bash_files/sample.args"}
+ ]
self.in_dir = "/tmp"
self.timeout_msg = ("Code took more than {0} seconds to run. "
- "You probably have an infinite loop in your code.").format(SERVER_TIMEOUT)
+ "You probably have an infinite loop in your"
+ " code.").format(SERVER_TIMEOUT)
def test_correct_answer(self):
- user_answer = "#!/bin/bash\n[[ $# -eq 2 ]] && echo $(( $1 + $2 )) && exit $(( $1 + $2 ))"
+ user_answer = ("#!/bin/bash\n[[ $# -eq 2 ]]"
+ " && echo $(( $1 + $2 )) && exit $(( $1 + $2 ))"
+ )
get_class = BashCodeEvaluator(self.in_dir)
kwargs = {'user_answer': user_answer,
'test_case_data': self.test_case_data
@@ -21,7 +26,8 @@ class BashEvaluationTestCases(unittest.TestCase):
self.assertEquals(result.get('error'), "Correct answer")
def test_error(self):
- user_answer = "#!/bin/bash\n[[ $# -eq 2 ]] && echo $(( $1 - $2 )) && exit $(( $1 - $2 ))"
+ user_answer = ("#!/bin/bash\n[[ $# -eq 2 ]] "
+ "&& echo $(( $1 - $2 )) && exit $(( $1 - $2 ))")
get_class = BashCodeEvaluator(self.in_dir)
kwargs = {'user_answer': user_answer,
'test_case_data': self.test_case_data
@@ -31,7 +37,8 @@ class BashEvaluationTestCases(unittest.TestCase):
self.assertTrue("Error" in result.get("error"))
def test_infinite_loop(self):
- user_answer = "#!/bin/bash\nwhile [ 1 ] ; do echo "" > /dev/null ; done"
+ user_answer = ("#!/bin/bash\nwhile [ 1 ] ;"
+ " do echo "" > /dev/null ; done")
get_class = BashCodeEvaluator(self.in_dir)
kwargs = {'user_answer': user_answer,
'test_case_data': self.test_case_data
diff --git a/yaksh/evaluator_tests/test_c_cpp_evaluation.py b/yaksh/evaluator_tests/test_c_cpp_evaluation.py
index a07150d..71fb843 100644
--- a/yaksh/evaluator_tests/test_c_cpp_evaluation.py
+++ b/yaksh/evaluator_tests/test_c_cpp_evaluation.py
@@ -8,14 +8,15 @@ class CEvaluationTestCases(unittest.TestCase):
self.test_case_data = [{"test_case": "c_cpp_files/main.cpp"}]
self.in_dir = "/tmp"
self.timeout_msg = ("Code took more than {0} seconds to run. "
- "You probably have an infinite loop in your code.").format(SERVER_TIMEOUT)
+ "You probably have an infinite loop in your"
+ " code.").format(SERVER_TIMEOUT)
def test_correct_answer(self):
user_answer = "int add(int a, int b)\n{return a+b;}"
get_class = CppCodeEvaluator(self.in_dir)
kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data
- }
+ 'test_case_data': self.test_case_data
+ }
result = get_class.evaluate(**kwargs)
self.assertTrue(result.get('success'))
self.assertEquals(result.get('error'), "Correct answer")
diff --git a/yaksh/evaluator_tests/test_code_evaluation.py b/yaksh/evaluator_tests/test_code_evaluation.py
index ace6115..51c0c51 100644
--- a/yaksh/evaluator_tests/test_code_evaluation.py
+++ b/yaksh/evaluator_tests/test_code_evaluation.py
@@ -2,20 +2,43 @@ import unittest
import os
from yaksh import python_assertion_evaluator
from yaksh.language_registry import _LanguageRegistry, get_registry
-from yaksh.settings import SERVER_TIMEOUT
+from yaksh.settings import SERVER_TIMEOUT, code_evaluators
class RegistryTestCase(unittest.TestCase):
def setUp(self):
self.registry_object = get_registry()
self.language_registry = _LanguageRegistry()
+ assertion_evaluator_path = ("yaksh.python_assertion_evaluator"
+ ".PythonAssertionEvaluator"
+ )
+ stdout_evaluator_path = ("yaksh.python_stdout_evaluator."
+ "PythonStdoutEvaluator"
+ )
+ code_evaluators['python'] = \
+ {"standardtestcase": assertion_evaluator_path,
+ "stdoutbasedtestcase": stdout_evaluator_path
+ }
def test_set_register(self):
- class_name = getattr(python_assertion_evaluator, 'PythonAssertionEvaluator')
- self.registry_object.register("python", {"standardtestcase": "yaksh.python_assertion_evaluator.PythonAssertionEvaluator",
- "stdoutbasedtestcase": "python_stdout_evaluator.PythonStdoutEvaluator"
- })
- self.assertEquals(self.registry_object.get_class("python", "standardtestcase"), class_name)
+ evaluator_class = self.registry_object.get_class("python",
+ "standardtestcase"
+ )
+ assertion_evaluator_path = ("yaksh.python_assertion_evaluator"
+ ".PythonAssertionEvaluator"
+ )
+ stdout_evaluator_path = ("yaksh.python_stdout_evaluator."
+ "PythonStdoutEvaluator"
+ )
+ class_name = getattr(python_assertion_evaluator,
+ 'PythonAssertionEvaluator'
+ )
+ self.registry_object.register("python",
+ {"standardtestcase": assertion_evaluator_path,
+ "stdoutbasedtestcase": stdout_evaluator_path
+ }
+ )
+ self.assertEquals(evaluator_class, class_name)
def tearDown(self):
self.registry_object = None
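The reworked setUp above injects fully qualified dotted paths into settings.code_evaluators before exercising the registry. A standalone sketch of overriding such a module-level mapping in a test; the tearDown that restores the original mapping is an extra precaution shown here, not something the diff itself adds:

import unittest

# Stand-in for yaksh.settings.code_evaluators.
code_evaluators = {
    "python": {"standardtestcase": "python_assertion_evaluator"
                                   ".PythonAssertionEvaluator"},
}

class RegistryPathTestCase(unittest.TestCase):
    def setUp(self):
        # Point the python entries at fully qualified dotted paths,
        # as the reworked setUp above does.
        self._saved = dict(code_evaluators["python"])
        code_evaluators["python"] = {
            "standardtestcase": ("yaksh.python_assertion_evaluator"
                                 ".PythonAssertionEvaluator"),
            "stdoutbasedtestcase": ("yaksh.python_stdout_evaluator."
                                    "PythonStdoutEvaluator"),
        }

    def tearDown(self):
        # Restore the original mapping so other tests are unaffected.
        code_evaluators["python"] = self._saved

    def test_paths_are_fully_qualified(self):
        for path in code_evaluators["python"].values():
            self.assertTrue(path.startswith("yaksh."))

if __name__ == '__main__':
    unittest.main()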
diff --git a/yaksh/evaluator_tests/test_java_evaluation.py b/yaksh/evaluator_tests/test_java_evaluation.py
index 76a3fcf..801277f 100644
--- a/yaksh/evaluator_tests/test_java_evaluation.py
+++ b/yaksh/evaluator_tests/test_java_evaluation.py
@@ -7,11 +7,14 @@ from yaksh.settings import SERVER_TIMEOUT
class JavaEvaluationTestCases(unittest.TestCase):
def setUp(self):
- self.test_case_data = [{"test_case": "java_files/main_square.java"}]
+ self.test_case_data = [
+ {"test_case": "java_files/main_square.java"}
+ ]
self.in_dir = "/tmp"
evaluator.SERVER_TIMEOUT = 9
self.timeout_msg = ("Code took more than {0} seconds to run. "
- "You probably have an infinite loop in your code.").format(evaluator.SERVER_TIMEOUT)
+ "You probably have an infinite loop in"
+ " your code.").format(evaluator.SERVER_TIMEOUT)
def tearDown(self):
evaluator.SERVER_TIMEOUT = 2
diff --git a/yaksh/evaluator_tests/test_python_evaluation.py b/yaksh/evaluator_tests/test_python_evaluation.py
index f6ac0bf..1e867a3 100644
--- a/yaksh/evaluator_tests/test_python_evaluation.py
+++ b/yaksh/evaluator_tests/test_python_evaluation.py
@@ -9,11 +9,12 @@ from textwrap import dedent
class PythonAssertionEvaluationTestCases(unittest.TestCase):
def setUp(self):
self.test_case_data = [{"test_case": 'assert(add(1,2)==3)'},
- {"test_case": 'assert(add(-1,2)==1)'},
- {"test_case": 'assert(add(-1,-2)==-3)'},
- ]
+ {"test_case": 'assert(add(-1,2)==1)'},
+ {"test_case": 'assert(add(-1,-2)==-3)'},
+ ]
self.timeout_msg = ("Code took more than {0} seconds to run. "
- "You probably have an infinite loop in your code.").format(SERVER_TIMEOUT)
+ "You probably have an infinite loop in"
+ " your code.").format(SERVER_TIMEOUT)
def test_correct_answer(self):
user_answer = "def add(a,b):\n\treturn a + b"
@@ -33,14 +34,16 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
}
result = get_class.evaluate(**kwargs)
self.assertFalse(result.get('success'))
- self.assertEqual(result.get('error'), "AssertionError in: assert(add(1,2)==3)")
+ self.assertEqual(result.get('error'),
+ "AssertionError in: assert(add(1,2)==3)"
+ )
def test_infinite_loop(self):
user_answer = "def add(a, b):\n\twhile True:\n\t\tpass"
get_class = PythonAssertionEvaluator()
kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data
- }
+ 'test_case_data': self.test_case_data
+ }
result = get_class.evaluate(**kwargs)
self.assertFalse(result.get('success'))
self.assertEqual(result.get('error'), self.timeout_msg)
@@ -50,12 +53,18 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
def add(a, b);
return a + b
""")
- syntax_error_msg = ["Traceback", "call", "File", "line", "<string>",
- "SyntaxError", "invalid syntax"]
+ syntax_error_msg = ["Traceback",
+ "call",
+ "File",
+ "line",
+ "<string>",
+ "SyntaxError",
+ "invalid syntax"
+ ]
get_class = PythonAssertionEvaluator()
kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data
- }
+ 'test_case_data': self.test_case_data
+ }
result = get_class.evaluate(**kwargs)
err = result.get("error").splitlines()
self.assertFalse(result.get("success"))
@@ -68,12 +77,17 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
def add(a, b):
return a + b
""")
- indent_error_msg = ["Traceback", "call", "File", "line", "<string>",
- "IndentationError", "indented block"]
+ indent_error_msg = ["Traceback", "call",
+ "File",
+ "line",
+ "<string>",
+ "IndentationError",
+ "indented block"
+ ]
get_class = PythonAssertionEvaluator()
kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data
- }
+ 'test_case_data': self.test_case_data
+ }
result = get_class.evaluate(**kwargs)
err = result.get("error").splitlines()
self.assertFalse(result.get("success"))
@@ -83,11 +97,16 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
def test_name_error(self):
user_answer = ""
- name_error_msg = ["Traceback", "call", "NameError", "name", "defined"]
+ name_error_msg = ["Traceback",
+ "call",
+ "NameError",
+ "name",
+ "defined"
+ ]
get_class = PythonAssertionEvaluator()
kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data
- }
+ 'test_case_data': self.test_case_data
+ }
result = get_class.evaluate(**kwargs)
err = result.get("error").splitlines()
self.assertFalse(result.get("success"))
@@ -100,12 +119,15 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
def add(a, b):
return add(3, 3)
""")
- recursion_error_msg = ["Traceback", "call", "RuntimeError",
- "maximum recursion depth exceeded"]
+ recursion_error_msg = ["Traceback",
+ "call",
+ "RuntimeError",
+ "maximum recursion depth exceeded"
+ ]
get_class = PythonAssertionEvaluator()
kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data
- }
+ 'test_case_data': self.test_case_data
+ }
result = get_class.evaluate(**kwargs)
err = result.get("error").splitlines()
self.assertFalse(result.get("success"))
@@ -118,11 +140,16 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
def add(a):
return a + b
""")
- type_error_msg = ["Traceback", "call", "TypeError", "exactly", "argument"]
+ type_error_msg = ["Traceback",
+ "call",
+ "TypeError",
+ "exactly",
+ "argument"
+ ]
get_class = PythonAssertionEvaluator()
kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data
- }
+ 'test_case_data': self.test_case_data
+ }
result = get_class.evaluate(**kwargs)
err = result.get("error").splitlines()
self.assertFalse(result.get("success"))
@@ -136,7 +163,12 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
c = 'a'
return int(a) + int(b) + int(c)
""")
- value_error_msg = ["Traceback", "call", "ValueError", "invalid literal", "base"]
+ value_error_msg = ["Traceback",
+ "call",
+ "ValueError",
+ "invalid literal",
+ "base"
+ ]
get_class = PythonAssertionEvaluator()
kwargs = {'user_answer': user_answer,
'test_case_data': self.test_case_data
@@ -152,14 +184,15 @@ class PythonStdoutEvaluationTestCases(unittest.TestCase):
def setUp(self):
self.test_case_data = [{"expected_output": "0 1 1 2 3"}]
self.timeout_msg = ("Code took more than {0} seconds to run. "
- "You probably have an infinite loop in your code.").format(SERVER_TIMEOUT)
+ "You probably have an infinite loop"
+ " in your code.").format(SERVER_TIMEOUT)
def test_correct_answer(self):
user_answer = "a,b=0,1\nfor i in range(5):\n\tprint a,\n\ta,b=b,a+b"
get_class = PythonStdoutEvaluator()
kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data
- }
+ 'test_case_data': self.test_case_data
+ }
result = get_class.evaluate(**kwargs)
self.assertEqual(result.get('error'), "Correct answer")
self.assertTrue(result.get('success'))
@@ -168,19 +201,21 @@ class PythonStdoutEvaluationTestCases(unittest.TestCase):
user_answer = "a,b=0,1\nfor i in range(5):\n\tprint b,\n\ta,b=b,a+b"
get_class = PythonStdoutEvaluator()
kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data
- }
+ 'test_case_data': self.test_case_data
+ }
result = get_class.evaluate(**kwargs)
self.assertFalse(result.get('success'))
self.assertEqual(result.get('error'), "Incorrect Answer")
def test_direct_printed_answer(self):
user_answer = "print '0 1 1 2 3'"
- error_msg = "Incorrect Answer: Please avoid printing the expected output directly"
+ error_msg = ("Incorrect Answer: Please avoid printing"
+ " the expected output directly"
+ )
get_class = PythonStdoutEvaluator()
kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data
- }
+ 'test_case_data': self.test_case_data
+ }
result = get_class.evaluate(**kwargs)
self.assertFalse(result.get('success'))
self.assertEqual(result.get('error'), error_msg)
@@ -189,8 +224,8 @@ class PythonStdoutEvaluationTestCases(unittest.TestCase):
user_answer = "def add(a, b):\n\twhile True:\n\t\tpass"
get_class = PythonStdoutEvaluator()
kwargs = {'user_answer': user_answer,
- 'test_case_data': self.test_case_data
- }
+ 'test_case_data': self.test_case_data
+ }
result = get_class.evaluate(**kwargs)
self.assertFalse(result.get('success'))
self.assertEqual(result.get('error'), 'Incorrect Answer')
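The assertion tests above expect messages such as "AssertionError in: assert(add(1,2)==3)". A standalone sketch of how an assertion-based check can produce them, inferred from those expectations; the project's PythonAssertionEvaluator additionally applies the signal-based timeout, which is left out here:

def check_assertion(user_answer, test_case):
    # Run the student's code, then run the assert statement against it.
    namespace = {}
    try:
        exec(user_answer, namespace)
        exec(test_case, namespace)
    except AssertionError:
        return False, "AssertionError in: %s" % test_case
    except Exception as exc:
        return False, "%s: %s" % (type(exc).__name__, exc)
    return True, "Correct answer"

print(check_assertion("def add(a, b):\n    return a + b",
                      "assert(add(1,2)==3)"))
print(check_assertion("def add(a, b):\n    return a - b",
                      "assert(add(1,2)==3)"))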
diff --git a/yaksh/evaluator_tests/test_scilab_evaluation.py b/yaksh/evaluator_tests/test_scilab_evaluation.py
index 24d6740..242f260 100644
--- a/yaksh/evaluator_tests/test_scilab_evaluation.py
+++ b/yaksh/evaluator_tests/test_scilab_evaluation.py
@@ -9,10 +9,12 @@ class ScilabEvaluationTestCases(unittest.TestCase):
self.test_case_data = [{"test_case": "scilab_files/test_add.sce"}]
self.in_dir = "/tmp"
self.timeout_msg = ("Code took more than {0} seconds to run. "
- "You probably have an infinite loop in your code.").format(SERVER_TIMEOUT)
+ "You probably have an infinite loop"
+ " in your code.").format(SERVER_TIMEOUT)
def test_correct_answer(self):
- user_answer = "funcprot(0)\nfunction[c]=add(a,b)\n\tc=a+b;\nendfunction"
+ user_answer = ("funcprot(0)\nfunction[c]=add(a,b)"
+ "\n\tc=a+b;\nendfunction")
get_class = ScilabCodeEvaluator(self.in_dir)
kwargs = {'user_answer': user_answer,
'test_case_data': self.test_case_data
@@ -22,7 +24,8 @@ class ScilabEvaluationTestCases(unittest.TestCase):
self.assertTrue(result.get('success'))
def test_error(self):
- user_answer = "funcprot(0)\nfunction[c]=add(a,b)\n\tc=a+b;\ndis(\tendfunction"
+ user_answer = ("funcprot(0)\nfunction[c]=add(a,b)"
+ "\n\tc=a+b;\ndis(\tendfunction")
get_class = ScilabCodeEvaluator(self.in_dir)
kwargs = {'user_answer': user_answer,
'test_case_data': self.test_case_data
@@ -33,7 +36,8 @@ class ScilabEvaluationTestCases(unittest.TestCase):
def test_incorrect_answer(self):
- user_answer = "funcprot(0)\nfunction[c]=add(a,b)\n\tc=a-b;\nendfunction"
+ user_answer = ("funcprot(0)\nfunction[c]=add(a,b)"
+ "\n\tc=a-b;\nendfunction")
get_class = ScilabCodeEvaluator(self.in_dir)
kwargs = {'user_answer': user_answer,
'test_case_data': self.test_case_data
@@ -44,7 +48,8 @@ class ScilabEvaluationTestCases(unittest.TestCase):
self.assertTrue(result.get('error').splitlines > 1)
def test_infinite_loop(self):
- user_answer = "funcprot(0)\nfunction[c]=add(a,b)\n\tc=a;\nwhile(1==1)\nend\nendfunction"
+ user_answer = ("funcprot(0)\nfunction[c]=add(a,b)"
+ "\n\tc=a;\nwhile(1==1)\nend\nendfunction")
get_class = ScilabCodeEvaluator(self.in_dir)
kwargs = {'user_answer': user_answer,
'test_case_data': self.test_case_data
diff --git a/yaksh/java_code_evaluator.py b/yaksh/java_code_evaluator.py
index ac2c487..c64aa3b 100644
--- a/yaksh/java_code_evaluator.py
+++ b/yaksh/java_code_evaluator.py
@@ -40,31 +40,46 @@ class JavaCodeEvaluator(CodeEvaluator):
return None
else:
ref_code_path = test_case
- clean_ref_code_path, clean_test_case_path = self._set_test_code_file_path(ref_code_path)
+ clean_ref_code_path, clean_test_case_path = \
+ self._set_test_code_file_path(ref_code_path)
if not isfile(clean_ref_code_path):
- return False, "No file at %s or Incorrect path" % clean_ref_code_path
+ msg = "No file at %s or Incorrect path" % clean_ref_code_path
+ return False, msg
if not isfile(self.submit_code_path):
- return False, 'No file at %s or Incorrect path' % self.submit_code_path
+ msg = "No file at %s or Incorrect path" % self.submit_code_path
+ return False, msg
user_code_directory = os.getcwd() + '/'
- self.write_to_submit_code_file(self.submit_code_path, user_answer)
+ self.write_to_submit_code_file(self.submit_code_path,
+ user_answer
+ )
ref_file_name = (clean_ref_code_path.split('/')[-1]).split('.')[0]
- self.user_output_path = self.set_file_paths(user_code_directory, 'Test')
- self.ref_output_path = self.set_file_paths(user_code_directory, ref_file_name)
-
- compile_command, self.compile_main = self.get_commands(clean_ref_code_path, user_code_directory)
- self.run_command_args = "java -cp {0} {1}".format(user_code_directory,
- ref_file_name)
+ self.user_output_path = self.set_file_paths(user_code_directory,
+ 'Test'
+ )
+ self.ref_output_path = self.set_file_paths(user_code_directory,
+ ref_file_name
+ )
+ compile_command, self.compile_main = self.get_commands(
+ clean_ref_code_path,
+ user_code_directory
+ )
+ self.run_command_args = "java -cp {0} {1}".format(
+ user_code_directory,
+ ref_file_name
+ )
self.compiled_user_answer = self._run_command(compile_command,
- shell=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
+ shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE
+ )
self.compiled_test_code = self._run_command(self.compile_main,
- shell=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
+ shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE
+ )
return self.compiled_user_answer, self.compiled_test_code
diff --git a/yaksh/language_registry.py b/yaksh/language_registry.py
index 398e1aa..24aef7d 100644
--- a/yaksh/language_registry.py
+++ b/yaksh/language_registry.py
@@ -36,6 +36,7 @@ class _LanguageRegistry(object):
test_case_register = self._register[language]
cls = test_case_register.get(test_case_type)
module_name, class_name = cls.rsplit(".", 1)
+ import yaksh.python_assertion_evaluator
# load the module, will raise ImportError if module cannot be loaded
get_module = importlib.import_module(module_name)
# get the class, will raise AttributeError if class cannot be found
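The registry resolves a dotted "module.Class" string into a class object with importlib and getattr, as the comments above describe. A standalone sketch of that resolution step, using a standard-library path as a stand-in so the snippet runs without yaksh installed:

import importlib

def resolve(dotted_path):
    # "package.module.ClassName" -> import the module, fetch the class.
    module_name, class_name = dotted_path.rsplit(".", 1)
    module = importlib.import_module(module_name)
    return getattr(module, class_name)

# Stand-in path; the registry resolves entries such as
# "yaksh.python_assertion_evaluator.PythonAssertionEvaluator".
print(resolve("collections.OrderedDict"))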
diff --git a/yaksh/models.py b/yaksh/models.py
index dd2fb5f..32fb0d0 100644
--- a/yaksh/models.py
+++ b/yaksh/models.py
@@ -201,11 +201,14 @@ class Question(models.Model):
questions = Question.objects.filter(id__in = question_ids, user_id = user.id)
questions_dict = []
for question in questions:
- q_dict = {'summary': question.summary, 'description': question.description,
- 'points': question.points, 'language': question.language,
- 'type': question.type, 'active': question.active,
- 'test_case_type': question.test_case_type,
- 'snippet': question.snippet}
+ q_dict = {'summary': question.summary,
+ 'description': question.description,
+ 'points': question.points,
+ 'language': question.language,
+ 'type': question.type,
+ 'active': question.active,
+ 'test_case_type': question.test_case_type,
+ 'snippet': question.snippet}
questions_dict.append(q_dict)
return json.dumps(questions_dict, indent=2)
@@ -217,14 +220,24 @@ class Question(models.Model):
Question.objects.get_or_create(**question)
def get_test_cases(self, **kwargs):
- test_case_ctype = ContentType.objects.get(app_label="yaksh", model=self.test_case_type)
- test_cases = test_case_ctype.get_all_objects_for_this_type(question=self, **kwargs)
+ test_case_ctype = ContentType.objects.get(app_label="yaksh",
+ model=self.test_case_type
+ )
+ test_cases = test_case_ctype.get_all_objects_for_this_type(
+ question=self,
+ **kwargs
+ )
return test_cases
def get_test_case(self, **kwargs):
- test_case_ctype = ContentType.objects.get(app_label="yaksh", model=self.test_case_type)
- test_case = test_case_ctype.get_object_for_this_type(question=self, **kwargs)
+ test_case_ctype = ContentType.objects.get(app_label="yaksh",
+ model=self.test_case_type
+ )
+ test_case = test_case_ctype.get_object_for_this_type(
+ question=self,
+ **kwargs
+ )
return test_case
@@ -368,10 +381,13 @@ class QuestionPaper(models.Model):
def make_answerpaper(self, user, ip, attempt_num):
"""Creates an answer paper for the user to attempt the quiz"""
- ans_paper = AnswerPaper(user=user, user_ip=ip, attempt_number=attempt_num)
+ ans_paper = AnswerPaper(user=user,
+ user_ip=ip,
+ attempt_number=attempt_num
+ )
ans_paper.start_time = datetime.now()
- ans_paper.end_time = ans_paper.start_time \
- + timedelta(minutes=self.quiz.duration)
+ ans_paper.end_time = ans_paper.start_time + \
+ timedelta(minutes=self.quiz.duration)
ans_paper.question_paper = self
ans_paper.save()
questions = self._get_questions_for_answerpaper()
@@ -750,7 +766,9 @@ class StandardTestCase(TestCase):
return {"test_case": self.test_case}
def __unicode__(self):
- return u'Question: {0} | Test Case: {1}'.format(self.question, self.test_case)
+ return u'Question: {0} | Test Case: {1}'.format(self.question,
+ self.test_case
+ )
class StdoutBasedTestCase(TestCase):
@@ -760,7 +778,9 @@ class StdoutBasedTestCase(TestCase):
return {"expected_output": self.expected_output}
def __unicode__(self):
- return u'Question: {0} | Exp. Output: {1}'.format(self.question, self.expected_output)
+ return u'Question: {0} | Exp. Output: {1}'.format(self.question,
+ self.expected_output
+ )
class McqTestCase(TestCase):
@@ -771,4 +791,6 @@ class McqTestCase(TestCase):
return {"options": self.options, "correct": self.correct}
def __unicode__(self):
- return u'Question: {0} | Correct: {1}'.format(self.question, self.correct)
+ return u'Question: {0} | Correct: {1}'.format(self.question,
+ self.correct
+ )
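The models.py hunk near the top of this diff rewraps the loop that serialises questions into JSON, one dictionary key per line. A standalone sketch of that dump with a stand-in Question class, so no Django setup is needed to run it:

import json

class Question(object):
    """Stand-in with the same attributes as the Django model."""
    def __init__(self, **attrs):
        self.__dict__.update(attrs)

questions = [Question(summary='Demo question',
                      description='Write a function',
                      points=1.0,
                      language='Python',
                      type='Code',
                      active=True,
                      test_case_type='standardtestcase',
                      snippet='def myfunc()')]

questions_dict = []
for question in questions:
    # One key per line keeps the dict literal inside 80 columns.
    q_dict = {'summary': question.summary,
              'description': question.description,
              'points': question.points,
              'language': question.language,
              'type': question.type,
              'active': question.active,
              'test_case_type': question.test_case_type,
              'snippet': question.snippet}
    questions_dict.append(q_dict)

print(json.dumps(questions_dict, indent=2))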
diff --git a/yaksh/python_stdout_evaluator.py b/yaksh/python_stdout_evaluator.py
index b967024..6606581 100644
--- a/yaksh/python_stdout_evaluator.py
+++ b/yaksh/python_stdout_evaluator.py
@@ -42,7 +42,9 @@ class PythonStdoutEvaluator(CodeEvaluator):
tb = None
if expected_output in user_answer:
success = False
- err = "Incorrect Answer: Please avoid printing the expected output directly"
+ err = ("Incorrect Answer: Please avoid "
+ "printing the expected output directly"
+ )
elif self.output_value == expected_output:
success = True
err = "Correct answer"
diff --git a/yaksh/scilab_code_evaluator.py b/yaksh/scilab_code_evaluator.py
index f4aa5f8..91b4cb3 100644
--- a/yaksh/scilab_code_evaluator.py
+++ b/yaksh/scilab_code_evaluator.py
@@ -14,7 +14,8 @@ class ScilabCodeEvaluator(CodeEvaluator):
"""Tests the Scilab code obtained from Code Server"""
def setup(self):
super(ScilabCodeEvaluator, self).setup()
- self.submit_code_path = self.create_submit_code_file('function.sci')
+ self.submit_code_path = \
+ self.create_submit_code_file('function.sci')
def teardown(self):
super(ScilabCodeEvaluator, self).teardown()
@@ -23,8 +24,10 @@ class ScilabCodeEvaluator(CodeEvaluator):
def check_code(self, user_answer, test_case):
ref_code_path = test_case
- clean_ref_path, clean_test_case_path = self._set_test_code_file_path(ref_code_path)
- user_answer, terminate_commands = self._remove_scilab_exit(user_answer.lstrip())
+ clean_ref_path, clean_test_case_path = \
+ self._set_test_code_file_path(ref_code_path)
+ user_answer, terminate_commands = \
+ self._remove_scilab_exit(user_answer.lstrip())
success = False
self.write_to_submit_code_file(self.submit_code_path, user_answer)
@@ -35,12 +38,15 @@ class ScilabCodeEvaluator(CodeEvaluator):
code.\n Otherwise your code will not be evaluated\
correctly.\n"
- cmd = 'printf "lines(0)\nexec(\'{0}\',2);\nquit();"'.format(clean_ref_path)
+ cmd = 'printf "lines(0)\nexec(\'{0}\',2);\nquit();"'.format(
+ clean_ref_path
+ )
cmd += ' | timeout 8 scilab-cli -nb'
ret = self._run_command(cmd,
- shell=True,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
+ shell=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE
+ )
proc, stdout, stderr = ret
# Get only the error.
diff --git a/yaksh/settings.py b/yaksh/settings.py
index f8baa2c..70e5471 100644
--- a/yaksh/settings.py
+++ b/yaksh/settings.py
@@ -20,12 +20,12 @@ SERVER_TIMEOUT = 2
URL_ROOT = ''
code_evaluators = {
- "python": {"standardtestcase": "python_assertion_evaluator.PythonAssertionEvaluator",
- "stdoutbasedtestcase": "python_stdout_evaluator.PythonStdoutEvaluator"
- },
- "c": {"standardtestcase": "cpp_code_evaluator.CppCodeEvaluator"},
- "cpp": {"standardtestcase": "cpp_code_evaluator.CppCodeEvaluator"},
- "java": {"standardtestcase": "java_code_evaluator.JavaCodeEvaluator"},
- "bash": {"standardtestcase": "bash_code_evaluator.BashCodeEvaluator"},
- "scilab": {"standardtestcase": "scilab_code_evaluator.ScilabCodeEvaluator"},
- }
+ "python": {"standardtestcase": "python_assertion_evaluator.PythonAssertionEvaluator",
+ "stdoutbasedtestcase": "python_stdout_evaluator.PythonStdoutEvaluator"
+ },
+ "c": {"standardtestcase": "cpp_code_evaluator.CppCodeEvaluator"},
+ "cpp": {"standardtestcase": "cpp_code_evaluator.CppCodeEvaluator"},
+ "java": {"standardtestcase": "java_code_evaluator.JavaCodeEvaluator"},
+ "bash": {"standardtestcase": "bash_code_evaluator.BashCodeEvaluator"},
+ "scilab": {"standardtestcase": "scilab_code_evaluator.ScilabCodeEvaluator"},
+}
diff --git a/yaksh/tests.py b/yaksh/tests.py
index 150a8b0..f2083dd 100644
--- a/yaksh/tests.py
+++ b/yaksh/tests.py
@@ -75,22 +75,33 @@ class QuestionTestCases(unittest.TestCase):
# Single question details
self.user1 = User.objects.get(pk=1)
self.user2 = User.objects.get(pk=2)
- self.question1 = Question(summary='Demo question', language='Python',
- type='Code', active=True,
- test_case_type='standardtestcase',
- description='Write a function', points=1.0,
- snippet='def myfunc()', user=self.user1)
+ self.question1 = Question(summary='Demo question',
+ language='Python',
+ type='Code',
+ active=True,
+ test_case_type='standardtestcase',
+ description='Write a function',
+ points=1.0,
+ snippet='def myfunc()',
+ user=self.user1
+ )
self.question1.save()
- self.question2 = Question(summary='Demo Json', language='python',
- type='code', active=True,
- description='factorial of a no', points=2.0,
- snippet='def fact()', user=self.user2)
+ self.question2 = Question(summary='Demo Json',
+ language='python',
+ type='code',
+ active=True,
+ description='factorial of a no',
+ points=2.0,
+ snippet='def fact()',
+ user=self.user2
+ )
self.question2.save()
self.question1.tags.add('python', 'function')
self.assertion_testcase = StandardTestCase(question=self.question1,
- test_case='assert myfunc(12, 13) == 15')
+ test_case='assert myfunc(12, 13) == 15'
+ )
self.user_answer = "demo_answer"
questions_data = [{"snippet": "def fact()", "active": True, "points": 1.0,
"description": "factorial of a no",
@@ -182,7 +193,9 @@ class QuestionPaperTestCases(unittest.TestCase):
# create question paper
self.question_paper = QuestionPaper.objects.create(quiz=self.quiz,
- total_marks=0.0, shuffle_questions=True)
+ total_marks=0.0,
+ shuffle_questions=True
+ )
# add fixed set of questions to the question paper
self.question_paper.fixed_questions.add(self.questions[3],
@@ -190,23 +203,29 @@ class QuestionPaperTestCases(unittest.TestCase):
# create two QuestionSet for random questions
# QuestionSet 1
self.question_set_1 = QuestionSet.objects.create(marks=2,
- num_questions=2)
+ num_questions=2
+ )
# add pool of questions for random sampling
- self.question_set_1.questions.add(self.questions[6], self.questions[7],
- self.questions[8], self.questions[9])
+ self.question_set_1.questions.add(self.questions[6],
+ self.questions[7],
+ self.questions[8],
+ self.questions[9]
+ )
# add question set 1 to random questions in Question Paper
self.question_paper.random_questions.add(self.question_set_1)
# QuestionSet 2
self.question_set_2 = QuestionSet.objects.create(marks=3,
- num_questions=3)
+ num_questions=3
+ )
# add pool of questions
self.question_set_2.questions.add(self.questions[11],
- self.questions[12],
- self.questions[13],
- self.questions[14])
+ self.questions[12],
+ self.questions[13],
+ self.questions[14]
+ )
# add question set 2
self.question_paper.random_questions.add(self.question_set_2)
@@ -215,8 +234,10 @@ class QuestionPaperTestCases(unittest.TestCase):
self.user = User.objects.get(pk=1)
- self.attempted_papers = AnswerPaper.objects.filter(question_paper=self.question_paper,
- user=self.user)
+ self.attempted_papers = AnswerPaper.objects.filter(
+ question_paper=self.question_paper,
+ user=self.user
+ )
def test_questionpaper(self):
""" Test question paper"""
@@ -235,7 +256,8 @@ class QuestionPaperTestCases(unittest.TestCase):
""" Test get_random_questions() method of Question Paper"""
random_questions_set_1 = self.question_set_1.get_random_questions()
random_questions_set_2 = self.question_set_2.get_random_questions()
- total_random_questions = len(random_questions_set_1 + random_questions_set_2)
+ total_random_questions = len(random_questions_set_1 + \
+ random_questions_set_2)
self.assertEqual(total_random_questions, 5)
# To check whether random questions are from random_question_set
@@ -286,12 +308,15 @@ class AnswerPaperTestCases(unittest.TestCase):
# create answerpaper
self.answerpaper = AnswerPaper(user=self.user,
- question_paper=self.question_paper,
- start_time=self.start_time,
- end_time=self.end_time,
- user_ip=self.ip)
- self.attempted_papers = AnswerPaper.objects.filter(question_paper=self.question_paper,
- user=self.user)
+ question_paper=self.question_paper,
+ start_time=self.start_time,
+ end_time=self.end_time,
+ user_ip=self.ip
+ )
+ self.attempted_papers = AnswerPaper.objects.filter(
+ question_paper=self.question_paper,
+ user=self.user
+ )
already_attempted = self.attempted_papers.count()
self.answerpaper.attempt_number = already_attempted + 1
self.answerpaper.save()
@@ -300,9 +325,14 @@ class AnswerPaperTestCases(unittest.TestCase):
self.answerpaper.save()
# answers for the Answer Paper
self.answer_right = Answer(question=Question.objects.get(id=1),
- answer="Demo answer", correct=True, marks=1)
+ answer="Demo answer",
+ correct=True, marks=1
+ )
self.answer_wrong = Answer(question=Question.objects.get(id=2),
- answer="My answer", correct=False, marks=0)
+ answer="My answer",
+ correct=False,
+ marks=0
+ )
self.answer_right.save()
self.answer_wrong.save()
self.answerpaper.answers.add(self.answer_right)
@@ -448,12 +478,14 @@ class CourseTestCases(unittest.TestCase):
def test_get_quizzes(self):
""" Test get_quizzes method of Courses"""
- self.assertSequenceEqual(self.course.get_quizzes(), [self.quiz1, self.quiz2])
+ self.assertSequenceEqual(self.course.get_quizzes(),
+ [self.quiz1, self.quiz2])
def test_add_teachers(self):
""" Test to add teachers to a course"""
self.course.add_teachers(self.student1, self.student2)
- self.assertSequenceEqual(self.course.get_teachers(), [self.student1, self.student2])
+ self.assertSequenceEqual(self.course.get_teachers(),
+ [self.student1, self.student2])
def test_remove_teachers(self):
""" Test to remove teachers from a course"""
@@ -472,42 +504,61 @@ class CourseTestCases(unittest.TestCase):
class TestCaseTestCases(unittest.TestCase):
def setUp(self):
self.user = User.objects.get(pk=1)
- self.question1 = Question(summary='Demo question 1', language='Python',
- type='Code', active=True,
- description='Write a function', points=1.0,
- test_case_type="standardtestcase", user=self.user,
- snippet='def myfunc()'
- )
- self.question2 = Question(summary='Demo question 2', language='Python',
- type='Code', active=True,
- description='Write to standard output', points=1.0,
- test_case_type="stdoutbasedtestcase", user=self.user,
- snippet='def myfunc()'
- )
+ self.question1 = Question(summary='Demo question 1',
+ language='Python',
+ type='Code',
+ active=True,
+ description='Write a function',
+ points=1.0,
+ test_case_type="standardtestcase",
+ user=self.user,
+ snippet='def myfunc()'
+ )
+ self.question2 = Question(summary='Demo question 2',
+ language='Python',
+ type='Code',
+ active=True,
+ description='Write to standard output',
+ points=1.0,
+ test_case_type="stdoutbasedtestcase",
+ user=self.user,
+ snippet='def myfunc()'
+ )
self.question1.save()
self.question2.save()
- self.assertion_testcase = StandardTestCase(question=self.question1,
- test_case='assert myfunc(12, 13) == 15')
- self.stdout_based_testcase = StdoutBasedTestCase(question=self.question2,
- expected_output='Hello World')
+ self.assertion_testcase = StandardTestCase(
+ question=self.question1,
+ test_case='assert myfunc(12, 13) == 15'
+ )
+ self.stdout_based_testcase = StdoutBasedTestCase(
+ question=self.question2,
+ expected_output='Hello World'
+ )
self.assertion_testcase.save()
self.stdout_based_testcase.save()
answer_data = {"user_answer": "demo_answer",
- "test_case_data": [{"test_case": "assert myfunc(12, 13) == 15"}],
- }
+ "test_case_data": [
+ {"test_case": "assert myfunc(12, 13) == 15"}
+ ]
+ }
self.answer_data_json = json.dumps(answer_data)
def test_assertion_testcase(self):
""" Test question """
self.assertEqual(self.assertion_testcase.question, self.question1)
- self.assertEqual(self.assertion_testcase.test_case, 'assert myfunc(12, 13) == 15')
+ self.assertEqual(self.assertion_testcase.test_case,
+ 'assert myfunc(12, 13) == 15')
def test_stdout_based_testcase(self):
""" Test question """
self.assertEqual(self.stdout_based_testcase.question, self.question2)
- self.assertEqual(self.stdout_based_testcase.expected_output, 'Hello World')
+ self.assertEqual(self.stdout_based_testcase.expected_output,
+ 'Hello World'
+ )
def test_consolidate_answer_data(self):
""" Test consolidate answer data model method """
- result = self.question1.consolidate_answer_data(user_answer="demo_answer")
+ result = self.question1.consolidate_answer_data(
+ user_answer="demo_answer"
+ )
self.assertEqual(result, self.answer_data_json)
\ No newline at end of file