summary refs log tree commit diff
path: root/yaksh
diff options
context:
space:
mode:
Diffstat (limited to 'yaksh')
-rw-r--r--  yaksh/code_evaluator.py                          |  61
-rw-r--r--  yaksh/evaluator_tests/test_bash_evaluation.py    |  40
-rw-r--r--  yaksh/evaluator_tests/test_code_evaluation.py    |  25
-rw-r--r--  yaksh/evaluator_tests/test_java_evaluation.py    |  43
-rw-r--r--  yaksh/evaluator_tests/test_python_evaluation.py  |   3
-rw-r--r--  yaksh/evaluator_tests/test_scilab_evaluation.py  |  20
-rw-r--r--  yaksh/forms.py                                   |  11
-rw-r--r--  yaksh/models.py                                  |   8
-rw-r--r--  yaksh/python_assertion_evaluator.py              |   2
-rw-r--r--  yaksh/scilab_code_evaluator.py                   |   4
-rw-r--r--  yaksh/templates/yaksh/add_question.html          |   2
-rw-r--r--  yaksh/tests.py                                   |  59
-rw-r--r--  yaksh/urls.py                                    |   2
-rw-r--r--  yaksh/views.py                                   |   9
14 files changed, 57 insertions, 232 deletions
diff --git a/yaksh/code_evaluator.py b/yaksh/code_evaluator.py
index b7a523c..1c11c00 100644
--- a/yaksh/code_evaluator.py
+++ b/yaksh/code_evaluator.py
@@ -5,14 +5,13 @@ import os
import stat
from os.path import isdir, dirname, abspath, join, isfile, exists
import signal
+import traceback
from multiprocessing import Process, Queue
import subprocess
import re
-# import json
# Local imports.
from settings import SERVER_TIMEOUT
-
MY_DIR = abspath(dirname(__file__))
@@ -50,36 +49,12 @@ def delete_signal_handler():
class CodeEvaluator(object):
"""Tests the code obtained from Code Server"""
- # def __init__(self, test_case_data, test, language, user_answer,
- # ref_code_path=None, in_dir=None):
def __init__(self, in_dir=None):
msg = 'Code took more than %s seconds to run. You probably '\
'have an infinite loop in your code.' % SERVER_TIMEOUT
self.timeout_msg = msg
- # self.test_case_data = test_case_data
- # self.language = language.lower() #@@@remove
- # self.user_answer = user_answer #@@@specific to check-code
- # self.ref_code_path = ref_code_path #@@@specific to check-code
- # self.test = test #@@@specific to check-code
- self.in_dir = in_dir #@@@Common for all, no change
- # self.test_case_args = None #@@@no change
-
- # Public Protocol ##########
- # @classmethod
- # def from_json(cls, language, json_data, in_dir):
- # json_data = json.loads(json_data)
- # # test_case_data = json_data.get("test_case_data")
- # user_answer = json_data.get("user_answer")
- # ref_code_path = json_data.get("ref_code_path")
- # test = json_data.get("test")
-
- # # instance = cls(test_case_data, test, language, user_answer, ref_code_path,
- # # in_dir)
- # instance = cls(test, language, user_answer, ref_code_path,
- # in_dir)
- # return instance
-
- # def evaluate(self):
+ self.in_dir = in_dir
+
def evaluate(self, **kwargs):
"""Evaluates given code with the test cases based on
given arguments in test_case_data.
@@ -103,7 +78,6 @@ class CodeEvaluator(object):
"""
self.setup()
- # success, err = self.safe_evaluate(self.test_case_args)
success, err = self.safe_evaluate(**kwargs)
self.teardown()
@@ -114,18 +88,13 @@ class CodeEvaluator(object):
def setup(self):
self._change_dir(self.in_dir)
- # def safe_evaluate(self, args):
- # def safe_evaluate(self, **kwargs): #@@@v2
def safe_evaluate(self, user_answer, test_case_data):
# Add a new signal handler for the execution of this code.
prev_handler = create_signal_handler()
success = False
- # args = args or []
# Do whatever testing needed.
try:
- # success, err = self.check_code(*args)
- # success, err = self.check_code(**kwargs) #@@@v2
for test_case in test_case_data:
self.compile_code(user_answer, **test_case)
success, err = self.check_code(user_answer, **test_case)
@@ -134,9 +103,9 @@ class CodeEvaluator(object):
except TimeoutException:
err = self.timeout_msg
- except:
- _type, value = sys.exc_info()[:2]
- err = "Error: {0}".format(repr(value))
+ except Exception:
+ err = "Error: {0}".format(traceback.format_exc(limit=0))
+
finally:
# Set back any original signal handler.
set_original_signal_handler(prev_handler)
@@ -198,24 +167,6 @@ class CodeEvaluator(object):
raise
return proc, stdout, stderr
- # def _compile_command(self, cmd, *args, **kw):
- # """Compiles C/C++/java code and returns errors if any.
- # Run a command in a subprocess while blocking, the process is killed
- # if it takes more than 2 seconds to run. Return the Popen object, the
- # stderr.
- # """
- # try:
- # proc_compile = subprocess.Popen(cmd, shell=True, stdin=None,
- # stdout=subprocess.PIPE,
- # stderr=subprocess.PIPE)
- # out, err = proc_compile.communicate()
- # except TimeoutException:
- # # Runaway code, so kill it.
- # proc_compile.kill()
- # # Re-raise exception.
- # raise
- # return proc_compile, err
-
def _change_dir(self, in_dir):
if in_dir is not None and isdir(in_dir):
os.chdir(in_dir)
diff --git a/yaksh/evaluator_tests/test_bash_evaluation.py b/yaksh/evaluator_tests/test_bash_evaluation.py
index 39247f7..7c58c43 100644
--- a/yaksh/evaluator_tests/test_bash_evaluation.py
+++ b/yaksh/evaluator_tests/test_bash_evaluation.py
@@ -10,15 +10,6 @@ class BashEvaluationTestCases(unittest.TestCase):
self.timeout_msg = ("Code took more than {0} seconds to run. "
"You probably have an infinite loop in your code.").format(SERVER_TIMEOUT)
- # def setUp(self):
- # self.language = "bash"
- # self.ref_code_path = "bash_files/sample.sh,bash_files/sample.args"
- # self.in_dir = "/tmp"
- # self.test_case_data = []
- # self.timeout_msg = ("Code took more than {0} seconds to run. "
- # "You probably have an infinite loop in your code.").format(SERVER_TIMEOUT)
- # self.test = None
-
def test_correct_answer(self):
user_answer = "#!/bin/bash\n[[ $# -eq 2 ]] && echo $(( $1 + $2 )) && exit $(( $1 + $2 ))"
get_class = BashCodeEvaluator(self.in_dir)
@@ -49,37 +40,6 @@ class BashEvaluationTestCases(unittest.TestCase):
self.assertFalse(result.get("success"))
self.assertEquals(result.get("error"), self.timeout_msg)
- # def test_infinite_loop(self):
- # user_answer = "#!/bin/bash\nwhile [ 1 ] ; do echo "" > /dev/null ; done"
- # get_class = BashCodeEvaluator(self.test_case_data, self.test, self.language, user_answer, self.ref_code_path, self.in_dir)
- # result = get_class.evaluate()
-
- # self.assertFalse(result.get("success"))
- # self.assertEquals(result.get("error"), self.timeout_msg)
-
- # def test_correct_answer(self):
- # user_answer = "#!/bin/bash\n[[ $# -eq 2 ]] && echo $(( $1 + $2 )) && exit $(( $1 + $2 ))"
- # get_class = BashCodeEvaluator(self.test_case_data, self.test, self.language, user_answer, self.ref_code_path, self.in_dir)
- # result = get_class.evaluate()
-
- # self.assertTrue(result.get("success"))
- # self.assertEqual(result.get("error"), "Correct answer")
-
- # def test_error(self):
- # user_answer = "#!/bin/bash\n[[ $# -eq 2 ]] && echo $(( $1 - $2 )) && exit $(( $1 - $2 ))"
- # get_class = BashCodeEvaluator(self.test_case_data, self.test, self.language, user_answer, self.ref_code_path, self.in_dir)
- # result = get_class.evaluate()
-
- # self.assertFalse(result.get("success"))
- # self.assertTrue("Error" in result.get("error"))
-
- # def test_infinite_loop(self):
- # user_answer = "#!/bin/bash\nwhile [ 1 ] ; do echo "" > /dev/null ; done"
- # get_class = BashCodeEvaluator(self.test_case_data, self.test, self.language, user_answer, self.ref_code_path, self.in_dir)
- # result = get_class.evaluate()
-
- # self.assertFalse(result.get("success"))
- # self.assertEquals(result.get("error"), self.timeout_msg)
if __name__ == '__main__':
unittest.main()
diff --git a/yaksh/evaluator_tests/test_code_evaluation.py b/yaksh/evaluator_tests/test_code_evaluation.py
index e4f129c..ace6115 100644
--- a/yaksh/evaluator_tests/test_code_evaluation.py
+++ b/yaksh/evaluator_tests/test_code_evaluation.py
@@ -23,28 +23,3 @@ class RegistryTestCase(unittest.TestCase):
if __name__ == '__main__':
unittest.main()
-
-
-# import unittest
-# import os
-# from yaksh import cpp_code_evaluator
-# from yaksh.language_registry import _LanguageRegistry, get_registry
-# from yaksh.settings import SERVER_TIMEOUT
-
-
-# class RegistryTestCase(unittest.TestCase):
-# def setUp(self):
-# self.registry_object = get_registry()
-# self.language_registry = _LanguageRegistry()
-
-# def test_set_register(self):
-# class_name = getattr(cpp_code_evaluator, 'CppCodeEvaluator')
-# self.registry_object.register("c", {"standardtestcase": "cpp_code_evaluator.CppCodeEvaluator"})
-# self.assertEquals(self.registry_object.get_class("c", "standardtestcase"), class_name)
-
-# def tearDown(self):
-# self.registry_object = None
-
-
-# if __name__ == '__main__':
-# unittest.main()
diff --git a/yaksh/evaluator_tests/test_java_evaluation.py b/yaksh/evaluator_tests/test_java_evaluation.py
index eacd62e..76a3fcf 100644
--- a/yaksh/evaluator_tests/test_java_evaluation.py
+++ b/yaksh/evaluator_tests/test_java_evaluation.py
@@ -57,49 +57,6 @@ class JavaEvaluationTestCases(unittest.TestCase):
self.assertFalse(result.get("success"))
self.assertEquals(result.get("error"), self.timeout_msg)
- # def setUp(self):
- # self.language = "java"
- # self.ref_code_path = "java_files/main_square.java"
- # self.in_dir = "/tmp"
- # self.test_case_data = []
- # evaluator.SERVER_TIMEOUT = 9
- # self.timeout_msg = ("Code took more than {0} seconds to run. "
- # "You probably have an infinite loop in "
- # "your code.").format(evaluator.SERVER_TIMEOUT)
- # self.test = None
-
- # def tearDown(self):
- # evaluator.SERVER_TIMEOUT = 2
-
- # def test_correct_answer(self):
- # user_answer = "class Test {\n\tint square_num(int a) {\n\treturn a*a;\n\t}\n}"
- # get_class = JavaCodeEvaluator(self.test_case_data, self.test,
- # self.language, user_answer,
- # self.ref_code_path, self.in_dir)
- # result = get_class.evaluate()
-
- # self.assertTrue(result.get("success"))
- # self.assertEqual(result.get("error"), "Correct answer")
-
- # def test_error(self):
- # user_answer = "class Test {\n\tint square_num(int a) {\n\treturn a*a"
- # get_class = JavaCodeEvaluator(self.test_case_data, self.test,
- # self.language, user_answer,
- # self.ref_code_path, self.in_dir)
- # result = get_class.evaluate()
-
- # self.assertFalse(result.get("success"))
- # self.assertTrue("Error" in result.get("error"))
-
- # def test_infinite_loop(self):
- # user_answer = "class Test {\n\tint square_num(int a) {\n\t\twhile(0==0){\n\t\t}\n\t}\n}"
- # get_class = JavaCodeEvaluator(self.test_case_data, self.test,
- # self.language, user_answer,
- # self.ref_code_path, self.in_dir)
- # result = get_class.evaluate()
-
- # self.assertFalse(result.get("success"))
- # self.assertEquals(result.get("error"), self.timeout_msg)
if __name__ == '__main__':
unittest.main()
diff --git a/yaksh/evaluator_tests/test_python_evaluation.py b/yaksh/evaluator_tests/test_python_evaluation.py
index 39d4723..f6ac0bf 100644
--- a/yaksh/evaluator_tests/test_python_evaluation.py
+++ b/yaksh/evaluator_tests/test_python_evaluation.py
@@ -16,7 +16,6 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
"You probably have an infinite loop in your code.").format(SERVER_TIMEOUT)
def test_correct_answer(self):
- # {u'user_answer': u'def adder(a,b):\r\n return a', u'test_case_data': [u'assert(adder(1,2)==3)']}
user_answer = "def add(a,b):\n\treturn a + b"
get_class = PythonAssertionEvaluator()
kwargs = {'user_answer': user_answer,
@@ -156,7 +155,6 @@ class PythonStdoutEvaluationTestCases(unittest.TestCase):
"You probably have an infinite loop in your code.").format(SERVER_TIMEOUT)
def test_correct_answer(self):
- # user_answer = "a = 'Hello'\nb = 'World'\nprint '{0} {1}'.format(a, b)"
user_answer = "a,b=0,1\nfor i in range(5):\n\tprint a,\n\ta,b=b,a+b"
get_class = PythonStdoutEvaluator()
kwargs = {'user_answer': user_answer,
@@ -197,7 +195,6 @@ class PythonStdoutEvaluationTestCases(unittest.TestCase):
self.assertFalse(result.get('success'))
self.assertEqual(result.get('error'), 'Incorrect Answer')
->>>>>>> - Add test cases for multiple python evaluators
if __name__ == '__main__':
unittest.main()
diff --git a/yaksh/evaluator_tests/test_scilab_evaluation.py b/yaksh/evaluator_tests/test_scilab_evaluation.py
index 928cd7e..24d6740 100644
--- a/yaksh/evaluator_tests/test_scilab_evaluation.py
+++ b/yaksh/evaluator_tests/test_scilab_evaluation.py
@@ -11,16 +11,6 @@ class ScilabEvaluationTestCases(unittest.TestCase):
self.timeout_msg = ("Code took more than {0} seconds to run. "
"You probably have an infinite loop in your code.").format(SERVER_TIMEOUT)
- # def setUp(self):
- # self.language = "scilab"
- # self.ref_code_path = "scilab_files/test_add.sce"
- # self.in_dir = "/tmp"
- # self.test_case_data = []
- # self.timeout_msg = ("Code took more than {0} seconds to run. "
- # "You probably have an infinite loop in your code.").format(SERVER_TIMEOUT)
- # self.test = None
-
-
def test_correct_answer(self):
user_answer = "funcprot(0)\nfunction[c]=add(a,b)\n\tc=a+b;\nendfunction"
get_class = ScilabCodeEvaluator(self.in_dir)
@@ -31,16 +21,6 @@ class ScilabEvaluationTestCases(unittest.TestCase):
self.assertEquals(result.get('error'), "Correct answer")
self.assertTrue(result.get('success'))
- # def test_correct_answer(self):
- # user_answer = "funcprot(0)\nfunction[c]=add(a,b)\n\tc=a+b;\nendfunction"
- # get_class = ScilabCodeEvaluator(self.test_case_data, self.test,
- # self.language, user_answer,
- # self.ref_code_path, self.in_dir)
- # result = get_class.evaluate()
-
- # self.assertTrue(result.get("success"))
- # self.assertEqual(result.get("error"), "Correct answer")
-
def test_error(self):
user_answer = "funcprot(0)\nfunction[c]=add(a,b)\n\tc=a+b;\ndis(\tendfunction"
get_class = ScilabCodeEvaluator(self.in_dir)
diff --git a/yaksh/forms.py b/yaksh/forms.py
index 2ce2cba..808262b 100644
--- a/yaksh/forms.py
+++ b/yaksh/forms.py
@@ -224,14 +224,3 @@ class ProfileForm(forms.ModelForm):
class UploadFileForm(forms.Form):
file = forms.FileField()
-
-class StandardTestCaseForm(forms.ModelForm):
- class Meta:
- model = StandardTestCase
- fields = ['test_case']
-
-
-class StdoutBasedTestCaseForm(forms.ModelForm):
- class Meta:
- model = StdoutBasedTestCase
- fields = ['output']
diff --git a/yaksh/models.py b/yaksh/models.py
index a200ae1..dd2fb5f 100644
--- a/yaksh/models.py
+++ b/yaksh/models.py
@@ -176,6 +176,9 @@ class Question(models.Model):
# Tags for the Question.
tags = TaggableManager(blank=True)
+ # Snippet of code provided to the user.
+ snippet = models.CharField(max_length=256, blank=True)
+
# user for particular question
user = models.ForeignKey(User, related_name="user")
@@ -199,10 +202,9 @@ class Question(models.Model):
questions_dict = []
for question in questions:
q_dict = {'summary': question.summary, 'description': question.description,
- 'points': question.points, 'test': question.test,
- 'ref_code_path': question.ref_code_path,
- 'options': question.options, 'language': question.language,
+ 'points': question.points, 'language': question.language,
'type': question.type, 'active': question.active,
+ 'test_case_type': question.test_case_type,
'snippet': question.snippet}
questions_dict.append(q_dict)
diff --git a/yaksh/python_assertion_evaluator.py b/yaksh/python_assertion_evaluator.py
index ff56421..bf6a4be 100644
--- a/yaksh/python_assertion_evaluator.py
+++ b/yaksh/python_assertion_evaluator.py
@@ -39,8 +39,6 @@ class PythonAssertionEvaluator(CodeEvaluator):
err = "{0} {1} in: {2}".format(type.__name__, str(value), text)
except TimeoutException:
raise
- except Exception:
- err = traceback.format_exc(limit=0)
else:
success = True
err = 'Correct answer'
diff --git a/yaksh/scilab_code_evaluator.py b/yaksh/scilab_code_evaluator.py
index 3af9782..f4aa5f8 100644
--- a/yaksh/scilab_code_evaluator.py
+++ b/yaksh/scilab_code_evaluator.py
@@ -14,9 +14,7 @@ class ScilabCodeEvaluator(CodeEvaluator):
"""Tests the Scilab code obtained from Code Server"""
def setup(self):
super(ScilabCodeEvaluator, self).setup()
- # ref_path, test_case_path = self._set_test_code_file_path(self.ref_code_path)
self.submit_code_path = self.create_submit_code_file('function.sci')
- # return ref_path, # Return as a tuple
def teardown(self):
super(ScilabCodeEvaluator, self).teardown()
@@ -43,8 +41,6 @@ class ScilabCodeEvaluator(CodeEvaluator):
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
- # success = False
- # proc, stdout, stderr = self.compiled_output
proc, stdout, stderr = ret
# Get only the error.
diff --git a/yaksh/templates/yaksh/add_question.html b/yaksh/templates/yaksh/add_question.html
index eb9419c..d38aa1c 100644
--- a/yaksh/templates/yaksh/add_question.html
+++ b/yaksh/templates/yaksh/add_question.html
@@ -40,7 +40,7 @@
</table></center>
<center>
- <button class="btn" type="submit" name="save_question">Save</button>
+ <button class="btn" type="submit" name="save_question">Save & Add Testcase</button>
<button class="btn" type="button" name="button" onClick='location.replace("{{URL_ROOT}}/exam/manage/questions/");'>Back to Questions</button>
</center>
</form>
diff --git a/yaksh/tests.py b/yaksh/tests.py
index 6e50fc5..150a8b0 100644
--- a/yaksh/tests.py
+++ b/yaksh/tests.py
@@ -79,27 +79,23 @@ class QuestionTestCases(unittest.TestCase):
type='Code', active=True,
test_case_type='standardtestcase',
description='Write a function', points=1.0,
- user=self.user1)
+ snippet='def myfunc()', user=self.user1)
self.question1.save()
self.question2 = Question(summary='Demo Json', language='python',
type='code', active=True,
description='factorial of a no', points=2.0,
- user=self.user2)
+ snippet='def fact()', user=self.user2)
self.question2.save()
self.question1.tags.add('python', 'function')
self.assertion_testcase = StandardTestCase(question=self.question1,
test_case='assert myfunc(12, 13) == 15')
- answer_data = {"user_answer": "demo_answer",
- "test_case_data": ["assert myfunc(12, 13) == 15"],
- }
- self.answer_data_json = json.dumps(answer_data)
self.user_answer = "demo_answer"
questions_data = [{"snippet": "def fact()", "active": True, "points": 1.0,
- "ref_code_path": "", "description": "factorial of a no",
- "language": "Python", "test": "", "type": "Code",
- "options": "", "summary": "Json Demo"}]
+ "description": "factorial of a no",
+ "language": "Python", "type": "Code",
+ "summary": "Json Demo"}]
self.json_questions_data = json.dumps(questions_data)
def test_question(self):
@@ -110,16 +106,12 @@ class QuestionTestCases(unittest.TestCase):
self.assertEqual(self.question1.description, 'Write a function')
self.assertEqual(self.question1.points, 1.0)
self.assertTrue(self.question1.active)
+ self.assertEqual(self.question1.snippet, 'def myfunc()')
tag_list = []
for tag in self.question1.tags.all():
tag_list.append(tag.name)
self.assertEqual(tag_list, ['python', 'function'])
- def test_consolidate_answer_data(self):
- """ Test consolidate_answer_data function """
- result = self.question1.consolidate_answer_data(self.user_answer)
- self.assertEqual(result, self.answer_data_json)
-
def test_dump_questions_into_json(self):
""" Test dump questions into json """
question = Question()
@@ -138,7 +130,7 @@ class QuestionTestCases(unittest.TestCase):
""" Test load questions into database from json """
question = Question()
result = question.load_from_json(self.json_questions_data, self.user1)
- question_data = Question.objects.get(pk=27)
+ question_data = Question.objects.get(pk=25)
self.assertEqual(question_data.summary, 'Json Demo')
self.assertEqual(question_data.language, 'Python')
self.assertEqual(question_data.type, 'Code')
@@ -479,22 +471,43 @@ class CourseTestCases(unittest.TestCase):
###############################################################################
class TestCaseTestCases(unittest.TestCase):
def setUp(self):
- self.question = Question(summary='Demo question', language='Python',
+ self.user = User.objects.get(pk=1)
+ self.question1 = Question(summary='Demo question 1', language='Python',
type='Code', active=True,
description='Write a function', points=1.0,
+ test_case_type="standardtestcase", user=self.user,
+ snippet='def myfunc()'
+ )
+ self.question2 = Question(summary='Demo question 2', language='Python',
+ type='Code', active=True,
+ description='Write to standard output', points=1.0,
+ test_case_type="stdoutbasedtestcase", user=self.user,
+ snippet='def myfunc()'
)
- self.question.save()
- self.assertion_testcase = StandardTestCase(question=self.question,
+ self.question1.save()
+ self.question2.save()
+ self.assertion_testcase = StandardTestCase(question=self.question1,
test_case='assert myfunc(12, 13) == 15')
- self.stdout_based_testcase = StdoutBasedTestCase(question=self.question,
- output='Hello World')
+ self.stdout_based_testcase = StdoutBasedTestCase(question=self.question2,
+ expected_output='Hello World')
+ self.assertion_testcase.save()
+ self.stdout_based_testcase.save()
+ answer_data = {"user_answer": "demo_answer",
+ "test_case_data": [{"test_case": "assert myfunc(12, 13) == 15"}],
+ }
+ self.answer_data_json = json.dumps(answer_data)
def test_assertion_testcase(self):
""" Test question """
- self.assertEqual(self.assertion_testcase.question, self.question)
+ self.assertEqual(self.assertion_testcase.question, self.question1)
self.assertEqual(self.assertion_testcase.test_case, 'assert myfunc(12, 13) == 15')
def test_stdout_based_testcase(self):
""" Test question """
- self.assertEqual(self.stdout_based_testcase.question, self.question)
- self.assertEqual(self.stdout_based_testcase.output, 'Hello World')
+ self.assertEqual(self.stdout_based_testcase.question, self.question2)
+ self.assertEqual(self.stdout_based_testcase.expected_output, 'Hello World')
+
+ def test_consolidate_answer_data(self):
+ """ Test consolidate answer data model method """
+ result = self.question1.consolidate_answer_data(user_answer="demo_answer")
        self.assertEqual(result, self.answer_data_json)
\ No newline at end of file
diff --git a/yaksh/urls.py b/yaksh/urls.py
index 6a33888..feac8c1 100644
--- a/yaksh/urls.py
+++ b/yaksh/urls.py
@@ -43,7 +43,7 @@ urlpatterns += [
url(r'^manage/addquestion/$', views.add_question),
url(r'^manage/addquestion/(?P<question_id>\d+)/$', views.edit_question),
url(r'^manage/addquiz/$', views.add_quiz),
- url(r'^manage/addquiz/(?P<quiz_id>\d+)/$', views.edit_quiz),
+ url(r'^manage/addquiz/(?P<quiz_id>\d+)/$', views.add_quiz),
url(r'^manage/gradeuser/$', views.grade_user),
url(r'^manage/gradeuser/(?P<quiz_id>\d+)/$',views.grade_user),
url(r'^manage/gradeuser/(?P<quiz_id>\d+)/(?P<user_id>\d+)/$',views.grade_user),
diff --git a/yaksh/views.py b/yaksh/views.py
index ffb08d8..2a3adbf 100644
--- a/yaksh/views.py
+++ b/yaksh/views.py
@@ -153,7 +153,9 @@ def add_question(request):
if request.method == "POST" and 'save_question' in request.POST:
question_form = QuestionForm(request.POST)
if question_form.is_valid():
- new_question = question_form.save()
+ new_question = question_form.save(commit=False)
+ new_question.user = user
+ new_question.save()
return my_redirect("/exam/manage/addquestion/{0}".format(new_question.id))
else:
return my_render_to_response('yaksh/add_question.html',
@@ -190,6 +192,11 @@ def edit_question(request, question_id=None):
test_case_formset.save()
return my_redirect("/exam/manage/addquestion/{0}".format(new_question.id))
else:
+ test_case_type = question_form.cleaned_data.get('test_case_type')
+ test_case_form_class = get_object_form(model=test_case_type, exclude_fields=['question'])
+ test_case_model_class = get_model_class(test_case_type)
+ TestCaseInlineFormSet = inlineformset_factory(Question, test_case_model_class, form=test_case_form_class, extra=1)
+ test_case_formset = TestCaseInlineFormSet(request.POST, request.FILES, instance=question_instance)
return my_render_to_response('yaksh/add_question.html',
{'form': question_form,
'test_case_formset': test_case_formset,