author    Prabhu Ramachandran    2011-11-25 18:48:13 +0530
committer Prabhu Ramachandran    2011-11-25 18:48:13 +0530
commit    fdc531b561565345847812f409ee44af0a784e82
tree      447b297d28dccb700dcd244404e6cd748191890d /exam
parent    b4023e17d6f97e51ffde740c17d19630b5a9c2d1
ENH: Adding support for Multiple Choice Questions
Adds simple support for multiple-choice questions, which are also auto-checked. Also includes many template fixes and useful feature additions. Note that this changes the database schema.
Diffstat (limited to 'exam')
-rw-r--r--  exam/management/commands/dump_user_data.py      | 12
-rw-r--r--  exam/management/commands/load_questions_xml.py  | 14
-rw-r--r--  exam/models.py                                  | 15
-rw-r--r--  exam/views.py                                   | 39
4 files changed, 52 insertions, 28 deletions
diff --git a/exam/management/commands/dump_user_data.py b/exam/management/commands/dump_user_data.py
index f081565..6e0ca2a 100644
--- a/exam/management/commands/dump_user_data.py
+++ b/exam/management/commands/dump_user_data.py
@@ -34,12 +34,20 @@ Answers
-------
{% for question, answers in paper.get_question_answers.items %}
Question: {{ question.id }}. {{ question.summary }} (Points: {{ question.points }})
+{% if question.type == "mcq" %}\
+###############################################################################
+Choices: {% for option in question.options.strip.splitlines %} {{option}}, {% endfor %}
+Student answer: {{ answers.0|safe }}
+{% else %}{# non-mcq questions #}\
{% for answer in answers %}\
###############################################################################
-{{ answer.answer|safe }}
+{{ answer.answer.strip|safe }}
# Autocheck: {{ answer.error|safe }}
-# Marks: {{ answer.marks }}
{% endfor %}{# for answer in answers #}\
+{% endif %}\
+{% with answers|last as answer %}\
+Marks: {{answer.marks}}
+{% endwith %}\
{% endfor %}{# for question, answers ... #}\
Teacher comments
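
For reference, the dump this updated template produces for an MCQ question would look roughly like the sketch below (an illustrative rendering with invented values, not actual output; the student-answer line prints the first Answer object, so its exact form depends on the Answer model's string representation):

Question: 7. Python keywords (Points: 1.0)
###############################################################################
Choices:  lambda,  struct,  switch,  typedef,
Student answer: lambda
Marks: 1.0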
diff --git a/exam/management/commands/load_questions_xml.py b/exam/management/commands/load_questions_xml.py
index b4151ae..8bc2701 100644
--- a/exam/management/commands/load_questions_xml.py
+++ b/exam/management/commands/load_questions_xml.py
@@ -35,20 +35,24 @@ def load_questions_xml(filename):
desc_node = question.getElementsByTagName("description")[0]
description = (desc_node.childNodes[0].data).strip()
- lang_node = question.getElementsByTagName("language")[0]
- language = (lang_node.childNodes[0].data).strip()
+ type_node = question.getElementsByTagName("type")[0]
+ type = (type_node.childNodes[0].data).strip()
points_node = question.getElementsByTagName("points")[0]
- points = int((points_node.childNodes[0].data).strip()) \
- if points_node else 1
+ points = float((points_node.childNodes[0].data).strip()) \
+ if points_node else 1.0
test_node = question.getElementsByTagName("test")[0]
test = decode_html((test_node.childNodes[0].data).strip())
+ opt_node = question.getElementsByTagName("options")[0]
+ opt = decode_html((opt_node.childNodes[0].data).strip())
+
new_question = Question(summary=summary,
description=description,
points=points,
- language=language,
+ options=opt,
+ type=type,
test=test)
new_question.save()
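
Going by the tags this loader reads (description, type, points, test and options), a multiple-choice entry in the questions XML would look roughly like the sketch below; the <summary> tag and the enclosing element name are assumptions, since the top of the function is not shown in this hunk. For an MCQ the <test> node is assumed to hold the correct option verbatim, which is what check() in views.py compares the submission against:

<question>
  <summary>Python keywords</summary>
  <description>Which of the following is a Python keyword?</description>
  <type>mcq</type>
  <points>1.0</points>
  <test>lambda</test>
  <options>lambda
struct
switch
typedef</options>
</question>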
diff --git a/exam/models.py b/exam/models.py
index ef4312f..717e02e 100644
--- a/exam/models.py
+++ b/exam/models.py
@@ -12,9 +12,10 @@ class Profile(models.Model):
position = models.CharField(max_length=64)
-LANGUAGE_CHOICES = (
+QUESTION_TYPE_CHOICES = (
("python", "Python"),
("bash", "Bash"),
+ ("mcq", "MultipleChoice"),
)
################################################################################
@@ -28,14 +29,16 @@ class Question(models.Model):
description = models.TextField()
# Number of points for the question.
- points = models.IntegerField(default=1)
+ points = models.FloatField(default=1.0)
# Test cases for the question in the form of code that is run.
- # This is simple Python code.
- test = models.TextField()
+ test = models.TextField(blank=True)
- # The language being tested.
- language = models.CharField(max_length=10, choices=LANGUAGE_CHOICES)
+ # Any multiple choice options. Place one option per line.
+ options = models.TextField(blank=True)
+
+ # The type of question.
+ type = models.CharField(max_length=24, choices=QUESTION_TYPE_CHOICES)
# Is this question active or not. If it is inactive it will not be used
# when creating a QuestionPaper.
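
With the new fields in place, an MCQ can be created from the Django shell along these lines (a minimal sketch with invented values; it assumes, as the check() view implies, that the test field stores the correct option for MCQs). Since language is dropped, options and type are added, and points changes type, existing databases need a manual schema update; this commit predates Django's built-in migrations.

from exam.models import Question

q = Question(summary="Python keywords",
             description="Which of the following is a Python keyword?",
             points=1.0,
             type="mcq",
             # One option per line, as the options field expects.
             options="lambda\nstruct\nswitch\ntypedef",
             # For MCQs, the test field is assumed to store the correct option.
             test="lambda")
q.save()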
diff --git a/exam/views.py b/exam/views.py
index ed73adf..e8e2e73 100644
--- a/exam/views.py
+++ b/exam/views.py
@@ -200,24 +200,31 @@ def check(request, q_id):
new_answer = Answer(question=question, answer=answer, correct=False)
new_answer.save()
paper.answers.add(new_answer)
-
- # Otherwise we were asked to check. We obtain the results via XML-RPC
- # with the code executed safely in a separate process (the python_server.py)
- # running as nobody.
- user_dir = get_user_dir(user)
- success, err_msg = code_server.run_code(answer, question.test,
- user_dir, question.language)
- new_answer.error = err_msg
-
- if success:
- # Note the success and save it along with the marks.
- new_answer.correct = success
- new_answer.marks = question.points
+
+ # If we were not skipped, we were asked to check. For any non-mcq
+ # questions, we obtain the results via XML-RPC with the code executed
+ # safely in a separate process (the code_server.py) running as nobody.
+ if question.type == 'mcq':
+ success = True # Only one attempt allowed for MCQs.
+ if answer.strip() == question.test.strip():
+ new_answer.correct = True
+ new_answer.marks = question.points
+ new_answer.error = 'Correct answer'
+ else:
+ new_answer.error = 'Incorrect answer'
+ else:
+ user_dir = get_user_dir(user)
+ success, err_msg = code_server.run_code(answer, question.test,
+ user_dir, question.type)
+ new_answer.error = err_msg
+ if success:
+ # Note the success and save it along with the marks.
+ new_answer.correct = success
+ new_answer.marks = question.points
new_answer.save()
- ci = RequestContext(request)
- if not success:
+ if not success: # Should only happen for non-mcq questions.
time_left = paper.time_left()
if time_left == 0:
return complete(request, reason='Your time is up!')
@@ -228,6 +235,7 @@ def check(request, q_id):
'paper': paper, 'last_attempt': answer,
'quiz_name': paper.quiz.description,
'time_left': time_left}
+ ci = RequestContext(request)
return my_render_to_response('exam/question.html', context,
context_instance=ci)
@@ -271,6 +279,7 @@ def monitor(request, quiz_id=None):
quiz = Quiz.objects.get(id=quiz_id)
except Quiz.DoesNotExist:
papers = []
+ quiz = None
else:
papers = QuestionPaper.objects.filter(quiz=quiz,
user__profile__isnull=False)
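
The MCQ branch of check() boils down to a single-attempt, whitespace-insensitive string comparison between the submission and the stored test value. In isolation the grading rule amounts to the following sketch (a hypothetical helper, not part of this patch):

def grade_mcq(answer, question):
    """Return (correct, marks, message) for a multiple-choice answer.

    Mirrors the mcq branch of check(): one attempt, graded by comparing
    the stripped submission against the stripped test value.
    """
    if answer.strip() == question.test.strip():
        return True, question.points, 'Correct answer'
    return False, 0.0, 'Incorrect answer'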