summary refs log tree commit diff
path: root/yaksh/models.py
diff options
context:
space:
mode:
Diffstat (limited to 'yaksh/models.py')
-rw-r--r--    yaksh/models.py    64
1 file changed, 50 insertions, 14 deletions
diff --git a/yaksh/models.py b/yaksh/models.py
index d65970b..ca41885 100644
--- a/yaksh/models.py
+++ b/yaksh/models.py
@@ -1,3 +1,4 @@
+from __future__ import unicode_literals
from datetime import datetime, timedelta
import json
from random import sample, shuffle
@@ -22,6 +23,7 @@ from os.path import join, abspath, dirname, exists
import shutil
import zipfile
import tempfile
+from textwrap import dedent
from .file_utils import extract_files, delete_files
from yaksh.xmlrpc_clients import code_server
from django.conf import settings
@@ -65,6 +67,43 @@ test_status = (
('completed', 'Completed'),
)
+instructions_data = dedent("""\
+ <p>
+ This examination system has been developed with the intention of
+ making you learn programming and be assessed in an interactive and
+ fun manner. You will be presented with a series of programming questions
+ and problems that you will answer online and get immediate
+ feedback for.
+ </p>
+ <p>
+ Here are some important instructions and rules that you should
+ understand carefully.</p>
+ <ul>
+ <li>For any programming questions, you can submit solutions as many
+ times as you want without a penalty. You may skip questions
+ and solve them later.</li>
+ <li> You <strong>may</strong> use your computer's Python/IPython
+ shell or an editor to solve the problem and cut/paste the
+ solution to the web interface.
+ </li>
+ <li> <strong>You are not allowed to use any internet resources,
+ i.e. no google etc.</strong>
+ </li>
+ <li> Do not copy or share the questions or answers with anyone
+ until the exam is complete <strong>for everyone</strong>.
+ </li>
+ <li> <strong>All</strong> your attempts at the questions are logged.
+ Do not try to outsmart and break the testing system.
+ If you do, we know who you are and we will expel you from the
+ course. You have been warned.
+ </li>
+ </ul>
+ <p>
+ We hope you enjoy taking this
+ exam !!!
+ </p>
+ """)
+
def get_assignment_dir(instance, filename):
return '%s/%s/%s' % (instance.user.user, instance.assignmentQuestion.id, filename)
@@ -351,7 +390,7 @@ class Question(models.Model):
for file_name, extract in file_names:
q_file = os.path.join(path, file_name)
if os.path.exists(q_file):
- que_file = open(q_file, 'r')
+ que_file = open(q_file, 'rb')
# Converting to Python file object with
# some Django-specific additions
django_file = File(que_file)
@@ -378,8 +417,7 @@ class Question(models.Model):
self.load_questions(questions_list, user, file_path, files)
def create_demo_questions(self, user):
- zip_file_path = os.path.join(os.getcwd(), 'yaksh',
- 'fixtures', 'demo_questions.zip')
+ zip_file_path = os.path.join(settings.FIXTURE_DIRS, 'demo_questions.zip')
files, extract_path = extract_files(zip_file_path)
self.read_json(extract_path, user, files)
@@ -564,6 +602,7 @@ class Quiz(models.Model):
end_date_time=timezone.now() + timedelta(176590),
duration=30, active=True,
attempts_allowed=-1,
+ instructions=instructions_data,
time_between_attempts=0,
description='Yaksh Demo quiz', pass_criteria=0,
language='Python', prerequisite=None,
@@ -702,7 +741,7 @@ class QuestionPaper(models.Model):
def create_demo_quiz_ppr(self, demo_quiz, user):
question_paper = QuestionPaper.objects.create(quiz=demo_quiz,
- total_marks=7.0,
+ total_marks=6.0,
shuffle_questions=True
)
questions = Question.objects.filter(active=True,
@@ -1059,28 +1098,25 @@ class AnswerPaper(models.Model):
For code questions success is True only if the answer is correct.
"""
- result = {'success': True, 'error': ['Incorrect answer'], 'weight': 0.0}
- correct = False
+ result = {'success': False, 'error': ['Incorrect answer'], 'weight': 0.0}
if user_answer is not None:
if question.type == 'mcq':
expected_answer = question.get_test_case(correct=True).options
if user_answer.strip() == expected_answer.strip():
- correct = True
+ result['success'] = True
result['error'] = ['Correct answer']
elif question.type == 'mcc':
expected_answers = []
for opt in question.get_test_cases(correct=True):
expected_answers.append(opt.options)
if set(user_answer) == set(expected_answers):
+ result['success'] = True
result['error'] = ['Correct answer']
- correct = True
elif question.type == 'code':
user_dir = self.user.profile.get_user_dir()
json_result = code_server.run_code(question.language, json_data, user_dir)
result = json.loads(json_result)
- if result.get('success'):
- correct = True
- return correct, result
+ return result
def regrade(self, question_id):
try:
@@ -1105,10 +1141,10 @@ class AnswerPaper(models.Model):
answer = user_answer.answer
json_data = question.consolidate_answer_data(answer) \
if question.type == 'code' else None
- correct, result = self.validate_answer(answer, question, json_data)
- user_answer.correct = correct
+ result = self.validate_answer(answer, question, json_data)
+ user_answer.correct = result.get('success')
user_answer.error = result.get('error')
- if correct:
+ if result.get('success'):
user_answer.marks = (question.points * result['weight'] /
question.get_maximum_test_case_weight()) \
if question.partial_grading and question.type == 'code' else question.points