-rwxr-xr-x  yaksh/c_cpp_files/main2.c              |  2
-rw-r--r--  yaksh/cpp_code_evaluator.py            |  3
-rw-r--r--  yaksh/grader.py                        |  6
-rw-r--r--  yaksh/java_code_evaluator.py           |  1
-rw-r--r--  yaksh/models.py                        |  6
-rw-r--r--  yaksh/templates/yaksh/grade_user.html  |  1
-rw-r--r--  yaksh/test_models.py                   |  2
-rw-r--r--  yaksh/views.py                         | 53
8 files changed, 37 insertions(+), 37 deletions(-)
diff --git a/yaksh/c_cpp_files/main2.c b/yaksh/c_cpp_files/main2.c
index ccd1768..a62195f 100755
--- a/yaksh/c_cpp_files/main2.c
+++ b/yaksh/c_cpp_files/main2.c
@@ -13,7 +13,7 @@ void check(T expect,T result)
else
{
printf("\nIncorrect:\n Expected %d got %d \n",expect,result);
- exit (0);
+ exit (1);
}
}
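
Note: this one-line change matters because the test binary's exit status is the
only verdict that crosses the process boundary back to the evaluator; with
exit(0), a failing check() was indistinguishable from a pass. A minimal sketch
of how the calling side can read that verdict (function and path names here are
illustrative, not yaksh's actual evaluator API):

    import subprocess

    def run_test_binary(path="./submit_test"):
        # exit(1) in check() now surfaces here as a nonzero returncode
        proc = subprocess.run([path], capture_output=True, text=True, timeout=10)
        return proc.returncode == 0, proc.stdout
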
diff --git a/yaksh/cpp_code_evaluator.py b/yaksh/cpp_code_evaluator.py
index 418c655..91ba703 100644
--- a/yaksh/cpp_code_evaluator.py
+++ b/yaksh/cpp_code_evaluator.py
@@ -15,7 +15,7 @@ class CppCodeEvaluator(BaseEvaluator):
"""Tests the C code obtained from Code Server"""
def __init__(self, metadata, test_case_data):
self.files = []
- self.submit_code_path = self.create_submit_code_file('submit.c')
+ self.submit_code_path = ''
self.compiled_user_answer = None
self.compiled_test_code = None
self.user_output_path = ""
@@ -62,6 +62,7 @@ class CppCodeEvaluator(BaseEvaluator):
ref_code_path = self.test_case
clean_ref_code_path, clean_test_case_path = \
self._set_test_code_file_path(ref_code_path)
+ self.submit_code_path = self.create_submit_code_file('submit.c')
if self.file_paths:
self.files = copy_files(self.file_paths)
if not isfile(clean_ref_code_path):
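
Note: creating submit.c is deferred from __init__ to compile_code, so no file
is written unless a compilation is actually attempted, while the attribute
still exists (as '') for any cleanup path that runs first; the
java_code_evaluator change below adds the same guard. A minimal sketch of the
pattern, with illustrative names rather than the real BaseEvaluator interface:

    import os
    import tempfile

    class Evaluator(object):
        def __init__(self):
            self.submit_code_path = ''  # placeholder until compile time

        def compile_code(self, user_answer):
            if not self.submit_code_path:
                fd, self.submit_code_path = tempfile.mkstemp(suffix='.c')
                with os.fdopen(fd, 'w') as f:
                    f.write(user_answer)
            # ...compile self.submit_code_path here...

        def teardown(self):
            # Safe even if compile_code() never ran: '' is falsy.
            if self.submit_code_path and os.path.exists(self.submit_code_path):
                os.remove(self.submit_code_path)
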
diff --git a/yaksh/grader.py b/yaksh/grader.py
index 41b3ac1..0c057c2 100644
--- a/yaksh/grader.py
+++ b/yaksh/grader.py
@@ -24,7 +24,6 @@ from .language_registry import create_evaluator_instance
MY_DIR = abspath(dirname(__file__))
-
registry = None
# Raised when the code times out.
@@ -34,7 +33,7 @@ class TimeoutException(Exception):
@contextlib.contextmanager
def change_dir(path):
- cur_dir = os.getcwd()
+ cur_dir = abspath(dirname(MY_DIR))
os.chdir(path)
try:
yield
@@ -77,7 +76,7 @@ class Grader(object):
self.in_dir = in_dir if in_dir else MY_DIR
- def evaluate(self, kwargs): #language, test_case_type,
+ def evaluate(self, kwargs):
"""Evaluates given code with the test cases based on
given arguments in test_case_data.
@@ -98,7 +97,6 @@ class Grader(object):
A tuple: (success, error, weight).
"""
-
self.setup()
test_case_instances = self.get_evaluator_objects(kwargs)
with change_dir(self.in_dir):
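
Note: change_dir now restores to a fixed, known-good directory (the parent of
the yaksh package) instead of whatever os.getcwd() returned on entry;
presumably this avoids os.getcwd() raising if a previous evaluation deleted
the directory the server happened to be in. The revised context manager,
reassembled from the hunk above (the finally clause is implied by the
surrounding code):

    import contextlib
    import os
    from os.path import abspath, dirname

    MY_DIR = abspath(dirname(__file__))

    @contextlib.contextmanager
    def change_dir(path):
        cur_dir = abspath(dirname(MY_DIR))  # fixed restore point
        os.chdir(path)
        try:
            yield
        finally:
            os.chdir(cur_dir)
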
diff --git a/yaksh/java_code_evaluator.py b/yaksh/java_code_evaluator.py
index ab8160c..91e5840 100644
--- a/yaksh/java_code_evaluator.py
+++ b/yaksh/java_code_evaluator.py
@@ -18,6 +18,7 @@ class JavaCodeEvaluator(BaseEvaluator):
self.files = []
self.compiled_user_answer = None
self.compiled_test_code = None
+ self.submit_code_path = ""
self.user_output_path = ""
self.ref_output_path = ""
diff --git a/yaksh/models.py b/yaksh/models.py
index 378d7a1..08feab6 100644
--- a/yaksh/models.py
+++ b/yaksh/models.py
@@ -1064,20 +1064,20 @@ class AnswerPaper(models.Model):
For code questions success is True only if the answer is correct.
"""
- result = {'success': True, 'error': 'Incorrect answer', 'weight': 0.0}
+ result = {'success': True, 'error': ['Incorrect answer'], 'weight': 0.0}
correct = False
if user_answer is not None:
if question.type == 'mcq':
expected_answer = question.get_test_case(correct=True).options
if user_answer.strip() == expected_answer.strip():
correct = True
- result['error'] = 'Correct answer'
+ result['error'] = ['Correct answer']
elif question.type == 'mcc':
expected_answers = []
for opt in question.get_test_cases(correct=True):
expected_answers.append(opt.options)
if set(user_answer) == set(expected_answers):
- result['error'] = 'Correct answer'
+ result['error'] = ['Correct answer']
correct = True
elif question.type == 'code':
user_dir = self.user.profile.get_user_dir()
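
Note: result['error'] changes from a plain string to a list of messages, so a
code question can carry one entry per failing test case and callers can render
them individually; views.py already persists the field with json.dumps. A
short sketch of the round trip (the sample messages are invented for
illustration):

    import json

    result = {'success': False,
              'error': ['Incorrect: expected 4 got 5',
                        'Incorrect: expected 9 got 0'],
              'weight': 0.0}
    stored = json.dumps(result.get('error'))  # as saved on the Answer
    for message in json.loads(stored):        # one entry per failure
        print('- ' + message)
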
diff --git a/yaksh/templates/yaksh/grade_user.html b/yaksh/templates/yaksh/grade_user.html
index 4fc5026..516a6d0 100644
--- a/yaksh/templates/yaksh/grade_user.html
+++ b/yaksh/templates/yaksh/grade_user.html
@@ -156,7 +156,6 @@ Status : <b style="color: green;"> Passed </b><br/>
{% else %}
<h5>Student answer: </h5>
{% for answer in answers %}
- {% if not answer.0.skipped %}
{% if answer.0.correct %}
<div class="panel panel-success">
<div class="panel-heading">Correct:
diff --git a/yaksh/test_models.py b/yaksh/test_models.py
index dc69024..e5bd104 100644
--- a/yaksh/test_models.py
+++ b/yaksh/test_models.py
@@ -552,7 +552,7 @@ class AnswerPaperTestCases(unittest.TestCase):
# Then
self.assertTrue(correct)
self.assertTrue(result['success'])
- self.assertEqual(result['error'], 'Correct answer')
+ self.assertEqual(result['error'], ['Correct answer'])
self.answer.correct = True
self.answer.marks = 1
diff --git a/yaksh/views.py b/yaksh/views.py
index 802d9d9..35121e7 100644
--- a/yaksh/views.py
+++ b/yaksh/views.py
@@ -448,67 +448,68 @@ def check(request, q_id, attempt_num=None, questionpaper_id=None):
user = request.user
paper = get_object_or_404(AnswerPaper, user=request.user, attempt_number=attempt_num,
question_paper=questionpaper_id)
- question = get_object_or_404(Question, pk=q_id)
- if question in paper.questions_answered.all():
+ current_question = get_object_or_404(Question, pk=q_id)
+ if current_question in paper.questions_answered.all():
next_q = paper.next_question(q_id)
return show_question(request, next_q, paper)
if request.method == 'POST':
snippet_code = request.POST.get('snippet')
# Add the submitted answer, regardless of whether it is correct.
- if question.type == 'mcq':
+ if current_question.type == 'mcq':
user_answer = request.POST.get('answer')
- elif question.type == 'mcc':
+ elif current_question.type == 'mcc':
user_answer = request.POST.getlist('answer')
- elif question.type == 'upload':
+ elif current_question.type == 'upload':
assign = AssignmentUpload()
assign.user = user.profile
- assign.assignmentQuestion = question
+ assign.assignmentQuestion = current_question
# if time runs out at an upload question, the form is submitted
# without validation
if 'assignment' in request.FILES:
assign.assignmentFile = request.FILES['assignment']
assign.save()
user_answer = 'ASSIGNMENT UPLOADED'
- next_q = paper.completed_question(question.id)
+ next_q = paper.completed_question(current_question.id)
return show_question(request, next_q, paper)
else:
user_code = request.POST.get('answer')
user_answer = snippet_code + "\n" + user_code if snippet_code else user_code
- new_answer = Answer(question=question, answer=user_answer,
+ if not user_answer:
+ msg = ["Please submit a valid option or code"]
+ return show_question(request, current_question, paper, msg)
+ new_answer = Answer(question=current_question, answer=user_answer,
correct=False)
new_answer.save()
paper.answers.add(new_answer)
- if not user_answer:
- msg = "Please submit a valid option or code"
- return show_question(request, question, paper, msg)
# If we were not skipped, we were asked to check. For any non-mcq
# questions, we obtain the results via XML-RPC with the code executed
# safely in a separate process (the code_server.py) running as nobody.
- json_data = question.consolidate_answer_data(user_answer) \
- if question.type == 'code' else None
- correct, result = paper.validate_answer(user_answer, question, json_data)
+ json_data = current_question.consolidate_answer_data(user_answer) \
+ if current_question.type == 'code' else None
+ correct, result = paper.validate_answer(user_answer, current_question, json_data)
if correct or result.get('success'):
- new_answer.marks = (question.points * result['weight'] /
- question.get_maximum_test_case_weight()) \
- if question.partial_grading and question.type == 'code' else question.points
+ new_answer.marks = (current_question.points * result['weight'] /
+ current_question.get_maximum_test_case_weight()) \
+ if current_question.partial_grading and current_question.type == 'code' else current_question.points
new_answer.correct = correct
+ error_message = None
new_answer.error = json.dumps(result.get('error'))
+ next_question = paper.completed_question(current_question.id)
else:
+ new_answer.marks = (current_question.points * result['weight'] /
+ current_question.get_maximum_test_case_weight()) \
+ if current_question.partial_grading and current_question.type == 'code' else 0
+ error_message = result.get('error')
new_answer.error = json.dumps(result.get('error'))
- new_answer.marks = (question.points * result['weight'] /
- question.get_maximum_test_case_weight()) \
- if question.partial_grading and question.type == 'code' else 0
+ next_question = current_question if current_question.type == 'code' \
+ else paper.completed_question(current_question.id)
new_answer.save()
paper.update_marks('inprogress')
paper.set_end_time(timezone.now())
- if question.type == 'code': # Should only happen for non-mcq questions.
- return show_question(request, question, paper, result.get('error'))
- else:
- next_q = paper.completed_question(question.id)
- return show_question(request, next_q, paper)
+ return show_question(request, next_question, paper, error_message)
else:
- return show_question(request, question, paper)
+ return show_question(request, current_question, paper)
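
Note: the check() view is reworked so that an empty submission is rejected
before an Answer row is ever created, both branches compute a next_question,
and the function falls through to a single show_question() return; only
incorrect code questions are redisplayed in place with the error list, while
mcq/mcc answers advance either way. A condensed sketch of the new single-exit
flow (names follow the diff, marks computation and bodies elided):

    if correct or result.get('success'):
        error_message = None
        next_question = paper.completed_question(current_question.id)
    else:
        error_message = result.get('error')
        next_question = (current_question if current_question.type == 'code'
                         else paper.completed_question(current_question.id))
    return show_question(request, next_question, paper, error_message)
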