-rw-r--r--  yaksh/code_evaluator.py                   30
-rw-r--r--  yaksh/models.py                           35
-rw-r--r--  yaksh/python_assertion_evaluator.py       10
-rw-r--r--  yaksh/templates/yaksh/add_question.html    1
-rw-r--r--  yaksh/views.py                             5
-rw-r--r--  yaksh/xmlrpc_clients.py                    4
6 files changed, 59 insertions, 26 deletions
diff --git a/yaksh/code_evaluator.py b/yaksh/code_evaluator.py
index 79f616d..b404d57 100644
--- a/yaksh/code_evaluator.py
+++ b/yaksh/code_evaluator.py
@@ -82,14 +82,14 @@ class CodeEvaluator(object):
Returns
-------
- A tuple: (success, error message).
+ A tuple: (success, error message, marks).
"""
self.setup()
- success, err = self.safe_evaluate(**kwargs)
+ success, error, marks = self.safe_evaluate(**kwargs)
self.teardown()
- result = {'success': success, 'error': err}
+ result = {'success': success, 'error': error, 'marks': marks}
return result
# Private Protocol ##########
@@ -99,7 +99,7 @@ class CodeEvaluator(object):
os.makedirs(self.in_dir)
self._change_dir(self.in_dir)
- def safe_evaluate(self, user_answer, test_case_data, file_paths=None):
+ def safe_evaluate(self, user_answer, partial_grading, test_case_data, file_paths=None):
"""
Handles code evaluation along with compilation, signal handling
and Exception handling
@@ -108,32 +108,40 @@ class CodeEvaluator(object):
# Add a new signal handler for the execution of this code.
prev_handler = create_signal_handler()
success = False
+ error = ""
+ marks = 0.0
# Do whatever testing needed.
try:
for test_case in test_case_data:
success = False
self.compile_code(user_answer, file_paths, **test_case)
- success, err = self.check_code(user_answer, file_paths, **test_case)
- if not success:
- break
+ success, err, test_case_marks = self.check_code(user_answer,
+ file_paths,
+ partial_grading,
+ **test_case
+ )
+ if success:
+ marks += test_case_marks
+ else:
+ error += err + "\n"
except TimeoutException:
- err = self.timeout_msg
+ error = self.timeout_msg
except OSError:
msg = traceback.format_exc(limit=0)
- err = "Error: {0}".format(msg)
+ error = "Error: {0}".format(msg)
except Exception:
exc_type, exc_value, exc_tb = sys.exc_info()
tb_list = traceback.format_exception(exc_type, exc_value, exc_tb)
if len(tb_list) > 2:
del tb_list[1:3]
- err = "Error: {0}".format("".join(tb_list))
+ error = "Error: {0}".format("".join(tb_list))
finally:
# Set back any original signal handler.
set_original_signal_handler(prev_handler)
- return success, err
+ return success, error, marks
def teardown(self):
# Cancel the signal
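Note: evaluate() now returns a dict with a 'marks' key alongside 'success' and 'error', and safe_evaluate() keeps iterating over failing test cases instead of breaking out of the loop. A minimal stand-alone sketch of that contract (the evaluate/check_code names and the stub below are illustrative, not the evaluator's real API; the real safe_evaluate also handles compilation, signals and timeouts):

    # Sketch of the new (success, error, marks) accumulation.
    def evaluate(test_case_data, check_code):
        success, error, marks = False, "", 0.0
        for test_case in test_case_data:
            success, err, test_case_marks = check_code(**test_case)
            if success:
                marks += test_case_marks      # add this test case's marks
            else:
                error += err + "\n"           # collect every failure message
        return {'success': success, 'error': error, 'marks': marks}

    # Two test cases worth 0.5 each; the second one fails.
    stub = lambda passed, marks: (passed, "" if passed else "assertion failed", marks)
    print(evaluate([{'passed': True, 'marks': 0.5},
                    {'passed': False, 'marks': 0.5}], stub))
    # {'success': False, 'error': 'assertion failed\n', 'marks': 0.5}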
diff --git a/yaksh/models.py b/yaksh/models.py
index 7f9eead..e7a96c9 100644
--- a/yaksh/models.py
+++ b/yaksh/models.py
@@ -245,6 +245,9 @@ class Question(models.Model):
# user for particular question
user = models.ForeignKey(User, related_name="user")
+ # Does this question allow partial grading
+ partial_grading = models.BooleanField(default=False)
+
def consolidate_answer_data(self, user_answer):
question_data = {}
test_case_data = []
@@ -257,6 +260,7 @@ class Question(models.Model):
question_data['test_case_data'] = test_case_data
question_data['user_answer'] = user_answer
+ question_data['partial_grading'] = self.partial_grading
files = FileUpload.objects.filter(question=self)
if files:
question_data['file_paths'] = [(file.file.path, file.extract)
@@ -937,11 +941,17 @@ class AnswerPaper(models.Model):
def _update_marks_obtained(self):
"""Updates the total marks earned by student for this paper."""
- marks = sum([x.marks for x in self.answers.filter(marks__gt=0.0)])
- if not marks:
- self.marks_obtained = 0
- else:
- self.marks_obtained = marks
+ # marks = sum([x.marks for x in self.answers.filter(marks__gt=0.0)])
+ # if not marks:
+ # self.marks_obtained = 0
+ # else:
+ # self.marks_obtained = marks
+ marks = 0
+ for question in self.questions.all():
+ max_marks = max([a.marks for a in self.answers.filter(question=question)])
+ marks += max_marks
+ self.marks_obtained = marks
+
def _update_percent(self):
"""Updates the percent gained by the student for this paper."""
@@ -1023,7 +1033,7 @@ class AnswerPaper(models.Model):
For code questions success is True only if the answer is correct.
"""
- result = {'success': True, 'error': 'Incorrect answer'}
+ result = {'success': True, 'error': 'Incorrect answer', 'marks': 0.0}
correct = False
if user_answer is not None:
if question.type == 'mcq':
@@ -1071,11 +1081,16 @@ class AnswerPaper(models.Model):
json_data = question.consolidate_answer_data(answer) \
if question.type == 'code' else None
correct, result = self.validate_answer(answer, question, json_data)
- user_answer.marks = question.points if correct else 0.0
user_answer.correct = correct
user_answer.error = result.get('error')
+ if correct:
+ user_answer.marks = question.points * result['marks'] \
+ if question.partial_grading and question.type == 'code' else question.points
+ else:
+ user_answer.marks = question.points * result['marks'] \
+ if question.partial_grading and question.type == 'code' else 0
user_answer.save()
- self.update_marks('complete')
+ self.update_marks('completed')
return True, msg
def __str__(self):
@@ -1098,9 +1113,11 @@ class TestCase(models.Model):
class StandardTestCase(TestCase):
test_case = models.TextField(blank=True)
+ marks = models.FloatField(default=0.0)
def get_field_value(self):
- return {"test_case": self.test_case}
+ return {"test_case": self.test_case,
+ "marks": self.marks}
def __str__(self):
return u'Question: {0} | Test Case: {1}'.format(self.question,
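Note: _update_marks_obtained now sums, per question, the best mark among that question's answers instead of summing every positive mark on the paper. A rough stand-alone sketch of the rule (the empty-list guard here is an assumption added for the sketch; the diff's max() call has no such guard):

    # Paper total = sum over questions of the highest mark scored on any
    # attempt at that question (skipping unanswered questions).
    def marks_obtained(marks_per_question):
        return sum(max(marks) for marks in marks_per_question.values() if marks)

    print(marks_obtained({'q1': [0.0, 4.0, 2.5], 'q2': [10.0], 'q3': []}))  # 14.0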
diff --git a/yaksh/python_assertion_evaluator.py b/yaksh/python_assertion_evaluator.py
index dd1c041..350bc38 100644
--- a/yaksh/python_assertion_evaluator.py
+++ b/yaksh/python_assertion_evaluator.py
@@ -17,6 +17,7 @@ class PythonAssertionEvaluator(CodeEvaluator):
def setup(self):
super(PythonAssertionEvaluator, self).setup()
self.exec_scope = None
+ self.files = []
def teardown(self):
# Delete the created file.
@@ -24,8 +25,7 @@ class PythonAssertionEvaluator(CodeEvaluator):
delete_files(self.files)
super(PythonAssertionEvaluator, self).teardown()
- def compile_code(self, user_answer, file_paths, test_case):
- self.files = []
+ def compile_code(self, user_answer, file_paths, test_case, marks):
if file_paths:
self.files = copy_files(file_paths)
if self.exec_scope:
@@ -36,8 +36,9 @@ class PythonAssertionEvaluator(CodeEvaluator):
exec(submitted, self.exec_scope)
return self.exec_scope
- def check_code(self, user_answer, file_paths, test_case):
+ def check_code(self, user_answer, file_paths, partial_grading, test_case, marks):
success = False
+ test_case_marks = 0.0
try:
tb = None
_tests = compile(test_case, '<string>', mode='exec')
@@ -53,5 +54,6 @@ class PythonAssertionEvaluator(CodeEvaluator):
else:
success = True
err = 'Correct answer'
+ test_case_marks = float(marks) if partial_grading else 0.0
del tb
- return success, err
+ return success, err, test_case_marks
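Note: check_code now also returns the test case's marks, awarded only when the assertion passes and partial grading is enabled. A rough stand-alone equivalent, without the evaluator's file handling and traceback trimming (the function name and shape are illustrative):

    # Run the user's code, then the assertion; award the test case's marks
    # only on success and only when partial grading is enabled.
    def check_assertion(user_answer, test_case, partial_grading, marks):
        scope = {}
        exec(compile(user_answer, '<string>', 'exec'), scope)
        try:
            exec(compile(test_case, '<string>', 'exec'), scope)
        except AssertionError as exc:
            return False, "AssertionError: {0}".format(exc), 0.0
        return True, 'Correct answer', float(marks) if partial_grading else 0.0

    print(check_assertion("def add(a, b):\n    return a + b",
                          "assert add(1, 2) == 3", True, 0.5))
    # (True, 'Correct answer', 0.5)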
diff --git a/yaksh/templates/yaksh/add_question.html b/yaksh/templates/yaksh/add_question.html
index c0d53f8..9822333 100644
--- a/yaksh/templates/yaksh/add_question.html
+++ b/yaksh/templates/yaksh/add_question.html
@@ -24,6 +24,7 @@
<tr><td>Description: <td>{{ form.description}} {{form.description.errors}}
<tr><td>Tags: <td>{{ form.tags }}
<tr><td>Snippet: <td>{{ form.snippet }}
+ <tr><td>Partial Grading: <td>{{ form.partial_grading }}
<tr><td> Test Case Type: <td> {{ form.test_case_type }}{{ form.test_case_type.errors }}
<tr><td> File: <td> {{ upload_form.file_field }}{{ upload_form.file_field.errors }}
{% if uploaded_files %}<br><b>Uploaded files:</b><br>Check the box to delete or extract files<br>
diff --git a/yaksh/views.py b/yaksh/views.py
index 1afcef7..2478544 100644
--- a/yaksh/views.py
+++ b/yaksh/views.py
@@ -517,11 +517,14 @@ def check(request, q_id, attempt_num=None, questionpaper_id=None):
if question.type == 'code' else None
correct, result = paper.validate_answer(user_answer, question, json_data)
if correct:
+ new_answer.marks = question.points * result['marks'] if question.partial_grading \
+ and question.type == 'code' else question.points
new_answer.correct = correct
- new_answer.marks = question.points
new_answer.error = result.get('error')
else:
new_answer.error = result.get('error')
+ new_answer.marks = question.points * result['marks'] if question.partial_grading \
+ and question.type == 'code' else 0
new_answer.save()
paper.update_marks('inprogress')
paper.set_end_time(timezone.now())
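Note: both views.check and AnswerPaper.add_answer treat the per-test-case marks summed by the evaluator as a weight on the question's points when partial grading is on. A small illustration of that rule (assuming, as the new model code does, that a wrong answer without partial grading scores zero):

    # result['marks'] is the summed per-test-case weight from the evaluator.
    def answer_marks(points, q_type, partial_grading, correct, result):
        if q_type == 'code' and partial_grading:
            return points * result['marks']
        return points if correct else 0.0

    print(answer_marks(10.0, 'code', True, False, {'marks': 0.4}))   # 4.0
    print(answer_marks(10.0, 'code', False, False, {'marks': 0.4}))  # 0.0
    print(answer_marks(10.0, 'mcq', False, True, {'marks': 0.0}))    # 10.0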
diff --git a/yaksh/xmlrpc_clients.py b/yaksh/xmlrpc_clients.py
index 4da70dd..ff0a2a7 100644
--- a/yaksh/xmlrpc_clients.py
+++ b/yaksh/xmlrpc_clients.py
@@ -62,7 +62,9 @@ class CodeServerProxy(object):
server = self._get_server()
result = server.check_code(language, test_case_type, json_data, user_dir)
except ConnectionError:
- result = json.dumps({'success': False, 'error': 'Unable to connect to any code servers!'})
+ result = json.dumps({'success': False,
+ 'marks': 0.0,
+ 'error': 'Unable to connect to any code servers!'})
return result
def _get_server(self):
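Note: the connection-error fallback now carries the same three keys as a normal evaluation result, so callers can read 'marks' unconditionally. A quick check of the JSON shape (illustrative):

    import json

    fallback = json.dumps({'success': False,
                           'marks': 0.0,
                           'error': 'Unable to connect to any code servers!'})
    print(json.loads(fallback)['marks'])   # 0.0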