path: root/yaksh/models.py
author:    ankitjavalkar  2016-03-11 12:11:49 +0530
committer: ankitjavalkar  2016-05-05 18:59:22 +0530
commit:    1e993bee18028c59d809f49d853b60e41326991c (patch)
tree:      e1af06404a634e54f9ad8a27c6948b131481b127 /yaksh/models.py
parent:    ceb4f2cbc1a03835a3c7e34d806ec21e47e3f059 (diff)
Add a python standard out evaluator
Diffstat (limited to 'yaksh/models.py')
-rw-r--r--   yaksh/models.py   44
1 file changed, 23 insertions(+), 21 deletions(-)
diff --git a/yaksh/models.py b/yaksh/models.py
index c4f3561..6fa96bf 100644
--- a/yaksh/models.py
+++ b/yaksh/models.py
@@ -33,7 +33,7 @@ enrollment_methods = (
test_case_types = (
("assert_based", "Assertion Based Testcase"),
# ("argument_based", "Multiple Correct Choices"),
- # ("stdout_based", "Code"),
+ ("stdout_based", "Stdout Based Testcase"),
)
attempts = [(i, i) for i in range(1, 6)]
@@ -170,6 +170,7 @@ class Question(models.Model):
# The type of evaluator
test_case_type = models.CharField(max_length=24, choices=test_case_types)
+
# Is this question active or not. If it is inactive it will not be used
# when creating a QuestionPaper.
active = models.BooleanField(default=True)
@@ -183,38 +184,39 @@ class Question(models.Model):
# user for particular question
user = models.ForeignKey(User, related_name="user")
- def consolidate_answer_data(self, test_cases, user_answer):
+ def consolidate_answer_data(self, user_answer):
test_case_data_dict = []
question_info_dict = {}
- for test_case in test_cases:
- kw_args_dict = {}
- pos_args_list = []
+ # for test_case in test_cases:
+ # kw_args_dict = {}
+ # pos_args_list = []
- test_case_data = {}
- test_case_data['test_id'] = test_case.id
- test_case_data['func_name'] = test_case.func_name
- test_case_data['expected_answer'] = test_case.expected_answer
+ # test_case_data = {}
+ # test_case_data['test_id'] = test_case.id
+ # test_case_data['func_name'] = test_case.func_name
+ # test_case_data['expected_answer'] = test_case.expected_answer
- if test_case.kw_args:
- for args in test_case.kw_args.split(","):
- arg_name, arg_value = args.split("=")
- kw_args_dict[arg_name.strip()] = arg_value.strip()
+ # if test_case.kw_args:
+ # for args in test_case.kw_args.split(","):
+ # arg_name, arg_value = args.split("=")
+ # kw_args_dict[arg_name.strip()] = arg_value.strip()
- if test_case.pos_args:
- for args in test_case.pos_args.split(","):
- pos_args_list.append(args.strip())
+ # if test_case.pos_args:
+ # for args in test_case.pos_args.split(","):
+ # pos_args_list.append(args.strip())
- test_case_data['kw_args'] = kw_args_dict
- test_case_data['pos_args'] = pos_args_list
- test_case_data_dict.append(test_case_data)
+ # test_case_data['kw_args'] = kw_args_dict
+ # test_case_data['pos_args'] = pos_args_list
+ # test_case_data_dict.append(test_case_data)
# question_info_dict['language'] = self.language
- question_info_dict['id'] = self.id
+ # question_info_dict['id'] = self.id
question_info_dict['user_answer'] = user_answer
- question_info_dict['test_parameter'] = test_case_data_dict
+ # question_info_dict['test_parameter'] = test_case_data_dict
question_info_dict['ref_code_path'] = self.ref_code_path
question_info_dict['test'] = self.test
+ # question_info_dict['test_case_type'] = self.test_case_type
return json.dumps(question_info_dict)
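
Below is a minimal sketch, not part of this commit, of how the simplified consolidate_answer_data signature could be exercised after this change. The Question lookup, its ref_code_path/test values, and the user_answer string are hypothetical; it assumes a configured Django environment with the yaksh app installed.

    import json
    from yaksh.models import Question

    # Hypothetical question row; the id and field values are illustrative only.
    question = Question.objects.get(id=1)

    # The method now takes only the user's answer; per-test-case details
    # (func_name, kw_args, pos_args, expected_answer) are no longer packed
    # into the payload by this method.
    packed = question.consolidate_answer_data(user_answer="print('hello')")

    info = json.loads(packed)
    # After this commit the JSON carries the user answer plus the question's
    # ref_code_path and test attributes.
    print(info["user_answer"], info["ref_code_path"], info["test"])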