-rw-r--r--  yaksh/bash_code_evaluator.py                     | 12
-rw-r--r--  yaksh/bash_stdio_evaluator.py                    | 10
-rw-r--r--  yaksh/code_evaluator.py                          | 14
-rw-r--r--  yaksh/cpp_code_evaluator.py                      | 10
-rw-r--r--  yaksh/cpp_stdio_evaluator.py                     | 10
-rw-r--r--  yaksh/evaluator_tests/test_bash_evaluation.py    | 12
-rw-r--r--  yaksh/evaluator_tests/test_c_cpp_evaluation.py   | 18
-rw-r--r--  yaksh/evaluator_tests/test_java_evaluation.py    | 14
-rw-r--r--  yaksh/evaluator_tests/test_python_evaluation.py  | 26
-rw-r--r--  yaksh/evaluator_tests/test_scilab_evaluation.py  |  2
-rw-r--r--  yaksh/java_code_evaluator.py                     | 10
-rw-r--r--  yaksh/java_stdio_evaluator.py                    | 10
-rw-r--r--  yaksh/models.py                                  | 26
-rw-r--r--  yaksh/python_assertion_evaluator.py              | 10
-rw-r--r--  yaksh/python_stdio_evaluator.py                  | 10
-rw-r--r--  yaksh/scilab_code_evaluator.py                   |  8
-rw-r--r--  yaksh/test_models.py                             |  4
-rw-r--r--  yaksh/tests/test_code_server.py                  |  8
-rw-r--r--  yaksh/views.py                                   |  8
-rw-r--r--  yaksh/xmlrpc_clients.py                          |  6
20 files changed, 114 insertions, 114 deletions
diff --git a/yaksh/bash_code_evaluator.py b/yaksh/bash_code_evaluator.py
index dd4445c..7575725 100644
--- a/yaksh/bash_code_evaluator.py
+++ b/yaksh/bash_code_evaluator.py
@@ -28,7 +28,7 @@ class BashCodeEvaluator(CodeEvaluator):
delete_files(self.files)
super(BashCodeEvaluator, self).teardown()
- def check_code(self, user_answer, file_paths, partial_grading, test_case, weightage):
+ def check_code(self, user_answer, file_paths, partial_grading, test_case, weight):
""" Function validates student script using instructor script as
reference. Test cases can optionally be provided. The first argument
ref_path, is the path to instructor script, it is assumed to
@@ -52,7 +52,7 @@ class BashCodeEvaluator(CodeEvaluator):
"""
ref_code_path = test_case
success = False
- test_case_weightage = 0.0
+ test_case_weight = 0.0
get_ref_path, get_test_case_path = ref_code_path.strip().split(',')
get_ref_path = get_ref_path.strip()
@@ -92,8 +92,8 @@ class BashCodeEvaluator(CodeEvaluator):
)
proc, stdnt_stdout, stdnt_stderr = ret
if inst_stdout == stdnt_stdout:
- test_case_weightage = float(weightage) if partial_grading else 0.0
- return True, "Correct answer", test_case_weightage
+ test_case_weight = float(weight) if partial_grading else 0.0
+ return True, "Correct answer", test_case_weight
else:
err = "Error: expected %s, got %s" % (inst_stderr,
stdnt_stderr
@@ -135,8 +135,8 @@ class BashCodeEvaluator(CodeEvaluator):
proc, stdnt_stdout, stdnt_stderr = ret
valid_answer = inst_stdout == stdnt_stdout
if valid_answer and (num_lines == loop_count):
- test_case_weightage = float(weightage) if partial_grading else 0.0
- return True, "Correct answer", test_case_weightage
+ test_case_weight = float(weight) if partial_grading else 0.0
+ return True, "Correct answer", test_case_weight
else:
err = ("Error:expected"
" {0}, got {1}").format(inst_stdout+inst_stderr,
diff --git a/yaksh/bash_stdio_evaluator.py b/yaksh/bash_stdio_evaluator.py
index fab19bf..1dd9fd5 100644
--- a/yaksh/bash_stdio_evaluator.py
+++ b/yaksh/bash_stdio_evaluator.py
@@ -23,7 +23,7 @@ class BashStdioEvaluator(StdIOEvaluator):
delete_files(self.files)
super(BashStdioEvaluator, self).teardown()
- def compile_code(self, user_answer, file_paths, expected_input, expected_output, weightage):
+ def compile_code(self, user_answer, file_paths, expected_input, expected_output, weight):
if file_paths:
self.files = copy_files(file_paths)
if not isfile(self.submit_code_path):
@@ -34,9 +34,9 @@ class BashStdioEvaluator(StdIOEvaluator):
self.write_to_submit_code_file(self.submit_code_path, user_answer)
def check_code(self, user_answer, file_paths, partial_grading,
- expected_input, expected_output, weightage):
+ expected_input, expected_output, weight):
success = False
- test_case_weightage = 0.0
+ test_case_weight = 0.0
expected_input = str(expected_input).replace('\r', '')
proc = subprocess.Popen("bash ./Test.sh",
@@ -49,5 +49,5 @@ class BashStdioEvaluator(StdIOEvaluator):
expected_input,
expected_output
)
- test_case_weightage = float(weightage) if partial_grading and success else 0.0
- return success, err, test_case_weightage
+ test_case_weight = float(weight) if partial_grading and success else 0.0
+ return success, err, test_case_weight
diff --git a/yaksh/code_evaluator.py b/yaksh/code_evaluator.py
index b39a1d7..50fc546 100644
--- a/yaksh/code_evaluator.py
+++ b/yaksh/code_evaluator.py
@@ -82,14 +82,14 @@ class CodeEvaluator(object):
Returns
-------
- A tuple: (success, error message, weightage).
+ A tuple: (success, error message, weight).
"""
self.setup()
- success, error, weightage = self.safe_evaluate(**kwargs)
+ success, error, weight = self.safe_evaluate(**kwargs)
self.teardown()
- result = {'success': success, 'error': error, 'weightage': weightage}
+ result = {'success': success, 'error': error, 'weight': weight}
return result
# Private Protocol ##########
@@ -109,20 +109,20 @@ class CodeEvaluator(object):
prev_handler = create_signal_handler()
success = False
error = ""
- weightage = 0
+ weight = 0
# Do whatever testing needed.
try:
for test_case in test_case_data:
success = False
self.compile_code(user_answer, file_paths, **test_case)
- success, err, test_case_weightage = self.check_code(user_answer,
+ success, err, test_case_weight = self.check_code(user_answer,
file_paths,
partial_grading,
**test_case
)
if success:
- weightage += test_case_weightage
+ weight += test_case_weight
error = err
else:
error += err + "\n"
@@ -142,7 +142,7 @@ class CodeEvaluator(object):
# Set back any original signal handler.
set_original_signal_handler(prev_handler)
- return success, error, weightage
+ return success, error, weight
def teardown(self):
# Cancel the signal
diff --git a/yaksh/cpp_code_evaluator.py b/yaksh/cpp_code_evaluator.py
index f069b03..716a522 100644
--- a/yaksh/cpp_code_evaluator.py
+++ b/yaksh/cpp_code_evaluator.py
@@ -50,7 +50,7 @@ class CppCodeEvaluator(CodeEvaluator):
ref_output_path)
return compile_command, compile_main
- def compile_code(self, user_answer, file_paths, test_case, weightage):
+ def compile_code(self, user_answer, file_paths, test_case, weight):
if self.compiled_user_answer and self.compiled_test_code:
return None
else:
@@ -89,7 +89,7 @@ class CppCodeEvaluator(CodeEvaluator):
return self.compiled_user_answer, self.compiled_test_code
- def check_code(self, user_answer, file_paths, partial_grading, test_case, weightage):
+ def check_code(self, user_answer, file_paths, partial_grading, test_case, weight):
""" Function validates student code using instructor code as
reference.The first argument ref_code_path, is the path to
instructor code, it is assumed to have executable permission.
@@ -109,7 +109,7 @@ class CppCodeEvaluator(CodeEvaluator):
if the required permissions are not given to the file(s).
"""
success = False
- test_case_weightage = 0.0
+ test_case_weight = 0.0
proc, stdnt_out, stdnt_stderr = self.compiled_user_answer
stdnt_stderr = self._remove_null_substitute_char(stdnt_stderr)
@@ -129,7 +129,7 @@ class CppCodeEvaluator(CodeEvaluator):
proc, stdout, stderr = ret
if proc.returncode == 0:
success, err = True, "Correct answer"
- test_case_weightage = float(weightage) if partial_grading else 0.0
+ test_case_weight = float(weight) if partial_grading else 0.0
else:
err = "{0} \n {1}".format(stdout, stderr)
else:
@@ -155,4 +155,4 @@ class CppCodeEvaluator(CodeEvaluator):
except:
err = "{0} \n {1}".format(err, stdnt_stderr)
- return success, err, test_case_weightage
+ return success, err, test_case_weight
diff --git a/yaksh/cpp_stdio_evaluator.py b/yaksh/cpp_stdio_evaluator.py
index 050cec8..00fad92 100644
--- a/yaksh/cpp_stdio_evaluator.py
+++ b/yaksh/cpp_stdio_evaluator.py
@@ -35,7 +35,7 @@ class CppStdioEvaluator(StdIOEvaluator):
ref_output_path)
return compile_command, compile_main
- def compile_code(self, user_answer, file_paths, expected_input, expected_output, weightage):
+ def compile_code(self, user_answer, file_paths, expected_input, expected_output, weight):
if file_paths:
self.files = copy_files(file_paths)
if not isfile(self.submit_code_path):
@@ -62,9 +62,9 @@ class CppStdioEvaluator(StdIOEvaluator):
return self.compiled_user_answer, self.compiled_test_code
def check_code(self, user_answer, file_paths, partial_grading,
- expected_input, expected_output, weightage):
+ expected_input, expected_output, weight):
success = False
- test_case_weightage = 0.0
+ test_case_weight = 0.0
proc, stdnt_out, stdnt_stderr = self.compiled_user_answer
stdnt_stderr = self._remove_null_substitute_char(stdnt_stderr)
@@ -106,5 +106,5 @@ class CppStdioEvaluator(StdIOEvaluator):
err = err + "\n" + e
except:
err = err + "\n" + stdnt_stderr
- test_case_weightage = float(weightage) if partial_grading and success else 0.0
- return success, err, test_case_weightage
+ test_case_weight = float(weight) if partial_grading and success else 0.0
+ return success, err, test_case_weight
diff --git a/yaksh/evaluator_tests/test_bash_evaluation.py b/yaksh/evaluator_tests/test_bash_evaluation.py
index 6c90d3c..df24d4d 100644
--- a/yaksh/evaluator_tests/test_bash_evaluation.py
+++ b/yaksh/evaluator_tests/test_bash_evaluation.py
@@ -15,7 +15,7 @@ class BashAssertionEvaluationTestCases(unittest.TestCase):
f.write('2'.encode('ascii'))
self.test_case_data = [
{"test_case": "bash_files/sample.sh,bash_files/sample.args",
- "weightage": 0.0
+ "weight": 0.0
}
]
self.in_dir = tempfile.mkdtemp()
@@ -72,7 +72,7 @@ class BashAssertionEvaluationTestCases(unittest.TestCase):
self.file_paths = [('/tmp/test.txt', False)]
self.test_case_data = [
{"test_case": "bash_files/sample1.sh,bash_files/sample1.args",
- "weightage": 0.0
+ "weight": 0.0
}
]
user_answer = ("#!/bin/bash\ncat $1")
@@ -102,7 +102,7 @@ class BashStdioEvaluationTestCases(unittest.TestCase):
)
test_case_data = [{'expected_output': '11',
'expected_input': '5\n6',
- 'weightage': 0.0
+ 'weight': 0.0
}]
get_class = BashStdioEvaluator()
kwargs = {"user_answer": user_answer,
@@ -124,7 +124,7 @@ class BashStdioEvaluationTestCases(unittest.TestCase):
)
test_case_data = [{'expected_output': '1 2 3\n4 5 6\n7 8 9\n',
'expected_input': '1,2,3\n4,5,6\n7,8,9',
- 'weightage': 0.0
+ 'weight': 0.0
}]
get_class = BashStdioEvaluator()
kwargs = {"user_answer": user_answer,
@@ -144,7 +144,7 @@ class BashStdioEvaluationTestCases(unittest.TestCase):
)
test_case_data = [{'expected_output': '11',
'expected_input': '5\n6',
- 'weightage': 0.0
+ 'weight': 0.0
}]
get_class = BashStdioEvaluator()
kwargs = {"user_answer": user_answer,
@@ -164,7 +164,7 @@ class BashStdioEvaluationTestCases(unittest.TestCase):
)
test_case_data = [{'expected_output': '10',
'expected_input': '',
- 'weightage': 0.0
+ 'weight': 0.0
}]
get_class = BashStdioEvaluator()
kwargs = {"user_answer": user_answer,
diff --git a/yaksh/evaluator_tests/test_c_cpp_evaluation.py b/yaksh/evaluator_tests/test_c_cpp_evaluation.py
index f58833a..87b2653 100644
--- a/yaksh/evaluator_tests/test_c_cpp_evaluation.py
+++ b/yaksh/evaluator_tests/test_c_cpp_evaluation.py
@@ -15,7 +15,7 @@ class CAssertionEvaluationTestCases(unittest.TestCase):
f.write('2'.encode('ascii'))
tmp_in_dir_path = tempfile.mkdtemp()
self.test_case_data = [{"test_case": "c_cpp_files/main.cpp",
- "weightage": 0.0
+ "weight": 0.0
}]
self.in_dir = tmp_in_dir_path
self.timeout_msg = ("Code took more than {0} seconds to run. "
@@ -80,7 +80,7 @@ class CAssertionEvaluationTestCases(unittest.TestCase):
def test_file_based_assert(self):
self.file_paths = [('/tmp/test.txt', False)]
self.test_case_data = [{"test_case": "c_cpp_files/file_data.c",
- "weightage": 0.0
+ "weight": 0.0
}]
user_answer = dedent("""
#include<stdio.h>
@@ -108,7 +108,7 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
def setUp(self):
self.test_case_data = [{'expected_output': '11',
'expected_input': '5\n6',
- 'weightage': 0.0
+ 'weight': 0.0
}]
self.in_dir = tempfile.mkdtemp()
self.timeout_msg = ("Code took more than {0} seconds to run. "
@@ -135,7 +135,7 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
def test_array_input(self):
self.test_case_data = [{'expected_output': '561',
'expected_input': '5\n6\n1',
- 'weightage': 0.0
+ 'weight': 0.0
}]
user_answer = dedent("""
#include<stdio.h>
@@ -158,7 +158,7 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
def test_string_input(self):
self.test_case_data = [{'expected_output': 'abc',
'expected_input': 'abc',
- 'weightage': 0.0
+ 'weight': 0.0
}]
user_answer = dedent("""
#include<stdio.h>
@@ -229,7 +229,7 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
def test_only_stdout(self):
self.test_case_data = [{'expected_output': '11',
'expected_input': '',
- 'weightage': 0.0
+ 'weight': 0.0
}]
user_answer = dedent("""
#include<stdio.h>
@@ -267,7 +267,7 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
def test_cpp_array_input(self):
self.test_case_data = [{'expected_output': '561',
'expected_input': '5\n6\n1',
- 'weightage': 0.0
+ 'weight': 0.0
}]
user_answer = dedent("""
#include<iostream>
@@ -291,7 +291,7 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
def test_cpp_string_input(self):
self.test_case_data = [{'expected_output': 'abc',
'expected_input': 'abc',
- 'weightage': 0.0
+ 'weight': 0.0
}]
user_answer = dedent("""
#include<iostream>
@@ -366,7 +366,7 @@ class CppStdioEvaluationTestCases(unittest.TestCase):
def test_cpp_only_stdout(self):
self.test_case_data = [{'expected_output': '11',
'expected_input': '',
- 'weightage': 0.0
+ 'weight': 0.0
}]
user_answer = dedent("""
#include<iostream>
diff --git a/yaksh/evaluator_tests/test_java_evaluation.py b/yaksh/evaluator_tests/test_java_evaluation.py
index 142f0bf..d410052 100644
--- a/yaksh/evaluator_tests/test_java_evaluation.py
+++ b/yaksh/evaluator_tests/test_java_evaluation.py
@@ -17,7 +17,7 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase):
tmp_in_dir_path = tempfile.mkdtemp()
self.test_case_data = [
{"test_case": "java_files/main_square.java",
- "weightage": 0.0
+ "weight": 0.0
}
]
self.in_dir = tmp_in_dir_path
@@ -86,7 +86,7 @@ class JavaAssertionEvaluationTestCases(unittest.TestCase):
self.file_paths = [("/tmp/test.txt", False)]
self.test_case_data = [
{"test_case": "java_files/read_file.java",
- "weightage": 0.0
+ "weight": 0.0
}
]
user_answer = dedent("""
@@ -126,7 +126,7 @@ class JavaStdioEvaluationTestCases(unittest.TestCase):
self.in_dir = tmp_in_dir_path
self.test_case_data = [{'expected_output': '11',
'expected_input': '5\n6',
- 'weightage': 0.0
+ 'weight': 0.0
}]
evaluator.SERVER_TIMEOUT = 4
self.timeout_msg = ("Code took more than {0} seconds to run. "
@@ -161,7 +161,7 @@ class JavaStdioEvaluationTestCases(unittest.TestCase):
self.test_case_data = [{'expected_output': '561',
'expected_input': '5\n6\n1',
- 'weightage': 0.0
+ 'weight': 0.0
}]
user_answer = dedent("""
import java.util.Scanner;
@@ -241,7 +241,7 @@ class JavaStdioEvaluationTestCases(unittest.TestCase):
def test_only_stdout(self):
self.test_case_data = [{'expected_output': '11',
'expected_input': '',
- 'weightage': 0.0
+ 'weight': 0.0
}]
user_answer = dedent("""
class Test
@@ -262,7 +262,7 @@ class JavaStdioEvaluationTestCases(unittest.TestCase):
def test_string_input(self):
self.test_case_data = [{'expected_output': 'HelloWorld',
'expected_input': 'Hello\nWorld',
- 'weightage': 0.0
+ 'weight': 0.0
}]
user_answer = dedent("""
import java.util.Scanner;
@@ -286,7 +286,7 @@ class JavaStdioEvaluationTestCases(unittest.TestCase):
self.file_paths = [("/tmp/test.txt", False)]
self.test_case_data = [{'expected_output': '2',
'expected_input': '',
- 'weightage': 0.0
+ 'weight': 0.0
}]
user_answer = dedent("""
import java.io.BufferedReader;
diff --git a/yaksh/evaluator_tests/test_python_evaluation.py b/yaksh/evaluator_tests/test_python_evaluation.py
index 690f474..a0e3713 100644
--- a/yaksh/evaluator_tests/test_python_evaluation.py
+++ b/yaksh/evaluator_tests/test_python_evaluation.py
@@ -17,9 +17,9 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
f.write('2'.encode('ascii'))
tmp_in_dir_path = tempfile.mkdtemp()
self.in_dir = tmp_in_dir_path
- self.test_case_data = [{"test_case": 'assert(add(1,2)==3)', 'weightage': 0.0},
- {"test_case": 'assert(add(-1,2)==1)', 'weightage': 0.0},
- {"test_case": 'assert(add(-1,-2)==-3)', 'weightage': 0.0},
+ self.test_case_data = [{"test_case": 'assert(add(1,2)==3)', 'weight': 0.0},
+ {"test_case": 'assert(add(-1,2)==1)', 'weight': 0.0},
+ {"test_case": 'assert(add(-1,-2)==-3)', 'weight': 0.0},
]
self.timeout_msg = ("Code took more than {0} seconds to run. "
"You probably have an infinite loop in"
@@ -260,7 +260,7 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
def test_file_based_assert(self):
# Given
- self.test_case_data = [{"test_case": "assert(ans()=='2')", "weightage": 0.0}]
+ self.test_case_data = [{"test_case": "assert(ans()=='2')", "weight": 0.0}]
self.file_paths = [('/tmp/test.txt', False)]
user_answer = dedent("""
def ans():
@@ -287,7 +287,7 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
user_answer = "def palindrome(a):\n\treturn a == a[::-1]"
test_case_data = [{"test_case": 's="abbb"\nasert palindrome(s)==False',
- "weightage": 0.0
+ "weight": 0.0
}
]
syntax_error_msg = ["Traceback",
@@ -322,10 +322,10 @@ class PythonAssertionEvaluationTestCases(unittest.TestCase):
# Given
user_answer = "def palindrome(a):\n\treturn a == a[::-1]"
test_case_data = [{"test_case": 'assert(palindrome("abba")==True)',
- "weightage": 0.0
+ "weight": 0.0
},
{"test_case": 's="abbb"\nassert palindrome(S)==False',
- "weightage": 0.0
+ "weight": 0.0
}
]
name_error_msg = ["Traceback",
@@ -364,7 +364,7 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase):
# Given
self.test_case_data = [{"expected_input": "1\n2",
"expected_output": "3",
- "weightage": 0.0
+ "weight": 0.0
}]
user_answer = dedent("""
a = int(input())
@@ -389,7 +389,7 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase):
# Given
self.test_case_data = [{"expected_input": "1,2,3\n5,6,7",
"expected_output": "[1, 2, 3, 5, 6, 7]",
- "weightage": 0.0
+ "weight": 0.0
}]
user_answer = dedent("""
from six.moves import input
@@ -417,7 +417,7 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase):
# Given
self.test_case_data = [{"expected_input": ("the quick brown fox jumps over the lazy dog\nthe"),
"expected_output": "2",
- "weightage": 0.0
+ "weight": 0.0
}]
user_answer = dedent("""
from six.moves import input
@@ -443,7 +443,7 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase):
# Given
self.test_case_data = [{"expected_input": "1\n2",
"expected_output": "3",
- "weightage": 0.0
+ "weight": 0.0
}]
user_answer = dedent("""
a = int(input())
@@ -468,7 +468,7 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase):
# Given
self.test_case_data = [{"expected_input": "",
"expected_output": "2",
- "weightage": 0.0
+ "weight": 0.0
}]
self.file_paths = [('/tmp/test.txt', False)]
@@ -496,7 +496,7 @@ class PythonStdIOEvaluationTestCases(unittest.TestCase):
# Given
test_case_data = [{"expected_input": "1\n2",
"expected_output": "3",
- "weightage": 0.0
+ "weight": 0.0
}]
timeout_msg = ("Code took more than {0} seconds to run. "
"You probably have an infinite loop in"
diff --git a/yaksh/evaluator_tests/test_scilab_evaluation.py b/yaksh/evaluator_tests/test_scilab_evaluation.py
index bc03e04..5f02601 100644
--- a/yaksh/evaluator_tests/test_scilab_evaluation.py
+++ b/yaksh/evaluator_tests/test_scilab_evaluation.py
@@ -12,7 +12,7 @@ class ScilabEvaluationTestCases(unittest.TestCase):
def setUp(self):
tmp_in_dir_path = tempfile.mkdtemp()
self.test_case_data = [{"test_case": "scilab_files/test_add.sce",
- "weightage": 0.0
+ "weight": 0.0
}]
self.in_dir = tmp_in_dir_path
self.timeout_msg = ("Code took more than {0} seconds to run. "
diff --git a/yaksh/java_code_evaluator.py b/yaksh/java_code_evaluator.py
index 05e6405..d87e6e3 100644
--- a/yaksh/java_code_evaluator.py
+++ b/yaksh/java_code_evaluator.py
@@ -47,7 +47,7 @@ class JavaCodeEvaluator(CodeEvaluator):
output_path = "{0}{1}.class".format(directory, file_name)
return output_path
- def compile_code(self, user_answer, file_paths, test_case, weightage):
+ def compile_code(self, user_answer, file_paths, test_case, weight):
if self.compiled_user_answer and self.compiled_test_code:
return None
else:
@@ -96,7 +96,7 @@ class JavaCodeEvaluator(CodeEvaluator):
return self.compiled_user_answer, self.compiled_test_code
- def check_code(self, user_answer, file_paths, partial_grading, test_case, weightage):
+ def check_code(self, user_answer, file_paths, partial_grading, test_case, weight):
""" Function validates student code using instructor code as
reference.The first argument ref_code_path, is the path to
instructor code, it is assumed to have executable permission.
@@ -117,7 +117,7 @@ class JavaCodeEvaluator(CodeEvaluator):
"""
success = False
- test_case_weightage = 0.0
+ test_case_weight = 0.0
proc, stdnt_out, stdnt_stderr = self.compiled_user_answer
stdnt_stderr = self._remove_null_substitute_char(stdnt_stderr)
@@ -136,7 +136,7 @@ class JavaCodeEvaluator(CodeEvaluator):
proc, stdout, stderr = ret
if proc.returncode == 0:
success, err = True, "Correct answer"
- test_case_weightage = float(weightage) if partial_grading else 0.0
+ test_case_weight = float(weight) if partial_grading else 0.0
else:
err = stdout + "\n" + stderr
else:
@@ -161,4 +161,4 @@ class JavaCodeEvaluator(CodeEvaluator):
err = err + "\n" + e
except:
err = err + "\n" + stdnt_stderr
- return success, err, test_case_weightage
+ return success, err, test_case_weight
diff --git a/yaksh/java_stdio_evaluator.py b/yaksh/java_stdio_evaluator.py
index bc50744..88d4c88 100644
--- a/yaksh/java_stdio_evaluator.py
+++ b/yaksh/java_stdio_evaluator.py
@@ -31,7 +31,7 @@ class JavaStdioEvaluator(StdIOEvaluator):
compile_command = 'javac {0}'.format(self.submit_code_path)
return compile_command
- def compile_code(self, user_answer, file_paths, expected_input, expected_output, weightage):
+ def compile_code(self, user_answer, file_paths, expected_input, expected_output, weight):
if not isfile(self.submit_code_path):
msg = "No file at %s or Incorrect path" % self.submit_code_path
return False, msg
@@ -51,9 +51,9 @@ class JavaStdioEvaluator(StdIOEvaluator):
return self.compiled_user_answer
def check_code(self, user_answer, file_paths, partial_grading,
- expected_input, expected_output, weightage):
+ expected_input, expected_output, weight):
success = False
- test_case_weightage = 0.0
+ test_case_weight = 0.0
proc, stdnt_out, stdnt_stderr = self.compiled_user_answer
stdnt_stderr = self._remove_null_substitute_char(stdnt_stderr)
if stdnt_stderr == '' or "error" not in stdnt_stderr:
@@ -79,5 +79,5 @@ class JavaStdioEvaluator(StdIOEvaluator):
err = err + "\n" + e
except:
err = err + "\n" + stdnt_stderr
- test_case_weightage = float(weightage) if partial_grading and success else 0.0
- return success, err, test_case_weightage
+ test_case_weight = float(weight) if partial_grading and success else 0.0
+ return success, err, test_case_weight
diff --git a/yaksh/models.py b/yaksh/models.py
index bdcc43e..8907df0 100644
--- a/yaksh/models.py
+++ b/yaksh/models.py
@@ -325,12 +325,12 @@ class Question(models.Model):
return test_case
- def get_maximum_test_case_weightage(self, **kwargs):
- max_weightage = 0.0
+ def get_maximum_test_case_weight(self, **kwargs):
+ max_weight = 0.0
for test_case in self.get_test_cases():
- max_weightage += test_case.weightage
+ max_weight += test_case.weight
- return max_weightage
+ return max_weight
def _add_and_get_files(self, zip_file):
files = FileUpload.objects.filter(question=self)
@@ -1036,7 +1036,7 @@ class AnswerPaper(models.Model):
For code questions success is True only if the answer is correct.
"""
- result = {'success': True, 'error': 'Incorrect answer', 'weightage': 0.0}
+ result = {'success': True, 'error': 'Incorrect answer', 'weight': 0.0}
correct = False
if user_answer is not None:
if question.type == 'mcq':
@@ -1087,12 +1087,12 @@ class AnswerPaper(models.Model):
user_answer.correct = correct
user_answer.error = result.get('error')
if correct:
- user_answer.marks = (question.points * result['weightage'] /
- question.get_maximum_test_case_weightage()) \
+ user_answer.marks = (question.points * result['weight'] /
+ question.get_maximum_test_case_weight()) \
if question.partial_grading and question.type == 'code' else question.points
else:
- user_answer.marks = (question.points * result['weightage'] /
- question.get_maximum_test_case_weightage()) \
+ user_answer.marks = (question.points * result['weight'] /
+ question.get_maximum_test_case_weight()) \
if question.partial_grading and question.type == 'code' else 0
user_answer.save()
self.update_marks('completed')
@@ -1118,11 +1118,11 @@ class TestCase(models.Model):
class StandardTestCase(TestCase):
test_case = models.TextField(blank=True)
- weightage = models.FloatField(default=0.0)
+ weight = models.FloatField(default=0.0)
def get_field_value(self):
return {"test_case": self.test_case,
- "weightage": self.weightage}
+ "weight": self.weight}
def __str__(self):
return u'Question: {0} | Test Case: {1}'.format(self.question,
@@ -1133,12 +1133,12 @@ class StandardTestCase(TestCase):
class StdioBasedTestCase(TestCase):
expected_input = models.TextField(blank=True)
expected_output = models.TextField()
- weightage = models.IntegerField(default=0.0)
+ weight = models.IntegerField(default=0.0)
def get_field_value(self):
return {"expected_output": self.expected_output,
"expected_input": self.expected_input,
- "weightage": self.weightage}
+ "weight": self.weight}
def __str__(self):
return u'Question: {0} | Exp. Output: {1} | Exp. Input: {2}'.format(self.question,
diff --git a/yaksh/python_assertion_evaluator.py b/yaksh/python_assertion_evaluator.py
index 6503566..8924643 100644
--- a/yaksh/python_assertion_evaluator.py
+++ b/yaksh/python_assertion_evaluator.py
@@ -25,7 +25,7 @@ class PythonAssertionEvaluator(CodeEvaluator):
delete_files(self.files)
super(PythonAssertionEvaluator, self).teardown()
- def compile_code(self, user_answer, file_paths, test_case, weightage):
+ def compile_code(self, user_answer, file_paths, test_case, weight):
if file_paths:
self.files = copy_files(file_paths)
if self.exec_scope:
@@ -36,9 +36,9 @@ class PythonAssertionEvaluator(CodeEvaluator):
exec(submitted, self.exec_scope)
return self.exec_scope
- def check_code(self, user_answer, file_paths, partial_grading, test_case, weightage):
+ def check_code(self, user_answer, file_paths, partial_grading, test_case, weight):
success = False
- test_case_weightage = 0.0
+ test_case_weight = 0.0
try:
tb = None
_tests = compile(test_case, '<string>', mode='exec')
@@ -54,6 +54,6 @@ class PythonAssertionEvaluator(CodeEvaluator):
else:
success = True
err = 'Correct answer'
- test_case_weightage = float(weightage) if partial_grading else 0.0
+ test_case_weight = float(weight) if partial_grading else 0.0
del tb
- return success, err, test_case_weightage
+ return success, err, test_case_weight
diff --git a/yaksh/python_stdio_evaluator.py b/yaksh/python_stdio_evaluator.py
index cd8c52a..1506685 100644
--- a/yaksh/python_stdio_evaluator.py
+++ b/yaksh/python_stdio_evaluator.py
@@ -42,7 +42,7 @@ class PythonStdioEvaluator(CodeEvaluator):
super(PythonStdioEvaluator, self).teardown()
- def compile_code(self, user_answer, file_paths, expected_input, expected_output, weightage):
+ def compile_code(self, user_answer, file_paths, expected_input, expected_output, weight):
if file_paths:
self.files = copy_files(file_paths)
submitted = compile(user_answer, '<string>', mode='exec')
@@ -58,15 +58,15 @@ class PythonStdioEvaluator(CodeEvaluator):
return self.output_value
def check_code(self, user_answer, file_paths, partial_grading, expected_input,
- expected_output, weightage):
+ expected_output, weight):
success = False
- test_case_weightage = 0.0
+ test_case_weight = 0.0
tb = None
if self.output_value == expected_output:
success = True
err = "Correct answer"
- test_case_weightage = weightage
+ test_case_weight = weight
else:
success = False
err = dedent("""
@@ -80,4 +80,4 @@ class PythonStdioEvaluator(CodeEvaluator):
)
)
del tb
- return success, err, test_case_weightage
+ return success, err, test_case_weight
diff --git a/yaksh/scilab_code_evaluator.py b/yaksh/scilab_code_evaluator.py
index 927b84d..3c2d44c 100644
--- a/yaksh/scilab_code_evaluator.py
+++ b/yaksh/scilab_code_evaluator.py
@@ -27,7 +27,7 @@ class ScilabCodeEvaluator(CodeEvaluator):
delete_files(self.files)
super(ScilabCodeEvaluator, self).teardown()
- def check_code(self, user_answer, file_paths, partial_grading, test_case, weightage):
+ def check_code(self, user_answer, file_paths, partial_grading, test_case, weight):
if file_paths:
self.files = copy_files(file_paths)
ref_code_path = test_case
@@ -37,7 +37,7 @@ class ScilabCodeEvaluator(CodeEvaluator):
self._remove_scilab_exit(user_answer.lstrip())
success = False
- test_case_weightage = 0.0
+ test_case_weight = 0.0
self.write_to_submit_code_file(self.submit_code_path, user_answer)
# Throw message if there are commmands that terminates scilab
@@ -65,12 +65,12 @@ class ScilabCodeEvaluator(CodeEvaluator):
stdout = self._strip_output(stdout)
if proc.returncode == 5:
success, err = True, "Correct answer"
- test_case_weightage = float(weightage) if partial_grading else 0.0
+ test_case_weight = float(weight) if partial_grading else 0.0
else:
err = add_err + stdout
else:
err = add_err + stderr
- return success, err, test_case_weightage
+ return success, err, test_case_weight
def _remove_scilab_exit(self, string):
"""
diff --git a/yaksh/test_models.py b/yaksh/test_models.py
index d05fac3..e7f3016 100644
--- a/yaksh/test_models.py
+++ b/yaksh/test_models.py
@@ -142,7 +142,7 @@ class QuestionTestCases(unittest.TestCase):
self.upload_test_case.save()
self.user_answer = "demo_answer"
self.test_case_upload_data = [{"test_case": "assert fact(3)==6",
- "weightage": 0.0
+ "weight": 0.0
}]
questions_data = [{"snippet": "def fact()", "active": True,
"points": 1.0,
@@ -880,7 +880,7 @@ class TestCaseTestCases(unittest.TestCase):
answer_data = {"user_answer": "demo_answer",
"test_case_data": [
{"test_case": "assert myfunc(12, 13) == 15",
- "weightage": 0.0
+ "weight": 0.0
}
]
}
diff --git a/yaksh/tests/test_code_server.py b/yaksh/tests/test_code_server.py
index 19560e4..1984c6c 100644
--- a/yaksh/tests/test_code_server.py
+++ b/yaksh/tests/test_code_server.py
@@ -40,7 +40,7 @@ class TestCodeServer(unittest.TestCase):
testdata = {'user_answer': 'while True: pass',
'partial_grading': False,
'test_case_data': [{'test_case':'assert 1==2',
- 'weightage': 0.0
+ 'weight': 0.0
}
]}
@@ -59,7 +59,7 @@ class TestCodeServer(unittest.TestCase):
testdata = {'user_answer': 'def f(): return 1',
'partial_grading': False,
'test_case_data': [{'test_case':'assert f() == 1',
- 'weightage': 0.0
+ 'weight': 0.0
}
]}
@@ -78,7 +78,7 @@ class TestCodeServer(unittest.TestCase):
testdata = {'user_answer': 'def f(): return 1',
'partial_grading': False,
'test_case_data': [{'test_case':'assert f() == 2',
- 'weightage': 0.0
+ 'weight': 0.0
}
]}
@@ -101,7 +101,7 @@ class TestCodeServer(unittest.TestCase):
testdata = {'user_answer': 'while True: pass',
'partial_grading': False,
'test_case_data': [{'test_case':'assert 1==2',
- 'weightage': 0.0
+ 'weight': 0.0
}
]}
result = self.code_server.run_code(
diff --git a/yaksh/views.py b/yaksh/views.py
index aca89ef..c3d743b 100644
--- a/yaksh/views.py
+++ b/yaksh/views.py
@@ -517,15 +517,15 @@ def check(request, q_id, attempt_num=None, questionpaper_id=None):
if question.type == 'code' else None
correct, result = paper.validate_answer(user_answer, question, json_data)
if correct:
- new_answer.marks = (question.points * result['weightage'] /
- question.get_maximum_test_case_weightage()) \
+ new_answer.marks = (question.points * result['weight'] /
+ question.get_maximum_test_case_weight()) \
if question.partial_grading and question.type == 'code' else question.points
new_answer.correct = correct
new_answer.error = result.get('error')
else:
new_answer.error = result.get('error')
- new_answer.marks = (question.points * result['weightage'] /
- question.get_maximum_test_case_weightage()) \
+ new_answer.marks = (question.points * result['weight'] /
+ question.get_maximum_test_case_weight()) \
if question.partial_grading and question.type == 'code' else 0
new_answer.save()
paper.update_marks('inprogress')
diff --git a/yaksh/xmlrpc_clients.py b/yaksh/xmlrpc_clients.py
index 437dbcb..bb8260d 100644
--- a/yaksh/xmlrpc_clients.py
+++ b/yaksh/xmlrpc_clients.py
@@ -56,10 +56,10 @@ class CodeServerProxy(object):
Returns
-------
A json string of a dict containing:
- {"success": success, "weightage": weightage, "error": error message}
+ {"success": success, "weight": weight, "error": error message}
success - Boolean, indicating if code was executed successfully, correctly
- weightage - Float, indicating total weightage of all successful test cases
+ weight - Float, indicating total weight of all successful test cases
error - String, error message if success is false
"""
@@ -68,7 +68,7 @@ class CodeServerProxy(object):
result = server.check_code(language, test_case_type, json_data, user_dir)
except ConnectionError:
result = json.dumps({'success': False,
- 'weightage': 0.0,
+ 'weight': 0.0,
'error': 'Unable to connect to any code servers!'})
return result
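
Note on the renamed fields: the diff only renames "weightage" to "weight"; the partial-grading arithmetic in models.py and views.py is unchanged — a code question's marks are the question's points scaled by the earned test-case weight over question.get_maximum_test_case_weight(). The short sketch below restates that expression outside Django for clarity. It is not part of the commit; the function name compute_marks and the point/weight values are hypothetical, chosen only to illustrate the scaling.

# Standalone sketch (assumption: not part of this commit) of the marks formula
# applied in AnswerPaper.validate_answer (models.py) and check (views.py)
# after the weightage -> weight rename.

def compute_marks(points, result_weight, max_weight, correct,
                  partial_grading=True, question_type='code'):
    # Mirrors: question.points * result['weight'] / question.get_maximum_test_case_weight()
    # when partial grading is enabled for a code question; otherwise all-or-nothing.
    if partial_grading and question_type == 'code':
        return points * result_weight / max_weight
    return points if correct else 0.0


if __name__ == '__main__':
    # Hypothetical example: a 10-point question whose test cases carry a total
    # weight of 5.0, with the submission passing cases worth 3.0 -> 6.0 marks.
    print(compute_marks(points=10.0, result_weight=3.0, max_weight=5.0, correct=False))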