Skip to content

Commit 73a6f65

Browse files
committed
removed payload check
1 parent e29ef7c commit 73a6f65

2 files changed

Lines changed: 11 additions & 53 deletions

File tree

evaluation_function/domain/evaluators.py

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -102,7 +102,6 @@ def evaluate_with_counterexample(self) -> tuple[bool, dict | None]:
102102
n = len(atoms1)
103103
first_counterexample = None
104104
for perm in permutations(range(n)):
105-
# perm[j] = index in atoms2 that atoms1[j] is renamed to; so atoms1[j] gets value of atoms2[perm[j]]
106105
for assignment_values in product([False, True], repeat=n):
107106
assignment2_dict = {atoms2[i]: assignment_values[i] for i in range(n)}
108107
assignment1_dict = {atoms1[j]: assignment_values[perm[j]] for j in range(n)}
@@ -112,7 +111,6 @@ def evaluate_with_counterexample(self) -> tuple[bool, dict | None]:
112111
v2 = FormulaEvaluator(self._formula2, a2).evaluate()
113112
if v1 != v2:
114113
if first_counterexample is None:
115-
# Use response formula's atom names (atoms1) so feedback speaks in the user's variables
116114
first_counterexample = {
117115
"assignment": {atoms1[j].name: assignment_values[perm[j]] for j in range(n)},
118116
"response_value": v1,
@@ -121,7 +119,6 @@ def evaluate_with_counterexample(self) -> tuple[bool, dict | None]:
121119
break
122120
else:
123121
return True, None
124-
125122
return False, first_counterexample
126123

127124

evaluation_function/evaluation.py

Lines changed: 11 additions & 50 deletions
Original file line numberDiff line numberDiff line change
@@ -10,14 +10,6 @@
1010

1111
from evaluation_function.truth_table.evaluate import evaluate_truth_table
1212

13-
_JSON_STRING_NOTE = ("note", "Response was received as a JSON string and was parsed.")
14-
15-
16-
def _feedback_with_json_note(feedback_items: list, response_was_json_string: bool) -> list:
17-
if not response_was_json_string:
18-
return feedback_items
19-
return list(feedback_items) + [_JSON_STRING_NOTE]
20-
2113

2214
def evaluation_function(
2315
response: Any,
@@ -47,37 +39,27 @@ def evaluation_function(
4739
to output the evaluation response.
4840
"""
4941

50-
response_was_json_string = isinstance(response, str)
5142
try:
52-
if response_was_json_string:
43+
if isinstance(response, str):
5344
response = json.loads(response)
5445

5546
if not isinstance(answer, dict):
5647
return Result(
5748
is_correct=False,
58-
feedback_items=_feedback_with_json_note(
59-
[("incorrect input", "missing answer object")],
60-
response_was_json_string,
61-
)
49+
feedback_items=[("incorrect input", "missing answer object")]
6250
)
6351

6452
if not isinstance(response, dict):
6553
return Result(
6654
is_correct=False,
67-
feedback_items=_feedback_with_json_note(
68-
[("incorrect input", "missing response object")],
69-
response_was_json_string,
70-
)
55+
feedback_items=[("incorrect input", "missing response object")]
7156
)
7257

7358
response_formula = response.get("formula", None)
7459
if not isinstance(response_formula, str):
7560
return Result(
7661
is_correct=False,
77-
feedback_items=_feedback_with_json_note(
78-
[("incorrect input", "response must be type String")],
79-
response_was_json_string,
80-
)
62+
feedback_items=[("incorrect input", "response must be type String")]
8163
)
8264

8365
formula = formula_parser(response_formula)
@@ -99,18 +81,12 @@ def evaluation_function(
9981
if num_selected == 0:
10082
return Result(
10183
is_correct=False,
102-
feedback_items=_feedback_with_json_note(
103-
[("invalid param", "please select a param")],
104-
response_was_json_string,
105-
)
84+
feedback_items=[("invalid param", "please select a param")]
10685
)
10786
if num_selected > 1:
10887
return Result(
10988
is_correct=False,
110-
feedback_items=_feedback_with_json_note(
111-
[("invalid param", "please only select 1 param")],
112-
response_was_json_string,
113-
)
89+
feedback_items=[("invalid param", "please only select 1 param")]
11490
)
11591

11692
# Truth table mode: validate response truth table if present
@@ -119,33 +95,21 @@ def evaluation_function(
11995
if response_truth_table is None or not isinstance(response_truth_table, dict):
12096
return Result(
12197
is_correct=False,
122-
feedback_items=_feedback_with_json_note(
123-
[("incorrect input", "truthTable required when answer expects truth table")],
124-
response_was_json_string,
125-
)
98+
feedback_items=[("incorrect input", "truthTable required when answer expects truth table")]
12699
)
127100
variables = response_truth_table.get("variables", [])
128101
cells = response_truth_table.get("cells", [])
129102

130103
if not isinstance(variables, list) or not isinstance(cells, list):
131104
return Result(
132105
is_correct=False,
133-
feedback_items=_feedback_with_json_note(
134-
[("incorrect input", "truthTable must contain 'variables' and 'cells' arrays")],
135-
response_was_json_string,
136-
)
106+
feedback_items=[("incorrect input", "truthTable must contain 'variables' and 'cells' arrays")]
137107
)
138108

139109
num_atoms = len(_extract_atoms(formula))
140110
truth_table_result = evaluate_truth_table(variables, cells, num_atoms)
141111
if not truth_table_result.is_correct:
142-
return Result(
143-
is_correct=False,
144-
feedback_items=_feedback_with_json_note(
145-
getattr(truth_table_result, "feedback_items", []) or [],
146-
response_was_json_string,
147-
)
148-
)
112+
return truth_table_result
149113

150114
is_correct = False
151115
feedback = []
@@ -194,14 +158,11 @@ def evaluation_function(
194158
is_correct = True # already validated above
195159

196160
if feedback:
197-
return Result(
198-
is_correct=False,
199-
feedback_items=_feedback_with_json_note(feedback, response_was_json_string),
200-
)
161+
return Result(is_correct=False, feedback_items=feedback)
201162
return Result(is_correct=is_correct)
202163

203164
except Exception as e:
204165
return Result(
205166
is_correct=False,
206-
feedback_items=_feedback_with_json_note([("Error", str(e))], response_was_json_string),
167+
feedback_items=[("Error", str(e))]
207168
)

0 commit comments

Comments (0)