Skip to content

Commit f6e60eb

Browse files
committed
updated evaluation to accept new data structure from front end
1 parent 1313883 commit f6e60eb

4 files changed

Lines changed: 131 additions & 89 deletions

File tree

docs/dev.md

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12,13 +12,18 @@ Select the sort of evaluation you want using the check box. Make sure to only
1212
"response":"<str>",
1313
"answer":"<str>",
1414
"params": {
15+
"truthTable": "<bool>",
1516
"equivalence": "<bool>",
1617
"tautology": "<bool>",
1718
"satisfiability": "<bool>",
1819
}
1920
}
2021
```
2122

23+
### `truthTable`
24+
25+
uses the evaluation for truth tables
26+
2227
### `equivalence`
2328

2429
checks if response formula and answer formula are equivalent

evaluation_function/evaluation.py

Lines changed: 60 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -5,10 +5,10 @@
55
from evaluation_function.domain.formula import *
66

77
from evaluation_function.parsing.parser import formula_parser
8-
9-
108
from evaluation_function.parsing.tree_builder_error import BuildError
119

10+
from evaluation_function.truth_table.evaluate import evaluate_truth_table
11+
1212

1313
def evaluation_function(
1414
response: Any,
@@ -39,16 +39,70 @@ def evaluation_function(
3939
"""
4040

4141

42-
# if not isinstance(answer, str):
43-
# raise Exception("Answer must be a string/text.")
44-
42+
if not isinstance(answer, str):
43+
raise Exception("Answer must be a string/text.")
4544

46-
if not isinstance(response, str):
45+
46+
response_formula = response.get("formula", None)
47+
if not isinstance(response_formula, str):
4748
return Result(
4849
is_correct=False,
4950
feedback_items=[("incorrect input", "response must be type String")]
5051
)
5152

53+
# parse response_formula into Formula
54+
try:
55+
formula = formula_parser(response_formula)
56+
57+
except BuildError as e:
58+
return Result(
59+
is_correct=False,
60+
feedback_items=[(BuildError, str(e))]
61+
)
62+
except ValueError as e:
63+
return Result(
64+
is_correct=False,
65+
feedback_items=[(ValueError, str(e))]
66+
)
67+
68+
69+
70+
# check if input is a truth table
71+
truth_table = response.get("truthTable", None)
72+
if truth_table is not None and isinstance(truth_table, dict):
73+
74+
variables = truth_table.get("variables", [])
75+
cells = truth_table.get("cells", [])
76+
77+
if not isinstance(variables, list) or not isinstance(cells, list):
78+
return Result(
79+
is_correct=False,
80+
feedback_items=[("incorrect input", "truthTable must contain 'variables' and 'cells' arrays")]
81+
)
82+
83+
# tokenise answer
84+
try:
85+
answer_formula = formula_parser(answer)
86+
87+
except BuildError as e:
88+
return Result(
89+
is_correct=False,
90+
feedback_items=[("BuildError", str(e))]
91+
)
92+
except ValueError as e:
93+
return Result(
94+
is_correct=False,
95+
feedback_items=[("ValueError", str(e))]
96+
)
97+
98+
num_atoms = len(_extract_atoms(answer_formula))
99+
100+
# Evaluate the truth table
101+
truth_table_result = evaluate_truth_table(variables, cells, num_atoms)
102+
if not truth_table_result.is_correct:
103+
return truth_table_result
104+
105+
52106

53107
# check only one of "equivalence", "tautology", "satisfiability" is selected
54108

@@ -71,28 +125,11 @@ def evaluation_function(
71125
feedback_items=[("invalid param", "please only select 1 param")]
72126
)
73127

74-
75128

76129
feedback = None
77130
is_correct = False
78131

79132

80-
# parse response into Formula
81-
try:
82-
formula = formula_parser(response)
83-
84-
except BuildError as e:
85-
return Result(
86-
is_correct=False,
87-
feedback_items=[(BuildError, str(e))]
88-
)
89-
except ValueError as e:
90-
return Result(
91-
is_correct=False,
92-
feedback_items=[(ValueError, str(e))]
93-
)
94-
95-
96133
if equivalence:
97134

98135
# tokenise answer

evaluation_function/truth_table/evaluate.py

Lines changed: 22 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -14,32 +14,32 @@
1414
from evaluation_function.parsing.tree_builder_error import BuildError
1515

1616

17-
# assume table sent through is of type list[list[str]]
18-
def evaluate_truth_table(input: list[list[str]], num_atoms) -> Result:
17+
def evaluate_truth_table(variables: list[str], cells: list[list[str]], num_atoms) -> Result:
1918
"""
2019
Function used to evaluate truth table response
2120
---
2221
23-
- `input` the 2D array containing the formulas and the cells of the truth table
22+
- `variables` array of formula strings (columns of the truth table)
23+
- `cells` the 2D array containing only the truth/false values
2424
- `num_atoms` the number of atoms in the truth table
2525
2626
returns True if truth table is valid
2727
"""
2828

29-
if len(input) == 0:
29+
if len(variables) == 0:
3030
return Result(
3131
is_correct=False,
32-
feedback_items=[(Exception, "no input was given")]
32+
feedback_items=[(Exception, "no variables provided")]
3333
)
34-
35-
elif len(input) == 1:
34+
35+
if len(cells) == 0:
3636
return Result(
3737
is_correct=False,
38-
feedback_items=[(Exception, "Must provide names and its truth values")]
38+
feedback_items=[(Exception, "no cells provided")]
3939
)
4040

4141
# find the atoms of the formula
42-
formulas = input[0]
42+
formulas = variables
4343
existing_atoms = {}
4444

4545
for i in range(len(formulas)):
@@ -87,12 +87,12 @@ def evaluate_truth_table(input: list[list[str]], num_atoms) -> Result:
8787

8888
# check all the cells are valid:
8989

90-
for i in range(1, len(input)):
91-
for j in range(len(input[i])):
92-
if input[i][j] == "tt":
93-
input[i][j] = True
94-
elif input[i][j] == "ff":
95-
input[i][j] = False
90+
for i in range(len(cells)):
91+
for j in range(len(cells[i])):
92+
if cells[i][j] == "tt":
93+
cells[i][j] = True
94+
elif cells[i][j] == "ff":
95+
cells[i][j] = False
9696
else:
9797
return Result(
9898
is_correct=False,
@@ -107,19 +107,19 @@ def evaluate_truth_table(input: list[list[str]], num_atoms) -> Result:
107107
is_correct=False,
108108
feedback_items=[(Exception, f"missing combinations in truth table")]
109109
)
110-
if len(input) - 1 < 2 ** num_atoms:
110+
if len(cells) < 2 ** num_atoms:
111111
return Result(
112112
is_correct=False,
113113
feedback_items=[(Exception, f"missing combinations in truth table")]
114114
)
115-
if len(input) - 1 > 2 ** num_atoms:
115+
if len(cells) > 2 ** num_atoms:
116116
return Result(
117117
is_correct=False,
118118
feedback_items=[(Exception, f"excessive combinations in truth table")]
119119
)
120120

121121

122-
unique_rows = set(tuple(row[cell] for cell in existing_atoms.values()) for row in input[1:])
122+
unique_rows = set(tuple(row[cell] for cell in existing_atoms.values()) for row in cells)
123123
if len(unique_rows) != 2 ** num_atoms:
124124
return Result(
125125
is_correct=False,
@@ -129,17 +129,17 @@ def evaluate_truth_table(input: list[list[str]], num_atoms) -> Result:
129129

130130
# evaluate truth table row by row
131131

132-
for i in range(1, len(input)):
132+
for i in range(len(cells)):
133133
atoms_mapping = {}
134-
for j in range(len(input[i])):
134+
for j in range(len(cells[i])):
135135
formula = formulas[j]
136136

137137
if isinstance(formula, Atom):
138-
atoms_mapping[formula] = input[i][j]
138+
atoms_mapping[formula] = cells[i][j]
139139
continue
140140

141141
assignment = Assignment(atoms_mapping)
142-
if FormulaEvaluator(formula, assignment).evaluate() != input[i][j]:
142+
if FormulaEvaluator(formula, assignment).evaluate() != cells[i][j]:
143143
return Result(
144144
is_correct=False,
145145
feedback_items=[(Exception, "incorrect cell value")]

0 commit comments

Comments
 (0)