-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathparams.py
More file actions
54 lines (43 loc) · 1.67 KB
/
params.py
File metadata and controls
54 lines (43 loc) · 1.67 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
"""
Evaluation Parameters Schema
Configuration options for how the FSA evaluation is performed.
"""
from typing import Literal
from pydantic import BaseModel, Field
class Params(BaseModel):
    """Schema of configuration options controlling how an FSA submission is evaluated.

    Example:
        {
            "evaluation_mode": "lenient",
            "expected_type": "DFA",
            "feedback_verbosity": "standard"
        }
    """

    # --- Evaluation behaviour -------------------------------------------

    # How the student's automaton is compared against the reference answer.
    evaluation_mode: Literal["strict", "lenient", "partial"] = Field(
        "lenient",
        description="strict: exact match, lenient: language equivalence, partial: partial credit",
    )

    # Which kind of automaton the exercise expects the student to build.
    expected_type: Literal["DFA", "NFA", "any"] = Field(
        "any",
        description="Expected automaton type",
    )

    # How much explanatory detail to include in the generated feedback.
    feedback_verbosity: Literal["minimal", "standard", "detailed"] = Field(
        "standard",
        description="Level of feedback detail",
    )

    # --- Extra structural checks (both off by default) ------------------

    # Verify the submitted FSA has the minimum possible number of states.
    check_minimality: bool = Field(False, description="Check if FSA is minimal")
    # Verify every state has a transition for every alphabet symbol.
    check_completeness: bool = Field(False, description="Check if DFA is complete")

    # --- Front-end presentation -----------------------------------------

    # Attach element IDs so the UI can visually highlight faulty parts.
    highlight_errors: bool = Field(True, description="Include element IDs for UI highlighting")
    # Surface a distinguishing string when the two languages disagree.
    show_counterexample: bool = Field(True, description="Show counterexample if languages differ")

    # --- Test-string generation -----------------------------------------

    # Upper bound (1..50) on the length of auto-generated test strings.
    max_test_length: int = Field(10, ge=1, le=50, description="Max length for generated test strings")

    # Development-mode switch (e.g. for extra diagnostics).
    is_dev: bool = Field(
        False,
        description="Flag indicating if running in development mode",
    )