Skip to content

Commit e650ecf

Browse files
committed
feat(testing): Use EvaluationContext/EvaluationResult test data
1 parent b4a47d7 commit e650ecf

3 files changed

Lines changed: 13 additions & 137 deletions

File tree

.gitmodules

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
[submodule "tests/engine_tests/engine-test-data"]
22
path = tests/engine_tests/engine-test-data
33
url = https://github.com/flagsmith/engine-test-data.git
4-
branch = feat/context-values
4+
branch = feat/context-values-intensifies

tests/engine_tests/test_engine.py

Lines changed: 11 additions & 135 deletions
Original file line numberDiff line numberDiff line change
@@ -1,141 +1,27 @@
11
import json
22
import typing
3-
from dataclasses import dataclass
4-
from operator import itemgetter
53
from pathlib import Path
64

75
import pytest
8-
from pytest_mock import MockerFixture
96

10-
from flag_engine.context.types import EvaluationContext, FeatureContext, SegmentRule
7+
from flag_engine.context.types import EvaluationContext
118
from flag_engine.engine import get_evaluation_result
9+
from flag_engine.result.types import EvaluationResult
1210

1311
MODULE_PATH = Path(__file__).parent.resolve()
1412

1513
EnvironmentDocument = dict[str, typing.Any]
16-
APIResponse = dict[str, typing.Any]
17-
18-
19-
@dataclass
20-
class EngineTestCase:
21-
context: EvaluationContext
22-
response: APIResponse
2314

2415

2516
def _extract_test_cases(
    file_path: Path,
) -> typing.Iterable[tuple[EvaluationContext, EvaluationResult]]:
    """Yield (context, expected result) pairs from a JSON test data file.

    The file is expected to contain a top-level ``"test_cases"`` list in
    which each entry provides a ready-made ``"context"`` (an
    ``EvaluationContext``) together with the ``"result"`` (an
    ``EvaluationResult``) the engine is expected to produce for it.

    :param file_path: path to the JSON data file
    :return: an iterable of ``(context, expected_result)`` tuples
    """
    test_data = json.loads(file_path.read_text())

    for case in test_data["test_cases"]:
        # NOTE(review): entries are used as-is — presumably the JSON shapes
        # match the TypedDict-style project types; verify against the data repo.
        context: EvaluationContext = case["context"]
        expected: EvaluationResult = case["result"]
        yield context, expected
13925

14026

14127
TEST_CASES = list(
@@ -146,28 +32,18 @@ def _extract_feature_contexts(
14632

14733

14834
@pytest.mark.parametrize(
    "context, expected_result",
    TEST_CASES,
)
def test_engine(
    context: EvaluationContext,
    expected_result: EvaluationResult,
) -> None:
    """The engine's evaluation of ``context`` must equal the recorded result."""
    # When: run the context through the evaluation engine
    actual = get_evaluation_result(context)

    # Then: the engine's output matches the expectation captured in test data
    assert actual == expected_result
17147

17248

17349
@pytest.mark.benchmark

0 commit comments

Comments
 (0)