11import json
22import typing
3- from dataclasses import dataclass
4- from operator import itemgetter
53from pathlib import Path
64
75import pytest
8- from pytest_mock import MockerFixture
96
10- from flag_engine .context .types import EvaluationContext , FeatureContext , SegmentRule
7+ from flag_engine .context .types import EvaluationContext
118from flag_engine .engine import get_evaluation_result
9+ from flag_engine .result .types import EvaluationResult
1210
# Directory containing this test module; used to locate JSON fixture files.
MODULE_PATH = Path(__file__).parent.resolve()

# Type alias for a raw environment document as loaded from JSON
# (schema not enforced here — consumers index into it directly).
EnvironmentDocument = dict[str, typing.Any]
2314
2415
def _extract_test_cases(
    file_path: Path,
) -> typing.Iterable[tuple[EvaluationContext, EvaluationResult]]:
    """
    Extract the test cases from the json data file which should be in the
    following format.

    {
        "test_cases": [
            {
                "context": {...},  // the evaluation context fed to the engine
                "result": {...},   // the evaluation result expected from the engine
            }
        ]
    }

    :param file_path: the path to the json data file
    :return: an iterable of (context, expected result) tuples
    """
    test_data = json.loads(file_path.read_text())

    # Fail loudly (KeyError) if the fixture is malformed — this is test data,
    # so a missing key indicates a broken fixture, not a runtime condition.
    for case in test_data["test_cases"]:
        context: EvaluationContext = case["context"]
        result: EvaluationResult = case["result"]
        yield context, result
13925
14026
14127TEST_CASES = list (
@@ -146,28 +32,18 @@ def _extract_feature_contexts(
14632
14733
@pytest.mark.parametrize(
    "context, expected_result",
    TEST_CASES,
)
def test_engine(
    context: EvaluationContext,
    expected_result: EvaluationResult,
) -> None:
    """Check that the engine reproduces the recorded evaluation result."""
    # When
    actual_result = get_evaluation_result(context)

    # Then
    assert actual_result == expected_result
17147
17248
17349@pytest .mark .benchmark
0 commit comments