Skip to content

Commit ae59756

Browse files
danjuvgruebel
and authored
feat: add logging hook (#577)
* feat: add logging hook Signed-off-by: Danju Visvanathan <danju.visvanathan@gmail.com> * refactor: split logginghook into separate module Signed-off-by: Danju Visvanathan <danju.visvanathan@gmail.com> * chore: update README.md Signed-off-by: Danju Visvanathan <danju.visvanathan@gmail.com> * fix: consistent appending to args Signed-off-by: Danju Visvanathan <danju.visvanathan@gmail.com> * chore: fmt Signed-off-by: Danju Visvanathan <danju.visvanathan@gmail.com> * revert: refactor of hook package Signed-off-by: Danju Visvanathan <danju.visvanathan@gmail.com> * fix: parameterise stage key Signed-off-by: Danju Visvanathan <danju.visvanathan@gmail.com> * refactor: parameterise stage value Signed-off-by: Danju Visvanathan <danju.visvanathan@gmail.com> * fix: use asdict() Signed-off-by: Danju Visvanathan <danju.visvanathan@gmail.com> * fix: change log message to not duplicate stage Signed-off-by: Danju Visvanathan <danju.visvanathan@gmail.com> * fix: propogate error_message to avoid silently dropping it Signed-off-by: Danju Visvanathan <danju.visvanathan@gmail.com> --------- Signed-off-by: Danju Visvanathan <danju.visvanathan@gmail.com> Co-authored-by: Anton Grübel <anton.gruebel@gmail.com>
1 parent ade3870 commit ae59756

File tree

3 files changed

+276
-0
lines changed

3 files changed

+276
-0
lines changed

README.md

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -190,6 +190,17 @@ client.get_boolean_flag("my-flag", False, flag_evaluation_options=options)
190190

191191
The OpenFeature SDK logs to the `openfeature` logger using the `logging` package from the Python Standard Library.
192192

193+
#### Logging Hook
194+
195+
The Python SDK includes a `LoggingHook`, which logs detailed information at key points during flag evaluation, using the `logging` package. This hook can be particularly helpful for troubleshooting and debugging; simply attach it at the global, client or invocation level and ensure your log level is set to "debug".
196+
197+
```python
198+
from openfeature import api
199+
from openfeature.hook.logging_hook import LoggingHook
200+
201+
api.add_hooks([LoggingHook()])
202+
```
203+
193204
### Domains
194205

195206
Clients can be assigned to a domain.

openfeature/hook/logging_hook.py

Lines changed: 68 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,68 @@
1+
import json
2+
import logging
3+
from dataclasses import asdict
4+
5+
from openfeature.evaluation_context import EvaluationContext
6+
from openfeature.exception import ErrorCode, OpenFeatureError
7+
from openfeature.flag_evaluation import FlagEvaluationDetails, FlagValueType
8+
from openfeature.hook import Hook, HookContext, HookHints
9+
10+
11+
class LoggingHook(Hook):
    """Hook that logs flag-evaluation details at each lifecycle stage.

    Emits a structured debug log in ``before`` and ``after`` and an error
    log in ``error``. The evaluation context is only serialised into the
    log payload when ``include_evaluation_context`` is enabled, since it
    may contain sensitive targeting data.
    """

    def __init__(
        self,
        include_evaluation_context: bool = False,
        logger: logging.Logger | None = None,
    ):
        self.include_evaluation_context = include_evaluation_context
        # Default to the SDK-wide "openfeature" logger when none is supplied.
        self.logger = logger if logger else logging.getLogger("openfeature")

    def _build_args(self, hook_context: HookContext, stage: str) -> dict:
        """Assemble the structured log payload shared by every stage."""
        client_metadata = hook_context.client_metadata
        provider_metadata = hook_context.provider_metadata
        payload = {
            "domain": client_metadata.domain if client_metadata else None,
            "provider_name": provider_metadata.name if provider_metadata else None,
            "flag_key": hook_context.flag_key,
            "default_value": hook_context.default_value,
            "stage": stage,
        }
        if self.include_evaluation_context:
            # asdict() flattens the dataclass; default=str covers values
            # json cannot encode natively.
            context_dict = asdict(hook_context.evaluation_context)
            payload["evaluation_context"] = json.dumps(context_dict, default=str)
        return payload

    def before(
        self, hook_context: HookContext, hints: HookHints
    ) -> EvaluationContext | None:
        """Log evaluation details before the flag is resolved."""
        payload = self._build_args(hook_context, "before")
        self.logger.debug("Flag evaluation %s", payload)
        return None

    def after(
        self,
        hook_context: HookContext,
        details: FlagEvaluationDetails[FlagValueType],
        hints: HookHints,
    ) -> None:
        """Log evaluation details together with the resolved result."""
        payload = self._build_args(hook_context, "after")
        payload.update(
            reason=details.reason,
            variant=details.variant,
            value=details.value,
        )
        self.logger.debug("Flag evaluation %s", payload)

    def error(
        self, hook_context: HookContext, exception: Exception, hints: HookHints
    ) -> None:
        """Log evaluation details together with the error information."""
        payload = self._build_args(hook_context, "error")
        if not isinstance(exception, OpenFeatureError):
            payload["error_code"] = ErrorCode.GENERAL
            payload["error_message"] = str(exception)
        else:
            # OpenFeature errors carry their own code and message; propagate
            # both so the message is not silently dropped.
            payload["error_code"] = exception.error_code
            payload["error_message"] = exception.error_message
        self.logger.error("Flag evaluation %s", payload)

tests/hook/test_logging_hook.py

Lines changed: 197 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,197 @@
1+
from unittest.mock import MagicMock
2+
3+
import pytest
4+
5+
from openfeature.client import ClientMetadata
6+
from openfeature.evaluation_context import EvaluationContext
7+
from openfeature.exception import ErrorCode, FlagNotFoundError
8+
from openfeature.flag_evaluation import FlagEvaluationDetails, FlagType
9+
from openfeature.hook.logging_hook import HookContext, LoggingHook
10+
from openfeature.provider.metadata import Metadata
11+
12+
13+
@pytest.fixture()
def hook_context():
    """Provide a fully populated HookContext for the logging hook tests."""
    context = EvaluationContext("user-1", {"env": "prod"})
    return HookContext(
        flag_key="my-flag",
        flag_type=FlagType.BOOLEAN,
        default_value=False,
        evaluation_context=context,
        client_metadata=ClientMetadata(domain="my-domain"),
        provider_metadata=Metadata(name="my-provider"),
    )
23+
24+
25+
def test_before_calls_debug_with_stage(hook_context):
    """before() emits a debug log tagged with the 'before' stage."""
    logger = MagicMock()

    LoggingHook(logger=logger).before(hook_context, hints={})

    expected = {
        "flag_key": "my-flag",
        "default_value": False,
        "domain": "my-domain",
        "provider_name": "my-provider",
        "stage": "before",
    }
    logger.debug.assert_called_with("Flag evaluation %s", expected)
39+
40+
41+
def test_after_calls_debug_with_stage(hook_context):
    """after() logs the resolved value, variant and reason at debug level."""
    logger = MagicMock()
    hook = LoggingHook(logger=logger)
    details = FlagEvaluationDetails(
        flag_key="my-flag",
        value=True,
        variant="on",
        reason="STATIC",
    )

    hook.after(hook_context, details, hints={})

    expected = {
        "flag_key": "my-flag",
        "default_value": False,
        "domain": "my-domain",
        "provider_name": "my-provider",
        "stage": "after",
        "reason": "STATIC",
        "variant": "on",
        "value": True,
    }
    logger.debug.assert_called_with("Flag evaluation %s", expected)
65+
66+
67+
def test_after_calls_debug_with_evaluation_context(hook_context):
    """With include_evaluation_context=True the serialised context is logged."""
    logger = MagicMock()
    hook = LoggingHook(logger=logger, include_evaluation_context=True)
    details = FlagEvaluationDetails(
        flag_key="my-flag",
        value=True,
        variant="on",
        reason="STATIC",
    )

    hook.after(hook_context, details, hints={})

    expected = {
        "flag_key": "my-flag",
        "default_value": False,
        "domain": "my-domain",
        "provider_name": "my-provider",
        "stage": "after",
        "reason": "STATIC",
        "variant": "on",
        "value": True,
        "evaluation_context": '{"targeting_key": "user-1", "attributes": {"env": "prod"}}',
    }
    logger.debug.assert_called_with("Flag evaluation %s", expected)
92+
93+
94+
def test_error_calls_error_log(hook_context):
    """Plain exceptions are logged with ErrorCode.GENERAL and their message."""
    logger = MagicMock()
    hook = LoggingHook(logger=logger)

    hook.error(hook_context, Exception("something went wrong"), hints={})

    expected = {
        "flag_key": "my-flag",
        "default_value": False,
        "domain": "my-domain",
        "provider_name": "my-provider",
        "stage": "error",
        "error_code": ErrorCode.GENERAL,
        "error_message": "something went wrong",
    }
    logger.error.assert_called_with("Flag evaluation %s", expected)
112+
113+
114+
def test_error_extracts_error_code_from_open_feature_error(hook_context):
    """OpenFeature errors contribute their own error code and message."""
    logger = MagicMock()
    hook = LoggingHook(logger=logger)

    hook.error(hook_context, FlagNotFoundError("flag not found"), hints={})

    expected = {
        "flag_key": "my-flag",
        "default_value": False,
        "domain": "my-domain",
        "provider_name": "my-provider",
        "stage": "error",
        "error_code": ErrorCode.FLAG_NOT_FOUND,
        "error_message": "flag not found",
    }
    logger.error.assert_called_with("Flag evaluation %s", expected)
132+
133+
134+
def test_build_args_without_metadata():
    """Missing client/provider metadata yields None domain and provider_name."""
    context = HookContext(
        flag_key="flag",
        flag_type=FlagType.STRING,
        default_value="default",
        evaluation_context=EvaluationContext(None, {}),
        client_metadata=None,
        provider_metadata=None,
    )

    args = LoggingHook()._build_args(context, "before")

    assert args == {
        "flag_key": "flag",
        "default_value": "default",
        "domain": None,
        "provider_name": None,
        "stage": "before",
    }
152+
153+
154+
def test_build_args_excludes_evaluation_context_by_default(hook_context):
    """The evaluation context must not be logged unless explicitly enabled."""
    args = LoggingHook()._build_args(hook_context, "before")

    assert args == {
        "flag_key": "my-flag",
        "default_value": False,
        "domain": "my-domain",
        "provider_name": "my-provider",
        "stage": "before",
    }
164+
165+
166+
def test_build_args_includes_evaluation_context_when_enabled(hook_context):
    """Enabling include_evaluation_context adds the JSON-serialised context."""
    hook = LoggingHook(include_evaluation_context=True)

    args = hook._build_args(hook_context, "after")

    assert args == {
        "flag_key": "my-flag",
        "default_value": False,
        "domain": "my-domain",
        "provider_name": "my-provider",
        "stage": "after",
        "evaluation_context": '{"targeting_key": "user-1", "attributes": {"env": "prod"}}',
    }
177+
178+
179+
def test_error_calls_error_log_with_evaluation_context(hook_context):
    """Error logs include the serialised context when the option is enabled."""
    logger = MagicMock()
    hook = LoggingHook(logger=logger, include_evaluation_context=True)

    hook.error(hook_context, Exception("something went wrong"), hints={})

    expected = {
        "flag_key": "my-flag",
        "default_value": False,
        "domain": "my-domain",
        "provider_name": "my-provider",
        "stage": "error",
        "evaluation_context": '{"targeting_key": "user-1", "attributes": {"env": "prod"}}',
        "error_code": ErrorCode.GENERAL,
        "error_message": "something went wrong",
    }
    logger.error.assert_called_with("Flag evaluation %s", expected)

0 commit comments

Comments
 (0)