Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
43 changes: 43 additions & 0 deletions api/app_analytics/serializers.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,15 @@
import typing

from django.conf import settings
from rest_framework import serializers

from app_analytics.tasks import (
track_feature_evaluation_influxdb_v2,
track_feature_evaluation_v2,
)
from environments.models import Environment
from features.models import FeatureState

from .types import PERIOD_TYPE


Expand Down Expand Up @@ -37,3 +45,38 @@ class SDKAnalyticsFlagsSerializerDetail(serializers.Serializer): # type: ignore

class SDKAnalyticsFlagsSerializer(serializers.Serializer):  # type: ignore[type-arg]
    """Accepts SDK flag-evaluation reports and forwards them to analytics.

    Evaluations naming features that do not exist in the request's
    environment are silently dropped during validation.
    """

    evaluations = SDKAnalyticsFlagsSerializerDetail(many=True)

    def validate(self, attrs: dict[str, typing.Any]) -> dict[str, typing.Any]:
        # Build the set of feature names known to this environment; only
        # environment-default feature states (no segment/identity overrides)
        # are considered.
        request = self.context["request"]
        known_feature_names = set(
            FeatureState.objects.filter(
                environment=request.environment,
                feature_segment=None,
                identity=None,
            ).values_list("feature__name", flat=True)
        )

        valid_evaluations = []
        for evaluation in attrs["evaluations"]:
            if evaluation["feature_name"] in known_feature_names:
                valid_evaluations.append(evaluation)
        return {"evaluations": valid_evaluations}

    def save(self, **kwargs: typing.Any) -> None:
        # Dispatch the validated evaluations to the configured analytics
        # backend; Postgres takes precedence over InfluxDB when both are set.
        environment: Environment = kwargs["environment"]
        task_args = (
            environment.id,
            self.validated_data["evaluations"],
        )
        if settings.USE_POSTGRES_FOR_ANALYTICS:
            track_feature_evaluation_v2.delay(args=task_args)
        elif settings.INFLUXDB_TOKEN:
            track_feature_evaluation_influxdb_v2.delay(args=task_args)
114 changes: 26 additions & 88 deletions api/app_analytics/views.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,11 @@
import logging
import typing

from django.conf import settings
from drf_yasg.utils import swagger_auto_schema # type: ignore[import-untyped]
from rest_framework import status
from rest_framework.decorators import api_view, permission_classes
from rest_framework.fields import IntegerField
from rest_framework.generics import CreateAPIView, GenericAPIView
from rest_framework.generics import CreateAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.request import Request
from rest_framework.response import Response
Expand All @@ -16,10 +16,6 @@
get_usage_data,
)
from app_analytics.cache import FeatureEvaluationCache
from app_analytics.tasks import (
track_feature_evaluation_influxdb_v2,
track_feature_evaluation_v2,
)
from environments.authentication import EnvironmentKeyAuthentication
from environments.permissions.permissions import EnvironmentKeyPermissions
from features.models import FeatureState
Expand All @@ -44,60 +40,26 @@ class SDKAnalyticsFlagsV2(CreateAPIView): # type: ignore[type-arg]
serializer_class = SDKAnalyticsFlagsSerializer
throttle_classes = []

@swagger_auto_schema( # type: ignore[misc]
request_body=SDKAnalyticsFlagsSerializer(),
responses={204: Response(status=status.HTTP_204_NO_CONTENT)},
)
def create(self, request: Request, *args, **kwargs) -> Response: # type: ignore[no-untyped-def]
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)

self.evaluations = serializer.validated_data["evaluations"]
if not self._is_data_valid():
return Response(
{"detail": "Invalid feature names associated with the project."},
content_type="application/json",
status=status.HTTP_400_BAD_REQUEST,
)
if settings.USE_POSTGRES_FOR_ANALYTICS:
track_feature_evaluation_v2.delay(
args=(
request.environment.id,
self.evaluations,
)
)
elif settings.INFLUXDB_TOKEN:
track_feature_evaluation_influxdb_v2.delay(
args=(
request.environment.id,
self.evaluations,
)
)

serializer.save(environment=self.request.environment)
return Response(status=status.HTTP_204_NO_CONTENT)

def _is_data_valid(self) -> bool:
environment_feature_names = set(
FeatureState.objects.filter(
environment=self.request.environment,
feature_segment=None,
identity=None,
).values_list("feature__name", flat=True)
)

valid = True
for evaluation in self.evaluations:
if evaluation["feature_name"] in environment_feature_names:
continue
valid = False

return valid


class SDKAnalyticsFlags(GenericAPIView): # type: ignore[type-arg]
class SDKAnalyticsFlags(CreateAPIView): # type: ignore[type-arg]
"""
Class to handle flag analytics events
"""

permission_classes = (EnvironmentKeyPermissions,)
authentication_classes = (EnvironmentKeyAuthentication,)
throttle_classes = []
format_kwarg = None

def get_serializer_class(self): # type: ignore[no-untyped-def]
if getattr(self, "swagger_fake_view", False):
Expand All @@ -118,57 +80,33 @@ def get_fields(self): # type: ignore[no-untyped-def]
for feature_name in environment_feature_names
}

def save(self, **kwargs: typing.Any) -> None:
request = self.context["request"]
for feature_name, eval_count in self.validated_data.items():
feature_evaluation_cache.track_feature_evaluation(
request.environment.id, feature_name, eval_count
)

return _AnalyticsSerializer

def post(self, request, *args, **kwargs): # type: ignore[no-untyped-def]
@swagger_auto_schema( # type: ignore[misc]
request_body=SDKAnalyticsFlagsSerializer(),
responses={200: Response(status=status.HTTP_200_OK)},
)
def create(
self, request: Request, *args: typing.Any, **kwargs: typing.Any
) -> Response:
"""
Send flag evaluation events from the SDK back to the API for reporting.


TODO: Eventually replace this with the v2 version of
this endpoint once SDKs have been updated.
"""
is_valid = self._is_data_valid()
if not is_valid:
# for now, return 200 to avoid breaking client integrations
return Response(
{"detail": "Invalid data. Not logged."},
content_type="application/json",
status=status.HTTP_200_OK,
)
for feature_name, eval_count in self.request.data.items():
feature_evaluation_cache.track_feature_evaluation(
request.environment.id, feature_name, eval_count
)
serializer = self.get_serializer(data=request.data)
serializer.is_valid()
serializer.save(environment=self.request.environment)
return Response(status=status.HTTP_200_OK)

def _is_data_valid(self) -> bool:
environment_feature_names = set(
FeatureState.objects.filter(
environment=self.request.environment,
feature_segment=None,
identity=None,
).values_list("feature__name", flat=True)
)

is_valid = True
for feature_name, request_count in self.request.data.items():
if not (
isinstance(feature_name, str)
and feature_name in environment_feature_names
):
logger.warning("Feature %s does not belong to project", feature_name)
is_valid = False

if not (isinstance(request_count, int)):
logger.error(
"Analytics data contains non integer request count. User agent: %s",
self.request.headers.get("User-Agent", "Not found"),
)
is_valid = False

return is_valid


class SelfHostedTelemetryAPIView(CreateAPIView): # type: ignore[type-arg]
"""
Expand Down
43 changes: 31 additions & 12 deletions api/tests/unit/app_analytics/test_unit_app_analytics_views.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,6 @@
)
from app_analytics.dataclasses import UsageData
from app_analytics.models import FeatureEvaluationRaw
from app_analytics.views import SDKAnalyticsFlags
from environments.identities.models import Identity
from environments.models import Environment
from features.models import Feature
Expand All @@ -27,25 +26,34 @@
)


def test_sdk_analytics_does_not_allow_bad_data(mocker, settings, environment): # type: ignore[no-untyped-def]
def test_sdk_analytics_ignores_bad_data(
mocker: MockerFixture,
environment: Environment,
feature: Feature,
api_client: APIClient,
) -> None:
# Given
settings.INFLUXDB_TOKEN = "some-token"

data = {"bad": "data"}
request = mocker.MagicMock(data=data, environment=environment)

view = SDKAnalyticsFlags(request=request)
api_client.credentials(HTTP_X_ENVIRONMENT_KEY=environment.api_key)

data = {"invalid_feature_name": 20, feature.name: 2}
mocked_feature_eval_cache = mocker.patch(
"app_analytics.views.feature_evaluation_cache"
)

url = reverse("api-v1:analytics-flags")

# When
response = view.post(request) # type: ignore[no-untyped-call]
response = api_client.post(
url, data=json.dumps(data), content_type="application/json"
)

# Then
assert response.status_code == status.HTTP_200_OK
mocked_feature_eval_cache.track_feature_evaluation.assert_not_called()
assert mocked_feature_eval_cache.track_feature_evaluation.call_count == 1

mocked_feature_eval_cache.track_feature_evaluation.assert_called_once_with(
environment.id, feature.name, data[feature.name]
)


def test_get_usage_data(mocker, admin_client, organisation): # type: ignore[no-untyped-def]
Expand Down Expand Up @@ -387,7 +395,12 @@ def test_set_sdk_analytics_flags_without_identifier(
"feature_name": feature.name,
"count": feature_request_count,
"enabled_when_evaluated": True,
}
},
{
"feature_name": "invalid_feature_name",
"count": 10,
"enabled_when_evaluated": True,
},
]
}

Expand Down Expand Up @@ -433,7 +446,13 @@ def test_set_sdk_analytics_flags_with_identifier__influx__calls_expected(
"identity_identifier": identity.identifier,
"count": feature_request_count,
"enabled_when_evaluated": True,
}
},
{
"feature_name": "invalid_feature_name",
"identity_identifier": identity.identifier,
"count": 10,
"enabled_when_evaluated": True,
},
]
}

Expand Down
Loading