Skip to content
Merged
Show file tree
Hide file tree
Changes from 6 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions drift/core/content_type_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@
"application/vnd.api+json": DecodedType.JSON,
# Plain Text (ALLOWED)
"text/plain": DecodedType.PLAIN_TEXT,
# HTML (BLOCKED)
# HTML
"text/html": DecodedType.HTML,
"application/xhtml+xml": DecodedType.HTML,
# CSS (BLOCKED)
Expand Down Expand Up @@ -113,7 +113,7 @@

# JSON, plain text, and HTML are acceptable decoded types
# All other content types will cause trace blocking
ACCEPTABLE_DECODED_TYPES = {DecodedType.JSON, DecodedType.PLAIN_TEXT}
ACCEPTABLE_DECODED_TYPES = {DecodedType.JSON, DecodedType.PLAIN_TEXT, DecodedType.HTML}
Comment thread
sohankshirsagar marked this conversation as resolved.


def get_decoded_type(content_type: str | None) -> DecodedType | None:
Expand Down
19 changes: 19 additions & 0 deletions drift/core/drift_sdk.py
Original file line number Diff line number Diff line change
Expand Up @@ -406,6 +406,16 @@ def _init_auto_instrumentations(self) -> None:
except ImportError:
pass

try:
import urllib.request

from ..instrumentation.urllib import UrllibInstrumentation

_ = UrllibInstrumentation()
logger.debug("urllib instrumentation initialized")
except ImportError:
pass

# Initialize PostgreSQL instrumentation before Django
# Instrument BOTH psycopg2 and psycopg if available
# This allows apps to use either or both
Expand Down Expand Up @@ -481,6 +491,15 @@ def _init_auto_instrumentations(self) -> None:
except Exception as e:
logger.debug(f"Socket instrumentation initialization failed: {e}")

# PyJWT instrumentation for JWT verification bypass
try:
from ..instrumentation.pyjwt import PyJWTInstrumentation

_ = PyJWTInstrumentation(mode=self.mode)
logger.debug("PyJWT instrumentation registered (REPLAY mode)")
except Exception as e:
logger.debug(f"PyJWT instrumentation registration failed: {e}")

def create_env_vars_snapshot(self) -> None:
"""Create a span capturing all environment variables.

Expand Down
4 changes: 3 additions & 1 deletion drift/core/mock_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -183,7 +183,9 @@ def find_mock_response_sync(
mock_response = sdk.request_mock_sync(mock_request)

if not mock_response or not mock_response.found:
logger.debug(f"No matching mock found for {trace_id} with input value: {input_value}")
logger.debug(
f"No matching mock found for {trace_id} with input value: {input_value}, input schema: {input_schema_merges}, input schema hash: {outbound_span.input_schema_hash}, input value hash: {outbound_span.input_value_hash}"
)
return None

logger.debug(f"Found mock response for {trace_id}")
Expand Down
4 changes: 2 additions & 2 deletions drift/core/trace_blocking_manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -204,7 +204,7 @@ def should_block_span(span: CleanSpanData) -> bool:
"""Check if a span should be blocked due to size or server error status.

Blocks the trace if:
1. The span is a SERVER span with ERROR status (e.g., HTTP >= 300)
1. The span is a SERVER span with ERROR status (e.g., HTTP >= 400)
2. The span exceeds the maximum size limit (1MB)

This matches Node SDK behavior in TdSpanExporter.ts.
Expand All @@ -221,7 +221,7 @@ def should_block_span(span: CleanSpanData) -> bool:
span_name = span.name
blocking_manager = TraceBlockingManager.get_instance()

# Check 1: Block SERVER spans with ERROR status (e.g., HTTP >= 300)
# Check 1: Block SERVER spans with ERROR status (e.g., HTTP >= 400)
if span.kind == SpanKind.SERVER and span.status.code == StatusCode.ERROR:
logger.debug(f"Blocking trace {trace_id} - server span '{span_name}' has error status")
blocking_manager.block_trace(trace_id, reason="server_error")
Expand Down
4 changes: 2 additions & 2 deletions drift/instrumentation/django/middleware.py
Original file line number Diff line number Diff line change
Expand Up @@ -390,8 +390,8 @@ def dict_to_schema_merges(merges_dict):
duration_seconds = duration_ns // 1_000_000_000
duration_nanos = duration_ns % 1_000_000_000

# Match Node SDK: >= 300 is considered an error (redirects, client errors, server errors)
if status_code >= 300:
# Match Node SDK: >= 400 is considered an error
if status_code >= 400:
status = SpanStatus(code=StatusCode.ERROR, message=f"HTTP {status_code}")
else:
status = SpanStatus(code=StatusCode.OK, message="")
Expand Down
4 changes: 2 additions & 2 deletions drift/instrumentation/fastapi/instrumentation.py
Original file line number Diff line number Diff line change
Expand Up @@ -530,8 +530,8 @@ def _finalize_span(
TuskDrift.get_instance()

status_code = response_data.get("status_code", 200)
# Match Node SDK: >= 300 is considered an error (redirects, client errors, server errors)
if status_code >= 300:
# Match Node SDK: >= 400 is considered an error
if status_code >= 400:
span_info.span.set_status(Status(OTelStatusCode.ERROR, f"HTTP {status_code}"))
else:
span_info.span.set_status(Status(OTelStatusCode.OK))
Expand Down
5 changes: 5 additions & 0 deletions drift/instrumentation/pyjwt/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
"""PyJWT instrumentation for REPLAY mode."""

from .instrumentation import PyJWTInstrumentation

__all__ = ["PyJWTInstrumentation"]
98 changes: 98 additions & 0 deletions drift/instrumentation/pyjwt/instrumentation.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,98 @@
"""PyJWT instrumentation for REPLAY mode.

Patches PyJWT to disable all verification during test replay:
1. _merge_options - returns all verification options as False
2. _verify_signature - no-op (defense in depth)
3. _validate_claims - no-op (defense in depth)

Only active in REPLAY mode.
"""

from __future__ import annotations

import logging
from types import ModuleType

from ...core.types import TuskDriftMode
from ..base import InstrumentationBase

logger = logging.getLogger(__name__)


class PyJWTInstrumentation(InstrumentationBase):
    """Patches PyJWT to disable all token verification in REPLAY mode.

    Three independent patches provide defense in depth:

    1. ``PyJWT._merge_options`` — returns options with every check disabled.
    2. ``PyJWS._verify_signature`` — made a no-op.
    3. ``PyJWT._validate_claims`` — made a no-op.

    Patches are applied only when ``mode == TuskDriftMode.REPLAY``; in any
    other mode the instrumentation registers as disabled and never touches
    the ``jwt`` module.
    """

    def __init__(self, mode: TuskDriftMode = TuskDriftMode.DISABLED, enabled: bool = True) -> None:
        """Register the instrumentation.

        Args:
            mode: Current SDK mode; patching is active only in REPLAY.
            enabled: Master switch, AND-ed with the REPLAY-mode check.
        """
        self.mode = mode
        should_enable = enabled and mode == TuskDriftMode.REPLAY

        super().__init__(
            name="PyJWTInstrumentation",
            module_name="jwt",
            supported_versions="*",
            enabled=should_enable,
        )

    def patch(self, module: ModuleType) -> None:
        """Apply all three patches to the loaded ``jwt`` module.

        Re-checks the mode as a safeguard in case ``patch`` is invoked
        directly rather than through the enabled/disabled registration path.
        """
        if self.mode != TuskDriftMode.REPLAY:
            return

        self._patch_merge_options()
        self._patch_signature_verification()
        self._patch_claim_validation()
        logger.debug("[PyJWTInstrumentation] All patches applied")

    def _patch_signature_verification(self) -> None:
        """Replace ``PyJWS._verify_signature`` with a no-op (defense in depth)."""
        try:
            from jwt import api_jws

            def patched_verify_signature(self, *args, **kwargs):
                logger.debug("[PyJWTInstrumentation] _verify_signature called - skipping verification")
                return None

            api_jws.PyJWS._verify_signature = patched_verify_signature
            logger.debug("[PyJWTInstrumentation] Patched PyJWS._verify_signature")
        except Exception as e:
            # Best-effort: a failed patch is logged but must not break startup.
            logger.warning(f"[PyJWTInstrumentation] Failed to patch _verify_signature: {e}")

    def _patch_claim_validation(self) -> None:
        """Replace ``PyJWT._validate_claims`` with a no-op (defense in depth)."""
        try:
            from jwt import api_jwt

            def patched_validate_claims(self, *args, **kwargs):
                logger.debug("[PyJWTInstrumentation] _validate_claims called - skipping validation")
                return None

            api_jwt.PyJWT._validate_claims = patched_validate_claims
            logger.debug("[PyJWTInstrumentation] Patched PyJWT._validate_claims")
        except Exception as e:
            logger.warning(f"[PyJWTInstrumentation] Failed to patch _validate_claims: {e}")

    def _patch_merge_options(self) -> None:
        """Patch ``PyJWT._merge_options`` to always return disabled verification options.

        A FRESH dict (with a fresh ``require`` list) is built on every call:
        PyJWT and caller code may mutate the returned options, so handing out
        one shared dict would let one ``decode`` call corrupt the options seen
        by every later call.
        """
        try:
            from jwt import api_jwt

            def patched_merge_options(self, options=None):
                logger.debug("[PyJWTInstrumentation] _merge_options called - returning disabled options")
                # Built per-call so callers can never mutate shared state.
                return {
                    "verify_signature": False,
                    "verify_exp": False,
                    "verify_nbf": False,
                    "verify_iat": False,
                    "verify_aud": False,
                    "verify_iss": False,
                    "verify_sub": False,
                    "verify_jti": False,
                    "require": [],
                    "strict_aud": False,
                }

            api_jwt.PyJWT._merge_options = patched_merge_options
            logger.debug("[PyJWTInstrumentation] Patched PyJWT._merge_options")
        except Exception as e:
            logger.warning(f"[PyJWTInstrumentation] Failed to patch _merge_options: {e}")
5 changes: 5 additions & 0 deletions drift/instrumentation/urllib/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
"""urllib.request instrumentation module."""

from .instrumentation import UrllibInstrumentation

__all__ = ["UrllibInstrumentation"]
27 changes: 27 additions & 0 deletions drift/instrumentation/urllib/e2e-tests/.tusk/config.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
# Tusk e2e-test configuration for the urllib instrumentation suite.
version: 1

service:
  id: "urllib-e2e-test-id"
  name: "urllib-e2e-test"
  port: 8000
  start:
    command: "python src/app.py"
  # Poll the health endpoint until the app is ready (or the timeout elapses).
  readiness_check:
    command: "curl -f http://localhost:8000/health"
    timeout: 45s
    interval: 5s

tusk_api:
  url: "http://localhost:8000"

test_execution:
  concurrent_limit: 10
  batch_size: 10
  timeout: 30s

recording:
  # Record every request during the record phase.
  sampling_rate: 1.0
  export_spans: false

replay:
  enable_telemetry: false
21 changes: 21 additions & 0 deletions drift/instrumentation/urllib/e2e-tests/Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
# E2E test image for the urllib instrumentation; built from the shared
# python-e2e-base image (build context is the repository root).
FROM python-e2e-base:latest

# Copy SDK source for editable install
COPY . /sdk

# Copy test files
COPY drift/instrumentation/urllib/e2e-tests /app

WORKDIR /app

# Install dependencies (requirements.txt uses -e /sdk for SDK)
RUN pip install -q -r requirements.txt

# Make entrypoint executable
RUN chmod +x entrypoint.py

# Create .tusk directories
RUN mkdir -p /app/.tusk/traces /app/.tusk/logs

# Run entrypoint
ENTRYPOINT ["python", "entrypoint.py"]
19 changes: 19 additions & 0 deletions drift/instrumentation/urllib/e2e-tests/docker-compose.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
# Docker Compose service for the urllib instrumentation e2e test.
services:
  app:
    build:
      # Build from the repository root so the whole SDK can be copied in.
      context: ../../../..
      dockerfile: drift/instrumentation/urllib/e2e-tests/Dockerfile
      args:
        - TUSK_CLI_VERSION=${TUSK_CLI_VERSION:-latest}
    environment:
      - PORT=8000
      - TUSK_ANALYTICS_DISABLED=1
      - PYTHONUNBUFFERED=1
    working_dir: /app
    volumes:
      # Mount SDK source for hot reload (no rebuild needed for SDK changes)
      - ../../../..:/sdk
      # Mount app source for development
      - ./src:/app/src
      # Mount .tusk folder to persist traces
      - ./.tusk:/app/.tusk
34 changes: 34 additions & 0 deletions drift/instrumentation/urllib/e2e-tests/entrypoint.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
#!/usr/bin/env python3
"""
E2E Test Entrypoint for Urllib Instrumentation

This script orchestrates the full e2e test lifecycle:
1. Setup: Install dependencies
2. Record: Start app in RECORD mode, execute requests
3. Test: Run Tusk CLI tests
4. Teardown: Cleanup and return exit code
"""

import sys
from pathlib import Path

# Add SDK to path for imports
sys.path.insert(0, "/sdk")

from drift.instrumentation.e2e_common.base_runner import E2ETestRunnerBase


class UrllibE2ETestRunner(E2ETestRunnerBase):
    """E2E test runner for the urllib.request instrumentation."""

    def __init__(self):
        # Local import keeps the module's top-level import section minimal.
        import os

        # The app port is configurable via the PORT env var (default 8000).
        app_port = int(os.environ.get("PORT", "8000"))
        super().__init__(app_port=app_port)


if __name__ == "__main__":
runner = UrllibE2ETestRunner()
exit_code = runner.run()
sys.exit(exit_code)
2 changes: 2 additions & 0 deletions drift/instrumentation/urllib/e2e-tests/requirements.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
# Tusk Drift SDK, editable install from the source mounted at /sdk
-e /sdk
# Web framework used by the e2e test app (src/app.py)
Flask>=3.1.2
64 changes: 64 additions & 0 deletions drift/instrumentation/urllib/e2e-tests/run.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
#!/bin/bash
# Orchestrates one e2e test run: build the compose project, run the test
# container, report the result, and always tear the containers down.

# Exit on error
set -e

# Accept optional port parameter (default: 8000)
APP_PORT=${1:-8000}
export APP_PORT

# Generate unique docker compose project name
# Get the instrumentation name (parent directory of e2e-tests)
TEST_NAME="$(basename "$(dirname "$(pwd)")")"
# Unique per instrumentation+port so parallel runs don't collide.
PROJECT_NAME="python-${TEST_NAME}-${APP_PORT}"

# Colors for output
GREEN='\033[0;32m'
RED='\033[0;31m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'

echo -e "${BLUE}========================================${NC}"
echo -e "${BLUE}Running Python E2E Test: ${TEST_NAME}${NC}"
echo -e "${BLUE}Port: ${APP_PORT}${NC}"
echo -e "${BLUE}========================================${NC}"
echo ""

# Cleanup function
cleanup() {
    echo ""
    echo -e "${YELLOW}Cleaning up containers...${NC}"
    # Best-effort teardown; never let cleanup itself fail the script.
    docker compose -p "$PROJECT_NAME" down -v 2>/dev/null || true
}

# Register cleanup on exit
trap cleanup EXIT

# Build containers
echo -e "${BLUE}Building containers...${NC}"
docker compose -p "$PROJECT_NAME" build --no-cache

# Run the test container
echo -e "${BLUE}Starting test...${NC}"
echo ""

# Run container and capture exit code (always use port 8000 inside container)
# Disable set -e temporarily to capture exit code
set +e
docker compose -p "$PROJECT_NAME" run --rm app
EXIT_CODE=$?
set -e

echo ""
if [ $EXIT_CODE -eq 0 ]; then
    echo -e "${GREEN}========================================${NC}"
    echo -e "${GREEN}Test passed!${NC}"
    echo -e "${GREEN}========================================${NC}"
else
    echo -e "${RED}========================================${NC}"
    echo -e "${RED}Test failed with exit code ${EXIT_CODE}${NC}"
    echo -e "${RED}========================================${NC}"
fi

# Propagate the container's exit code; the EXIT trap still runs cleanup.
exit $EXIT_CODE
Loading
Loading