Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
37 commits
Select commit Hold shift + click to select a range
c8b7bad
fix cursor-stream
sohankshirsagar Jan 13, 2026
1bdc5ca
refactor psycopg instrumentations to use mock utils
sohankshirsagar Jan 13, 2026
132b556
instrument ServerCursor
sohankshirsagar Jan 13, 2026
6e568a1
add psycopg cursor.copy() instrumentation for COPY operations
sohankshirsagar Jan 13, 2026
8feb8a7
fix multiple queries on same cursor
sohankshirsagar Jan 13, 2026
8723755
fix psycopg pipeline mode: defer result capture until sync()
sohankshirsagar Jan 14, 2026
da7329d
fix cursor iteration
sohankshirsagar Jan 14, 2026
8421029
fix executemany with returning=True instrumentation for psycopg
sohankshirsagar Jan 14, 2026
9c4a03e
fix cursor.scroll() support in psycopg replay mode
sohankshirsagar Jan 14, 2026
3f801cf
fix cursor.rownumber property returning null during REPLAY mode
sohankshirsagar Jan 14, 2026
b7c8502
fix psycopg cursor.statusmessage capture and replay
sohankshirsagar Jan 14, 2026
15a32a8
fix nextset() iteration for executemany with returning=True
sohankshirsagar Jan 14, 2026
02a5b45
fix cursor.scroll() position tracking in RECORD mode
sohankshirsagar Jan 14, 2026
acbb31b
refactor
sohankshirsagar Jan 14, 2026
35b3765
resolve cursor reuse hang in RECORD mode
sohankshirsagar Jan 14, 2026
d22ed0a
Fix UUID parameter serialization mismatch in psycopg REPLAY mode
sohankshirsagar Jan 14, 2026
765fd9b
fix bytea serialization to preserve binary data during record/replay
sohankshirsagar Jan 14, 2026
c95337f
Fix kwargs_row row factory handling in psycopg instrumentation
sohankshirsagar Jan 14, 2026
9a407ab
check number of tests replayed in e2e tests
sohankshirsagar Jan 14, 2026
be8de29
Fix scalar_row factory handling in psycopg instrumentation
sohankshirsagar Jan 15, 2026
0a2a1c9
handle replaying uuid properly
sohankshirsagar Jan 15, 2026
f6d4d32
Fix binary format hang by deferring result capture until fetch
sohankshirsagar Jan 15, 2026
8ad42ac
Enable null-values test for psycopg instrumentation
sohankshirsagar Jan 15, 2026
0526956
Clean up null-values test comments
sohankshirsagar Jan 15, 2026
2449d40
Enable transaction context manager test for psycopg instrumentation
sohankshirsagar Jan 15, 2026
a9ab04f
Clean up transaction context manager test comments
sohankshirsagar Jan 15, 2026
7dbf9eb
fix logging OOM issue during e2e tests
sohankshirsagar Jan 15, 2026
5fc28b2
Fix cursor.set_result() not mocked in REPLAY mode for executemany wit…
sohankshirsagar Jan 15, 2026
6903b87
Refactor psycopg instrumentation to reduce code duplication
sohankshirsagar Jan 15, 2026
f04b88b
Fix Decimal and timedelta serialization for consistent RECORD/REPLAY …
sohankshirsagar Jan 15, 2026
0170747
Refactor psycopg instrumentation: extract common helper methods
sohankshirsagar Jan 15, 2026
66453ad
Fix inet/cidr network type serialization for REPLAY mode
sohankshirsagar Jan 15, 2026
3418673
Fix psycopg Range type serialization for REPLAY mode
sohankshirsagar Jan 15, 2026
08f5ce7
fix format + lint issues
sohankshirsagar Jan 15, 2026
e5101ed
fix type errors
sohankshirsagar Jan 15, 2026
70e04b7
format
sohankshirsagar Jan 16, 2026
26f92f7
try catch record mode
sohankshirsagar Jan 16, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 2 additions & 22 deletions drift/instrumentation/django/e2e-tests/src/test_requests.py
Original file line number Diff line number Diff line change
@@ -1,26 +1,6 @@
"""Execute test requests against the Django app."""

import os
import time

import requests

PORT = os.getenv("PORT", "8000")
BASE_URL = f"http://localhost:{PORT}"


def make_request(method: str, endpoint: str, **kwargs):
"""Make HTTP request and log result."""
url = f"{BASE_URL}{endpoint}"
print(f"→ {method} {endpoint}")

# Set default timeout if not provided
kwargs.setdefault("timeout", 30)
response = requests.request(method, url, **kwargs)
print(f" Status: {response.status_code}")
time.sleep(0.5) # Small delay between requests
return response

from drift.instrumentation.e2e_common.test_utils import make_request, print_request_summary

if __name__ == "__main__":
print("Starting Django test request sequence...\n")
Expand All @@ -42,4 +22,4 @@ def make_request(method: str, endpoint: str, **kwargs):
)
make_request("DELETE", "/api/post/1/delete")

print("\nAll requests completed successfully")
print_request_summary()
9 changes: 8 additions & 1 deletion drift/instrumentation/e2e_common/__init__.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,12 @@
"""Common utilities for Python SDK e2e tests."""

from .base_runner import Colors, E2ETestRunnerBase
from .test_utils import get_request_count, make_request, print_request_summary

__all__ = ["Colors", "E2ETestRunnerBase"]
__all__ = [
"Colors",
"E2ETestRunnerBase",
"get_request_count",
"make_request",
"print_request_summary",
]
40 changes: 37 additions & 3 deletions drift/instrumentation/e2e_common/base_runner.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,7 @@ def __init__(self, app_port: int = 8000):
self.app_port = app_port
self.app_process: subprocess.Popen | None = None
self.exit_code = 0
self.expected_request_count: int | None = None

# Register signal handlers for cleanup
signal.signal(signal.SIGTERM, self._signal_handler)
Expand Down Expand Up @@ -76,6 +77,17 @@ def run_command(self, cmd: list[str], env: dict | None = None, check: bool = Tru

return result

def _parse_request_count(self, output: str):
    """Parse the request count emitted by test_requests.py.

    Scans *output* for a line of the form ``TOTAL_REQUESTS_SENT:<n>``
    (written by print_request_summary in e2e_common.test_utils) and stores
    ``<n>`` in ``self.expected_request_count`` for later validation against
    the number of passed replay tests.

    Args:
        output: Captured stdout of the test_requests.py run.
    """
    prefix = "TOTAL_REQUESTS_SENT:"
    for line in output.split("\n"):
        if not line.startswith(prefix):
            continue
        try:
            # Slice off the fixed prefix rather than split(":")[1], so a
            # stray colon in the remainder is a parse error, not a silent
            # truncation.
            count = int(line[len(prefix):])
        except ValueError:
            self.log(f"Failed to parse request count from: {line}", Colors.YELLOW)
            continue
        self.expected_request_count = count
        self.log(f"Captured request count: {count}", Colors.GREEN)
        # Only one summary line is ever emitted; stop at the first match
        # instead of letting a later malformed line overwrite it.
        return

def wait_for_service(self, check_cmd: list[str], timeout: int = 30, interval: int = 1) -> bool:
"""Wait for a service to become ready."""
elapsed = 0
Expand Down Expand Up @@ -138,8 +150,8 @@ def record_traces(self) -> bool:
self.app_process = subprocess.Popen(
["python", "src/app.py"],
env={**os.environ, **env},
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
Comment thread
sohankshirsagar marked this conversation as resolved.
text=True,
)

Expand All @@ -164,7 +176,13 @@ def record_traces(self) -> bool:
# Execute test requests
self.log("Executing test requests...", Colors.GREEN)
try:
self.run_command(["python", "src/test_requests.py"])
# Pass PYTHONPATH so test_requests.py can import from e2e_common
result = self.run_command(
["python", "src/test_requests.py"],
env={"PYTHONPATH": "/sdk"},
)
# Parse request count from output
self._parse_request_count(result.stdout)
except subprocess.CalledProcessError:
self.log("Test requests failed", Colors.RED)
self.exit_code = 1
Expand Down Expand Up @@ -257,13 +275,15 @@ def parse_test_results(self, output: str):
idx += 1

all_passed = True
passed_count = 0
for result in results:
test_id = result.get("test_id", "unknown")
passed = result.get("passed", False)
duration = result.get("duration", 0)

if passed:
self.log(f"✓ Test ID: {test_id} (Duration: {duration}ms)", Colors.GREEN)
passed_count += 1
else:
self.log(f"✗ Test ID: {test_id} (Duration: {duration}ms)", Colors.RED)
all_passed = False
Expand All @@ -276,6 +296,20 @@ def parse_test_results(self, output: str):
self.log("Some tests failed!", Colors.RED)
self.exit_code = 1

# Validate request count matches passed tests
if self.expected_request_count is not None:
if passed_count < self.expected_request_count:
self.log(
f"✗ Request count mismatch: {passed_count} passed tests != {self.expected_request_count} requests sent",
Colors.RED,
)
self.exit_code = 1
else:
self.log(
f"✓ Request count validation: {passed_count} passed tests >= {self.expected_request_count} requests sent",
Colors.GREEN,
)

except Exception as e:
self.log(f"Failed to parse test results: {e}", Colors.RED)
self.log(f"Raw output:\n{output}", Colors.YELLOW)
Expand Down
37 changes: 37 additions & 0 deletions drift/instrumentation/e2e_common/test_utils.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
"""
Shared test utilities for e2e tests.

This module provides common functions used across all instrumentation e2e tests,
including request counting for validation.
"""

import time

import requests

BASE_URL = "http://localhost:8000"
_request_count = 0


def make_request(method, endpoint, **kwargs):
"""Make HTTP request, log result, and track count."""
global _request_count
_request_count += 1

url = f"{BASE_URL}{endpoint}"
print(f"→ {method} {endpoint}")
kwargs.setdefault("timeout", 30)
response = requests.request(method, url, **kwargs)
print(f" Status: {response.status_code}")
time.sleep(0.5)
return response


def get_request_count():
"""Return the current request count."""
return _request_count


def print_request_summary():
"""Print the total request count in a parseable format."""
print(f"\nTOTAL_REQUESTS_SENT:{_request_count}")
22 changes: 3 additions & 19 deletions drift/instrumentation/fastapi/e2e-tests/src/test_requests.py
Original file line number Diff line number Diff line change
@@ -1,27 +1,9 @@
"""Execute test requests against the FastAPI app."""

import json
import os
import time
from pathlib import Path

import requests

PORT = os.getenv("PORT", "8000")
BASE_URL = f"http://localhost:{PORT}"


def make_request(method: str, endpoint: str, **kwargs):
"""Make HTTP request and log result."""
url = f"{BASE_URL}{endpoint}"
print(f"→ {method} {endpoint}")

# Set default timeout if not provided
kwargs.setdefault("timeout", 30)
response = requests.request(method, url, **kwargs)
print(f" Status: {response.status_code}")
time.sleep(0.5) # Small delay between requests
return response
from drift.instrumentation.e2e_common.test_utils import make_request, print_request_summary


def verify_stack_traces():
Expand Down Expand Up @@ -180,3 +162,5 @@ def verify_context_propagation():
print("\n" + "=" * 50)
print("All requests completed successfully")
print("=" * 50)

print_request_summary()
22 changes: 2 additions & 20 deletions drift/instrumentation/flask/e2e-tests/src/test_requests.py
Original file line number Diff line number Diff line change
@@ -1,24 +1,6 @@
"""Execute test requests against the Flask app."""

import time

import requests

BASE_URL = "http://localhost:8000"


def make_request(method, endpoint, **kwargs):
"""Make HTTP request and log result."""
url = f"{BASE_URL}{endpoint}"
print(f"→ {method} {endpoint}")

# Set default timeout if not provided
kwargs.setdefault("timeout", 30)
response = requests.request(method, url, **kwargs)
print(f" Status: {response.status_code}")
time.sleep(0.5) # Small delay between requests
return response

from drift.instrumentation.e2e_common.test_utils import make_request, print_request_summary

if __name__ == "__main__":
print("Starting test request sequence...\n")
Expand All @@ -32,4 +14,4 @@ def make_request(method, endpoint, **kwargs):
make_request("POST", "/api/post", json={"title": "Test Post", "body": "This is a test post", "userId": 1})
make_request("DELETE", "/api/post/1")

print("\nAll requests completed successfully")
print_request_summary()
22 changes: 2 additions & 20 deletions drift/instrumentation/httpx/e2e-tests/src/test_requests.py
Original file line number Diff line number Diff line change
@@ -1,24 +1,6 @@
"""Execute test requests against the Flask app to exercise the HTTPX instrumentation."""

import time

import requests

BASE_URL = "http://localhost:8000"


def make_request(method, endpoint, **kwargs):
"""Make HTTP request and log result."""
url = f"{BASE_URL}{endpoint}"
print(f"-> {method} {endpoint}")

# Set default timeout if not provided
kwargs.setdefault("timeout", 30)
response = requests.request(method, url, **kwargs)
print(f" Status: {response.status_code}")
time.sleep(0.5) # Small delay between requests
return response

from drift.instrumentation.e2e_common.test_utils import make_request, print_request_summary

if __name__ == "__main__":
print("Starting test request sequence for HTTPX instrumentation...\n")
Expand Down Expand Up @@ -124,4 +106,4 @@ def make_request(method, endpoint, **kwargs):

make_request("POST", "/test/file-like-body")

print("\nAll requests completed successfully")
print_request_summary()
1 change: 1 addition & 0 deletions drift/instrumentation/httpx/instrumentation.py
Original file line number Diff line number Diff line change
Expand Up @@ -806,6 +806,7 @@ def _try_get_mock_from_request_sync(
input_value=input_value,
kind=SpanKind.CLIENT,
input_schema_merges=input_schema_merges,
is_pre_app_start=not sdk.app_ready,
)

if not mock_response_output or not mock_response_output.found:
Expand Down
1 change: 1 addition & 0 deletions drift/instrumentation/psycopg/e2e-tests/requirements.txt
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
-e /sdk # Mount point for drift SDK
Flask>=3.1.2
psycopg[binary]>=3.2.1
psycopg-pool>=3.2.0
requests>=2.32.5
Loading