Skip to content

Commit a867113

Browse files
whummer and claude
committed
add integration tests for read-only CloudWatch Logs exceptions
- Add test_logs_readonly_filter_log_events to verify FilterLogEvents works in read_only mode
- Add test_logs_readonly_insights_query to verify StartQuery/GetQueryResults work in read_only mode
- Update AGENTS.md with instruction to avoid time.sleep() in tests

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
1 parent dbd3e14 commit a867113

File tree

2 files changed

+163
-0
lines changed

2 files changed

+163
-0
lines changed

aws-proxy/AGENTS.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -55,6 +55,7 @@ When adding new integration tests, consider the following:
5555
* Make sure to either use fixtures (preferred), or reliable cleanups for removing the resources; several fixtures for creating AWS resources are available in the `localstack.testing.pytest.fixtures` module
5656
* If a test uses multiple resources with interdependencies (e.g., an SQS queue connected to an SNS topic), then the test needs to ensure that both resource types are proxied (i.e., created in real AWS), to avoid a situation where a resource in AWS is attempting to reference a local resource in LocalStack (using account ID `000000000000` in their ARN).
5757
* When waiting for the creation status of a resource, use the `localstack.utils.sync.retry(..)` utility function, rather than a manual `for` loop.
58+
* Avoid using `time.sleep()` in tests. Instead, use `localstack.utils.sync.retry(..)` to poll for the expected state. This makes tests more robust and avoids unnecessary delays when resources become available faster than expected.
5859

5960
## Fixing or Enhancing Logic in the Proxy
6061

aws-proxy/tests/proxy/test_cloudwatch.py

Lines changed: 162 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -444,6 +444,168 @@ def _get_log_events():
444444
assert len(groups_aws_new["logGroups"]) == 0
445445

446446

447+
def test_logs_readonly_filter_log_events(start_aws_proxy, cleanups):
    """Verify that FilterLogEvents is forwarded when the proxy runs with read_only: true.

    FilterLogEvents reads data but its name does not start with one of the
    standard read prefixes (Describe*, Get*, List*, Query*), so this test
    checks that the proxy still classifies it as a read operation and
    forwards it to AWS.
    """
    group_name = f"/test/readonly-filter/{short_uid()}"
    stream_one = f"stream-1-{short_uid()}"
    stream_two = f"stream-2-{short_uid()}"

    # read-only proxy configuration, scoped to this log group only
    proxy_config = ProxyConfig(
        services={
            "logs": {
                "resources": [f".*:log-group:{group_name}:.*"],
                "read_only": True,
            }
        }
    )
    start_aws_proxy(proxy_config)

    # local (proxied) client and direct AWS client
    logs_local = connect_to().logs
    logs_aws = boto3.client("logs")

    # provision the log group and both streams directly in AWS
    logs_aws.create_log_group(logGroupName=group_name)
    cleanups.append(lambda: logs_aws.delete_log_group(logGroupName=group_name))
    for stream in (stream_one, stream_two):
        logs_aws.create_log_stream(logGroupName=group_name, logStreamName=stream)

    # write a mix of ERROR and non-ERROR messages across the two streams
    now_ms = int(time.time() * 1000)
    events_by_stream = {
        stream_one: [
            {"timestamp": now_ms, "message": "ERROR: Something went wrong"},
            {"timestamp": now_ms + 1, "message": "INFO: Normal operation"},
        ],
        stream_two: [
            {"timestamp": now_ms + 2, "message": "ERROR: Another error"},
            {"timestamp": now_ms + 3, "message": "DEBUG: Debug info"},
        ],
    }
    for stream, events in events_by_stream.items():
        logs_aws.put_log_events(
            logGroupName=group_name, logStreamName=stream, logEvents=events
        )

    # poll through the proxied client until both ERROR events are visible;
    # this only succeeds if FilterLogEvents is permitted in read-only mode
    def _filter_log_events():
        result = logs_local.filter_log_events(
            logGroupName=group_name,
            filterPattern="ERROR",
        )
        if len(result["events"]) < 2:
            raise AssertionError("Not all error events found yet")
        return result

    filtered_local = retry(_filter_log_events, retries=15, sleep=2)

    # every event returned must match the ERROR filter pattern
    assert len(filtered_local["events"]) >= 2
    assert all("ERROR" in event["message"] for event in filtered_local["events"])

    # the proxied result must agree with what AWS returns directly
    filtered_aws = logs_aws.filter_log_events(
        logGroupName=group_name,
        filterPattern="ERROR",
    )
    assert len(filtered_local["events"]) == len(filtered_aws["events"])
    assert sorted(e["message"] for e in filtered_local["events"]) == sorted(
        e["message"] for e in filtered_aws["events"]
    )
535+
536+
def test_logs_readonly_insights_query(start_aws_proxy, cleanups):
    """Test that StartQuery and GetQueryResults work in read-only proxy mode.

    StartQuery and GetQueryResults are read operations for CloudWatch Logs
    Insights but don't match standard prefixes (Describe*, Get*, List*,
    Query*). This test verifies they're correctly forwarded when
    read_only: true is configured.
    """
    log_group_name = f"/test/readonly-insights/{short_uid()}"
    log_stream_name = f"stream-{short_uid()}"

    # start proxy - forwarding requests for CloudWatch Logs in read-only mode
    config = ProxyConfig(
        services={
            "logs": {
                "resources": [f".*:log-group:{log_group_name}:.*"],
                "read_only": True,
            }
        }
    )
    start_aws_proxy(config)

    # create clients - local (proxied) and direct AWS
    logs_client = connect_to().logs
    logs_client_aws = boto3.client("logs")

    # create log group and stream in AWS
    logs_client_aws.create_log_group(logGroupName=log_group_name)
    cleanups.append(
        lambda: logs_client_aws.delete_log_group(logGroupName=log_group_name)
    )
    logs_client_aws.create_log_stream(
        logGroupName=log_group_name, logStreamName=log_stream_name
    )

    # put a log event that the Insights query should find
    timestamp = int(time.time() * 1000)
    logs_client_aws.put_log_events(
        logGroupName=log_group_name,
        logStreamName=log_stream_name,
        logEvents=[
            {"timestamp": timestamp, "message": "Test log message for insights query"},
        ],
    )

    def _run_insights_query():
        """Start one Insights query through the proxy and poll it to completion.

        Note: previously the whole start+single-poll sequence was retried,
        which started a fresh (almost never immediately Complete) query on
        every attempt and abandoned the previous one. Instead, poll the SAME
        query until it reaches a terminal state, and only restart (via the
        outer retry) when a query completed without results - i.e. the log
        events had not been ingested yet when the query ran.
        """
        start_time = int((time.time() - 300) * 1000)  # 5 minutes ago
        end_time = int((time.time() + 60) * 1000)  # 1 minute from now

        # start_query through the local client - must be forwarded in read-only mode
        query_response = logs_client.start_query(
            logGroupName=log_group_name,
            startTime=start_time,
            endTime=end_time,
            queryString="fields @timestamp, @message | limit 10",
        )
        query_id = query_response["queryId"]
        assert query_id is not None

        # get_query_results through the local client - poll this query until
        # it reaches a terminal state
        def _poll_query_results():
            results = logs_client.get_query_results(queryId=query_id)
            if results["status"] not in ["Complete", "Failed", "Cancelled"]:
                raise AssertionError(f"Query not complete yet: {results['status']}")
            return results

        results = retry(_poll_query_results, retries=15, sleep=2)

        # a Complete query with no results means the events were not ingested
        # yet when the query executed - let the outer retry start a new query
        if results["status"] != "Complete" or len(results["results"]) < 1:
            raise AssertionError("Query completed but no results found yet")
        return results

    results = retry(_run_insights_query, retries=5, sleep=2)
    assert results["status"] == "Complete"
    assert len(results["results"]) >= 1
607+
608+
447609
def test_logs_resource_name_matching(start_aws_proxy, cleanups):
448610
"""Test that proxy forwards requests for specific log groups matching ARN pattern."""
449611
log_group_match = f"/proxy/logs/{short_uid()}"

0 commit comments

Comments
 (0)