-
Notifications
You must be signed in to change notification settings - Fork 16
Expand file tree
/
Copy pathauth_proxy.py
More file actions
524 lines (464 loc) · 21.3 KB
/
auth_proxy.py
File metadata and controls
524 lines (464 loc) · 21.3 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
# Note/disclosure: This file has been partially modified by an AI agent.
import json
import logging
import os
import re
from functools import cache
from typing import Dict, Optional, Tuple
from urllib.parse import urlparse, urlunparse
import boto3
import requests
from botocore.awsrequest import AWSPreparedRequest
from botocore.model import OperationModel, ServiceModel
from botocore.session import get_session as get_botocore_session
from localstack.aws.protocol.parser import create_parser
from localstack.aws.spec import load_service
from localstack import config as localstack_config
from localstack.config import external_service_url
from localstack.constants import (
AWS_REGION_US_EAST_1,
DOCKER_IMAGE_NAME_PRO,
LOCALHOST_HOSTNAME,
)
from localstack.http import Request
from localstack.utils.aws.aws_responses import requests_response
from localstack.utils.bootstrap import setup_logging
from localstack.utils.collections import select_attributes
from localstack.utils.container_utils.container_client import PortMappings
from localstack.utils.docker_utils import (
DOCKER_CLIENT,
reserve_available_container_port,
)
from localstack.utils.files import new_tmp_file, save_file
from localstack.utils.functions import run_safe
from localstack.utils.net import get_docker_host_from_container, get_free_tcp_port
from localstack.utils.serving import Server
from localstack.utils.strings import short_uid, to_bytes, to_str, truncate
from requests import Response
from aws_proxy import config as repl_config
from aws_proxy.client.utils import truncate_content
from aws_proxy.config import HANDLER_PATH_PROXIES
from aws_proxy.shared.constants import HEADER_HOST_ORIGINAL, SERVICE_NAME_MAPPING
from aws_proxy.shared.models import AddProxyRequest, ProxyConfig
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.INFO)
# raise log verbosity when LocalStack itself runs in debug mode
if localstack_config.DEBUG:
    LOG.setLevel(logging.DEBUG)

# pip package of this extension, installed on the fly into the proxy container
# TODO make configurable
CLI_PIP_PACKAGE = "localstack-extension-aws-proxy"
# note: enable the line below temporarily for testing:
# CLI_PIP_PACKAGE = "git+https://github.com/localstack/localstack-extensions/@branch#egg=localstack-extension-aws-proxy&subdirectory=aws-proxy"

# name prefix for proxy containers spawned by start_aws_auth_proxy_in_container()
CONTAINER_NAME_PREFIX = "ls-aws-proxy-"
# path of the proxy config file inside the proxy container
CONTAINER_CONFIG_FILE = "/tmp/ls.aws.proxy.yml"
# path of the proxy log file inside the proxy container
CONTAINER_LOG_FILE = "/tmp/ls-aws-proxy.log"
# default bind host if `bind_host` is not specified for the proxy
DEFAULT_BIND_HOST = "127.0.0.1"
class AuthProxyAWS(Server):
    """Proxy server that forwards incoming AWS API requests to upstream (real) AWS.

    Incoming requests are parsed into botocore operation models, re-signed with the
    locally configured AWS credentials, sent to the real AWS endpoint, and the
    upstream response is returned to the caller.
    """

    def __init__(self, config: ProxyConfig, port: int = None):
        self.config = config
        # fall back to a random free TCP port if none was specified
        port = port or get_free_tcp_port()
        super().__init__(port=port)

    def do_run(self):
        # register with the LocalStack main container first, then serve (blocking)
        self.register_in_instance()
        self.run_server()

    def run_server(self):
        """Start the HTTP server loop (blocking) that handles proxied requests."""
        # note: keep import here, to avoid runtime errors
        from .http2_server import run_server

        bind_host = self.config.get("bind_host") or DEFAULT_BIND_HOST
        proxy = run_server(
            port=self.port, bind_addresses=[bind_host], handler=self.proxy_request
        )
        proxy.join()

    @classmethod
    def start_from_config_file(cls, config_file: str, port: int):
        """Run a proxy server (blocking) from a JSON config file, e.g., inside a container."""
        # use a context manager to avoid leaking the file handle
        with open(config_file) as f:
            config = json.load(f)
        # bind to all interfaces, so the port is reachable from outside the container
        config["bind_host"] = "0.0.0.0"
        cls(config, port=port).run_server()

    @staticmethod
    def register_proxy(
        config_file: str, port: int, localstack_host: str, localstack_port: int = 4566
    ):
        """Register an already-running proxy on `port` with the LocalStack main container."""
        with open(config_file) as f:
            config = json.load(f)
        url = f"http://{localstack_host}:{localstack_port}{HANDLER_PATH_PROXIES}"
        requests.post(url, json={"port": port, "config": config})

    def proxy_request(self, request: Request, data: bytes) -> Response:
        """Forward a single incoming HTTP request to the target AWS service.

        :param request: incoming (Quart) request object
        :param data: raw request body bytes
        :return: the upstream AWS response, or an empty 400 response if the request
            cannot be parsed or the upstream call raises
        """
        parsed = self._extract_region_and_service(request.headers)
        if not parsed:
            return requests_response("", status_code=400)
        region_name, service_name = parsed

        # Map service names based on request context
        service_name = self._get_service_name(service_name, request.path)

        query_string = to_str(request.query_string or "")
        LOG.debug(
            "Proxying request to %s (%s): %s %s %s",
            service_name,
            region_name,
            request.method,
            request.path,
            query_string,
        )

        # Convert Quart headers to a dict for the LocalStack Request
        headers_dict = dict(request.headers)
        request = Request(
            body=data,
            method=request.method,
            headers=headers_dict,
            path=request.path,
            query_string=query_string,
        )
        session = boto3.Session()
        client = session.client(service_name, region_name=region_name)

        # fix headers (e.g., "Host") and create client
        self._fix_headers(request, service_name)
        self._fix_host_and_path(request, service_name)

        # create request and request dict
        operation_model, aws_request, request_dict = self._parse_aws_request(
            request, service_name, region_name=region_name, client=client
        )

        # adjust request dict and fix certain edge cases in the request
        self._adjust_request_dict(service_name, request_dict)

        headers_truncated = {
            k: truncate(to_str(v)) for k, v in dict(aws_request.headers).items()
        }
        LOG.debug(
            "Sending request for service %s to AWS: %s %s - %s - %s",
            service_name,
            request.method,
            aws_request.url,
            truncate_content(request_dict.get("body"), max_length=500),
            headers_truncated,
        )
        try:
            # send request to upstream AWS
            result = client._endpoint.make_request(operation_model, request_dict)

            # create response object - TODO: to be replaced with localstack.http.Response over time
            response = requests_response(
                result[0].content,
                status_code=result[0].status_code,
                headers=dict(result[0].headers),
            )
            LOG.debug(
                "Received response for service %s from AWS: %s - %s",
                service_name,
                response.status_code,
                truncate_content(response.content, max_length=500),
            )
            return response
        except Exception as e:
            # deliberately best-effort: upstream errors are mapped to an empty 400
            if LOG.isEnabledFor(logging.DEBUG):
                LOG.exception(
                    "Error when making request to AWS service %s: %s", service_name, e
                )
            return requests_response("", status_code=400)

    def register_in_instance(self):
        """Register this proxy with the LocalStack main container.

        :raises Exception: if the proxy is not running, or registration fails
        """
        port = getattr(self, "port", None)
        if not port:
            raise Exception("Proxy currently not running")
        url = f"{external_service_url()}{HANDLER_PATH_PROXIES}"
        data = AddProxyRequest(port=port, config=self.config)
        LOG.debug("Registering new proxy in main container via: %s", url)
        try:
            response = requests.post(url, json=data)
            assert response.ok
            return response
        except Exception:
            LOG.warning(
                "Unable to register auth proxy - is LocalStack running with the extension enabled?"
            )
            raise

    def deregister_from_instance(self):
        """Deregister this proxy from the LocalStack instance."""
        port = getattr(self, "port", None)
        if not port:
            # proxy was never started - nothing to deregister
            return
        url = f"{external_service_url()}{HANDLER_PATH_PROXIES}/{port}"
        LOG.debug("Deregistering proxy from main container via: %s", url)
        try:
            response = requests.delete(url)
            return response
        except Exception as e:
            # best-effort cleanup - failure to deregister is only logged
            LOG.debug("Unable to deregister auth proxy: %s", e)

    def _parse_aws_request(
        self, request: Request, service_name: str, region_name: str, client
    ) -> Tuple[OperationModel, AWSPreparedRequest, Dict]:
        """Parse the incoming request into botocore artifacts for the upstream call.

        :return: tuple of (operation model, prepared AWS request, request dict)
        """
        # Get botocore's service model for making the actual AWS request
        botocore_service_model = self._get_botocore_service_model(service_name)

        # Check if request uses JSON protocol (X-Amz-Target header) while service model
        # uses RPC v2 CBOR. In this case, we need to parse the request manually since
        # create_parser would reject the X-Amz-Target header for RPC v2 services.
        x_amz_target = request.headers.get("X-Amz-Target") or request.headers.get(
            "X-Amzn-Target"
        )
        if x_amz_target and botocore_service_model.protocol == "smithy-rpc-v2-cbor":
            # Extract operation name from X-Amz-Target (format: "ServiceName.OperationName")
            operation_name = x_amz_target.split(".")[-1]
            operation_model = botocore_service_model.operation_model(operation_name)
            # Parse JSON body
            parsed_request = json.loads(to_str(request.data)) if request.data else {}
        else:
            # Use LocalStack's parser for other protocols
            localstack_service_model = load_service(service_name)
            parser = create_parser(localstack_service_model)
            ls_operation_model, parsed_request = parser.parse(request)
            operation_model = botocore_service_model.operation_model(
                ls_operation_model.name
            )

        request_context = {
            "client_region": region_name,
            "has_streaming_input": operation_model.has_streaming_input,
            "auth_type": operation_model.auth_type,
            "client_config": client.meta.config,
        }
        parsed_request = {} if parsed_request is None else parsed_request
        # drop None values before handing the parameters to botocore's serializer
        parsed_request = {k: v for k, v in parsed_request.items() if v is not None}

        # get endpoint info
        endpoint_info = client._resolve_endpoint_ruleset(
            operation_model, parsed_request, request_context
        )
        # switch for https://github.com/boto/botocore/commit/826b78c54dd87b9da368e9ab6017d8c4823b28c1
        if len(endpoint_info) == 3:
            endpoint_url, additional_headers, properties = endpoint_info
            if properties:
                request_context["endpoint_properties"] = properties
        else:
            endpoint_url, additional_headers = endpoint_info

        # create request dict
        request_dict = client._convert_to_request_dict(
            parsed_request,
            operation_model,
            endpoint_url=endpoint_url,
            context=request_context,
            headers=additional_headers,
        )

        # TODO: fix for switch between path/host addressing
        # Note: the behavior seems to be different across botocore versions. Seems to be working
        #  with 1.29.97 (fix below not required) whereas newer versions like 1.29.151 require the fix.
        if service_name == "s3":
            request_url = request_dict["url"]
            url_parsed = list(urlparse(request_url))
            path_parts = url_parsed[2].strip("/").split("/")
            bucket_subdomain_prefix = f"://{path_parts[0]}.s3."
            if bucket_subdomain_prefix in request_url:
                # bucket is already encoded in the host name - strip it from the path
                prefix = f"/{path_parts[0]}"
                url_parsed[2] = url_parsed[2].removeprefix(prefix)
                request_dict["url_path"] = request_dict["url_path"].removeprefix(prefix)
                # replace empty path with "/" (seems required for signature calculation)
                request_dict["url_path"] = request_dict["url_path"] or "/"
                url_parsed[2] = url_parsed[2] or "/"
                # re-construct final URL
                request_dict["url"] = urlunparse(url_parsed)

        aws_request = client._endpoint.create_request(request_dict, operation_model)
        return operation_model, aws_request, request_dict

    def _adjust_request_dict(self, service_name: str, request_dict: Dict):
        """Apply minor fixes to the request dict, which seem to be required in the current setup."""
        req_body = request_dict.get("body")
        body_str = run_safe(lambda: to_str(req_body)) or ""

        # TODO: this custom fix should not be required - investigate and remove!
        if (
            "<CreateBucketConfiguration" in body_str
            and "LocationConstraint" not in body_str
        ):
            region = request_dict["context"]["client_region"]
            if region == AWS_REGION_US_EAST_1:
                # us-east-1 rejects an explicit LocationConstraint - send an empty body
                request_dict["body"] = ""
            else:
                request_dict["body"] = (
                    '<CreateBucketConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">'
                    f"<LocationConstraint>{region}</LocationConstraint></CreateBucketConfiguration>"
                )

        # rewrite local SQS queue URLs/account IDs to the real AWS account
        if service_name == "sqs" and isinstance(req_body, dict):
            account_id = self._query_account_id_from_aws()
            if "QueueUrl" in req_body:
                queue_name = req_body["QueueUrl"].split("/")[-1]
                req_body["QueueUrl"] = (
                    f"https://queue.amazonaws.com/{account_id}/{queue_name}"
                )
            if "QueueOwnerAWSAccountId" in req_body:
                req_body["QueueOwnerAWSAccountId"] = account_id
        if service_name == "sqs" and request_dict.get("url"):
            req_json = run_safe(lambda: json.loads(body_str)) or {}
            account_id = self._query_account_id_from_aws()
            queue_name = req_json.get("QueueName")
            if account_id and queue_name:
                request_dict["url"] = (
                    f"https://queue.amazonaws.com/{account_id}/{queue_name}"
                )
                req_json["QueueOwnerAWSAccountId"] = account_id
                request_dict["body"] = to_bytes(json.dumps(req_json))

    def _fix_headers(self, request: Request, service_name: str):
        """Remove/rewrite headers that would break signing or routing upstream."""
        if service_name == "s3":
            # fix the Host header, to avoid bucket addressing issues
            host = request.headers.get("Host") or ""
            regex = r"^(https?://)?([0-9.]+|localhost)(:[0-9]+)?"
            if re.match(regex, host):
                request.headers["Host"] = re.sub(
                    regex, rf"\1s3.{LOCALHOST_HOSTNAME}", host
                )
        # strip LocalStack-internal and hop-by-hop headers before re-signing
        request.headers.pop("Content-Length", None)
        request.headers.pop("x-localstack-request-url", None)
        request.headers.pop("X-Forwarded-For", None)
        request.headers.pop("X-Localstack-Tgt-Api", None)
        request.headers.pop("X-Moto-Account-Id", None)
        request.headers.pop("Remote-Addr", None)

    def _fix_host_and_path(self, request: Request, service_name: str):
        """Convert S3 virtual-host addressing into path-based addressing."""
        if service_name == "s3":
            # fix the path and prepend the bucket name, to avoid bucket addressing issues
            regex_base_domain = rf"((amazonaws\.com)|({LOCALHOST_HOSTNAME}))"
            host = request.headers.pop(HEADER_HOST_ORIGINAL, None)
            host = host or request.headers.get("Host") or ""
            match = re.match(rf"(.+)\.s3\..*{regex_base_domain}", host)
            if match:
                # prepend the bucket name (extracted from the host) to the path of the request (path-based addressing)
                request.path = f"/{match.group(1)}{request.path}"

    def _extract_region_and_service(self, headers) -> Optional[Tuple[str, str]]:
        """Extract (region, service) from the SigV4 `Authorization` header.

        Note: this pops (removes) the `Authorization` header from `headers`.
        The credential scope has the shape `<key>/<date>/<region>/<service>/aws4_request`.
        Returns None if the header is missing or malformed.
        """
        auth_header = headers.pop("Authorization", "")
        parts = auth_header.split("Credential=", maxsplit=1)
        if len(parts) < 2:
            return
        parts = parts[1].split("/")
        if len(parts) < 5:
            return
        return parts[2], parts[3]

    def _get_service_name(self, service_name: str, path: str) -> str:
        """Map AWS signing service names to boto3 client names based on request context."""
        # Map AWS signing names to boto3 client names
        service_name = SERVICE_NAME_MAPPING.get(service_name, service_name)
        # API Gateway v2 uses 'apigateway' as signing name but needs 'apigatewayv2' client
        if service_name == "apigateway" and path.startswith("/v2/"):
            return "apigatewayv2"
        # CloudWatch uses 'monitoring' as signing name
        if service_name == "monitoring":
            return "cloudwatch"
        return service_name

    @staticmethod
    @cache
    def _query_account_id_from_aws() -> str:
        """Return the account ID of the configured upstream AWS credentials (via STS).

        Defined as a static method so that `@cache` does not key on (and keep
        alive) the proxy instance - the result is cached process-wide.
        """
        session = boto3.Session()
        sts_client = session.client("sts")
        result = sts_client.get_caller_identity()
        return result["Account"]

    @staticmethod
    @cache
    def _get_botocore_service_model(service_name: str):
        """
        Get the botocore service model for a service. This is used instead of LocalStack's
        load_service() to ensure protocol compatibility, as LocalStack may use newer protocol
        versions (e.g., smithy-rpc-v2-cbor) while clients use older protocols (e.g., query).
        """
        session = get_botocore_session()
        loader = session.get_component("data_loader")
        api_data = loader.load_service_model(service_name, "service-2")
        return ServiceModel(api_data)
def start_aws_auth_proxy(config: ProxyConfig, port: int = None) -> AuthProxyAWS:
    """Create and start an AWS auth proxy server on the local host.

    :param config: proxy configuration for the new server
    :param port: port to listen on; a free port is chosen if not given
    :return: the started `AuthProxyAWS` server instance
    """
    setup_logging()
    server = AuthProxyAWS(config, port=port)
    server.start()
    return server
def start_aws_auth_proxy_in_container(
    config: ProxyConfig, env_vars: dict = None, port: int = None, quiet: bool = False
):
    """
    Run the auth proxy in a separate local container. This can help in cases where users
    are running into version/dependency issues on their host machines.

    :param config: proxy configuration, copied into the container as a JSON file
    :param env_vars: environment for the proxy process; defaults to `os.environ` - in
        either case, only a small allow-list of AWS/LocalStack variables is passed on
    :param port: host/container port for the proxy; a free port is reserved if not given
    :param quiet: NOTE(review): appears unused in this function - confirm and remove?
    """

    # TODO: Currently running a container and installing the extension on the fly - we
    #  should consider building pre-baked images for the extension in the future. Also,
    #  the new packaged CLI binary can help us gain more stability over time...

    # reduce noise from the Docker/exec utilities used below
    logging.getLogger("localstack.utils.container_utils.docker_cmd_client").setLevel(
        logging.INFO
    )
    logging.getLogger("localstack.utils.docker_utils").setLevel(logging.INFO)
    logging.getLogger("localstack.utils.run").setLevel(logging.INFO)

    print("Proxy container is starting up...")

    # determine port mapping (same port on host and container)
    localstack_config.PORTS_CHECK_DOCKER_IMAGE = DOCKER_IMAGE_NAME_PRO
    port = port or reserve_available_container_port()
    ports = PortMappings()
    ports.add(port, port)

    # create container
    container_name = f"{CONTAINER_NAME_PREFIX}{short_uid()}"
    image_name = DOCKER_IMAGE_NAME_PRO
    # add host mapping for localstack.cloud to localhost to prevent the health check from failing
    additional_flags = (
        repl_config.PROXY_DOCKER_FLAGS
        + " --add-host=localhost.localstack.cloud:host-gateway"
    )
    # the container itself just tails the log file; the proxy is exec'd into it below
    DOCKER_CLIENT.create_container(
        image_name,
        name=container_name,
        entrypoint="",
        command=[
            "bash",
            "-c",
            f"touch {CONTAINER_LOG_FILE}; tail -f {CONTAINER_LOG_FILE}",
        ],
        ports=ports,
        additional_flags=additional_flags,
    )

    # start container in detached mode
    DOCKER_CLIENT.start_container(container_name, attach=False)

    # install extension CLI package
    venv_activate = ". .venv/bin/activate"
    command = [
        "bash",
        "-c",
        # TODO: manually installing quart/h11/hypercorn as a dirty quick fix for now. To be fixed!
        f"{venv_activate}; pip install h11 hypercorn quart; pip install --upgrade --no-deps '{CLI_PIP_PACKAGE}'",
    ]
    DOCKER_CLIENT.exec_in_container(container_name, command=command)

    # create config file in container
    config_file_host = new_tmp_file()
    save_file(config_file_host, json.dumps(config))
    DOCKER_CLIENT.copy_into_container(
        container_name, config_file_host, container_path=CONTAINER_CONFIG_FILE
    )

    # prepare environment variables - only pass a fixed allow-list through
    env_var_names = [
        "DEBUG",
        "AWS_SECRET_ACCESS_KEY",
        "AWS_ACCESS_KEY_ID",
        "AWS_SESSION_TOKEN",
        "AWS_DEFAULT_REGION",
        "LOCALSTACK_AUTH_TOKEN",
    ]
    env_vars = env_vars or os.environ
    env_vars = select_attributes(dict(env_vars), env_var_names)

    # Determine target hostname - we make the host configurable via PROXY_LOCALSTACK_HOST,
    # and if not configured then use get_docker_host_from_container() as a fallback.
    target_host = repl_config.PROXY_LOCALSTACK_HOST
    if not repl_config.PROXY_LOCALSTACK_HOST:
        target_host = get_docker_host_from_container()
    env_vars["LOCALSTACK_HOST"] = target_host

    try:
        print("Proxy container is ready.")
        # Start proxy server in background using Python directly (no CLI dependency)
        start_server_cmd = (
            f"{venv_activate}; "
            f'python -c "from aws_proxy.client.auth_proxy import AuthProxyAWS; '
            f"AuthProxyAWS.start_from_config_file('{CONTAINER_CONFIG_FILE}', {port})\" "
            f">> {CONTAINER_LOG_FILE} 2>&1 &"
        )
        # Wait for proxy server to start, then register with LocalStack
        register_cmd = (
            f"sleep 2 && {venv_activate}; "
            f'python -c "from aws_proxy.client.auth_proxy import AuthProxyAWS; '
            f"AuthProxyAWS.register_proxy('{CONTAINER_CONFIG_FILE}', {port}, '{target_host}')\" "
            f">> {CONTAINER_LOG_FILE} 2>&1"
        )
        # run both in one exec: server in the background, registration in the foreground
        command = f"{start_server_cmd} {register_cmd}"
        DOCKER_CLIENT.exec_in_container(
            container_name,
            command=["bash", "-c", command],
            env_vars=env_vars,
            interactive=False,
        )
    except KeyboardInterrupt:
        # Ctrl-C is a normal way to stop - fall through to cleanup
        pass
    except Exception as e:
        LOG.info("Error: %s", e)
    finally:
        try:
            if repl_config.CLEANUP_PROXY_CONTAINERS:
                DOCKER_CLIENT.remove_container(container_name, force=True)
        except Exception as e:
            # tolerate concurrent removals; re-raise any other cleanup error
            if "already in progress" not in str(e):
                raise