forked from apify/apify-client-python
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path_http_client.py
More file actions
295 lines (231 loc) · 10.8 KB
/
_http_client.py
File metadata and controls
295 lines (231 loc) · 10.8 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
from __future__ import annotations
import gzip
import json as jsonlib
import logging
import os
import sys
from datetime import datetime, timezone
from http import HTTPStatus
from importlib import metadata
from typing import TYPE_CHECKING, Any
from urllib.parse import urlencode
import impit
from apify_client._logging import log_context, logger_name
from apify_client._statistics import Statistics
from apify_client._utils import is_retryable_error, retry_with_exp_backoff, retry_with_exp_backoff_async
from apify_client.errors import ApifyApiError
if TYPE_CHECKING:
from collections.abc import Callable
from apify_client._types import JSONSerializable
DEFAULT_BACKOFF_EXPONENTIAL_FACTOR = 2
DEFAULT_BACKOFF_RANDOM_FACTOR = 1
logger = logging.getLogger(logger_name)
class _BaseHTTPClient:
    """Shared configuration and request-preparation logic for the sync and async HTTP clients."""

    def __init__(
        self,
        *,
        token: str | None = None,
        max_retries: int = 8,
        min_delay_between_retries_millis: int = 500,
        timeout_secs: int = 360,
        stats: Statistics | None = None,
        headers: dict | None = None,
    ) -> None:
        self.max_retries = max_retries
        self.min_delay_between_retries_millis = min_delay_between_retries_millis
        self.timeout_secs = timeout_secs

        default_headers = {'Accept': 'application/json, */*'}

        # Propagate the workflow key (if running inside an Apify workflow) to the API.
        workflow_key = os.getenv('APIFY_WORKFLOW_KEY')
        if workflow_key is not None:
            default_headers['X-Apify-Workflow-Key'] = workflow_key

        # Identify the client version, Python version, and platform-residency in the User-Agent.
        is_at_home = 'APIFY_IS_AT_HOME' in os.environ
        python_version = '.'.join([str(x) for x in sys.version_info[:3]])
        client_version = metadata.version('apify-client')
        user_agent = f'ApifyClient/{client_version} ({sys.platform}; Python/{python_version}); isAtHome/{is_at_home}'
        default_headers['User-Agent'] = user_agent

        if token is not None:
            default_headers['Authorization'] = f'Bearer {token}'

        # Caller-supplied headers take precedence over the defaults.
        init_headers = {**default_headers, **(headers or {})}

        self.impit_client = impit.Client(headers=init_headers, follow_redirects=True, timeout=timeout_secs)
        self.impit_async_client = impit.AsyncClient(headers=init_headers, follow_redirects=True, timeout=timeout_secs)
        self.stats = stats or Statistics()

    @staticmethod
    def _parse_params(params: dict | None) -> dict | None:
        """Convert parameter values into the formats the Apify API expects; drop None values."""
        if params is None:
            return None

        parsed_params: dict = {}
        for key, value in params.items():
            # Our API needs boolean parameters passed as 0 or 1
            if isinstance(value, bool):
                parsed_params[key] = int(value)
            # Our API needs lists passed as comma-separated strings.
            # Items are stringified so lists of non-strings (e.g. ints) do not crash the join.
            elif isinstance(value, list):
                parsed_params[key] = ','.join(str(item) for item in value)
            elif isinstance(value, datetime):
                # NOTE(review): naive datetimes are interpreted as local time by astimezone() — confirm callers pass aware values.
                utc_aware_dt = value.astimezone(timezone.utc)
                iso_str = utc_aware_dt.isoformat(timespec='milliseconds')
                # Convert to ISO 8601 string in Zulu format
                zulu_date_str = iso_str.replace('+00:00', 'Z')
                parsed_params[key] = zulu_date_str
            elif value is not None:
                parsed_params[key] = value

        return parsed_params

    def _prepare_request_call(
        self,
        headers: dict | None = None,
        params: dict | None = None,
        data: Any = None,
        json: JSONSerializable | None = None,
    ) -> tuple[dict, dict | None, Any]:
        """Normalize headers, params and payload for a request.

        Returns:
            A (headers, parsed_params, content) tuple; textual payloads are gzip-compressed.

        Raises:
            ValueError: If both `json` and `data` are supplied.
        """
        # Checked against None (not truthiness) so that e.g. json={} with data=b'...' still raises.
        if json is not None and data is not None:
            raise ValueError('Cannot pass both "json" and "data" parameters at the same time!')

        # NOTE(review): a truthy caller-supplied dict is mutated in place below — confirm callers do not reuse it.
        if not headers:
            headers = {}

        # Dump JSON data to string, so it can be gzipped. Compared against None (not truthiness)
        # so that empty but valid payloads like {} or [] are still serialized and sent.
        if json is not None:
            data = jsonlib.dumps(json, ensure_ascii=False, allow_nan=False, default=str).encode('utf-8')
            headers['Content-Type'] = 'application/json'

        if isinstance(data, (str, bytes, bytearray)):
            if isinstance(data, str):
                data = data.encode('utf-8')
            data = gzip.compress(data)
            headers['Content-Encoding'] = 'gzip'

        return (
            headers,
            self._parse_params(params),
            data,
        )

    def _build_url_with_params(self, url: str, params: dict | None = None) -> str:
        """Append `params` to `url` as a query string; list values become repeated keys."""
        if not params:
            return url

        param_pairs: list[tuple[str, str]] = []
        for key, value in params.items():
            if isinstance(value, list):
                param_pairs.extend((key, str(v)) for v in value)
            else:
                param_pairs.append((key, str(value)))

        query_string = urlencode(param_pairs)
        return f'{url}?{query_string}'
class HTTPClient(_BaseHTTPClient):
    """Synchronous HTTP client that retries failed requests with exponential backoff."""

    def call(
        self,
        *,
        method: str,
        url: str,
        headers: dict | None = None,
        params: dict | None = None,
        data: Any = None,
        json: JSONSerializable | None = None,
        stream: bool | None = None,
        timeout_secs: int | None = None,
    ) -> impit.Response:
        """Perform an HTTP request, retrying server errors and 429 responses with exponential backoff.

        Raises:
            ApifyApiError: When the API responds with a non-retryable error status or retries run out.
        """
        log_context.method.set(method)
        log_context.url.set(url)
        self.stats.calls += 1

        headers, params, content = self._prepare_request_call(headers, params, data, json)
        client = self.impit_client
        base_timeout = timeout_secs or self.timeout_secs

        def _attempt_request(stop_retrying: Callable, attempt: int) -> impit.Response:
            log_context.attempt.set(attempt)
            logger.debug('Sending request')
            self.stats.requests += 1
            try:
                # Grow the timeout exponentially per attempt, capped at the client-wide timeout.
                effective_timeout = min(self.timeout_secs, base_timeout * 2 ** (attempt - 1))
                response = client.request(
                    method=method,
                    url=self._build_url_with_params(url, params),
                    headers=headers,
                    content=content,
                    timeout=effective_timeout,
                    stream=bool(stream),
                )

                # Any status below 300 means success, and we can hand the response back.
                if response.status_code < 300:  # noqa: PLR2004
                    logger.debug('Request successful', extra={'status_code': response.status_code})
                    return response

                if response.status_code == HTTPStatus.TOO_MANY_REQUESTS:
                    self.stats.add_rate_limit_error(attempt)
            except Exception as e:
                logger.debug('Request threw exception', exc_info=e)
                if not is_retryable_error(e):
                    logger.debug('Exception is not retryable', exc_info=e)
                    stop_retrying()
                raise

            # Only server errors (>= 500), which may resolve on their own, and rate-limited
            # 429 Too Many Requests responses are worth retrying; anything else fails fast.
            logger.debug('Request unsuccessful', extra={'status_code': response.status_code})
            retryable_status = (
                response.status_code >= 500 or response.status_code == HTTPStatus.TOO_MANY_REQUESTS  # noqa: PLR2004
            )
            if not retryable_status:
                logger.debug('Status code is not retryable', extra={'status_code': response.status_code})
                stop_retrying()

            # Read the response in case it is a stream, so we can raise the error properly
            response.read()
            raise ApifyApiError(response, attempt, method=method)

        return retry_with_exp_backoff(
            _attempt_request,
            max_retries=self.max_retries,
            backoff_base_millis=self.min_delay_between_retries_millis,
            backoff_factor=DEFAULT_BACKOFF_EXPONENTIAL_FACTOR,
            random_factor=DEFAULT_BACKOFF_RANDOM_FACTOR,
        )
class HTTPClientAsync(_BaseHTTPClient):
    """Asynchronous HTTP client that retries failed requests with exponential backoff."""

    async def call(
        self,
        *,
        method: str,
        url: str,
        headers: dict | None = None,
        params: dict | None = None,
        data: Any = None,
        json: JSONSerializable | None = None,
        stream: bool | None = None,
        timeout_secs: int | None = None,
    ) -> impit.Response:
        """Perform an HTTP request, retrying server errors and 429 responses with exponential backoff.

        Raises:
            ApifyApiError: When the API responds with a non-retryable error status or retries run out.
        """
        log_context.method.set(method)
        log_context.url.set(url)
        self.stats.calls += 1

        headers, params, content = self._prepare_request_call(headers, params, data, json)
        impit_async_client = self.impit_async_client

        async def _make_request(stop_retrying: Callable, attempt: int) -> impit.Response:
            log_context.attempt.set(attempt)
            logger.debug('Sending request')
            # Fix: count every attempted request, consistently with the sync HTTPClient,
            # which increments this counter in its _make_request as well.
            self.stats.requests += 1
            try:
                # Increase timeout with each attempt. Max timeout is bounded by the client timeout.
                timeout = min(self.timeout_secs, (timeout_secs or self.timeout_secs) * 2 ** (attempt - 1))
                url_with_params = self._build_url_with_params(url, params)
                response = await impit_async_client.request(
                    method=method,
                    url=url_with_params,
                    headers=headers,
                    content=content,
                    timeout=timeout,
                    stream=stream or False,
                )

                # If response status is < 300, the request was successful, and we can return the result
                if response.status_code < 300:  # noqa: PLR2004
                    logger.debug('Request successful', extra={'status_code': response.status_code})
                    return response

                if response.status_code == HTTPStatus.TOO_MANY_REQUESTS:
                    self.stats.add_rate_limit_error(attempt)
            except Exception as e:
                logger.debug('Request threw exception', exc_info=e)
                if not is_retryable_error(e):
                    logger.debug('Exception is not retryable', exc_info=e)
                    stop_retrying()
                raise

            # We want to retry only requests which are server errors (status >= 500) and could resolve
            # on their own, and also retry rate limited requests that throw 429 Too Many Requests errors
            logger.debug('Request unsuccessful', extra={'status_code': response.status_code})
            if response.status_code < 500 and response.status_code != HTTPStatus.TOO_MANY_REQUESTS:  # noqa: PLR2004
                logger.debug('Status code is not retryable', extra={'status_code': response.status_code})
                stop_retrying()

            # Read the response in case it is a stream, so we can raise the error properly
            await response.aread()
            raise ApifyApiError(response, attempt, method=method)

        return await retry_with_exp_backoff_async(
            _make_request,
            max_retries=self.max_retries,
            backoff_base_millis=self.min_delay_between_retries_millis,
            backoff_factor=DEFAULT_BACKOFF_EXPONENTIAL_FACTOR,
            random_factor=DEFAULT_BACKOFF_RANDOM_FACTOR,
        )