@@ -11,14 +11,15 @@
 from crawlee.storage_clients.models import AddRequestsResponse, ProcessedRequest, RequestQueueMetadata

 from ._models import ApifyRequestQueueMetadata, CachedRequest, RequestQueueHead
-from ._utils import unique_key_to_request_id
-from apify import Request
+from ._utils import to_crawlee_request, unique_key_to_request_id

 if TYPE_CHECKING:
     from collections.abc import Callable, Coroutine, Sequence

     from apify_client._resource_clients import RequestQueueClientAsync

+    from apify import Request
+
 logger = getLogger(__name__)


@@ -311,8 +312,7 @@ async def _get_request_by_id(self, request_id: str) -> Request | None:
         if response is None:
             return None

-        response_dict = response.model_dump(by_alias=True)
-        return Request.model_validate(response_dict)
+        return to_crawlee_request(response)

     async def _ensure_head_is_non_empty(self) -> None:
         """Ensure that the queue head has requests if they are available in the queue."""
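
The hunks above drop the inline `model_dump`/`model_validate` round-trip in favour of a shared `to_crawlee_request` helper imported from `._utils`. The helper's body is not part of this commit, but judging from the code it replaces, a minimal sketch might look like the following (the `crawlee` import location of `Request` is an assumption):

```python
# Hypothetical sketch of the to_crawlee_request helper; the real implementation
# lives in ._utils and is not shown in this diff.
from typing import Any

from crawlee import Request  # assumed import location of the crawlee Request model


def to_crawlee_request(client_request: Any) -> Request:
    """Convert an Apify client request model into a crawlee Request.

    Assumed behaviour: dump the client-side pydantic model using its API field
    aliases and re-validate the result as a crawlee Request, mirroring the two
    inline calls this commit removes.
    """
    request_dict = client_request.model_dump(by_alias=True)
    return Request.model_validate(request_dict)
```

Centralising the conversion keeps the queue client free of repeated pydantic plumbing and lets `Request` become a type-checking-only import in this module.
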
@@ -442,7 +442,7 @@ async def _list_head(
         self.metadata.had_multiple_clients = locked_queue_head.had_multiple_clients

         for request_data in locked_queue_head.items:
-            request = Request.model_validate(request_data.model_dump(by_alias=True))
+            request = to_crawlee_request(request_data)
             request_id = request_data.id

             # Skip requests without ID or unique key
@@ -473,8 +473,7 @@ async def _list_head(
             # After adding new requests to the forefront, any existing leftover locked request is kept in the end.
             self._queue_head.append(leftover_id)

-        list_and_lost_dict = locked_queue_head.model_dump(by_alias=True)
-        return RequestQueueHead.model_validate(list_and_lost_dict)
+        return RequestQueueHead.from_client_locked_head(locked_queue_head)

     def _cache_request(
         self,
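The last hunk applies the same idea to the queue head: instead of dumping the client model and re-validating it inline, the conversion moves behind a `RequestQueueHead.from_client_locked_head` classmethod defined in `._models`. That definition is not shown here; a minimal sketch, assuming it simply wraps the removed dump-and-validate pair and using illustrative field names, could be:

```python
# Hypothetical sketch of RequestQueueHead.from_client_locked_head; the real
# classmethod is defined in ._models and is not part of this diff.
from __future__ import annotations

from typing import Any

from pydantic import BaseModel, Field


class RequestQueueHead(BaseModel):
    """Simplified stand-in for the model in ._models; field names are illustrative."""

    items: list[Any] = Field(default_factory=list)
    had_multiple_clients: bool = False

    @classmethod
    def from_client_locked_head(cls, locked_queue_head: BaseModel) -> RequestQueueHead:
        # Assumed behaviour: reuse the aliased dump of the client-side model,
        # matching the inline model_dump/model_validate pair this commit removes.
        return cls.model_validate(locked_queue_head.model_dump(by_alias=True))
```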