@@ -431,17 +431,17 @@ async def _list_head(
431431     self._should_check_for_forefront_requests = False
432432
433433     # Otherwise fetch from API
434-       list_and_lost_data = await self._api_client.list_and_lock_head(
434+       locked_queue_head = await self._api_client.list_and_lock_head(
435435         lock_secs=int(self._DEFAULT_LOCK_TIME.total_seconds()),
436436         limit=limit,
437437     )
438438
439439     # Update the queue head cache
440-       self._queue_has_locked_requests = list_and_lost_data.queue_has_locked_requests
440+       self._queue_has_locked_requests = locked_queue_head.queue_has_locked_requests
441441     # Check if there is another client working with the RequestQueue
442-       self.metadata.had_multiple_clients = list_and_lost_data.had_multiple_clients
442+       self.metadata.had_multiple_clients = locked_queue_head.had_multiple_clients
443443
444-       for request_data in list_and_lost_data.items:
444+       for request_data in locked_queue_head.items:
445445         request = Request.model_validate(request_data.model_dump(by_alias=True))
446446         request_id = request_data.id
447447
@@ -473,7 +473,7 @@ async def _list_head(
473473     # After adding new requests to the forefront, any existing leftover locked request is kept in the end.
474474     self._queue_head.append(leftover_id)
475475
476-       list_and_lost_dict = list_and_lost_data.model_dump(by_alias=True)
476+       list_and_lost_dict = locked_queue_head.model_dump(by_alias=True)
477477     return RequestQueueHead.model_validate(list_and_lost_dict)
478478
479479 def _cache_request(
0 commit comments