Skip to content

Commit 204fd2f

Browse files
authored
Adopt pep585 (#2528)
1 parent f823dc4 commit 204fd2f

71 files changed

Lines changed: 660 additions & 661 deletions

File tree

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

integrations/ollama/pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -88,7 +88,7 @@ known-first-party = ["haystack_integrations"]
8888

8989

9090
[tool.ruff]
91-
target-version = "py38"
91+
target-version = "py39"
9292
line-length = 120
9393

9494
[tool.ruff.lint]

integrations/ollama/src/haystack_integrations/components/embedders/ollama/document_embedder.py

Lines changed: 15 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
import asyncio
2-
from typing import Any, Dict, List, Optional, Union
2+
from typing import Any, Optional, Union
33

44
from haystack import Document, component
55
from tqdm import tqdm
@@ -30,13 +30,13 @@ def __init__(
3030
self,
3131
model: str = "nomic-embed-text",
3232
url: str = "http://localhost:11434",
33-
generation_kwargs: Optional[Dict[str, Any]] = None,
33+
generation_kwargs: Optional[dict[str, Any]] = None,
3434
timeout: int = 120,
3535
keep_alive: Optional[Union[float, str]] = None,
3636
prefix: str = "",
3737
suffix: str = "",
3838
progress_bar: bool = True,
39-
meta_fields_to_embed: Optional[List[str]] = None,
39+
meta_fields_to_embed: Optional[list[str]] = None,
4040
embedding_separator: str = "\n",
4141
batch_size: int = 32,
4242
):
@@ -87,7 +87,7 @@ def __init__(
8787
self._client = Client(host=self.url, timeout=self.timeout)
8888
self._async_client = AsyncClient(host=self.url, timeout=self.timeout)
8989

90-
def _prepare_input(self, documents: List[Document]) -> List[Document]:
90+
def _prepare_input(self, documents: list[Document]) -> list[Document]:
9191
"""
9292
Prepares the list of documents to embed by appropriate validation.
9393
"""
@@ -100,7 +100,7 @@ def _prepare_input(self, documents: List[Document]) -> List[Document]:
100100

101101
return documents
102102

103-
def _prepare_texts_to_embed(self, documents: List[Document]) -> List[str]:
103+
def _prepare_texts_to_embed(self, documents: list[Document]) -> list[str]:
104104
"""
105105
Prepares the texts to embed by concatenating the Document text with the metadata fields to embed.
106106
"""
@@ -123,8 +123,8 @@ def _prepare_texts_to_embed(self, documents: List[Document]) -> List[str]:
123123
return texts_to_embed
124124

125125
def _embed_batch(
126-
self, texts_to_embed: List[str], batch_size: int, generation_kwargs: Optional[Dict[str, Any]] = None
127-
) -> List[List[float]]:
126+
self, texts_to_embed: list[str], batch_size: int, generation_kwargs: Optional[dict[str, Any]] = None
127+
) -> list[list[float]]:
128128
"""
129129
Internal method to embed a batch of texts.
130130
"""
@@ -146,8 +146,8 @@ def _embed_batch(
146146
return all_embeddings
147147

148148
async def _embed_batch_async(
149-
self, texts_to_embed: List[str], batch_size: int, generation_kwargs: Optional[Dict[str, Any]] = None
150-
) -> List[List[float]]:
149+
self, texts_to_embed: list[str], batch_size: int, generation_kwargs: Optional[dict[str, Any]] = None
150+
) -> list[list[float]]:
151151
"""
152152
Internal method to embed a batch of texts asynchronously.
153153
"""
@@ -175,10 +175,10 @@ async def _embed_batch_async(
175175

176176
return all_embeddings
177177

178-
@component.output_types(documents=List[Document], meta=Dict[str, Any])
178+
@component.output_types(documents=list[Document], meta=dict[str, Any])
179179
def run(
180-
self, documents: List[Document], generation_kwargs: Optional[Dict[str, Any]] = None
181-
) -> Dict[str, Union[List[Document], Dict[str, Any]]]:
180+
self, documents: list[Document], generation_kwargs: Optional[dict[str, Any]] = None
181+
) -> dict[str, Union[list[Document], dict[str, Any]]]:
182182
"""
183183
Runs an Ollama Model to compute embeddings of the provided documents.
184184
@@ -210,10 +210,10 @@ def run(
210210

211211
return {"documents": documents, "meta": {"model": self.model}}
212212

213-
@component.output_types(documents=List[Document], meta=Dict[str, Any])
213+
@component.output_types(documents=list[Document], meta=dict[str, Any])
214214
async def run_async(
215-
self, documents: List[Document], generation_kwargs: Optional[Dict[str, Any]] = None
216-
) -> Dict[str, Union[List[Document], Dict[str, Any]]]:
215+
self, documents: list[Document], generation_kwargs: Optional[dict[str, Any]] = None
216+
) -> dict[str, Union[list[Document], dict[str, Any]]]:
217217
"""
218218
Asynchronously run an Ollama Model to compute embeddings of the provided documents.
219219

integrations/ollama/src/haystack_integrations/components/embedders/ollama/text_embedder.py

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
from typing import Any, Dict, List, Optional, Union
1+
from typing import Any, Optional, Union
22

33
from haystack import component
44

@@ -25,7 +25,7 @@ def __init__(
2525
self,
2626
model: str = "nomic-embed-text",
2727
url: str = "http://localhost:11434",
28-
generation_kwargs: Optional[Dict[str, Any]] = None,
28+
generation_kwargs: Optional[dict[str, Any]] = None,
2929
timeout: int = 120,
3030
keep_alive: Optional[Union[float, str]] = None,
3131
):
@@ -58,10 +58,10 @@ def __init__(
5858
self._client = Client(host=self.url, timeout=self.timeout)
5959
self._async_client = AsyncClient(host=self.url, timeout=self.timeout)
6060

61-
@component.output_types(embedding=List[float], meta=Dict[str, Any])
61+
@component.output_types(embedding=list[float], meta=dict[str, Any])
6262
def run(
63-
self, text: str, generation_kwargs: Optional[Dict[str, Any]] = None
64-
) -> Dict[str, Union[List[float], Dict[str, Any]]]:
63+
self, text: str, generation_kwargs: Optional[dict[str, Any]] = None
64+
) -> dict[str, Union[list[float], dict[str, Any]]]:
6565
"""
6666
Runs an Ollama Model to compute embeddings of the provided text.
6767
@@ -85,10 +85,10 @@ def run(
8585

8686
return result
8787

88-
@component.output_types(embedding=List[float], meta=Dict[str, Any])
88+
@component.output_types(embedding=list[float], meta=dict[str, Any])
8989
async def run_async(
90-
self, text: str, generation_kwargs: Optional[Dict[str, Any]] = None
91-
) -> Dict[str, Union[List[float], Dict[str, Any]]]:
90+
self, text: str, generation_kwargs: Optional[dict[str, Any]] = None
91+
) -> dict[str, Union[list[float], dict[str, Any]]]:
9292
"""
9393
Asynchronously run an Ollama Model to compute embeddings of the provided text.
9494

integrations/ollama/src/haystack_integrations/components/generators/ollama/chat/chat_generator.py

Lines changed: 28 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
import json
2-
from typing import Any, AsyncIterator, Callable, Dict, Iterator, List, Literal, Optional, Union
2+
from collections.abc import AsyncIterator, Iterator
3+
from typing import Any, Callable, Literal, Optional, Union
34

45
from haystack import component, default_from_dict, default_to_dict
56
from haystack.dataclasses import (
@@ -23,14 +24,14 @@
2324

2425
from ollama import AsyncClient, ChatResponse, Client
2526

26-
FINISH_REASON_MAPPING: Dict[str, FinishReason] = {
27+
FINISH_REASON_MAPPING: dict[str, FinishReason] = {
2728
"stop": "stop",
2829
"tool_calls": "tool_calls",
2930
# we skip load and unload reasons
3031
}
3132

3233

33-
def _convert_chatmessage_to_ollama_format(message: ChatMessage) -> Dict[str, Any]:
34+
def _convert_chatmessage_to_ollama_format(message: ChatMessage) -> dict[str, Any]:
3435
"""
3536
Convert a ChatMessage to the format expected by the Ollama Chat API.
3637
"""
@@ -48,7 +49,7 @@ def _convert_chatmessage_to_ollama_format(message: ChatMessage) -> Dict[str, Any
4849
msg = "For Ollama compatibility, a `ChatMessage` can contain at most one `TextContent` or `ToolCallResult`."
4950
raise ValueError(msg)
5051

51-
ollama_msg: Dict[str, Any] = {"role": message.role.value}
52+
ollama_msg: dict[str, Any] = {"role": message.role.value}
5253

5354
if tool_call_results:
5455
# Ollama does not provide a way to communicate errors in tool invocations, so we ignore the error field
@@ -70,7 +71,7 @@ def _convert_chatmessage_to_ollama_format(message: ChatMessage) -> Dict[str, Any
7071
return ollama_msg
7172

7273

73-
def _convert_ollama_meta_to_openai_format(input_response_dict: Dict) -> Dict[str, Any]:
74+
def _convert_ollama_meta_to_openai_format(input_response_dict: dict) -> dict[str, Any]:
7475
"""
7576
Map Ollama metadata keys onto the OpenAI-compatible names Haystack expects.
7677
All fields that are not part of the OpenAI metadata are left unchanged in the returned dict.
@@ -129,7 +130,7 @@ def _convert_ollama_response_to_chatmessage(ollama_response: ChatResponse) -> Ch
129130
response_dict = ollama_response.model_dump()
130131
ollama_message = response_dict["message"]
131132
text = ollama_message["content"]
132-
tool_calls: List[ToolCall] = []
133+
tool_calls: list[ToolCall] = []
133134

134135
if ollama_tool_calls := ollama_message.get("tool_calls"):
135136
for ollama_tc in ollama_tool_calls:
@@ -211,7 +212,7 @@ def __init__(
211212
self,
212213
model: str = "qwen3:0.6b",
213214
url: str = "http://localhost:11434",
214-
generation_kwargs: Optional[Dict[str, Any]] = None,
215+
generation_kwargs: Optional[dict[str, Any]] = None,
215216
timeout: int = 120,
216217
keep_alive: Optional[Union[float, str]] = None,
217218
streaming_callback: Optional[Callable[[StreamingChunk], None]] = None,
@@ -274,7 +275,7 @@ def __init__(
274275
self._client = Client(host=self.url, timeout=self.timeout)
275276
self._async_client = AsyncClient(host=self.url, timeout=self.timeout)
276277

277-
def to_dict(self) -> Dict[str, Any]:
278+
def to_dict(self) -> dict[str, Any]:
278279
"""
279280
Serializes the component to a dictionary.
280281
@@ -296,7 +297,7 @@ def to_dict(self) -> Dict[str, Any]:
296297
)
297298

298299
@classmethod
299-
def from_dict(cls, data: Dict[str, Any]) -> "OllamaChatGenerator":
300+
def from_dict(cls, data: dict[str, Any]) -> "OllamaChatGenerator":
300301
"""
301302
Deserializes the component from a dictionary.
302303
@@ -315,20 +316,20 @@ def _handle_streaming_response(
315316
self,
316317
response_iter: Iterator[ChatResponse],
317318
callback: Optional[SyncStreamingCallbackT],
318-
) -> Dict[str, List[ChatMessage]]:
319+
) -> dict[str, list[ChatMessage]]:
319320
"""
320321
Merge an Ollama streaming response into a single ChatMessage, preserving
321322
tool calls. Works even when arguments arrive piecemeal as str fragments
322323
or as full JSON dicts.
323324
"""
324325

325326
component_info = ComponentInfo.from_component(self)
326-
chunks: List[StreamingChunk] = []
327+
chunks: list[StreamingChunk] = []
327328

328329
# Accumulators
329-
arg_by_id: Dict[str, str] = {}
330-
name_by_id: Dict[str, str] = {}
331-
id_order: List[str] = []
330+
arg_by_id: dict[str, str] = {}
331+
name_by_id: dict[str, str] = {}
332+
id_order: list[str] = []
332333
tool_call_index: int = 0
333334

334335
# Stream
@@ -399,18 +400,18 @@ async def _handle_streaming_response_async(
399400
self,
400401
response_iter: AsyncIterator[ChatResponse],
401402
callback: Optional[AsyncStreamingCallbackT],
402-
) -> Dict[str, List[ChatMessage]]:
403+
) -> dict[str, list[ChatMessage]]:
403404
"""
404405
Merge an Ollama async streaming response into a single ChatMessage, preserving
405406
tool calls. Works even when arguments arrive piecemeal as str fragments
406407
or as full JSON dicts."""
407408
component_info = ComponentInfo.from_component(self)
408-
chunks: List[StreamingChunk] = []
409+
chunks: list[StreamingChunk] = []
409410

410411
# Accumulators
411-
arg_by_id: Dict[str, str] = {}
412-
name_by_id: Dict[str, str] = {}
413-
id_order: List[str] = []
412+
arg_by_id: dict[str, str] = {}
413+
name_by_id: dict[str, str] = {}
414+
id_order: list[str] = []
414415
tool_call_index: int = 0
415416

416417
# Stream
@@ -466,15 +467,15 @@ async def _handle_streaming_response_async(
466467

467468
return {"replies": [reply]}
468469

469-
@component.output_types(replies=List[ChatMessage])
470+
@component.output_types(replies=list[ChatMessage])
470471
def run(
471472
self,
472-
messages: List[ChatMessage],
473-
generation_kwargs: Optional[Dict[str, Any]] = None,
473+
messages: list[ChatMessage],
474+
generation_kwargs: Optional[dict[str, Any]] = None,
474475
tools: Optional[ToolsType] = None,
475476
*,
476477
streaming_callback: Optional[StreamingCallbackT] = None,
477-
) -> Dict[str, List[ChatMessage]]:
478+
) -> dict[str, list[ChatMessage]]:
478479
"""
479480
Runs an Ollama Model on a given chat history.
480481
@@ -532,15 +533,15 @@ def run(
532533
# non-stream path
533534
return {"replies": [_convert_ollama_response_to_chatmessage(ollama_response=response)]}
534535

535-
@component.output_types(replies=List[ChatMessage])
536+
@component.output_types(replies=list[ChatMessage])
536537
async def run_async(
537538
self,
538-
messages: List[ChatMessage],
539-
generation_kwargs: Optional[Dict[str, Any]] = None,
539+
messages: list[ChatMessage],
540+
generation_kwargs: Optional[dict[str, Any]] = None,
540541
tools: Optional[ToolsType] = None,
541542
*,
542543
streaming_callback: Optional[StreamingCallbackT] = None,
543-
) -> Dict[str, List[ChatMessage]]:
544+
) -> dict[str, list[ChatMessage]]:
544545
"""
545546
Async version of run. Runs an Ollama Model on a given chat history.
546547

0 commit comments

Comments
 (0)