
Commit 2093503

julian-risch and claude authored
chore: enforce ruff docstring rules (D102/D103/D205/D209/D213/D417/D419) in integrations 21-30 (#3010)
Adds D102, D103, D205, D209, D213, D417, D419 ruff rules to pyproject.toml for: langfuse, lara, llama_cpp, llama_stack, mcp, meta_llama, mistral, mongodb_atlas, nvidia, ollama. Fixes all resulting docstring violations.

Part of #2947

Co-authored-by: Claude Sonnet 4.6 <noreply@anthropic.com>
1 parent 95b01cb commit 2093503

38 files changed: 176 additions & 57 deletions


integrations/langfuse/pyproject.toml

Lines changed: 10 additions & 3 deletions
@@ -88,6 +88,13 @@ select = [
   "ARG",
   "B",
   "C",
+  "D102", # Missing docstring in public method
+  "D103", # Missing docstring in public function
+  "D205", # 1 blank line required between summary line and description
+  "D209", # Closing triple quotes go to new line
+  "D213", # summary lines must be positioned on the second physical line of the docstring
+  "D417", # Missing argument descriptions in the docstring
+  "D419", # Docstring is empty
   "DTZ",
   "E",
   "EM",
@@ -136,10 +143,10 @@ ban-relative-imports = "parents"

 [tool.ruff.lint.per-file-ignores]
 # Tests can use magic values, assertions, and relative imports
-"tests/**/*" = ["PLR2004", "S101", "TID252", "ANN"]
+"tests/**/*" = ["D", "PLR2004", "S101", "TID252", "ANN"]
 # Examples can print their output
-"examples/**" = ["T201", "ANN"]
-"example/**" = ["T201", "ANN"]
+"examples/**" = ["T201", "ANN", "D"]
+"example/**" = ["T201", "ANN", "D"]
 "tests/**" = ["T201"]

 [tool.coverage.run]
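For orientation, a docstring that satisfies the newly selected rules would look roughly like the sketch below. It is purely illustrative; the function and its parameters are made up and do not appear in the commit. The summary sits on the second physical line (D213), a blank line separates it from the description (D205), every argument is documented (D417), and the closing quotes end on their own line (D209).

def resolve_model(name: str, revision: str | None = None) -> str:
    """
    Resolve a model name to a fully qualified identifier.

    Looks up the model in a local registry and falls back to the latest
    revision when none is given.

    :param name: Short model name, for example "zephyr-7b-beta".
    :param revision: Optional revision tag; defaults to the latest one.
    :return: The fully qualified model identifier.
    """
    return f"{name}@{revision or 'latest'}"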

integrations/langfuse/src/haystack_integrations/components/connectors/langfuse/langfuse_connector.py

Lines changed: 1 addition & 0 deletions
@@ -19,6 +19,7 @@
 class LangfuseConnector:
     """
     LangfuseConnector connects Haystack LLM framework with [Langfuse](https://langfuse.com) in order to enable the
+
     tracing of operations and data flow within various components of a pipeline.

     To use LangfuseConnector, add it to your pipeline without connecting it to any other components.
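The last context line describes the intended usage. A minimal sketch of it (not part of the commit, and assuming LangfuseConnector accepts a human-readable name argument as in the Haystack documentation):

from haystack import Pipeline
from haystack_integrations.components.connectors.langfuse import LangfuseConnector

pipeline = Pipeline()
# The connector is only added to the pipeline; it is not connected to other components.
pipeline.add_component("tracer", LangfuseConnector(name="My Pipeline"))
# Langfuse credentials and content tracing are assumed to be configured via environment variables.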

integrations/langfuse/src/haystack_integrations/tracing/langfuse/tracer.py

Lines changed: 9 additions & 0 deletions
@@ -125,6 +125,7 @@ def get_data(self) -> dict[str, Any]:
         return self._data

     def get_correlation_data_for_logs(self) -> dict[str, Any]:
+        """Return correlation data for log enrichment."""
         return {}


@@ -234,9 +235,11 @@ def handle(self, span: LangfuseSpan, component_type: str | None) -> None:

     @classmethod
     def from_dict(cls, data: dict[str, Any]) -> "SpanHandler":
+        """Deserialize a SpanHandler from a dictionary."""
         return default_from_dict(cls, data)

     def to_dict(self) -> dict[str, Any]:
+        """Serialize this SpanHandler to a dictionary."""
         return default_to_dict(self)


@@ -273,6 +276,7 @@ class DefaultSpanHandler(SpanHandler):
     """DefaultSpanHandler provides the default Langfuse tracing behavior for Haystack."""

     def create_span(self, context: SpanContext) -> LangfuseSpan:
+        """Create a Langfuse span based on the given context."""
         if self.tracer is None:
             message = (
                 "Tracer is not initialized. "
@@ -343,6 +347,7 @@ def create_span(self, context: SpanContext) -> LangfuseSpan:
         return LangfuseSpan(self.tracer.start_as_current_span(name=context.name))

     def handle(self, span: LangfuseSpan, component_type: str | None) -> None:
+        """Process and enrich a span after component execution."""
         # If the span is at the pipeline level, we add input and output keys to the span
         at_pipeline_level = span.get_data().get(_PIPELINE_INPUT_KEY) is not None
         if at_pipeline_level:
@@ -456,6 +461,7 @@ def __init__(
     def trace(
         self, operation_name: str, tags: dict[str, Any] | None = None, parent_span: Span | None = None
     ) -> Iterator[Span]:
+        """Create and manage a tracing span as a context manager."""
         tags = tags or {}
         span_name = tags.get(_COMPONENT_NAME_KEY, operation_name)
         component_type = tags.get(_COMPONENT_TYPE_KEY)
@@ -543,6 +549,7 @@ def trace(
         self.flush()

     def flush(self) -> None:
+        """Flush all pending spans to Langfuse."""
         self._tracer.flush()

     def current_span(self) -> Span | None:
@@ -558,13 +565,15 @@ def current_span(self) -> Span | None:
     def get_trace_url(self) -> str:
         """
         Return the URL to the tracing data.
+
         :return: The URL to the tracing data.
         """
         return self._tracer.get_trace_url() or ""

     def get_trace_id(self) -> str:
         """
         Return the trace ID.
+
         :return: The trace ID.
         """
         return self._tracer.get_current_trace_id() or ""
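The newly documented from_dict/to_dict methods delegate to Haystack's default_from_dict/default_to_dict helpers, so a handler round-trips through a plain dictionary. A minimal sketch (assuming DefaultSpanHandler is importable from the tracing package and needs no constructor arguments):

from haystack_integrations.tracing.langfuse import DefaultSpanHandler

handler = DefaultSpanHandler()
data = handler.to_dict()                       # serialized via default_to_dict
restored = DefaultSpanHandler.from_dict(data)  # rebuilt via default_from_dict
assert isinstance(restored, DefaultSpanHandler)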

integrations/lara/pyproject.toml

Lines changed: 8 additions & 1 deletion
@@ -95,6 +95,13 @@ select = [
   "ARG",
   "B",
   "C",
+  "D102", # Missing docstring in public method
+  "D103", # Missing docstring in public function
+  "D205", # 1 blank line required between summary line and description
+  "D209", # Closing triple quotes go to new line
+  "D213", # summary lines must be positioned on the second physical line of the docstring
+  "D417", # Missing argument descriptions in the docstring
+  "D419", # Docstring is empty
   "DTZ",
   "E",
   "EM",
@@ -149,7 +156,7 @@ ban-relative-imports = "parents"

 [tool.ruff.lint.per-file-ignores]
 # Tests can use magic values, assertions, and relative imports
-"tests/**/*" = ["PLR2004", "S101", "TID252", "ANN"]
+"tests/**/*" = ["D", "PLR2004", "S101", "TID252", "ANN"]

 [tool.coverage.run]
 source = ["haystack_integrations"]

integrations/llama_cpp/pyproject.toml

Lines changed: 8 additions & 1 deletion
@@ -99,6 +99,13 @@ select = [
   "ARG",
   "B",
   "C",
+  "D102", # Missing docstring in public method
+  "D103", # Missing docstring in public function
+  "D205", # 1 blank line required between summary line and description
+  "D209", # Closing triple quotes go to new line
+  "D213", # summary lines must be positioned on the second physical line of the docstring
+  "D417", # Missing argument descriptions in the docstring
+  "D419", # Docstring is empty
   "DTZ",
   "E",
   "EM",
@@ -142,7 +149,7 @@ ban-relative-imports = "parents"

 [tool.ruff.lint.per-file-ignores]
 # Tests can use magic values, assertions, and relative imports
-"tests/**/*" = ["PLR2004", "S101", "TID252", "ANN"]
+"tests/**/*" = ["D", "PLR2004", "S101", "TID252", "ANN"]
 # Examples can print their output
 "examples/**" = ["T201"]
 "tests/**" = ["T201"]

integrations/llama_cpp/src/haystack_integrations/components/generators/llama_cpp/chat/chat_generator.py

Lines changed: 5 additions & 0 deletions
@@ -211,8 +211,11 @@ def __init__(
         model_clip_path: str | None = None,
     ) -> None:
         """
+        Initialize LlamaCppChatGenerator.
+
         :param model: The path of a quantized model for text generation, for example, "zephyr-7b-beta.Q4_0.gguf".
             If the model path is also specified in the `model_kwargs`, this parameter will be ignored.
+
         :param n_ctx: The number of tokens in the context. When set to 0, the context will be taken from the model.
         :param n_batch: Prompt processing maximum batch size.
         :param model_kwargs: Dictionary containing keyword arguments used to initialize the LLM for text generation.
@@ -274,6 +277,7 @@ def __init__(
         self._inference_lock = asyncio.Lock()

     def warm_up(self) -> None:
+        """Load and initialize the llama.cpp model."""
         if self._model is not None:
             return

@@ -462,6 +466,7 @@ def _handle_streaming_response(
     ) -> dict[str, list[ChatMessage]]:
         """
         Take streaming responses from llama.cpp, convert to Haystack StreamingChunk objects, stream them,
+
         and finally convert them to a ChatMessage.

         :param response_stream: The streaming responses from llama.cpp.
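Taken together, the documented __init__ parameters and the warm_up docstring describe the expected call sequence. A minimal sketch (not from the commit; the model path, context size, and output handling are illustrative):

from haystack.dataclasses import ChatMessage
from haystack_integrations.components.generators.llama_cpp import LlamaCppChatGenerator

# Assumes a local GGUF file is available at this illustrative path.
generator = LlamaCppChatGenerator(model="zephyr-7b-beta.Q4_0.gguf", n_ctx=2048, n_batch=512)
generator.warm_up()  # loads and initializes the llama.cpp model, as documented above
result = generator.run(messages=[ChatMessage.from_user("Say hello.")])
print(result["replies"][0].text)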

integrations/llama_cpp/src/haystack_integrations/components/generators/llama_cpp/generator.py

Lines changed: 4 additions & 0 deletions
@@ -38,8 +38,11 @@ def __init__(
         generation_kwargs: dict[str, Any] | None = None,
     ) -> None:
         """
+        Initialize LlamaCppGenerator.
+
         :param model: The path of a quantized model for text generation, for example, "zephyr-7b-beta.Q4_0.gguf".
             If the model path is also specified in the `model_kwargs`, this parameter will be ignored.
+
         :param n_ctx: The number of tokens in the context. When set to 0, the context will be taken from the model.
         :param n_batch: Prompt processing maximum batch size.
         :param model_kwargs: Dictionary containing keyword arguments used to initialize the LLM for text generation.
@@ -69,6 +72,7 @@ def __init__(
         self.model: Llama | None = None

     def warm_up(self) -> None:
+        """Load and initialize the llama.cpp model."""
         if self.model is None:
             self.model = Llama(**self.model_kwargs)

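The non-chat generator follows the same pattern. A minimal sketch (illustrative path and prompt, assuming run accepts a prompt string as in the Haystack docs):

from haystack_integrations.components.generators.llama_cpp import LlamaCppGenerator

generator = LlamaCppGenerator(model="zephyr-7b-beta.Q4_0.gguf", n_ctx=2048, n_batch=512)
generator.warm_up()  # instantiates Llama(**model_kwargs), as shown in the diff
result = generator.run(prompt="Who is the best American actor?")
print(result["replies"][0])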

integrations/llama_stack/pyproject.toml

Lines changed: 8 additions & 1 deletion
@@ -84,6 +84,13 @@ select = [
   "ARG",
   "B",
   "C",
+  "D102", # Missing docstring in public method
+  "D103", # Missing docstring in public function
+  "D205", # 1 blank line required between summary line and description
+  "D209", # Closing triple quotes go to new line
+  "D213", # summary lines must be positioned on the second physical line of the docstring
+  "D417", # Missing argument descriptions in the docstring
+  "D419", # Docstring is empty
   "DTZ",
   "E",
   "EM",
@@ -137,7 +144,7 @@ ban-relative-imports = "parents"

 [tool.ruff.lint.per-file-ignores]
 # Tests can use magic values, assertions, and relative imports
-"tests/**/*" = ["PLR2004", "S101", "TID252", "ANN", "E501", "F841"]
+"tests/**/*" = ["D", "PLR2004", "S101", "TID252", "ANN", "E501", "F841"]
 # Examples can print their output and don't need type annotations
 "examples/**/*" = ["T201", "ANN"]


integrations/llama_stack/src/haystack_integrations/components/generators/llama_stack/chat/chat_generator.py

Lines changed: 5 additions & 2 deletions
@@ -20,6 +20,7 @@
 class LlamaStackChatGenerator(OpenAIChatGenerator):
     """
     Enables text generation using Llama Stack framework.
+
     Llama Stack Server supports multiple inference providers, including Ollama, Together,
     and vLLM and other cloud providers.
     For a complete list of inference providers, see [Llama Stack docs](https://llama-stack.readthedocs.io/en/latest/providers/inference/index.html).
@@ -70,8 +71,10 @@ def __init__(
         http_client_kwargs: dict[str, Any] | None = None,
     ) -> None:
         """
-        Creates an instance of LlamaStackChatGenerator. To use this chat generator,
-        you need to setup Llama Stack Server with an inference provider and have a model available.
+        Creates an instance of LlamaStackChatGenerator.
+
+        To use this chat generator, you need to setup Llama Stack Server with an inference provider and have a model
+        available.

         :param model:
             The name of the model to use for chat completion.
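As a usage illustration of the reworded summary, a minimal sketch (not part of the commit; it assumes a Llama Stack Server is running locally and that the generator accepts api_base_url like its OpenAIChatGenerator parent, with a placeholder URL and model name):

from haystack.dataclasses import ChatMessage
from haystack_integrations.components.generators.llama_stack import LlamaStackChatGenerator

generator = LlamaStackChatGenerator(
    model="llama3.2:3b",                                # placeholder model served by the inference provider
    api_base_url="http://localhost:8321/v1/openai/v1",  # placeholder server URL
)
result = generator.run(messages=[ChatMessage.from_user("What is Llama Stack?")])
print(result["replies"][0].text)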

integrations/mcp/pyproject.toml

Lines changed: 9 additions & 2 deletions
@@ -99,6 +99,13 @@ select = [
   "ARG",
   "B",
   "C",
+  "D102", # Missing docstring in public method
+  "D103", # Missing docstring in public function
+  "D205", # 1 blank line required between summary line and description
+  "D209", # Closing triple quotes go to new line
+  "D213", # summary lines must be positioned on the second physical line of the docstring
+  "D417", # Missing argument descriptions in the docstring
+  "D419", # Docstring is empty
   "DTZ",
   "E",
   "EM",
@@ -153,8 +160,8 @@ ban-relative-imports = "parents"

 [tool.ruff.lint.per-file-ignores]
 # Tests can use magic values, assertions, and relative imports
-"tests/**/*" = ["PLR2004", "S101", "TID252", "ANN"]
-"examples/**/*" = ["T201", "E501", "ANN"]
+"tests/**/*" = ["D", "PLR2004", "S101", "TID252", "ANN"]
+"examples/**/*" = ["T201", "E501", "ANN", "D"]

 [tool.coverage.run]
 source = ["haystack_integrations"]
