diff --git a/docs-website/docs/pipeline-components/generators/openaigenerator.mdx b/docs-website/docs/pipeline-components/generators/openaigenerator.mdx index 57f1c063f0..5bab2c5f4b 100644 --- a/docs-website/docs/pipeline-components/generators/openaigenerator.mdx +++ b/docs-website/docs/pipeline-components/generators/openaigenerator.mdx @@ -122,7 +122,7 @@ pipe = Pipeline() pipe.add_component("retriever", InMemoryBM25Retriever(document_store=docstore)) pipe.add_component("prompt_builder", PromptBuilder(template=template)) -pipe.add_component("llm", OpenAIGenerator(api_key=Secret.from_token("")) +pipe.add_component("llm", OpenAIGenerator(api_key=Secret.from_env_var("OPENAI_API_KEY"))) pipe.connect("retriever", "prompt_builder.documents") pipe.connect("prompt_builder", "llm") @@ -137,3 +137,60 @@ res=pipe.run({ print(res) ``` + +### In YAML + +This is the YAML representation of the RAG pipeline shown above. It retrieves documents based on a query, constructs a prompt using a template, and generates an answer using an OpenAI model.
+ +```yaml +components: + llm: + init_parameters: + api_base_url: null + api_key: + env_vars: + - OPENAI_API_KEY + strict: true + type: env_var + generation_kwargs: {} + http_client_kwargs: null + max_retries: null + model: gpt-5-mini + organization: null + streaming_callback: null + system_prompt: null + timeout: null + type: haystack.components.generators.openai.OpenAIGenerator + prompt_builder: + init_parameters: + required_variables: null + template: "\nGiven the following information, answer the question.\n\nContext:\n\ + {% for document in documents %}\n {{ document.content }}\n{% endfor %}\n\n\ + Question: {{ query }}?\n" + variables: null + type: haystack.components.builders.prompt_builder.PromptBuilder + retriever: + init_parameters: + document_store: + init_parameters: + bm25_algorithm: BM25L + bm25_parameters: {} + bm25_tokenization_regex: (?u)\b\w+\b + embedding_similarity_function: dot_product + index: 64e4f9ab-87fb-47fd-b390-dabcfda61447 + return_embedding: true + type: haystack.document_stores.in_memory.document_store.InMemoryDocumentStore + filter_policy: replace + filters: null + scale_score: false + top_k: 10 + type: haystack.components.retrievers.in_memory.bm25_retriever.InMemoryBM25Retriever +connection_type_validation: true +connections: +- receiver: prompt_builder.documents + sender: retriever.documents +- receiver: llm.prompt + sender: prompt_builder.prompt +max_runs_per_component: 100 +metadata: {} +```