From b53402c1f7f3b951d35e05cb600c5571136c14b4 Mon Sep 17 00:00:00 2001 From: Jordan Ritter Date: Tue, 14 Apr 2026 15:34:20 -0700 Subject: [PATCH 1/3] docs: add framework integration guides and fixture examples Adds guides for LangChain, CrewAI, PydanticAI, LlamaIndex, Mastra, and Google ADK. Each covers Quick Start, aimock-pytest, structured output, tool calls, and CI with GitHub Actions. Includes fixture examples and sidebar navigation updates. --- README.md | 6 +- docs/integrate-adk/index.html | 279 +++++++++++++ docs/integrate-crewai/index.html | 370 ++++++++++++++++++ docs/integrate-langchain/index.html | 240 ++++++++++++ docs/integrate-llamaindex/index.html | 322 +++++++++++++++ docs/integrate-mastra/index.html | 225 +++++++++++ docs/integrate-pydanticai/index.html | 309 +++++++++++++++ docs/sidebar.js | 11 + fixtures/examples/adk/gemini-agent.json | 47 +++ .../examples/crewai/multi-agent-crew.json | 16 + fixtures/examples/langchain/agent-loop.json | 27 ++ .../examples/llamaindex/aimock-config.json | 62 +++ .../examples/llamaindex/rag-pipeline.json | 34 ++ fixtures/examples/mastra/agent-workflow.json | 32 ++ .../pydanticai/structured-output.json | 15 + 15 files changed, 1994 insertions(+), 1 deletion(-) create mode 100644 docs/integrate-adk/index.html create mode 100644 docs/integrate-crewai/index.html create mode 100644 docs/integrate-langchain/index.html create mode 100644 docs/integrate-llamaindex/index.html create mode 100644 docs/integrate-mastra/index.html create mode 100644 docs/integrate-pydanticai/index.html create mode 100644 fixtures/examples/adk/gemini-agent.json create mode 100644 fixtures/examples/crewai/multi-agent-crew.json create mode 100644 fixtures/examples/langchain/agent-loop.json create mode 100644 fixtures/examples/llamaindex/aimock-config.json create mode 100644 fixtures/examples/llamaindex/rag-pipeline.json create mode 100644 fixtures/examples/mastra/agent-workflow.json create mode 100644 
fixtures/examples/pydanticai/structured-output.json diff --git a/README.md b/README.md index db1565e..982b679 100644 --- a/README.md +++ b/README.md @@ -88,9 +88,13 @@ npx aimock convert mockllm ./config.yaml ./fixtures/ docker run -d -p 4010:4010 -v ./fixtures:/fixtures ghcr.io/copilotkit/aimock -f /fixtures ``` +## Framework Guides + +Test your AI agents with aimock — no API keys, no network calls: [LangChain](https://aimock.copilotkit.dev/integrate-langchain) · [CrewAI](https://aimock.copilotkit.dev/integrate-crewai) · [PydanticAI](https://aimock.copilotkit.dev/integrate-pydanticai) · [LlamaIndex](https://aimock.copilotkit.dev/integrate-llamaindex) · [Mastra](https://aimock.copilotkit.dev/integrate-mastra) · [Google ADK](https://aimock.copilotkit.dev/integrate-adk) + ## Switching from other tools? -Step-by-step migration guides: [MSW](https://aimock.copilotkit.dev/migrate-from-msw) · [VidaiMock](https://aimock.copilotkit.dev/migrate-from-vidaimock) · [mock-llm](https://aimock.copilotkit.dev/migrate-from-mock-llm) · [Python mocks](https://aimock.copilotkit.dev/migrate-from-python-mocks) · [Mokksy](https://aimock.copilotkit.dev/migrate-from-mokksy) +Step-by-step migration guides: [MSW](https://aimock.copilotkit.dev/migrate-from-msw) · [VidaiMock](https://aimock.copilotkit.dev/migrate-from-vidaimock) · [mock-llm](https://aimock.copilotkit.dev/migrate-from-mock-llm) · [piyook/llm-mock](https://aimock.copilotkit.dev/migrate-from-piyook) · [Python mocks](https://aimock.copilotkit.dev/migrate-from-python-mocks) · [Mokksy](https://aimock.copilotkit.dev/migrate-from-mokksy) ## Documentation diff --git a/docs/integrate-adk/index.html b/docs/integrate-adk/index.html new file mode 100644 index 0000000..17ace3b --- /dev/null +++ b/docs/integrate-adk/index.html @@ -0,0 +1,279 @@ + + + + + + Google ADK — aimock + + + + + + + + + +
+ + +
+

Google ADK

+

+ Test your Google ADK agents without Gemini API keys. aimock speaks the Gemini API format + natively — generateContent, streamGenerateContent, and + function calling. +

+ +

Quick Start

+

+ Point the Google Gen AI SDK at your aimock instance. ADK uses the Gen AI SDK under the + hood, so this is all you need: +

+
+
+ Configure the Gen AI client python +
+
from google import genai
+
+client = genai.Client(
+    vertexai=False,
+    api_key="test",
+    http_options={"api_version": "v1beta", "base_url": "http://localhost:4010"},
+)
+
+

+ Start aimock with a Gemini-format fixture, then run your ADK agent. Requests to + generateContent and streamGenerateContent are handled + automatically. +

+
+
Start aimock shell
+
npx aimock --fixtures fixtures/examples/adk/gemini-agent.json
+
+ +

Gemini API Format

+

+ aimock handles the Gemini native request and response format. ADK agents hit these + endpoints: +

+ + + + + + + + + + + + + + + + + + + + + + + + + +
MethodPathFormat
POST/v1beta/models/{model}:generateContentJSON
POST/v1beta/models/{model}:streamGenerateContentJSON stream / SSE (with ?alt=sse)
WS/ws/google.ai.generativelanguage.*Gemini Live WebSocket
+

+ Gemini uses contents with parts rather than OpenAI's + messages array. aimock translates Gemini requests to the unified fixture + format internally, so the same match.userMessage works regardless of which + provider endpoint the request arrives on. +

+ +

Function Calling

+

+ ADK agents rely heavily on Gemini function calling. The Gemini format uses + functionCall and functionResponse parts instead of the + OpenAI-style tool_calls array. aimock generates the correct Gemini response + shape from the same fixture format: +

+
+
+ Function calling fixture json +
+
{
+  "fixtures": [
+    {
+      "match": { "userMessage": "weather" },
+      "response": {
+        "toolCalls": [
+          {
+            "name": "get_weather",
+            "arguments": "{\"city\":\"San Francisco\",\"unit\":\"fahrenheit\"}"
+          }
+        ]
+      }
+    }
+  ]
+}
+
+

+ When a Gemini endpoint receives this fixture, aimock returns it as a + functionCall part: +

+
+
+ Gemini response shape json +
+
{
+  "candidates": [{
+    "content": {
+      "role": "model",
+      "parts": [{
+        "functionCall": {
+          "id": "call_abc123",
+          "name": "get_weather",
+          "args": { "city": "San Francisco", "unit": "fahrenheit" }
+        }
+      }]
+    }
+  }]
+}
+
+ +

With aimock-pytest

+

+ The aimock-pytest plugin provides an aimock fixture that starts

+
+
conftest.py python
+
import pytest
+
+# The aimock fixture is provided by aimock-pytest
+# pip install aimock-pytest
+
+
+
+ test_adk_agent.py python +
+
from google import genai
+
+def test_agent_weather_tool(aimock):
+    """Test that ADK agent calls get_weather tool."""
+    aimock.load_fixtures("fixtures/examples/adk/gemini-agent.json")
+
+    client = genai.Client(
+        vertexai=False,
+        api_key="test",
+        http_options={"api_version": "v1beta", "base_url": aimock.url},
+    )
+
+    response = client.models.generate_content(
+        model="gemini-2.0-flash",
+        contents="what is the weather in San Francisco?",
+    )
+
+    # Verify the tool call was returned
+    part = response.candidates[0].content.parts[0]
+    assert part.function_call.name == "get_weather"
+
+ +

CI with GitHub Action

+

+ Use the aimock GitHub Action to run aimock as a service in + CI: +

+
+
+ .github/workflows/test.yml yaml +
+
steps:
+  - uses: actions/checkout@v4
+  - name: Start aimock
+    uses: CopilotKit/aimock@v1
+    with:
+      fixtures: fixtures/examples/adk/gemini-agent.json
+
+  - name: Run ADK tests
+    run: pytest tests/
+
+

+ No real Gemini API keys needed in CI — aimock does not validate them. Note that ADK + / the Google Gen AI SDK does not support a base_url environment variable + override, so you must configure the base URL programmatically in your test code using + http_options (see the pytest example above). +

+ +

Arbitrary Path Prefixes

+

+ If your ADK configuration or a proxy layer adds a non-standard base URL prefix (e.g. + /my-app/v1beta/models/...), aimock normalizes it automatically. The + normalizeCompatPath feature strips arbitrary prefixes and rewrites paths + ending in known suffixes to their canonical form. +

+

+ This means your ADK agent can use any base URL structure and aimock will still route the + request correctly to the Gemini handler. +

+
+
+ Non-standard prefix example python +
+
# Even with a custom prefix, aimock routes correctly
+client = genai.Client(
+    vertexai=False,
+    api_key="test",
+    http_options={
+        "api_version": "v1beta",
+        "base_url": "http://localhost:4010/my-proxy",
+    },
+)
+
+
+ +
+ + + + + diff --git a/docs/integrate-crewai/index.html b/docs/integrate-crewai/index.html new file mode 100644 index 0000000..d6e3a13 --- /dev/null +++ b/docs/integrate-crewai/index.html @@ -0,0 +1,370 @@ + + + + + + CrewAI — aimock + + + + + + + + + +
+ + +
+

CrewAI

+

+ Test your CrewAI crews without API keys. Each agent in a crew makes its own LLM calls + — aimock handles them all with fixture-based responses. +

+ +

Quick Start

+

+ CrewAI agents make OpenAI-compatible LLM calls by default. Point them at aimock and every + agent in your crew will send requests to the mock server instead of the real API. The + recommended approach is to use CrewAI's LLM class with an explicit + base_url, shown in the examples below. +

+
+
+ + +
+
+
+
+ Start aimock, then run the crew + shell +
+
# Terminal 1 — start the mock server
+npx aimock --fixtures ./fixtures
+
+# Terminal 2 — run your CrewAI script
+export OPENAI_BASE_URL=http://localhost:4010/v1
+export OPENAI_API_KEY=test
+python crew.py
+
+
+
+
+
crew.py python
+
from crewai import Agent, Task, Crew, LLM
+
+# Recommended: use the LLM class with an explicit base_url
+llm = LLM(
+    model="openai/gpt-4o",
+    base_url="http://localhost:4010/v1",
+    api_key="test",
+)
+
+researcher = Agent(
+    role="Researcher",
+    goal="Research topics",
+    backstory="Expert researcher",
+    llm=llm,
+)
+
+task = Task(
+    description="Research the history of testing",
+    expected_output="A short summary",
+    agent=researcher,
+)
+
+crew = Crew(agents=[researcher], tasks=[task])
+result = crew.kickoff()
+print(result)
+
+
+
+ +
+ Environment variables vs LLM(base_url=...) — Setting + OPENAI_BASE_URL works when agents use the default OpenAI provider, but the + LLM(base_url=...) approach is more reliable across all configurations and is + the recommended way to point CrewAI at aimock. +
+ +

With aimock-pytest

+

+ The aimock-pytest plugin starts and stops the server automatically per test, + so you never need to manage a background process. +

+
+
Install shell
+
pip install aimock-pytest
+
+
+
conftest.py python
+
import os, pytest
+
+@pytest.fixture(autouse=True)
+def mock_llm(aimock):
+    """aimock-pytest provides the `aimock` fixture automatically.
+    It starts a fresh server for each test that requests it (function-scoped).
+    You must set OPENAI_BASE_URL yourself so CrewAI agents route to aimock."""
+    os.environ["OPENAI_BASE_URL"] = aimock.url + "/v1"
+    os.environ["OPENAI_API_KEY"] = "test"
+    aimock.load_fixtures("./fixtures/crewai-crew.json")
+    yield aimock
+
+
+
test_crew.py python
+
from crewai import Agent, Task, Crew
+
+def test_researcher_crew():
+    researcher = Agent(
+        role="Researcher",
+        goal="Research topics",
+        backstory="Expert researcher",
+    )
+    task = Task(
+        description="Summarize recent AI breakthroughs",
+        expected_output="A short summary",
+        agent=researcher,
+    )
+    crew = Crew(agents=[researcher], tasks=[task])
+    result = crew.kickoff()
+    assert "AI" in str(result)
+
+ +

Multi-Agent Crews

+

+ In a CrewAI crew, each agent makes independent LLM calls. The researcher agent sends its + own chat completion request, then the writer agent sends a separate one. Because aimock + matches on the userMessage field, you can write fixtures that target each + agent's prompt pattern independently. +

+
+
+ fixtures/crewai-crew.json json +
+
{
+  "fixtures": [
+    {
+      "match": { "userMessage": "research" },
+      "response": {
+        "content": "Based on my research, the key findings are:\n\n1. LLM testing with fixture-based mocks eliminates flaky tests caused by non-deterministic API responses.\n2. Proxy recording captures real interactions for replay in CI without API keys.\n3. Multi-agent frameworks like CrewAI benefit most because each agent multiplies the number of LLM calls per run."
+      }
+    },
+    {
+      "match": { "userMessage": "write" },
+      "response": {
+        "content": "# Testing LLMs in CI\n\nFixture-based mocking brings determinism to AI-powered applications. By replacing real API calls with recorded responses, teams ship faster with confidence.\n\n## Why It Matters\n\nEvery agent in a CrewAI crew makes independent LLM calls. Without mocking, a two-agent crew means two sources of non-determinism per run. With aimock, every call returns the exact same response every time."
+      }
+    }
+  ]
+}
+
+
+
Two-agent crew python
+
from crewai import Agent, Task, Crew, Process
+
+researcher = Agent(
+    role="Researcher",
+    goal="Research topics thoroughly",
+    backstory="Senior research analyst",
+)
+
+writer = Agent(
+    role="Writer",
+    goal="Write compelling articles",
+    backstory="Technical content writer",
+)
+
+research_task = Task(
+    description="Research LLM testing best practices",
+    expected_output="Key findings as bullet points",
+    agent=researcher,
+)
+
+write_task = Task(
+    description="Write a blog post from the research",
+    expected_output="A short blog post in markdown",
+    agent=writer,
+)
+
+crew = Crew(
+    agents=[researcher, writer],
+    tasks=[research_task, write_task],
+    process=Process.sequential,
+)
+result = crew.kickoff()
+
+

+ The researcher's prompt contains "research", matching the first fixture. The writer's + prompt contains "write", matching the second. Each agent gets its own deterministic + response. +

+ +

Tool Calls

+

+ CrewAI agents can use tools. When an agent invokes a tool, CrewAI sends a chat completion + with tool_choice and expects a tool-call response. aimock fixtures handle + this with the toolCalls response field. +

+
+
+ fixtures/crewai-tools.json json +
+
{
+  "fixtures": [
+    {
+      "match": { "userMessage": "search", "sequenceIndex": 0 },
+      "response": {
+        "toolCalls": [
+          {
+            "name": "web_search",
+            "arguments": "{\"query\": \"LLM testing frameworks 2025\"}"
+          }
+        ]
+      }
+    },
+    {
+      "match": { "userMessage": "search", "sequenceIndex": 1 },
+      "response": {
+        "content": "Based on the search results, the top LLM testing frameworks are aimock, promptfoo, and deepeval."
+      }
+    }
+  ]
+}
+
+
+
Agent with tools python
+
from crewai import Agent, Task, Crew
+from crewai_tools import SerperDevTool
+
+search_tool = SerperDevTool()
+
+researcher = Agent(
+    role="Researcher",
+    goal="Search the web for information",
+    backstory="Expert web researcher",
+    tools=[search_tool],
+)
+
+task = Task(
+    description="Search for the latest LLM testing frameworks",
+    expected_output="A ranked list of frameworks",
+    agent=researcher,
+)
+
+crew = Crew(agents=[researcher], tasks=[task])
+result = crew.kickoff()
+
+

+ The first fixture triggers the tool call. After CrewAI processes the tool result and sends + it back to the LLM, the second fixture matches the follow-up message and returns the final + answer. +

+ +

CI with GitHub Action

+

+ Use the CopilotKit/aimock GitHub Action to run aimock as a background service + in your CI pipeline. +

+
+
+ .github/workflows/test.yml yaml +
+
name: Test CrewAI Crew
+on: [push, pull_request]
+jobs:
+  test:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-python@v5
+        with:
+          python-version: "3.11"
+      - uses: CopilotKit/aimock@v1
+        with:
+          fixtures: ./fixtures
+      - run: pip install crewai pytest aimock-pytest
+      - run: pytest
+        env:
+          OPENAI_BASE_URL: http://127.0.0.1:4010/v1
+          OPENAI_API_KEY: test
+
+

+ The action starts aimock on port 4010, loads your fixtures, and keeps the server running + for the duration of the job. No real API keys needed. +

+ +

Record & Replay

+

+ Record a full crew execution against a real LLM, then replay it deterministically in tests + and CI. This is especially useful for capturing complex multi-agent interactions. +

+
+
Record a crew run shell
+
# Start aimock in record mode — unmatched requests go to OpenAI
+npx aimock --fixtures ./fixtures \
+  --record \
+  --provider-openai https://api.openai.com
+
+# Run your crew with the real API key (proxied through aimock)
+export OPENAI_BASE_URL=http://localhost:4010/v1
+export OPENAI_API_KEY=sk-your-real-key
+python crew.py
+
+# New fixtures appear in ./fixtures/recorded/
+# Commit them to your repo for deterministic replay
+
+

+ On subsequent runs without --record, aimock replays the recorded fixtures. + Every agent in the crew gets the exact same response it received during the original + recording, making your tests fully reproducible. +

+
+ +
+ + + + + diff --git a/docs/integrate-langchain/index.html b/docs/integrate-langchain/index.html new file mode 100644 index 0000000..35c96cc --- /dev/null +++ b/docs/integrate-langchain/index.html @@ -0,0 +1,240 @@ + + + + + + LangChain & LangGraph — aimock + + + + + + + + + +
+ + +
+

LangChain & LangGraph

+

+ Test your LangChain and LangGraph agents without API keys or network calls. Point your LLM + at aimock and get deterministic, fixture-driven responses. +

+ +

Quick Start

+

+ LangChain's ChatOpenAI accepts a base_url parameter. Point it at + aimock and every call goes through your fixtures instead of the real API. +

+
+
Minimal setup python
+
from langchain_openai import ChatOpenAI
+
+# Start aimock first: npx aimock --fixtures ./fixtures
+llm = ChatOpenAI(
+    base_url="http://localhost:4010/v1",
+    api_key="test",
+)
+
+result = llm.invoke("hello")
+print(result.content)  # deterministic fixture response
+
+

+ This works with any LangChain component that wraps an OpenAI-compatible API: + ChatOpenAI or any provider using the base_url parameter. For + Azure OpenAI, see the Azure OpenAI guide. +

+ +

With aimock-pytest

+

+ The aimock-pytest plugin starts and stops the server automatically per test. + The aimock fixture exposes a .url property you pass as the base + URL. +

+
+
pytest fixture python
+
def test_my_chain(aimock):
+    # Load fixtures before making LLM calls
+    aimock.load_fixtures("./fixtures/langchain.json")
+
+    llm = ChatOpenAI(
+        base_url=f"{aimock.url}/v1",
+        api_key="test",
+    )
+    result = llm.invoke("hello")
+    assert "Hi" in result.content
+
+

+ Install with pip install aimock-pytest. The fixture handles server lifecycle + so your tests stay fast and isolated. +

+ +

Multi-Turn Agent Loops (LangGraph)

+

+ LangGraph agents make multiple sequential LLM calls as they reason, call tools, and + synthesize results. A single user request like "plan a trip" might trigger three or more + completions in a loop. While later calls include tool results, the original user message + persists across the loop, so aimock's sequenceIndex matcher tracks how many + times a given message pattern has matched. +

+
+
Multi-turn fixture json
+
{
+  "fixtures": [
+    {
+      "match": { "userMessage": "plan a trip", "sequenceIndex": 0 },
+      "response": { "content": "I'll help plan your trip. Let me look up some options." }
+    },
+    {
+      "match": { "userMessage": "plan a trip", "sequenceIndex": 1 },
+      "response": {
+        "toolCalls": [
+          { "name": "search_flights", "arguments": "{\"origin\":\"SFO\",\"dest\":\"NRT\"}" }
+        ]
+      }
+    },
+    {
+      "match": { "userMessage": "plan a trip", "sequenceIndex": 2 },
+      "response": { "content": "I found 3 flights from SFO to Tokyo Narita. The best option is..." }
+    }
+  ]
+}
+
+

+ Each fixture fires once in order. The first LLM call matches index 0, the second matches + index 1, and so on. This lets you script the exact behavior of a multi-step agent without + any real API calls. +

+

+ See Fixtures and + Sequential Responses + for the full matching reference. +

+ +

Tool Call Fixtures

+

+ LangChain's tool-calling agents expect the LLM to return structured + tool_calls in the response. Use the toolCalls response field to + return them from aimock. +

+
+
Tool call fixture json
+
{
+  "fixtures": [
+    {
+      "match": { "userMessage": "what's the weather in SF" },
+      "response": {
+        "toolCalls": [
+          {
+            "name": "get_weather",
+            "arguments": "{\"location\":\"San Francisco\",\"unit\":\"fahrenheit\"}"
+          }
+        ]
+      }
+    }
+  ]
+}
+
+

+ When LangChain receives this response, it will invoke the get_weather tool + with the given arguments, just as it would with a real OpenAI response. Combine tool call + fixtures with sequenceIndex to script the full loop: tool call, tool result + injection, then final answer. +

+ +

Record & Replay

+

+ Don't want to write fixtures by hand? Record a real LangGraph session and replay it in + tests. Start aimock in record mode, run your agent against a live provider, and aimock + saves every request/response pair as a fixture file. +

+
+
Record a session shell
+
npx aimock --record --provider-openai https://api.openai.com -f ./fixtures
+
+

+ Then point your LangChain code at http://localhost:4010/v1 and run your agent + normally. Every LLM call is captured. On subsequent runs, aimock replays the recorded + responses without network calls. +

+
+
Replay in tests shell
+
# Replay mode (default when fixtures exist)
+npx aimock -f ./fixtures
+
+# Run your tests against the recorded fixtures
+pytest tests/
+
+

See Record & Replay for the full reference.

+ +

CI with GitHub Action

+

+ The CopilotKit/aimock GitHub Action starts aimock as a background service in + CI. Your tests run against fixtures with zero external dependencies. +

+
+
+ GitHub Actions workflow yaml +
+
- uses: CopilotKit/aimock@v1
+  with:
+    fixtures: ./test/fixtures
+
+- run: pytest
+  env:
+    OPENAI_BASE_URL: http://127.0.0.1:4010/v1
+
+

+ No API keys needed in CI. No flaky tests from rate limits or network timeouts. See + GitHub Action for all available options. +

+
+ +
+ + + + + diff --git a/docs/integrate-llamaindex/index.html b/docs/integrate-llamaindex/index.html new file mode 100644 index 0000000..6af75e9 --- /dev/null +++ b/docs/integrate-llamaindex/index.html @@ -0,0 +1,322 @@ + + + + + + LlamaIndex — aimock + + + + + + + + + +
+ + +
+

LlamaIndex

+

+ Test your LlamaIndex RAG pipelines end-to-end. aimock mocks both the LLM and the vector + database — retriever and generator in one server. +

+ +

Quick Start

+

+ Point the LlamaIndex OpenAI LLM at aimock instead of the real API. No code changes to your + RAG pipeline — just swap the base URL. +

+
+
Python python
+
from llama_index.llms.openai import OpenAI
+
+# Point at aimock instead of api.openai.com
+llm = OpenAI(
+    api_base="http://localhost:4010/v1",
+    api_key="test",
+)
+
+# Configure LlamaIndex to use aimock for both LLM and embeddings
+from llama_index.core import Settings, VectorStoreIndex, SimpleDirectoryReader
+from llama_index.embeddings.openai import OpenAIEmbedding
+
+Settings.llm = llm
+Settings.embed_model = OpenAIEmbedding(api_base="http://localhost:4010/v1", api_key="test")
+
+documents = SimpleDirectoryReader("data").load_data()
+index = VectorStoreIndex.from_documents(documents)
+query_engine = index.as_query_engine()
+response = query_engine.query("What is gravity?")
+
+

Start aimock with fixtures that match the queries your pipeline will send:

+
+
Terminal shell
+
npx aimock --fixtures ./fixtures/llamaindex
+
+ +

Mock Both LLM and Vector DB

+

+ This is where aimock shines for RAG testing. A LlamaIndex RAG pipeline has two external + dependencies: the retriever (vector database) and the + generator (LLM). aimock serves both on one port, so a single server + replaces Pinecone/Qdrant and OpenAI/Anthropic. +

+
+
+ fixtures/rag-pipeline.json json +
+
{
+  "fixtures": [
+    {
+      "match": { "userMessage": "What is gravity?" },
+      "response": {
+        "content": "Based on the retrieved documents, gravity is a fundamental force of nature that attracts objects with mass toward one another. It is described by Newton's law of universal gravitation and Einstein's general theory of relativity."
+      }
+    },
+    {
+      "match": { "inputText": "What is gravity?", "endpoint": "embedding" },
+      "response": {
+        "embedding": [0.9, 0.1, 0.05]
+      }
+    }
+  ]
+}
+
+
+
aimock.json json
+
{
+  "llm": {
+    "fixtures": "./fixtures/rag-pipeline.json"
+  },
+  "vector": {
+    "collections": [
+      {
+        "name": "knowledge-base",
+        "dimension": 3,
+        "vectors": [
+          {
+            "id": "doc-gravity",
+            "values": [0.9, 0.1, 0.05],
+            "metadata": { "source": "physics.pdf", "page": 12 }
+          }
+        ],
+        "queryResults": [
+          {
+            "id": "doc-gravity",
+            "score": 0.97,
+            "metadata": { "source": "physics.pdf", "page": 12 }
+          }
+        ]
+      }
+    ]
+  }
+}
+
+

+ Load both with npx aimock --config aimock.json. The config points to the + fixture file via llm.fixtures, so aimock handles both legs of the RAG + pipeline: +

+
    +
  • /v1/chat/completions — matches LLM fixtures for the generator
  • +
  • /vector — serves vector query results for the retriever
  • +
+
+
+ Python — dual mock python +
+
from llama_index.llms.openai import OpenAI
+from llama_index.embeddings.openai import OpenAIEmbedding
+from llama_index.vector_stores.qdrant import QdrantVectorStore
+
+# Generator: LLM pointed at aimock
+llm = OpenAI(
+    api_base="http://localhost:4010/v1",
+    api_key="test",
+)
+
+# Embeddings: also served by aimock
+embed_model = OpenAIEmbedding(
+    api_base="http://localhost:4010/v1",
+    api_key="test",
+)
+
+# Retriever: aimock's vector endpoint
+# Point your vector store client at localhost:4010/vector
+# aimock implements the Qdrant-compatible REST API
+
+# Now your entire RAG pipeline runs against one mock server
+
+ +

Embedding Fixtures

+

+ LlamaIndex indexes documents by generating embeddings. Use inputText matching + to return deterministic embedding vectors for specific inputs, ensuring your indexing and + retrieval paths produce consistent results in tests. +

+
+
+ fixtures/embeddings.json json +
+
{
+  "fixtures": [
+    {
+      "match": { "inputText": "What is gravity?", "endpoint": "embedding" },
+      "response": {
+        "embedding": [0.9, 0.1, 0.05]
+      }
+    },
+    {
+      "match": { "inputText": "Gravity is a fundamental force", "endpoint": "embedding" },
+      "response": {
+        "embedding": [0.88, 0.12, 0.07]
+      }
+    }
+  ]
+}
+
+

+ The inputText matcher performs substring matching, so + "gravity" matches any input containing that word. Use exact strings when you + need precision. +

+ +

With aimock-pytest

+

+ The aimock-pytest plugin starts and stops the server automatically per test. + Install with pip install aimock-pytest. +

+
+
test_rag.py python
+
from llama_index.llms.openai import OpenAI
+
+def test_rag_query(aimock):
+    # Load fixtures before making LLM calls
+    aimock.load_fixtures("./fixtures/llamaindex/rag.json")
+
+    llm = OpenAI(
+        api_base=f"{aimock.url}/v1",
+        api_key="test",
+    )
+    response = llm.complete("What is gravity?")
+    assert "force" in str(response).lower()
+
+ +

CI with GitHub Action

+

+ Run your LlamaIndex test suite in CI with the aimock GitHub Action. The action starts + aimock as a background service and exposes it on the default port. +

+
+
+ .github/workflows/test.yml yaml +
+
name: LlamaIndex Tests
+
+on: [push, pull_request]
+
+jobs:
+  test:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Start aimock
+        uses: CopilotKit/aimock@v1
+        with:
+          fixtures: ./fixtures/llamaindex
+
+      - name: Install dependencies
+        run: pip install -r requirements.txt
+
+      - name: Run tests
+        run: pytest tests/
+        env:
+          OPENAI_BASE_URL: http://127.0.0.1:4010/v1
+          OPENAI_API_KEY: test
+
+

+ No API keys needed in CI. Your LlamaIndex pipeline talks to aimock, which returns + deterministic responses from fixtures. +

+ +

Record & Replay

+

+ Record a RAG query end-to-end against real services, then replay it in tests. aimock + captures both the LLM completions and the embedding calls, so the full pipeline is + reproducible. +

+
+
Record mode shell
+
# Record LLM and embedding calls from a live session
+npx aimock \
+  --record \
+  --provider-openai https://api.openai.com \
+  --fixtures ./fixtures/llamaindex
+
+# Run your LlamaIndex pipeline against aimock
+python my_rag_pipeline.py
+
+# aimock saves fixtures to ./fixtures/llamaindex/
+# Next run replays them without hitting the real API
+
+
+
Replay in tests python
+
def test_rag_query(aimock):
+    # Load the recorded fixtures
+    aimock.load_fixtures("./fixtures/llamaindex/recorded.json")
+
+    from llama_index.llms.openai import OpenAI
+    llm = OpenAI(api_base=f"{aimock.url}/v1", api_key="test")
+    # ... run your RAG pipeline, assert on results
+
+
+ +
+ + + + + diff --git a/docs/integrate-mastra/index.html b/docs/integrate-mastra/index.html new file mode 100644 index 0000000..936d8d6 --- /dev/null +++ b/docs/integrate-mastra/index.html @@ -0,0 +1,225 @@ + + + + + + Mastra — aimock + + + + + + + + + +
+ + +
+

Mastra

+

+ Test your Mastra agents with deterministic LLM responses and AG-UI event streams. aimock + integrates natively with CopilotKit's Mastra support. +

+ +

Quick Start

+

+ Mastra agents support multiple LLM providers through their model configuration. For

+
+
+ Point your agent at aimock typescript +
+
import { Agent } from "@mastra/core/agent";
+
+const agent = new Agent({
+  name: "my-agent",
+  instructions: "You are a helpful assistant",
+  model: {
+    provider: "OPEN_AI",
+    name: "gpt-4o",
+    toolChoice: "auto",
+  },
+});
+
+// Set OPENAI_BASE_URL=http://localhost:4010/v1 to redirect to aimock
+// All completions will be served from aimock fixtures
+
+ +

With Vitest Plugin

+

+ The useAimock plugin starts and stops the aimock server automatically around + your test suite. No manual setup or teardown required. +

+
+
+ mastra-agent.test.ts typescript +
+
import { describe, it, expect } from "vitest";
+import { useAimock } from "@copilotkit/aimock/vitest";
+import { Agent } from "@mastra/core/agent";
+
+const mock = useAimock({ fixtures: "./fixtures" });
+
+describe("Mastra agent", () => {
+  it("handles a travel planning request", async () => {
+    process.env.OPENAI_BASE_URL = mock().url + "/v1";
+
+    const agent = new Agent({
+      name: "travel-planner",
+      instructions: "You are a travel planning assistant",
+      model: { provider: "OPEN_AI", name: "gpt-4o" },
+    });
+    const result = await agent.generate("plan a trip to Tokyo");
+    expect(result.text).toContain("Tokyo");
+  });
+});
+
+ +

Tool Call Workflows

+

+ Mastra agents use tools extensively for travel planning, API calls, and multi-step + workflows. Define tool call fixtures to test these interactions deterministically. +

+
+
+ fixtures/mastra-tools.json json +
+
{
+  "fixtures": [
+    {
+      "match": { "userMessage": "plan a trip", "sequenceIndex": 0 },
+      "response": {
+        "toolCalls": [
+          {
+            "name": "search_flights",
+            "arguments": "{\"origin\":\"SFO\",\"destination\":\"NRT\",\"date\":\"2025-03-15\"}"
+          }
+        ]
+      }
+    },
+    {
+      "match": { "userMessage": "plan a trip", "sequenceIndex": 1 },
+      "response": {
+        "toolCalls": [
+          {
+            "name": "search_hotels",
+            "arguments": "{\"city\":\"Tokyo\",\"checkIn\":\"2025-03-15\",\"checkOut\":\"2025-03-22\"}"
+          }
+        ]
+      }
+    },
+    {
+      "match": { "userMessage": "plan a trip", "sequenceIndex": 2 },
+      "response": {
+        "content": "I found a great itinerary for your Tokyo trip!\n\n**Flight:** SFO → NRT on March 15, departing 11:30 AM (United UA837) — $890 round trip\n\n**Hotel:** Hotel Gracery Shinjuku, March 15–22 — $185/night\n\nWould you like me to book these, or would you prefer different options?"
+      }
+    }
+  ]
+}
+
+

+ Each fixture fires once in sequence using sequenceIndex. The first call + triggers a flight search, the second searches for hotels, and the third returns the + combined itinerary. See Fixtures for the full matching syntax. +

+ +

AG-UI Frontend Testing

+

+ When using Mastra with CopilotKit, the frontend receives AG-UI event streams. Use + AGUIMock to test the frontend separately from the agent — no running Mastra + server required. +

+
+
+ Mock the AG-UI layer typescript +
+
import { LLMock } from "@copilotkit/aimock";
+import { AGUIMock } from "@copilotkit/aimock";
+
+const llm = new LLMock();
+const agui = new AGUIMock();
+
+agui.onMessage("hello", "Hi from the agent!");
+agui.onToolCall(/search/, "web_search", '{"q":"test"}', { result: "[]" });
+
+llm.mount("/agui", agui);
+const url = await llm.start();
+
+// Point your CopilotKit frontend at url + "/agui"
+// It receives deterministic AG-UI SSE event streams
+
+

+ This lets you develop and test CopilotKit UI components against canned agent responses + without running Mastra or any LLM provider. See + AGUIMock for the full API. +

+ +

CI with GitHub Action

+

+ Run your Mastra agent tests in CI with a single step. The aimock GitHub Action starts the + server, waits for it to be healthy, and cleans up when the job finishes. +

+
+
workflow.yml yaml
+
steps:
+  - uses: actions/checkout@v4
+  - uses: CopilotKit/aimock@v1
+    with:
+      fixtures: ./fixtures
+  - run: pnpm test
+    env:
+      OPENAI_BASE_URL: http://127.0.0.1:4010/v1
+
+

See GitHub Action for all available inputs and outputs.

+
+ +
+ + + + + diff --git a/docs/integrate-pydanticai/index.html b/docs/integrate-pydanticai/index.html new file mode 100644 index 0000000..0dd22ce --- /dev/null +++ b/docs/integrate-pydanticai/index.html @@ -0,0 +1,309 @@ + + + + + + PydanticAI — aimock + + + + + + + + + + +
+ + +
+

PydanticAI

+

+ Test your PydanticAI agents with deterministic responses. aimock handles structured output + validation, tool calls, and streaming — all without API keys. +

+ +

Quick Start

+

+ PydanticAI agents accept a custom base_url through their model configuration. + Point it at aimock and use any string for the API key: +

+
+
agent.py python
+
from pydantic_ai import Agent
+from pydantic_ai.models.openai import OpenAIChatModel
+from pydantic_ai.providers.openai import OpenAIProvider
+
+model = OpenAIChatModel(
+    "gpt-4o",
+    provider=OpenAIProvider(
+        base_url="http://localhost:4010/v1",
+        api_key="test",
+    ),
+)
+agent = Agent(model)
+
+ +

Start aimock with a fixture file, then run your agent:

+
+
Terminal shell
+
# Terminal 1 — start aimock
+npx aimock --fixtures ./fixtures
+
+# Terminal 2 — run the agent
+python agent.py
+
+ +

With aimock-pytest

+

+ The aimock-pytest plugin starts and stops the server automatically per test. + Install with pip install aimock-pytest. The aimock + fixture is provided automatically — just request it in your test function. +

+
+
test_agent.py python
+
from pydantic_ai import Agent
+from pydantic_ai.models.openai import OpenAIChatModel
+from pydantic_ai.providers.openai import OpenAIProvider
+
+async def test_agent_responds(aimock):
+    # Load fixtures before making LLM calls
+    aimock.load_fixtures("./fixtures/pydanticai.json")
+
+    model = OpenAIChatModel(
+        "gpt-4o",
+        provider=OpenAIProvider(
+            base_url=aimock.url + "/v1",
+            api_key="test",
+        ),
+    )
+    agent = Agent(model)
+    result = await agent.run("What is the weather?")
+    assert result.output is not None
+
+ +

Structured Output

+

+ PydanticAI validates LLM responses against Pydantic models. When your agent expects + structured output, PydanticAI uses tool calls by default to extract + structured data — not response_format. This means your fixture should + return a tool call whose arguments match your Pydantic schema. Use aimock’s + toolCalls fixture to serve the expected structured response: +

+
+
+ fixtures/structured-output.json json +
+
{
+  "fixtures": [
+    {
+      "match": { "userMessage": "Weather" },
+      "response": {
+        "toolCalls": [
+          {
+            "name": "final_result",
+            "arguments": "{\"city\": \"SF\", \"temp\": 72, \"unit\": \"fahrenheit\"}"
+          }
+        ]
+      }
+    }
+  ]
+}
+
+

+ Note: PydanticAI generates a tool named final_result (by + default) whose schema matches your output_type model. The LLM + “calls” this tool with the structured data, and PydanticAI validates the + arguments against your Pydantic model. If you need to use + response_format instead, you can opt in via + Agent(..., result_tool_name=None), in which case a + responseFormat fixture would apply: +

+
+
+ fixtures/structured-output-response-format.json json +
+
// Only needed if you disable tool-based structured output
+{
+  "fixtures": [
+    {
+      "match": { "responseFormat": "json_object" },
+      "response": {
+        "content": "{\"city\": \"SF\", \"temp\": 72, \"unit\": \"fahrenheit\"}"
+      }
+    }
+  ]
+}
+
+ +

The corresponding PydanticAI agent with a typed output:

+
+
+ structured_agent.py python +
+
from pydantic import BaseModel
+from pydantic_ai import Agent
+from pydantic_ai.models.openai import OpenAIChatModel
+from pydantic_ai.providers.openai import OpenAIProvider
+
+class Weather(BaseModel):
+    city: str
+    temp: int
+    unit: str
+
+model = OpenAIChatModel(
+    "gpt-4o",
+    provider=OpenAIProvider(
+        base_url="http://localhost:4010/v1",
+        api_key="test",
+    ),
+)
+agent = Agent(model, output_type=Weather)
+
+result = agent.run_sync("Weather in San Francisco")
+# result.output is a validated Weather instance
+assert result.output.city == "SF"
+assert result.output.temp == 72
+
+ +

Tool Calls

+

+ PydanticAI tools use typed function arguments. When an agent invokes a tool, the LLM + returns a tool call that PydanticAI validates and dispatches. Use aimock’s + toolCalls fixture to return deterministic tool invocations: +

+
+
+ fixtures/tool-call.json json +
+
{
+  "fixtures": [
+    {
+      "match": { "userMessage": "weather" },
+      "response": {
+        "toolCalls": [
+          {
+            "name": "get_weather",
+            "arguments": "{\"city\": \"San Francisco\"}"
+          }
+        ]
+      }
+    }
+  ]
+}
+
+ +

The PydanticAI agent that registers and handles the tool:

+
+
tool_agent.py python
+
from pydantic_ai import Agent, RunContext
+from pydantic_ai.models.openai import OpenAIChatModel
+from pydantic_ai.providers.openai import OpenAIProvider
+
+model = OpenAIChatModel(
+    "gpt-4o",
+    provider=OpenAIProvider(
+        base_url="http://localhost:4010/v1",
+        api_key="test",
+    ),
+)
+agent = Agent(model)
+
+@agent.tool
+async def get_weather(ctx: RunContext[None], city: str) -> str:
+    return f"72F and sunny in {city}"
+
+result = agent.run_sync("What's the weather?")
+# aimock triggers the tool call, PydanticAI dispatches get_weather
+
+ +

CI with GitHub Action

+

+ Use the aimock GitHub Action to run a mock server alongside your Python test suite. No API + keys or network access required: +

+
+
+ .github/workflows/test.yml yaml +
+
name: Tests
+on: [push, pull_request]
+
+jobs:
+  test:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+
+      - uses: actions/setup-python@v5
+        with:
+          python-version: "3.12"
+
+      - uses: CopilotKit/aimock@v1
+        with:
+          fixtures: ./fixtures
+
+      - run: pip install pydantic-ai pytest
+
+      - run: pytest
+        env:
+          OPENAI_BASE_URL: http://127.0.0.1:4010/v1
+          OPENAI_API_KEY: test
+
+

+ The action starts aimock as a background service on port 4010. Your tests connect via + OPENAI_BASE_URL and never hit a real API. See the + GitHub Action page for all available inputs. +

+
+ +
+ + + + + diff --git a/docs/sidebar.js b/docs/sidebar.js index 305ee9b..8d092bd 100644 --- a/docs/sidebar.js +++ b/docs/sidebar.js @@ -61,6 +61,17 @@ { label: "Services", href: "/services" }, ], }, + { + title: "Framework Guides", + links: [ + { label: "LangChain / LangGraph", href: "/integrate-langchain" }, + { label: "CrewAI", href: "/integrate-crewai" }, + { label: "PydanticAI", href: "/integrate-pydanticai" }, + { label: "LlamaIndex", href: "/integrate-llamaindex" }, + { label: "Mastra", href: "/integrate-mastra" }, + { label: "Google ADK", href: "/integrate-adk" }, + ], + }, { title: "Orchestration", links: [ diff --git a/fixtures/examples/adk/gemini-agent.json b/fixtures/examples/adk/gemini-agent.json new file mode 100644 index 0000000..929f125 --- /dev/null +++ b/fixtures/examples/adk/gemini-agent.json @@ -0,0 +1,47 @@ +{ + "fixtures": [ + { + "match": { "userMessage": "weather" }, + "response": { + "toolCalls": [ + { + "name": "get_weather", + "arguments": "{\"city\":\"San Francisco\",\"unit\":\"fahrenheit\"}" + } + ] + } + }, + { + "match": { "userMessage": "hello" }, + "response": { + "content": "Hello! I'm an ADK agent running on aimock. How can I help you today?" 
+ } + }, + { + "match": { "userMessage": "search" }, + "response": { + "toolCalls": [ + { + "name": "google_search", + "arguments": "{\"query\":\"latest AI news\"}" + } + ] + } + }, + { + "match": { "userMessage": "multi-tool" }, + "response": { + "toolCalls": [ + { + "name": "get_weather", + "arguments": "{\"city\":\"New York\",\"unit\":\"celsius\"}" + }, + { + "name": "get_time", + "arguments": "{\"timezone\":\"America/New_York\"}" + } + ] + } + } + ] +} diff --git a/fixtures/examples/crewai/multi-agent-crew.json b/fixtures/examples/crewai/multi-agent-crew.json new file mode 100644 index 0000000..e863dfe --- /dev/null +++ b/fixtures/examples/crewai/multi-agent-crew.json @@ -0,0 +1,16 @@ +{ + "fixtures": [ + { + "match": { "userMessage": "research" }, + "response": { + "content": "Based on my research, the key findings are:\n\n1. LLM testing with fixture-based mocks eliminates flaky tests caused by non-deterministic API responses.\n2. Proxy recording captures real interactions for replay in CI without API keys.\n3. Multi-agent frameworks like CrewAI benefit most because each agent multiplies the number of LLM calls per run." + } + }, + { + "match": { "userMessage": "write" }, + "response": { + "content": "# Testing LLMs in CI\n\nFixture-based mocking brings determinism to AI-powered applications. By replacing real API calls with recorded responses, teams ship faster with confidence.\n\n## Why It Matters\n\nEvery agent in a CrewAI crew makes independent LLM calls. Without mocking, a two-agent crew means two sources of non-determinism per run. With aimock, every call returns the exact same response every time." 
+ } + } + ] +} diff --git a/fixtures/examples/langchain/agent-loop.json b/fixtures/examples/langchain/agent-loop.json new file mode 100644 index 0000000..852e71f --- /dev/null +++ b/fixtures/examples/langchain/agent-loop.json @@ -0,0 +1,27 @@ +{ + "fixtures": [ + { + "match": { "userMessage": "plan a trip", "sequenceIndex": 0 }, + "response": { + "content": "I'll help plan your trip. Let me look up some options." + } + }, + { + "match": { "userMessage": "plan a trip", "sequenceIndex": 1 }, + "response": { + "toolCalls": [ + { + "name": "search_flights", + "arguments": "{\"origin\":\"SFO\",\"dest\":\"NRT\"}" + } + ] + } + }, + { + "match": { "userMessage": "plan a trip", "sequenceIndex": 2 }, + "response": { + "content": "I found 3 flights from SFO to Tokyo Narita. The best option is..." + } + } + ] +} diff --git a/fixtures/examples/llamaindex/aimock-config.json b/fixtures/examples/llamaindex/aimock-config.json new file mode 100644 index 0000000..9ad2a97 --- /dev/null +++ b/fixtures/examples/llamaindex/aimock-config.json @@ -0,0 +1,62 @@ +{ + "llm": { + "fixtures": "./rag-pipeline.json" + }, + "vector": { + "collections": [ + { + "name": "knowledge-base", + "dimension": 3, + "vectors": [ + { + "id": "doc-gravity", + "values": [0.9, 0.1, 0.05], + "metadata": { + "source": "physics.pdf", + "page": 12, + "text": "Gravity is a fundamental force of nature that attracts objects with mass toward one another." + } + }, + { + "id": "doc-orbits", + "values": [0.75, 0.3, 0.15], + "metadata": { + "source": "physics.pdf", + "page": 45, + "text": "Orbital mechanics describes the motion of planets and satellites under gravitational influence." + } + }, + { + "id": "doc-tides", + "values": [0.6, 0.5, 0.2], + "metadata": { + "source": "physics.pdf", + "page": 78, + "text": "Tidal forces result from the differential gravitational pull of the Moon and Sun on Earth's oceans." 
+ } + } + ], + "queryResults": [ + { + "id": "doc-gravity", + "score": 0.97, + "metadata": { + "source": "physics.pdf", + "page": 12, + "text": "Gravity is a fundamental force of nature that attracts objects with mass toward one another." + } + }, + { + "id": "doc-orbits", + "score": 0.82, + "metadata": { + "source": "physics.pdf", + "page": 45, + "text": "Orbital mechanics describes the motion of planets and satellites under gravitational influence." + } + } + ] + } + ] + } +} diff --git a/fixtures/examples/llamaindex/rag-pipeline.json b/fixtures/examples/llamaindex/rag-pipeline.json new file mode 100644 index 0000000..d2714cd --- /dev/null +++ b/fixtures/examples/llamaindex/rag-pipeline.json @@ -0,0 +1,34 @@ +{ + "fixtures": [ + { + "match": { "userMessage": "What is gravity?" }, + "response": { + "content": "Based on the retrieved documents, gravity is a fundamental force of nature that attracts objects with mass toward one another. It is described by Newton's law of universal gravitation and Einstein's general theory of relativity." + } + }, + { + "match": { "userMessage": "Summarize the document" }, + "response": { + "content": "The document covers three main topics: gravitational force, orbital mechanics, and tidal effects. It explains how gravity governs planetary motion and influences ocean tides on Earth." 
+ } + }, + { + "match": { "inputText": "What is gravity?", "endpoint": "embedding" }, + "response": { + "embedding": [0.9, 0.1, 0.05] + } + }, + { + "match": { "inputText": "Gravity is a fundamental force", "endpoint": "embedding" }, + "response": { + "embedding": [0.88, 0.12, 0.07] + } + }, + { + "match": { "inputText": "orbital mechanics and planetary motion", "endpoint": "embedding" }, + "response": { + "embedding": [0.75, 0.3, 0.15] + } + } + ] +} diff --git a/fixtures/examples/mastra/agent-workflow.json b/fixtures/examples/mastra/agent-workflow.json new file mode 100644 index 0000000..3ac6505 --- /dev/null +++ b/fixtures/examples/mastra/agent-workflow.json @@ -0,0 +1,32 @@ +{ + "fixtures": [ + { + "match": { "userMessage": "plan a trip", "sequenceIndex": 0 }, + "response": { + "toolCalls": [ + { + "name": "search_flights", + "arguments": "{\"origin\":\"SFO\",\"destination\":\"NRT\",\"date\":\"2025-03-15\"}" + } + ] + } + }, + { + "match": { "userMessage": "plan a trip", "sequenceIndex": 1 }, + "response": { + "toolCalls": [ + { + "name": "search_hotels", + "arguments": "{\"city\":\"Tokyo\",\"checkIn\":\"2025-03-15\",\"checkOut\":\"2025-03-22\"}" + } + ] + } + }, + { + "match": { "userMessage": "plan a trip", "sequenceIndex": 2 }, + "response": { + "content": "I found a great itinerary for your Tokyo trip!\n\n**Flight:** SFO → NRT on March 15, departing 11:30 AM (United UA837) — $890 round trip\n\n**Hotel:** Hotel Gracery Shinjuku, March 15–22 — $185/night\n\nWould you like me to book these, or would you prefer different options?" 
+ } + } + ] +} diff --git a/fixtures/examples/pydanticai/structured-output.json b/fixtures/examples/pydanticai/structured-output.json new file mode 100644 index 0000000..3cca151 --- /dev/null +++ b/fixtures/examples/pydanticai/structured-output.json @@ -0,0 +1,15 @@ +{ + "fixtures": [ + { + "match": { "userMessage": "Weather" }, + "response": { + "toolCalls": [ + { + "name": "final_result", + "arguments": "{\"city\": \"SF\", \"temp\": 72, \"unit\": \"fahrenheit\"}" + } + ] + } + } + ] +} From 94e5f4869f1107bb48fabf09ef6d732a0516fd3e Mon Sep 17 00:00:00 2001 From: Jordan Ritter Date: Tue, 14 Apr 2026 15:34:26 -0700 Subject: [PATCH 2/3] docs: redesign overview page with Quick Start above fold and compact suite table Adds highlight cards, framework guide links, migration links with correct paths, and page-toc aside for on-page navigation. --- docs/docs/index.html | 434 ++++++++++++++++++++++--------------------- 1 file changed, 223 insertions(+), 211 deletions(-) diff --git a/docs/docs/index.html b/docs/docs/index.html index c867408..7863533 100644 --- a/docs/docs/index.html +++ b/docs/docs/index.html @@ -17,141 +17,159 @@ />