Skip to content

Commit 5f72d6e

Browse files
committed
Add GoogleModel support
This PR adds support for Google models via the Gemini API and Vertex AI. The actual backend used depends on the parameters passed to GoogleModel.
1 parent 7b2d6a2 commit 5f72d6e

8 files changed

Lines changed: 337 additions & 18 deletions

File tree

.basedpyright/baseline.json

Lines changed: 0 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -72,16 +72,6 @@
7272
}
7373
}
7474
],
75-
"./splunklib/ai/model.py": [
76-
{
77-
"code": "reportDeprecated",
78-
"range": {
79-
"startColumn": 24,
80-
"endColumn": 31,
81-
"lineCount": 1
82-
}
83-
}
84-
],
8575
"./splunklib/ai/serialized_service.py": [
8676
{
8777
"code": "reportPrivateUsage",

pyproject.toml

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,7 @@ compat = ["six>=1.17.0"]
3636
ai = ["httpx==0.28.1", "langchain>=1.2.15", "mcp>=1.27.0", "pydantic>=2.13.1"]
3737
anthropic = ["splunk-sdk[ai]>=2.1.1", "langchain-anthropic>=1.4.0"]
3838
openai = ["splunk-sdk[ai]>=2.1.1", "langchain-openai>=1.1.13"]
39+
google = ["splunk-sdk[ai]>=2.1.1", "langchain-google-genai>=4.2.2", "google-auth>=2.0.0"]
3940

4041
# Treat the same as NPM's `devDependencies`
4142
[dependency-groups]
@@ -51,7 +52,7 @@ release = ["build>=1.4.3", "jinja2>=3.1.6", "sphinx>=9.1.0", "twine>=6.2.0"]
5152
lint = ["basedpyright>=1.39.0", "ruff>=0.15.10"]
5253
dev = [
5354
"rich>=14.3.3",
54-
"splunk-sdk[openai, anthropic]",
55+
"splunk-sdk[openai, anthropic, google]",
5556
{ include-group = "test" },
5657
{ include-group = "lint" },
5758
{ include-group = "release" },

splunklib/ai/README.md

Lines changed: 83 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -47,6 +47,7 @@ We support following predefined models:
4747

4848
- `OpenAIModel` - works with OpenAI and any [OpenAI-compatible API](https://platform.openai.com/docs/api-reference).
4949
- `AnthropicModel` - works with Anthropic and any [Anthropic-compatible API](https://docs.anthropic.com/en/api).
50+
- `GoogleModel` - works with Google's Gemini models via the [Gemini API](https://ai.google.dev/gemini-api/docs) or [Vertex AI](https://cloud.google.com/vertex-ai/generative-ai/docs/overview).
5051

5152
### OpenAI
5253

@@ -76,6 +77,88 @@ model = AnthropicModel(
7677
async with Agent(model=model) as agent: ....
7778
```
7879

80+
### Google
81+
82+
`GoogleModel` supports two backends: the [Gemini API](https://ai.google.dev/gemini-api/docs) and [Vertex AI](https://cloud.google.com/vertex-ai/generative-ai/docs/overview).
83+
The backend is selected automatically based on the parameters you provide, or you can
84+
force it with the `vertexai` flag.
85+
86+
Requires the `google` optional extra:
87+
88+
```sh
89+
pip install "splunk-sdk[google]"
90+
# or with uv:
91+
uv add splunk-sdk[google]
92+
```
93+
94+
#### Gemini API
95+
96+
Use this when you have a Google AI Studio API key and do not need Vertex AI infrastructure.
97+
Only `model` and `api_key` are required.
98+
99+
```py
100+
from splunklib.ai import Agent, GoogleModel
101+
102+
model = GoogleModel(
103+
model="gemini-2.0-flash",
104+
api_key="YOUR_GOOGLE_API_KEY",
105+
)
106+
107+
async with Agent(model=model) as agent: ...
108+
```
109+
110+
#### Vertex AI - API key
111+
112+
Use this to route requests through Vertex AI with an API key. Providing `project` is enough
113+
for the SDK to switch to the Vertex AI backend automatically.
114+
115+
```py
116+
from splunklib.ai import Agent, GoogleModel
117+
118+
model = GoogleModel(
119+
model="gemini-2.0-flash",
120+
api_key="YOUR_VERTEX_API_KEY",
121+
project="your-gcp-project-id",
122+
# location="us-central1", # optional, defaults to us-central1
123+
)
124+
125+
async with Agent(model=model) as agent: ...
126+
```
127+
128+
#### Vertex AI - service account credentials
129+
130+
Use this when authenticating with a service account key file (or any
131+
`google.auth.credentials.Credentials`-compatible object). No `api_key` is needed.
132+
133+
```py
134+
from google.oauth2 import service_account
135+
from splunklib.ai import Agent, GoogleModel
136+
137+
credentials = service_account.Credentials.from_service_account_file(
138+
"path/to/service-account.json",
139+
scopes=["https://www.googleapis.com/auth/cloud-platform"],
140+
)
141+
142+
model = GoogleModel(
143+
model="gemini-2.0-flash",
144+
project="your-gcp-project-id",
145+
credentials=credentials,
146+
# location="us-central1", # optional, defaults to us-central1
147+
)
148+
149+
async with Agent(model=model) as agent: ...
150+
```
151+
152+
#### Backend selection rules
153+
154+
| `project` | `credentials` | `vertexai` | Backend used |
155+
|---|---|---|---|
156+
| not set | not set | `None` (default) | Gemini API |
157+
| set | - | `None` (default) | Vertex AI |
158+
| - | set | `None` (default) | Vertex AI |
159+
| any | any | `True` | Vertex AI (forced) |
160+
| any | any | `False` | Gemini API (forced) |
161+
79162
### Self-hosted models via Ollama
80163

81164
[Ollama](https://ollama.com/) can serve local models with both OpenAI and Anthropic-compatible endpoints, so either model class works.

splunklib/ai/__init__.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@
1818
raise ImportError("Python 3.13 or newer is required to use this module")
1919

2020
from splunklib.ai.agent import Agent
21-
from splunklib.ai.model import AnthropicModel, OpenAIModel
21+
from splunklib.ai.model import AnthropicModel, GoogleModel, OpenAIModel
2222
from splunklib.ai.security import (
2323
create_structured_prompt,
2424
detect_injection,
@@ -29,6 +29,7 @@
2929
"Agent",
3030
"AnthropicModel",
3131
"OpenAIModel",
32+
"GoogleModel",
3233
"create_structured_prompt",
3334
"detect_injection",
3435
"truncate_input",

splunklib/ai/engines/langchain.py

Lines changed: 28 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -112,7 +112,7 @@
112112
subagent_middleware,
113113
tool_middleware,
114114
)
115-
from splunklib.ai.model import AnthropicModel, OpenAIModel, PredefinedModel
115+
from splunklib.ai.model import AnthropicModel, GoogleModel, OpenAIModel, PredefinedModel
116116
from splunklib.ai.security import create_structured_prompt
117117
from splunklib.ai.structured_output import (
118118
StructuredOutputGenerationException,
@@ -1847,6 +1847,33 @@ def _create_langchain_model(model: PredefinedModel) -> BaseChatModel:
18471847
+ "# or if using uv:\n"
18481848
+ "uv add splunk-sdk[anthropic]"
18491849
)
1850+
case GoogleModel():
1851+
try:
1852+
from langchain_google_genai import ChatGoogleGenerativeAI
1853+
1854+
google_kwargs: dict[str, Any] = {"model": model.model}
1855+
if model.api_key is not None:
1856+
google_kwargs["google_api_key"] = model.api_key
1857+
if model.project is not None:
1858+
google_kwargs["project"] = model.project
1859+
if model.location is not None:
1860+
google_kwargs["location"] = model.location
1861+
if model.credentials is not None:
1862+
google_kwargs["credentials"] = model.credentials
1863+
if model.vertexai is not None:
1864+
google_kwargs["vertexai"] = model.vertexai
1865+
if model.temperature is not None:
1866+
google_kwargs["temperature"] = model.temperature
1867+
1868+
return ChatGoogleGenerativeAI(**google_kwargs)
1869+
except ImportError:
1870+
raise ImportError(
1871+
"Google GenAI support is not installed.\n"
1872+
+ "To enable Google / Gemini models, install the optional extra:\n"
1873+
+ 'pip install "splunk-sdk[google]"\n'
1874+
+ "# or if using uv:\n"
1875+
+ "uv add splunk-sdk[google]"
1876+
)
18501877
case _:
18511878
raise InvalidModelError(
18521879
"Cannot create langchain model - invalid SDK model provided"

splunklib/ai/model.py

Lines changed: 37 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,11 +12,15 @@
1212
# License for the specific language governing permissions and limitations
1313
# under the License.
1414

15+
from collections.abc import Mapping
1516
from dataclasses import dataclass
16-
from typing import Any, Mapping
17+
from typing import TYPE_CHECKING, Any
1718

1819
import httpx
1920

21+
if TYPE_CHECKING:
22+
from google.oauth2 import service_account
23+
2024

2125
@dataclass(frozen=True, kw_only=True)
2226
class PredefinedModel:
@@ -63,8 +67,40 @@ class AnthropicModel(PredefinedModel):
6367
temperature: float | None = None
6468

6569

70+
@dataclass(frozen=True, kw_only=True)
71+
class GoogleModel(PredefinedModel):
72+
"""Predefined Google Model
73+
74+
Supports the Gemini API and Vertex AI. The backend is chosen
75+
automatically: Vertex AI when ``project`` or ``credentials`` is set,
76+
otherwise the Gemini API. Override with ``vertexai=True/False``.
77+
78+
See the README for full usage examples and authentication options.
79+
"""
80+
81+
model: str
82+
api_key: str | None = None
83+
"""API key for the Gemini API or Vertex AI."""
84+
85+
project: str | None = None
86+
"""Google Cloud project ID (Vertex AI only)."""
87+
88+
location: str | None = None
89+
"""Vertex AI region, e.g. ``"us-central1"`` or ``"europe-west4"``."""
90+
91+
credentials: "service_account.Credentials | None" = None
92+
"""Service account credentials for Vertex AI. When set, ``api_key`` is not required."""
93+
94+
vertexai: bool | None = None
95+
"""Force backend selection: ``True`` for Vertex AI, ``False`` for Gemini API, ``None`` to auto-detect."""
96+
97+
temperature: float | None = None
98+
"""Sampling temperature in the range ``[0.0, 2.0]``."""
99+
100+
66101
__all__ = [
67102
"AnthropicModel",
103+
"GoogleModel",
68104
"OpenAIModel",
69105
"PredefinedModel",
70106
]

tests/unit/ai/engine/test_langchain_backend.py

Lines changed: 51 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,7 @@
4141
ToolMessage,
4242
ToolResult,
4343
)
44-
from splunklib.ai.model import AnthropicModel, OpenAIModel, PredefinedModel
44+
from splunklib.ai.model import AnthropicModel, GoogleModel, OpenAIModel, PredefinedModel
4545
from splunklib.ai.tools import ToolType
4646

4747

@@ -646,6 +646,56 @@ def test_create_langchain_model_anthropic_with_base_url(self) -> None:
646646
# ChatAnthropic stores base_url in anthropic_api_url
647647
assert result.anthropic_api_url == model.base_url
648648

649+
def test_create_langchain_model_google_gemini_api(self) -> None:
650+
pytest.importorskip("langchain_google_genai")
651+
import langchain_google_genai
652+
653+
model = GoogleModel(model="gemini-2.0-flash", api_key="test-key")
654+
result = lc._create_langchain_model(model)
655+
656+
assert isinstance(result, langchain_google_genai.ChatGoogleGenerativeAI)
657+
assert result.model == model.model
658+
assert result._use_vertexai is False # pyright: ignore[reportAttributeAccessIssue]
659+
660+
def test_create_langchain_model_google_vertex_ai_via_project(self) -> None:
661+
pytest.importorskip("langchain_google_genai")
662+
import langchain_google_genai
663+
664+
model = GoogleModel(
665+
model="gemini-2.0-flash",
666+
api_key="test-key",
667+
project="my-project",
668+
)
669+
result = lc._create_langchain_model(model)
670+
671+
assert isinstance(result, langchain_google_genai.ChatGoogleGenerativeAI)
672+
assert result.project == model.project
673+
assert result._use_vertexai is True # pyright: ignore[reportAttributeAccessIssue]
674+
675+
def test_create_langchain_model_google_vertex_ai_explicit_flag(self) -> None:
676+
pytest.importorskip("langchain_google_genai")
677+
import langchain_google_genai
678+
679+
model = GoogleModel(
680+
model="gemini-2.0-flash",
681+
api_key="test-key",
682+
vertexai=True,
683+
)
684+
result = lc._create_langchain_model(model)
685+
686+
assert isinstance(result, langchain_google_genai.ChatGoogleGenerativeAI)
687+
assert result._use_vertexai is True # pyright: ignore[reportAttributeAccessIssue]
688+
689+
def test_create_langchain_model_google_temperature(self) -> None:
690+
pytest.importorskip("langchain_google_genai")
691+
import langchain_google_genai
692+
693+
model = GoogleModel(model="gemini-2.0-flash", api_key="test-key", temperature=0.5)
694+
result = lc._create_langchain_model(model)
695+
696+
assert isinstance(result, langchain_google_genai.ChatGoogleGenerativeAI)
697+
assert result.temperature == model.temperature
698+
649699

650700
@pytest.mark.parametrize(
651701
("name", "tool_type", "expected_name"),

0 commit comments

Comments
 (0)