Skip to content

Commit 11e3d8f

Browse files
committed
add server extensions docs
1 parent b8d6a8e commit 11e3d8f

File tree

2 files changed

+214
-1
lines changed

2 files changed

+214
-1
lines changed

content/docs/extensions/server.mdx

Lines changed: 213 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,3 +2,216 @@
22
title: Server Extensions API
33
description: This guide provides a walkthrough of the LLM Server Extensions API
44
---
5+
6+
xmas __init__.py:
7+
8+
Example of creating a simple extension that adds a greet endpoint to the server.
9+
10+
```python
11+
# runs after providers are configured but before server is run
def install(ctx):
    """Install the xmas extension.

    Registers a `greet` endpoint (GET and POST) that replies with a
    rotating holiday greeting.

    :param ctx: server extension context providing `add_get` / `add_post`.
    """
    # Load greetings, falling back to a single default greeting when the
    # bundled ui/greetings.json file is missing
    greetings_path = Path(__file__).parent / 'ui' / 'greetings.json'
    if greetings_path.exists():
        with open(greetings_path, 'r') as f:
            greetings = json.load(f)
    else:
        greetings = ["Merry Christmas!"]

    # rotation counter shared by all requests to this endpoint
    count = 0

    async def greet(request):
        nonlocal count
        # name can arrive via the query string or a form POST body
        name = request.query.get('name')
        if not name:
            data = await request.post()
            name = data.get('name')

        if not name:
            name = 'Stranger'

        # cycle through the greetings list, one per request
        greeting = greetings[count % len(greetings)]
        count += 1
        return web.json_response({"result":f"Hello {name}, {greeting}"})

    # the same handler serves both GET and POST
    ctx.add_get("greet", greet)
    ctx.add_post("greet", greet)


# register install extension handler
__install__ = install
45+
```
46+
47+
system_tools __init__.py:
48+
49+
Example that adds an endpoint returning a user's system prompts. It first checks whether the user is signed in (GitHub OAuth) and returns that user's prompts if they exist; otherwise it returns the shared default prompts for all users, or the default prompts bundled with this extension.
50+
51+
```python
52+
# fallback prompts used when no prompts.json exists on disk
default_prompts = [
    {"name": "Helpful Assistant", "prompt": "You are a helpful assistant."},
]

# runs after providers are configured but before server is run
def install(ctx):
    """Install the system_tools extension.

    Registers a `prompts.json` endpoint that returns the system prompts
    for the requesting user.

    :param ctx: server extension context providing `get_username`,
        `path` and `add_get`.
    """
    # helper to get user or default prompts
    def get_user_prompts(request):
        # candidate files are checked in priority order:
        # signed-in user's prompts -> shared default prompts -> bundled prompts
        candidate_paths = []
        # check if user is signed in
        username = ctx.get_username(request)
        if username:
            # if signed in (Github OAuth), prefer the prompts for this user
            candidate_paths.append(
                os.path.join(Path.home(), ".llms", "user", username, "system_prompts", "prompts.json")
            )
        # shared default prompts for all users, if present
        candidate_paths.append(os.path.join(Path.home(), ".llms", "user", "default", "system_prompts", "prompts.json"))
        # prompts bundled with this extension
        candidate_paths.append(os.path.join(ctx.path, "ui", "prompts.json"))

        # return the parsed JSON of the first candidate path that exists
        for path in candidate_paths:
            if os.path.exists(path):
                with open(path, encoding="utf-8") as f:
                    return json.load(f)
        # nothing on disk: fall back to the hard-coded defaults
        return default_prompts

    # API handler to get prompts
    async def get_prompts(request):
        prompts_json = get_user_prompts(request)
        return web.json_response(prompts_json)

    ctx.add_get("prompts.json", get_prompts)


# register install extension handler
__install__ = install
91+
```
92+
93+
94+
duckduckgo __init__.py:
95+
96+
Example of creating a tool extension that adds a web search tool to the server. It uses `requirements.txt` to install the third-party `ddgs` dependency.
97+
98+
```python
99+
from typing import Any, Dict

from ddgs import DDGS


def web_search(query: str, max_results: int | None = 10, page: int = 1) -> Dict[str, Any]:
    """
    Perform a web search using DuckDuckGo.

    :param query: search terms to submit to DuckDuckGo.
    :param max_results: maximum number of results to collect
        (None lets the ddgs library use its own default).
    :param page: NOTE(review): currently unused — confirm whether paging
        should be forwarded to ddgs or the parameter removed.
    :return: {"query", "results"} on success, or {"query", "error"} on failure.
    """

    try:
        results = []
        with DDGS() as ddgs:
            # text() returns an iterator
            for r in ddgs.text(query, max_results=max_results):
                results.append(r)
        return {"query": query, "results": results}
    # broad catch is deliberate: tool failures are reported back to the
    # caller as data instead of crashing the tool call
    except Exception as e:
        return {"query": query, "error": str(e)}


# runs after providers are configured but before server is run
def install(ctx):
    # expose web_search as a tool the LLM can invoke
    ctx.register_tool(web_search)


__install__ = install
125+
```
126+
127+
See [core_tools](https://github.com/llmspy/core_tools/blob/main/__init__.py) for more tool examples.
128+
129+
## Custom Provider Implementation example
130+
131+
Example of creating a custom provider that extends the GeneratorBase class to add support for OpenRouter image generation.
132+
133+
```python
134+
def install(ctx):
    """Install a custom image-generation provider for OpenRouter.

    Subclasses GeneratorBase and registers the class via
    ``ctx.add_provider``; NOTE(review): assumes `time`, `json` and
    `aiohttp` are imported at module level — confirm in the extension's
    full __init__.py.
    """
    from llms.main import GeneratorBase

    # https://openrouter.ai/docs/guides/overview/multimodal/image-generation
    class OpenRouterGenerator(GeneratorBase):
        # matched against the provider's "npm" value in llms.json
        sdk = "openrouter/image"

        def __init__(self, **kwargs):
            super().__init__(**kwargs)

        def to_response(self, response, chat, started_at):
            # go through all image responses and save them to cache
            for choice in response["choices"]:
                if "message" in choice and "images" in choice["message"]:
                    for image in choice["message"]["images"]:
                        if choice["message"]["content"] == "":
                            choice["message"]["content"] = self.default_content
                        if "image_url" in image:
                            data_uri = image["image_url"]["url"]
                            if data_uri.startswith("data:"):
                                # data URI shape: data:image/<ext>;base64,<payload>
                                parts = data_uri.split(",", 1)
                                ext = parts[0].split(";")[0].split("/")[1]
                                base64_data = parts[1]
                                model = chat["model"].split("/")[-1]
                                filename = f"{model}-{choice['index']}.{ext}"
                                info = {
                                    "model": model,
                                    "prompt": ctx.last_user_prompt(chat),
                                }
                                # persist the image and rewrite the URL to the cached copy
                                relative_url, info = ctx.save_image_to_cache(base64_data, filename, info)
                                image["image_url"]["url"] = relative_url

            return response

        async def chat(self, chat, provider=None):
            headers = self.get_headers(provider, chat)
            if provider is not None:
                # map the generic model id to this provider's model name
                chat["model"] = provider.provider_model(chat["model"]) or chat["model"]

            started_at = time.time()
            if ctx.MOCK:
                # mock mode: replay a canned response instead of calling the API
                print("Mocking OpenRouterGenerator")
                text = ctx.text_from_file(f"{ctx.MOCK_DIR}/openrouter-image.json")
                return ctx.log_json(self.to_response(json.loads(text), chat, started_at))
            else:
                chat_url = provider.chat_url
                chat = await self.process_chat(chat, provider_id=self.id)
                ctx.log(f"POST {chat_url}")
                ctx.log(provider.chat_summary(chat))
                # remove metadata if any (conflicts with some providers, e.g. Z.ai)
                chat.pop("metadata", None)

                async with aiohttp.ClientSession() as session, session.post(
                    chat_url,
                    headers=headers,
                    data=json.dumps(chat),
                    timeout=aiohttp.ClientTimeout(total=300),
                ) as response:
                    return ctx.log_json(self.to_response(await self.response_json(response), chat, started_at))

    ctx.add_provider(OpenRouterGenerator)


__install__ = install
198+
```
199+
200+
This new implementation can be used by registering it as the **image** modality whose **npm** matches the provider's **sdk** in `llms.json`, e.g.:
201+
202+
```json
203+
{
204+
"openrouter": {
205+
"enabled": true,
206+
"id": "openrouter",
207+
"modalities": {
208+
"image": {
209+
"name": "OpenRouter Image",
210+
"npm": "openrouter/image"
211+
}
212+
}
213+
}
214+
}
215+
```
216+
217+
Find more Provider implementations in [providers](https://github.com/ServiceStack/llms/tree/main/llms/providers) folder.

content/docs/v3.mdx

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -317,7 +317,7 @@ Available extensions:
317317
318318
Usage:
319319
llms --add <extension>
320-
llms --add <github-user>/<repo>
320+
llms --add <github-user>/<repo>
321321
```
322322

323323
**Install an extension:**

0 commit comments

Comments
 (0)