Skip to content

Commit cb42b70

Browse files
Update to v0.0.4
1 parent af2e3dc commit cb42b70

File tree

3 files changed

+65
-15
lines changed

3 files changed

+65
-15
lines changed

README.md

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -35,12 +35,14 @@ for token, tool, tool_bool in T.handle_streaming(stream) :
3535

3636
```bash
3737
.
38-
├── __version__ = "0.0.3_genesis"
38+
├── __version__ = "0.0.4_genesis"
3939
4040
├── clients
4141
│ ├── veniceai(api_key:str) -> openai.OpenAI
4242
│ ├── deepseek(api_key:str) -> openai.OpenAI
4343
│ ├── openrouter(api_key:str) -> openai.OpenAI
44+
│ ├── xai(api_key:str) -> openai.OpenAI
45+
│ ├── groq(api_key:str) -> openai.OpenAI
4446
│ │
4547
│ ├── veniceai_request(client:openai.OpenAI, messages:list[dict], model:str, temperature:float, max_tokens:int, tools: list[dict], include_venice_system_prompt:bool=False, **kwargs) -> openai.Stream
4648
│ ├── generic_request(client:openai.OpenAI, messages:list[dict], model:str, temperature:float, max_tokens:int, tools:list[dict], **kwargs) -> openai.Stream
@@ -69,6 +71,11 @@ for token, tool, tool_bool in T.handle_streaming(stream) :
6971
- [ ] v1.x.x: Reduce dependencies to Python for Rust backend
7072
- [ ] v2.0.0: Backend totally in Rust
7173

74+
## Changelog
75+
76+
#### **v0.0.4** :
77+
- Add **xai** and **groq** providers
78+
7279
## Advanced Examples
7380

7481
- [tools call in a JSON database with Qwen3 4b](https://github.com/SyntaxError4Life/open-taranis/blob/main/examples/test_json_database.py)

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
44

55
[project]
66
name = "open-taranis"
7-
version = "0.0.3"
7+
version = "0.0.4"
88
description = "Minimalist Python framework for AI agents logic-only coding with streaming, tool calls, and multi-LLM provider support"
99
authors = [{name = "SyntaxError4Life", email = "lilian@zanomega.com"}]
1010
dependencies = ["openai"]

src/open_taranis/__init__.py

Lines changed: 56 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -2,21 +2,56 @@
22
import json
33
import re
44

5-
__version__ = "0.0.3_genesis"
5+
__version__ = "0.0.4_genesis"
66

77
class clients:
8+
9+
# ==============================
10+
# The clients with their URL
11+
# ==============================
12+
813
@staticmethod
914
def veniceai(api_key: str) -> openai.OpenAI:
15+
"""
16+
Use `clients.veniceai_request` for call
17+
"""
1018
return openai.OpenAI(api_key=api_key, base_url="https://api.venice.ai/api/v1")
1119

1220
@staticmethod
1321
def deepseek(api_key: str) -> openai.OpenAI:
22+
"""
23+
Use `clients.generic_request` for call
24+
"""
1425
return openai.OpenAI(api_key=api_key, base_url="https://api.deepseek.com")
1526

1627
@staticmethod
1728
def openrouter(api_key: str) -> openai.OpenAI:
29+
"""
30+
Use `clients.openrouter_request` for call
31+
"""
1832
return openai.OpenAI(api_key=api_key, base_url="https://openrouter.ai/api/v1")
1933

34+
@staticmethod
35+
def xai(api_key: str) -> openai.OpenAI:
36+
"""
37+
Use `clients.generic_request` for call
38+
"""
39+
return openai.OpenAI(api_key=api_key, base_url="https://api.x.ai/v1")
40+
41+
@staticmethod
42+
def groq(api_key: str) -> openai.OpenAI:
43+
"""
44+
Use `clients.generic_request` for call
45+
"""
46+
return openai.OpenAI(api_key=api_key, base_url="https://api.groq.com/openai/v1")
47+
48+
49+
# ==============================
50+
# Customers for calls with their specifications
51+
#
52+
# Like "include_venice_system_prompt" for venice.ai or custom app for openrouter
53+
# ==============================
54+
2055
@staticmethod
2156
def veniceai_request(client: openai.OpenAI, messages: list[dict], model:str="defaut", temperature:float=0.7, max_tokens:int=4096, tools:list[dict]=None, include_venice_system_prompt:bool=False, **kwargs) -> openai.Stream:
2257
base_params = {
@@ -48,9 +83,9 @@ def veniceai_request(client: openai.OpenAI, messages: list[dict], model:str="def
4883
params = {**base_params, **tool_params, **venice_params}
4984

5085
return client.chat.completions.create(**params)
51-
86+
5287
@staticmethod
53-
def generic_request(client: openai.OpenAI, messages: list[dict], model:str="defaut", temperature:float=0.7, max_tokens:int=4096, tools:list[dict]=None, **kwargs) -> openai.Stream:
88+
def openrouter_request(client: openai.OpenAI, messages: list[dict], model:str="defaut", temperature:float=0.7, max_tokens:int=4096, tools:list[dict]=None, **kwargs) -> openai.Stream:
5489
base_params = {
5590
"model": model,
5691
"messages": messages,
@@ -69,10 +104,16 @@ def generic_request(client: openai.OpenAI, messages: list[dict], model:str="defa
69104

70105
params = {**base_params, **tool_params}
71106

72-
return client.chat.completions.create(**params)
107+
return client.chat.completions.create(
108+
**params,
109+
extra_headers={
110+
"HTTP-Referer": "https://zanomega.com/",
111+
"X-Title": "Zanomega/open-taranis"
112+
}
113+
)
73114

74115
@staticmethod
75-
def openrouter_request(client: openai.OpenAI, messages: list[dict], model:str="defaut", temperature:float=0.7, max_tokens:int=4096, tools:list[dict]=None, **kwargs) -> openai.Stream:
116+
def generic_request(client: openai.OpenAI, messages: list[dict], model:str="defaut", temperature:float=0.7, max_tokens:int=4096, tools:list[dict]=None, **kwargs) -> openai.Stream:
76117
base_params = {
77118
"model": model,
78119
"messages": messages,
@@ -91,13 +132,11 @@ def openrouter_request(client: openai.OpenAI, messages: list[dict], model:str="d
91132

92133
params = {**base_params, **tool_params}
93134

94-
return client.chat.completions.create(
95-
**params,
96-
extra_headers={
97-
"HTTP-Referer": "https://zanomega.com/",
98-
"X-Title": "Zanomega/open-taranis"
99-
}
100-
)
135+
return client.chat.completions.create(**params)
136+
137+
# ==============================
138+
# Functions for the streaming
139+
# ==============================
101140

102141
def handle_streaming(stream: openai.Stream):
103142
"""
@@ -199,7 +238,11 @@ def handle_tool_call(tool_call:dict) -> tuple[str, str, dict, str] :
199238

200239
return fid, fname, args, ""
201240

202-
def create_assistant_response(content:str, tool_calls:list[dict]=None) -> dict:
241+
# ==============================
242+
# Functions to simplify the messages roles
243+
# ==============================
244+
245+
def create_assistant_response(content:str, tool_calls:list[dict]=None) -> dict[str, str]:
203246
"""
204247
Creates an assistant message, optionally with tool calls.
205248

0 commit comments

Comments
 (0)