@@ -38,40 +38,43 @@ for token, tool, tool_bool in T.handle_streaming(stream) :
3838├── __version__ = "0.0.3_genesis"
3939│
4040├── clients
41- │ ├── veniceai(api_key:str) -> openai.OpenAI
42- │ ├── deepseek(api_key:str) -> openai.OpenAI
43- │ ├── openrouter(api_key:str) -> openai.OpenAI
41+ │ ├── veniceai(api_key: str) -> openai.OpenAI
42+ │ ├── deepseek(api_key: str) -> openai.OpenAI
43+ │ ├── openrouter(api_key: str) -> openai.OpenAI
4444│ │
45- │ ├── veniceai_request(client:openai.OpenAI, messages:list[dict], model:str, temperature:float, max_tokens:int, tools: list[dict], include_venice_system_prompt:bool=False, **kwargs) -> openai.Stream
46- │ ├── generic_request(client:openai.OpenAI, messages:list[dict], model:str, temperature:float, max_tokens:int, tools:list[dict], **kwargs) -> openai.Stream
47- │ └── openrouter_request(client:openai.OpenAI, messages:list[dict], model:str, temperature:float, max_tokens:int, tools:list[dict], **kwargs) -> openai.Stream
45+ │ ├── veniceai_request(client:openai.OpenAI, messages:list[dict], model:str, temperature:float,
46+ │ │ max_tokens:int, tools: list[dict], include_venice_system_prompt:bool=False, **kwargs) -> openai.Stream
47+ │ ├── generic_request(client:openai.OpenAI, messages:list[dict], model:str, temperature:float,
48+ │ │ max_tokens:int, tools:list[dict], **kwargs) -> openai.Stream
49+ │ └── openrouter_request(client:openai.OpenAI, messages:list[dict], model:str, temperature:float,
50+ │ max_tokens:int, tools:list[dict], **kwargs) -> openai.Stream
4851│
49- ├── handle_streaming(stream:openai.Stream) -> generator(token:str| None, tool:list, tool_bool:bool)
50- ├── handle_tool_call(tool_call:dict) -> tuple[str, str, dict, str]
52+ ├── handle_streaming(stream: openai.Stream) -> generator(token: str | None, tool: list, tool_bool: bool)
53+ ├── handle_tool_call(tool_call: dict) -> tuple[str, str, dict, str]
5154│
52- ├── create_assistant_response(content:str, tool_calls:list[dict]) -> dict
53- ├── create_function_response(id:str, result:str, name:str) -> dict[str, str, str]
54- ├── create_system_prompt(content:str) -> dict[str, str]
55- └── create_user_prompt(content:str) -> dict[str, str]
55+ ├── create_assistant_response(content: str, tool_calls: list[dict] = None) -> dict
56+ ├── create_function_response(id: str, result: str, name: str) -> dict
57+ ├── create_system_prompt(content: str) -> dict[str, str]
58+ └── create_user_prompt(content: str) -> dict[str, str]
5659```
5760
5861## Roadmap
5962
60- - [ ] v0.0.1: start
61- - [X ] v0.0.x: Add and confirm other API providers
62- - [X ] v0.1.x: Functionality verifications
63- - [X ] > v0.2.0: Add features for ** logic-only coding** approach
64- - [X ] v0.6.x: Add llama.cpp as backend in addition to APIs
65- - [X ] v0.7.x: Add reverse proxy + server to create a dedicated full relay/backend (like OpenRouter), framework usable as server and client
66- - [X ] v0.8.x: Add PyTorch as backend with ` transformers ` to deploy a remote server
67- - [X ] > v0.9.0: Total reduction of dependencies for built-in functions (unless counter-optimizations)
68- - [X ] v1.0.0: First complete version in Python without dependencies
69- - [X ] v1.x.x: Reduce dependencies to Python for Rust backend
70- - [X ] v2.0.0: Backend totally in Rust
63+ - [X] v0.0.1: start
64+ - [ ] v0.0.x: Add and confirm other API providers
65+ - [ ] v0.1.x: Functionality verifications
66+ - [ ] > v0.2.0: Add features for **logic-only coding** approach
67+ - [ ] v0.6.x: Add llama.cpp as backend in addition to APIs
68+ - [ ] v0.7.x: Add reverse proxy + server to create a dedicated full relay/backend (like OpenRouter), framework usable as server and client
69+ - [ ] v0.8.x: Add PyTorch as backend with `transformers` to deploy a remote server
70+ - [ ] > v0.9.0: Total reduction of dependencies for built-in functions (unless counter-optimizations)
71+ - [ ] v1.0.0: First complete version in Python without dependencies
72+ - [ ] v1.x.x: Reduce dependencies to Python for Rust backend
73+ - [ ] v2.0.0: Backend totally in Rust
7174
7275## Advanced Examples
7376
74- - [ tools call in a JSON database with Qwen3 4b] ( https://github.com/SyntaxError4Life/open-taranis/examples/test_json_database.py )
77+ - [tools call in a JSON database with Qwen3 4b](https://github.com/SyntaxError4Life/open-taranis/blob/main/examples/test_json_database.py)
7578
7679## Links
7780
0 commit comments