Skip to content

Commit ebf5d8d

Browse files
Update to v0.1.1
1 parent 914ffc7 commit ebf5d8d

File tree

5 files changed

+91
-5
lines changed

5 files changed

+91
-5
lines changed

README.md

Lines changed: 18 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@ messages = [
2222
stream = T.clients.openrouter_request(
2323
client=client,
2424
messages=messages,
25-
model="qwen/qwen3-4b:free",
25+
model="mistralai/mistral-7b-instruct:free",
2626
)
2727

2828
print("assistant : ",end="")
@@ -31,6 +31,22 @@ for token, tool, tool_bool in T.handle_streaming(stream) :
3131
print(token, end="")
3232
```
3333

34+
To create a simple display using gradio as a backend:
35+
```python
36+
import open_taranis as T
37+
import open_taranis.web_front as W
38+
39+
W.gr.ChatInterface(
40+
fn=W.chat_fn_gradio(
41+
client=T.clients.openrouter(api_key="api_key"),
42+
request=T.clients.openrouter_request,
43+
model="mistralai/mistral-7b-instruct:free",
44+
_system_prompt=""
45+
).create_fn(),
46+
title="web front"
47+
).launch()
48+
```
49+
3450
## Documentation :
3551

3652
- [Base of the docs](https://zanomega.com/open-taranis/) (coding some things before the real docs)
@@ -52,6 +68,7 @@ for token, tool, tool_bool in T.handle_streaming(stream) :
5268
- **v0.0.4** : Add **xai** and **groq** provider
5369
- **v0.0.6** : Add **huggingface** provider and args for **clients.veniceai_request**
5470
- **v0.1.0** : Start the **docs**, add **update-checker** and preparing for the continuation of the project...
71+
- **v0.1.1** : Code to deploy a **frontend with gradio** added (no complex logic at the moment, e.g. tool_calls)
5572

5673
## Advanced Examples
5774

pyproject.toml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -4,10 +4,10 @@ build-backend = "hatchling.build"
44

55
[project]
66
name = "open-taranis"
7-
version = "0.1.0"
7+
version = "0.1.1"
88
description = "Minimalist Python framework for AI agents logic-only coding with streaming, tool calls, and multi-LLM provider support"
99
authors = [{name = "SyntaxError4Life", email = "lilian@zanomega.com"}]
10-
dependencies = ["openai"]
10+
dependencies = ["openai","gradio"]
1111
readme = "README.md"
1212
requires-python = ">=3.8"
1313
license = {text = "GPL-3.0-or-later"}

src/open_taranis/__gradio__.py

Lines changed: 0 additions & 1 deletion
This file was deleted.

src/open_taranis/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
import json
33
import re
44

5-
__version__ = "0.1.0"
5+
__version__ = "0.1.1"
66

77
import requests
88
from packaging import version

src/open_taranis/web_front.py

Lines changed: 70 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,70 @@
1+
import gradio as gr
2+
import open_taranis as T
3+
4+
class chat_fn_gradio:
    """Adapter wrapping an open_taranis client + request function into a
    streaming chat callback usable as ``fn`` in ``gr.ChatInterface``.

    Conversation state lives in ``self.memory`` (not in gradio's own
    ``history``), and the system prompt is prepended on every request so it
    stays adaptable without being stored in memory.
    """

    def __init__(self,
        client: "T.openai.OpenAI",
        request,
        model: str,
        _system_prompt: str = ""
    ):
        # Provider client, e.g. T.clients.openrouter(api_key=...).
        self.client: "T.openai.OpenAI" = client
        # Streaming request *function*, e.g. T.clients.openrouter_request.
        # NOTE(review): the original annotated this as T.openai.Stream, but it
        # is called as a callable below — the annotation was wrong, removed.
        self.request = request
        self.model = model
        # Stored as a one-message list so it can be concatenated in front of
        # the memory on each request.
        self._system_prompt = [{"role": "system", "content": _system_prompt}]

        # Metadata flags. Key name "defaut" (sic) kept as-is for
        # backward compatibility with any code reading self.meta.
        self.meta = {
            "defaut_create_stream_used": False
        }
        # Full conversation history (user + assistant messages), owned here.
        self.memory = []

    def create_stream(self, messages):
        """
        TO IMPLEMENT

        Default implementation, meant to be rewritten by the user:

        ```
        if self.meta["defaut_create_stream_used"]==False : # Just used to detect, must be rewritten by the user
            print("Classic create_stream method used !")
            self.meta["defaut_create_stream_used"] = True

        return self.request(self.client,messages=messages,model=self.model)
        ```

        """
        if self.meta["defaut_create_stream_used"] == False:  # Just used to detect, must be rewritten by the user
            print("Classic create_stream method used !")
            # BUG FIX: the original did ``self.meta = True``, replacing the
            # whole dict with a bool — the *second* call then crashed with
            # ``TypeError: 'bool' object is not subscriptable``. Only the
            # flag is updated now.
            self.meta["defaut_create_stream_used"] = True

        return self.request(self.client, messages=messages, model=self.model)

    def create_fn(self):
        """Return a ``fn(message, history, *args)`` generator suitable for
        ``gr.ChatInterface``; it yields progressively longer partial answers
        prefixed with a small token/model header."""

        # Gradio chat function
        # Gradio sends: message, history
        def fn(message, history, *args):

            # Here we use our own internal memory rather than that of gradio:
            self.memory.append(T.create_user_prompt(message))

            stream = self.create_stream(
                self._system_prompt + self.memory  # We make the system prompt adaptable and never at the beginning of the memory
            )

            partial = ""
            token_nb = 0
            for token, _, _ in T.handle_streaming(stream):
                if token:
                    partial += token
                    token_nb += 1

                    # NOTE(review): indentation was reconstructed from a
                    # whitespace-stripped diff; the yield is assumed to fire
                    # once per received token — confirm against the repo.
                    yield f"""Tokens : {token_nb}
Model : {self.model}

---

{partial}"""
            self.memory.append(T.create_assistant_response(partial))
            print(partial)
            return
        return fn

0 commit comments

Comments
 (0)