Skip to content

Commit 2f34a73

Browse files
Update to v0.2.4
1 parent ba2f005 commit 2f34a73

File tree

5 files changed

+78
-119
lines changed

5 files changed

+78
-119
lines changed

README.md

Lines changed: 28 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -44,27 +44,6 @@ for token, tool, tool_bool in T.handle_streaming(stream) :
4444
```
4545
</details>
4646

47-
<details><summary><b>To create a simple display using gradio as backend</b></summary>
48-
49-
```python
50-
import open_taranis as T
51-
import open_taranis.web_front as W
52-
import gradio as gr
53-
54-
gr.ChatInterface(
55-
fn=W.chat_fn_gradio(
56-
client=T.clients.openrouter(), # API_KEY in env_var
57-
request=T.clients.openrouter_request,
58-
model="nvidia/nemotron-3-nano-30b-a3b:free",
59-
_system_prompt="You are an agent named **Taranis**"
60-
).create_fn(),
61-
title="web front"
62-
).launch()
63-
```
64-
</details>
65-
66-
67-
6847
<details><summary><b>Make a simple agent with a context window on the last 6 turns</b></summary>
6948

7049
```python
@@ -102,6 +81,33 @@ while True :
10281

10382
print("\n\n","="*60,"\n")
10483
```
84+
</details>
85+
86+
<details><summary><b>To create a simple display using gradio as backend</b></summary>
87+
88+
```python
89+
import open_taranis as T
90+
import open_taranis.web_front as W
91+
import gradio as gr
92+
93+
class Gradio_agent(T.agent_base):
94+
def __init__(self):
95+
super().__init__()
96+
97+
self._system_prompt = [T.create_system_prompt("You are a agent nammed **Taranis**")]
98+
99+
def create_stream(self):
100+
return T.clients.openrouter_request(
101+
client=T.clients.openrouter(),
102+
messages=self._system_prompt+self.messages,
103+
model="nvidia/nemotron-3-nano-30b-a3b:free"
104+
)
105+
106+
gr.ChatInterface(
107+
fn=W.create_fn_gradio(Gradio_agent()),
108+
title="Open-taranis Agent"
109+
).launch()
110+
```
105111
</details>
106112

107113
---
@@ -156,6 +162,7 @@ Available in [French](https://zanomega.com/open-taranis/fr/)
156162
- **v0.2.1** : Updated `agent_base` and added a more concrete example of agents
157163
- **v0.2.2** : Upgraded all the code to add [**Kimi Code**](https://www.kimi.com/code) as client and reduce code (**Not official !**)
158164
- **v0.2.3** : Updated `agent_base`, added some functions and a **cool** agent
165+
- **v0.2.4** : Improved CoT techniques and updated `web_front.py`; deploy an agent to the browser in a few lines
159166
</details>
160167

161168

examples/infinite_agent_v1.py

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,9 @@
66
REQUEST = T.clients.openrouter_request
77
MAX_TOKENS = 16000
88

9-
# Made with v0.2.3
9+
# Made with v0.2.4
10+
if T.__version__ < "0.2.4":
11+
exit(f"Version v0.2.4 minimum required, you have v{T.__version__}")
1012

1113
# =========================================
1214

@@ -61,13 +63,11 @@ def manage_messages_after_reply(self):
6163
is_thinking = False
6264
new_memory = ""
6365
for token, _, _ in T.handle_streaming(stream):
64-
is_thinking = T.handle_thinking(token, is_thinking)
66+
is_thinking, norm_tok, cot_tok = T.handle_thinking(token, is_thinking)
6567

66-
if is_thinking :
67-
continue
68-
else :
69-
new_memory += token
70-
68+
if norm_tok :
69+
new_memory += norm_tok
70+
7171
self.memory = T.remove_thinks(new_memory)
7272
self._system_prompt = [T.create_system_prompt(
7373
self.system_prompt + self.memory
@@ -93,4 +93,4 @@ def manage_messages_after_reply(self):
9393
for t in My_agent(prompt):
9494
print(t, end="", flush=True)
9595

96-
print("\n\n","="*60,"\n")
96+
print("\n\n","="*60,"\n")

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
44

55
[project]
66
name = "open-taranis"
7-
version = "0.2.3"
7+
version = "0.2.4"
88
description = "Python framework for AI agents logic-only coding with streaming, tool calls, and multi-LLM provider support"
99
authors = [{name = "SyntaxError4Life", email = "contact@zanomega.com"}]
1010
dependencies = ["requests", "packaging", "openai", "bs4"]

src/open_taranis/__init__.py

Lines changed: 24 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77
import inspect
88
from typing import Any, Callable, Literal, Union, get_args, get_origin
99

10-
__version__ = "0.2.3"
10+
__version__ = "0.2.4"
1111

1212
import requests
1313
from packaging import version
@@ -395,14 +395,17 @@ def handle_tool_call(tool_call:dict) -> tuple[str, str, dict, str] :
395395
def functions_to_tools(funcs: list[Callable]) -> list[dict[str, Any]]:
396396
return [utils.function_to_openai_tool(f) for f in funcs]
397397

398-
def handle_thinking(token, is_thinking):
399-
if "<think>" in token or is_thinking :
400-
is_thinking=True
398+
def handle_thinking(TOKEN, is_thinking):
399+
token, CoT = TOKEN, None
401400

402-
if "</think>" in token :
403-
is_thinking=False
401+
if "<think>" in TOKEN or is_thinking :
402+
token, CoT = None, TOKEN
403+
is_thinking = True
404+
405+
if "</think>" in TOKEN :
406+
is_thinking = False
404407

405-
return is_thinking
408+
return is_thinking, token, CoT
406409

407410
def remove_thinks(message:str):
408411
assert type(message) == str
@@ -500,9 +503,6 @@ def manage_assistant_response(self, response):
500503
def manage_messages_in_reply(self):
501504
"""
502505
Function to manage message history, executed at each step (after agent response or tool call)
503-
504-
505-
```
506506
"""
507507
pass
508508

@@ -547,25 +547,21 @@ def __call__(self, prompt):
547547
response = ""
548548
reasoning = ""
549549
for token, tool_calls, run in handle_streaming(self.create_stream()) :
550-
if token :
551-
552-
if "<think>" in token or is_thinking :
553-
is_thinking=True
554-
555-
if "</think>" in token :
556-
is_thinking=False
557-
558-
if self.meta["is_thinking_enabled"] :
559-
reasoning += token
560-
else :
561-
response += token
562-
563-
if self.meta["yield_thinking"]:
564-
yield self.manage_token_yield(token, is_thinking)
565-
550+
is_thinking, token, CoT = handle_thinking(token, is_thinking)
551+
552+
if is_thinking:
553+
if self.meta["is_thinking_enabled"]:
554+
reasoning += CoT
566555
else :
567-
yield self.manage_token_yield(token, is_thinking)
568-
response += token
556+
response += CoT
557+
558+
if self.meta["yield_thinking"]:
559+
yield self.manage_token_yield(token, is_thinking=True)
560+
561+
else :
562+
yield self.manage_token_yield(token, is_thinking=False)
563+
if token : response += token
564+
569565

570566
if run:
571567

src/open_taranis/web_front.py

Lines changed: 17 additions & 61 deletions
Original file line numberDiff line numberDiff line change
@@ -1,63 +1,19 @@
11
import open_taranis as T
22

3-
class chat_fn_gradio:
4-
def __init__(self,
5-
client:T.openai.OpenAI,
6-
request:T.openai.Stream,
7-
model:str,
8-
_system_prompt:str=""
9-
):
10-
11-
self.client:T.openai.OpenAI = client
12-
self.request:T.openai.Stream = request
13-
self.model = model
14-
self._system_prompt = [{"role":"system", "content":_system_prompt}]
15-
16-
def create_stream(self, messages):
17-
"""
18-
TO IMPLEMENT
19-
"""
20-
21-
return self.request(
22-
self.client,
23-
messages=self._system_prompt+messages,
24-
model=self.model
25-
)
26-
27-
def create_fn(self):
28-
29-
# Gradio chat function
30-
# Gradio sends: message, history
31-
def fn(message, history, *args):
32-
33-
messages=[]
34-
35-
for user, assistant in history :
36-
messages.append(T.create_user_prompt(user))
37-
messages.append(T.create_assistant_response(assistant))
38-
messages.append(T.create_user_prompt(message))
39-
40-
stream = self.create_stream(
41-
messages=messages
42-
)
43-
44-
partial = ""
45-
is_thinking = False
46-
47-
for token, _, _ in T.handle_streaming(stream):
48-
if token :
49-
50-
if "<think>" in token or is_thinking :
51-
is_thinking = True
52-
53-
if "</think>" in token :
54-
is_thinking = False
55-
56-
yield "Thinking...."
57-
continue
58-
59-
else : partial += token
60-
61-
yield partial
62-
return
63-
return fn
3+
def create_fn_gradio(Agent):
4+
5+
# Gradio chat function
6+
# Gradio sends: message, history
7+
def fn(message, history, *args):
8+
9+
partial = ""
10+
for token, is_thinking in Agent(message):
11+
if is_thinking :
12+
yield partial + "\nThinking...."
13+
continue
14+
15+
if token : partial += token
16+
yield partial
17+
18+
return
19+
return fn

0 commit comments

Comments
 (0)