Skip to content

Commit 34d33bb

Browse files
committed
update docs
1 parent 9fba5d5 commit 34d33bb

File tree

11 files changed

+367
-12
lines changed

11 files changed

+367
-12
lines changed

ajet/tuner_lib/weight_tuner/experimental/as_oai_model_client.py

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -149,7 +149,11 @@ async def _service_loop(self):
149149
continue
150150

151151
try:
152-
parsed_msg = InterchangeCompletionRequest.model_validate_json(data)
152+
try:
153+
parsed_msg = InterchangeCompletionRequest(**json.loads(data))
154+
except Exception as e:
155+
logger.error(f"Failed to parse SSE event data: {e}" + data)
156+
continue
153157

154158
result = await self.llm_infer(
155159
req=parsed_msg.completion_request,

ajet/tuner_lib/weight_tuner/experimental/as_oai_model_server.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -250,7 +250,7 @@ async def chat_completions(request: Request, authorization: str = Header(None)):
250250
ajet_remote_handler_received[key][timeline_uuid] = int_req
251251

252252
# Wait for response (with periodic checks for client disconnect)
253-
max_wait_time = 1800 # 30 minutes timeout
253+
max_wait_time = 600 # 10 minutes timeout
254254
elapsed_time = 0
255255

256256
try:

ajet/utils/lowlevel_hook.py

Lines changed: 47 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,47 @@
import inspect
import asyncio
from functools import wraps


def patch_task_creation():
    """Monkey-patch asyncio so every task created without an explicit name
    gets a descriptive debug name.

    The generated name has the form
    ``DEBUG_<coroutine-name>_at_<filename>:<lineno>`` where the file/line
    point at the call site that created the task. Both
    ``asyncio.create_task`` and ``asyncio.AbstractEventLoop.create_task``
    are wrapped; tasks that already carry an explicit name pass through
    untouched. One line is printed per task creation, so this is intended
    for debugging (e.g. hunting orphaned tasks) only.
    """
    # --- Hook asyncio.create_task -------------------------------------
    original_create_task = asyncio.create_task

    @wraps(original_create_task)
    def debug_create_task(coro, name=None, **kwargs):
        if not name:
            # stack()[1] is the frame that called asyncio.create_task.
            caller = inspect.stack()[1]
            coro_name = getattr(coro, '__name__', str(coro))
            name = f"DEBUG_{coro_name}_at_{caller.filename.split('/')[-1]}:{caller.lineno}"
        print(f"🎯 asyncio.create_task: {name}")
        return original_create_task(coro, name=name, **kwargs)

    # --- Hook loop.create_task ----------------------------------------
    original_loop_create_task = asyncio.AbstractEventLoop.create_task

    def debug_loop_create_task(self, coro, name=None, context=None):
        if not name:
            # stack()[1] is the frame that called loop.create_task.
            caller = inspect.stack()[1]
            coro_name = getattr(coro, '__name__', str(coro))
            name = f"DEBUG_{coro_name}_at_{caller.filename.split('/')[-1]}:{caller.lineno}"
        print(f"🎯 loop.create_task: {name}")
        return original_loop_create_task(self, coro, name=name, context=context)

    # NOTE(review): the original version also defined a Task.__init__
    # wrapper but never installed it (dead code); assigning to
    # asyncio.Task.__init__ would raise TypeError anyway because Task is a
    # C-implemented type in CPython. The dead hook has been removed.
    asyncio.create_task = debug_create_task
    asyncio.AbstractEventLoop.create_task = debug_loop_create_task


# Activate the hook on import, matching the original module's behavior.
patch_task_creation()

docs/en/agent_framework_support.md

Lines changed: 17 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,9 @@
11
# Agent Framework Support
22

3+
AgentJet currently supports (and has been tested with) the following agentic frameworks.
34

45
<div class="card-grid">
5-
<a href="en/tune_your_first_agent/" class="feature-card">
6+
<a href="../support_agentscope" class="feature-card">
67
<div class="card-header"><img src="https://api.iconify.design/lucide:rocket.svg"
78
class="card-icon card-icon-agent" alt="">
89
<h3>AgentScope</h3>
@@ -11,7 +12,7 @@
1112
Agent-Oriented Programming for Building LLM Applications.
1213
</p>
1314
</a>
14-
<a href="#example-library" class="feature-card">
15+
<a href="../support_langchain" class="feature-card">
1516
<div class="card-header"><img src="https://api.iconify.design/lucide:library.svg"
1617
class="card-icon card-icon-general" alt="">
1718
<h3>LangChain</h3>
@@ -20,16 +21,16 @@
2021
LangChain provides the engineering platform and open source frameworks developers use to build, test, and deploy reliable AI agents.
2122
</p>
2223
</a>
23-
<a href="https://benchmark.agent-matrix.com/" class="feature-card">
24-
<div class="card-header"><img src="https://api.iconify.design/lucide:shield-check.svg" class="card-icon card-icon-tool"
24+
<a href="../support_oaisdk" class="feature-card">
25+
<div class="card-header"><img src="https://api.iconify.design/lucide:shield.svg" class="card-icon card-icon-tool"
2526
alt="">
2627
<h3>OpenAI SDK</h3>
2728
</div>
2829
<p class="card-desc">
2930
The OpenAI Agents SDK allows you to build agentic AI applications in a lightweight and easy-to-use package with minimal abstractions. By the way, both vLLM and SGLang offer compatible services.
3031
</p>
3132
</a>
32-
<a href="https://benchmark.agent-matrix.com/" class="feature-card">
33+
<a href="../support_http" class="feature-card">
3334
<div class="card-header"><img src="https://api.iconify.design/lucide:shield-check.svg" class="card-icon card-icon-tool"
3435
alt="">
3536
<h3>Raw HTTP</h3>
@@ -39,4 +40,15 @@
3940
in this AI era, you can always start from scratch and build your own "skyscrapers".
4041
</p>
4142
</a>
43+
<a href="../support_http" class="feature-card">
44+
<div class="card-header"><img src="https://api.iconify.design/lucide:rocket.svg" class="card-icon card-icon-tool"
45+
alt="">
46+
<h3>More Agentic Frameworks</h3>
47+
</div>
48+
<p class="card-desc">
49+
We are still testing all other agentic frameworks.
50+
Meanwhile, "Raw HTTP" methods can provide `base_url` and `api_key` as AgentJet endpoint,
51+
which theoretically can support most agentic frameworks.
52+
</p>
53+
</a>
4254
</div>

docs/en/support_agentscope.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ This article introduce the way to convert different types of ways to convert you
2323
)
2424
```
2525

26-
=== "After Convertion (`as_agentscope_model`)"
26+
=== "After Convertion (`as_agentscope_model()`)"
2727

2828
```python
2929
model = tuner.as_agentscope_model() # 🛩️ change here
@@ -35,7 +35,7 @@ This article introduce the way to convert different types of ways to convert you
3535
)
3636
```
3737

38-
=== "After Convertion (`as_agentscope_model`)"
38+
=== "After Convertion (`as_oai_baseurl_apikey()`)"
3939

4040
```python
4141
url_and_apikey = tuner.as_oai_baseurl_apikey()

docs/en/support_http.md

Lines changed: 97 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,97 @@
1+
# Without Any Agentic Framework
2+
3+
Why use the Agent SDKs and all these abstractions? If you want to take control of the foundation of LLM Agents,
4+
in this AI era, you can always start from scratch and build your own "skyscrapers".
5+
6+
## HTTP
7+
8+
- use `tuner.as_oai_baseurl_apikey()` to obtain baseurl + apikey arguments
9+
10+
### Explain with examples
11+
12+
=== "Before Convertion"
13+
14+
```python
15+
# tuner to api key
16+
base_url = "https://openrouter.ai/api/v1"
17+
api_key = "sk-1234567"
18+
19+
# take out query
20+
query = workflow_task.task.main_query
21+
22+
messages = [
23+
{
24+
"role": "system",
25+
"content": self.system_prompt
26+
},
27+
{
28+
"role": "user",
29+
"content": query
30+
}
31+
]
32+
33+
# use raw http requests (non-streaming) to get response
34+
response = requests.post(
35+
f"{base_url}/chat/completions",
36+
json={
37+
"model": "fill_whatever_model", # Of course, this `model` field will be ignored.
38+
"messages": messages,
39+
},
40+
headers={
41+
"Authorization": f"Bearer {api_key}"
42+
}
43+
)
44+
final_answer = response.json()['choices'][0]['message']['content']
45+
```
46+
47+
=== "After Convertion (`as_oai_baseurl_apikey`)"
48+
49+
```python
50+
# tuner to api key
51+
url_and_apikey = tuner.as_oai_baseurl_apikey()
52+
base_url = url_and_apikey.base_url
53+
api_key = url_and_apikey.api_key
54+
55+
# take out query
56+
query = workflow_task.task.main_query
57+
58+
messages = [
59+
{
60+
"role": "system",
61+
"content": self.system_prompt
62+
},
63+
{
64+
"role": "user",
65+
"content": query
66+
}
67+
]
68+
69+
# use raw http requests (non-streaming) to get response
70+
response = requests.post(
71+
f"{base_url}/chat/completions",
72+
json={
73+
"model": "fill_whatever_model", # Of course, this `model` field will be ignored.
74+
"messages": messages,
75+
},
76+
headers={
77+
"Authorization": f"Bearer {api_key}"
78+
}
79+
)
80+
final_answer = response.json()['choices'][0]['message']['content']
81+
```
82+
83+
84+
85+
!!! warning ""
86+
- when you are using the `tuner.as_oai_baseurl_apikey()` api, you must enable the following feature in the yaml configuration.
87+
88+
```yaml
89+
90+
ajet:
91+
...
92+
enable_experimental_reverse_proxy: True
93+
...
94+
95+
```
96+
97+

docs/en/support_langchain.md

Lines changed: 88 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,88 @@
1+
# Supported Agent Frameworks: LangChain
2+
3+
This article introduces how to convert your existing workflows into AgentJet workflows.
4+
5+
6+
## LangChain
7+
8+
1. use `tuner.as_oai_baseurl_apikey()` to override ChatOpenAI's baseurl + apikey argument
9+
10+
### Explain with examples
11+
12+
=== "Before Convertion"
13+
14+
```python
15+
from langchain_openai import ChatOpenAI
16+
17+
18+
19+
20+
# create openai model
21+
llm = ChatOpenAI(
22+
model="gpt-5",
23+
)
24+
agent=create_agent(
25+
model=llm,
26+
system_prompt=self.system_prompt,
27+
)
28+
29+
# take out query
30+
query = workflow_task.task.main_query
31+
32+
response = agent.invoke({
33+
"messages": [
34+
{
35+
"role": "user",
36+
"content": query
37+
}
38+
],
39+
})
40+
```
41+
42+
=== "After Convertion (`as_oai_baseurl_apikey`)"
43+
44+
```python
45+
from langchain_openai import ChatOpenAI
46+
47+
url_and_apikey = tuner.as_oai_baseurl_apikey()
48+
base_url = url_and_apikey.base_url
49+
api_key = url_and_apikey.api_key
50+
51+
llm = ChatOpenAI(
52+
model="whatever",
53+
base_url=base_url,
54+
api_key=lambda:api_key,
55+
)
56+
agent = create_agent(
57+
model=llm,
58+
system_prompt=self.system_prompt,
59+
)
60+
61+
# take out query
62+
query = workflow_task.task.main_query
63+
64+
response = agent.invoke({
65+
"messages": [
66+
{
67+
"role": "user",
68+
"content": query
69+
}
70+
],
71+
})
72+
```
73+
74+
75+
76+
!!! warning ""
77+
- when you are using the `tuner.as_oai_baseurl_apikey()` api, you must enable the following feature in the yaml configuration.
78+
79+
```yaml
80+
81+
ajet:
82+
...
83+
enable_experimental_reverse_proxy: True
84+
...
85+
86+
```
87+
88+

0 commit comments

Comments
 (0)