Skip to content

Commit 8c7e644

Browse files
committed
update flow
1 parent d1d43af commit 8c7e644

File tree

3 files changed

+27
-23
lines changed

3 files changed

+27
-23
lines changed

veadk/agent.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -121,7 +121,7 @@ class Agent(LlmAgent):
121121

122122
enable_responses: bool = False
123123

124-
enable_shadow_agent: bool = False
124+
enable_supervisor_flow: bool = False
125125

126126
context_cache_config: Optional[ContextCacheConfig] = None
127127

@@ -306,15 +306,15 @@ def _llm_flow(self) -> BaseLlmFlow:
306306
):
307307
from veadk.flows.supervisor_single_flow import SupervisorSingleFlow
308308

309-
if self.enable_shadow_agent:
309+
if self.enable_supervisor_flow:
310310
logger.debug(f"Enable supervisor flow for agent: {self.name}")
311311
return SupervisorSingleFlow(supervised_agent=self)
312312
else:
313313
return SingleFlow()
314314
else:
315315
from veadk.flows.supervisor_auto_flow import SupervisorAutoFlow
316316

317-
if self.enable_shadow_agent:
317+
if self.enable_supervisor_flow:
318318
logger.debug(f"Enable supervisor flow for agent: {self.name}")
319319
return SupervisorAutoFlow(supervised_agent=self)
320320
return AutoFlow()

veadk/agents/supervise_agent.py

Lines changed: 11 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -1,41 +1,27 @@
11
from google.adk.models.llm_request import LlmRequest
22
from jinja2 import Template
3-
from pydantic import BaseModel
43

54
from veadk import Agent, Runner
5+
from veadk.utils.logger import get_logger
66

7-
8-
class SupervisorAgentOutput(BaseModel):
9-
advice: str = ""
10-
"""
11-
Advice for the worker agent.
12-
For example, suggested function call / actions / responses.
13-
"""
14-
7+
logger = get_logger(__name__)
158

169
instruction = Template("""You are a supervisor of an agent system. The system prompt of worker agent is:
1710
1811
```system prompt
1912
{{ system_prompt }}
2013
```
2114
22-
```worker agent tools
23-
{{ agent_tools }}
24-
```
25-
2615
You should guide the agent to finish the task. If you think the history execution is not correct, you should give your advice to the worker agent. If you think the history execution is correct, you should output an empty string.
27-
28-
Your final response should be in `json` format.
2916
""")
3017

3118

3219
def build_supervisor(supervised_agent: Agent) -> Agent:
3320
custom_instruction = instruction.render(system_prompt=supervised_agent.instruction)
3421
agent = Agent(
3522
name="supervisor",
36-
description="",
23+
description="A supervisor for agent execution",
3724
instruction=custom_instruction,
38-
output_schema=SupervisorAgentOutput,
3925
)
4026

4127
return agent
@@ -55,4 +41,11 @@ async def generate_advice(agent: Agent, llm_request: LlmRequest) -> str:
5541
if part.function_response:
5642
messages += f"{content.role}: {part.function_response}"
5743

58-
return await runner.run(messages="History trajectory is: " + messages)
44+
prompt = (
45+
f"Tools of agent is {llm_request.tools_dict}. History trajectory is: "
46+
+ messages
47+
)
48+
49+
logger.debug(f"Prompt for supervisor: {prompt}")
50+
51+
return await runner.run(messages=prompt)

veadk/flows/supervisor_auto_flow.py

Lines changed: 13 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,9 @@
1010
from veadk import Agent
1111
from veadk.agents.supervise_agent import generate_advice
1212
from veadk.flows.supervisor_single_flow import SupervisorSingleFlow
13+
from veadk.utils.logger import get_logger
14+
15+
logger = get_logger(__name__)
1316

1417

1518
class SupervisorAutoFlow(SupervisorSingleFlow):
@@ -24,9 +27,17 @@ async def _call_llm_async(
2427
model_response_event: Event,
2528
) -> AsyncGenerator[LlmResponse, None]:
2629
advice = await generate_advice(self._supervisor, llm_request)
27-
print(f"Advice: {advice}")
30+
logger.debug(f"Advice from supervisor: {advice}")
31+
32+
llm_request.contents.append(
33+
Content(
34+
parts=[Part(text=f"Message from your supervisor: {advice}")],
35+
role="user",
36+
)
37+
)
2838

29-
llm_request.contents.append(Content(parts=[Part(text=advice)], role="model"))
39+
print("====")
40+
print(llm_request)
3041

3142
async for llm_response in super()._call_llm_async(
3243
invocation_context, llm_request, model_response_event

0 commit comments

Comments
 (0)