Commit cb0fa20

Add Comprehensive Debug Logging to LLM and Agent Components
- Enhanced LLM and Agent classes with detailed debug logging capabilities
- Added parameter and configuration logging for get_response and chat methods
- Implemented time tracking for method execution in debug mode
- Masked sensitive information like API keys in logs
- Created new Wikipedia agent example script demonstrating agent initialization
1 parent 9db7033 commit cb0fa20

3 files changed: +253 -1 lines changed
Lines changed: 10 additions & 0 deletions
@@ -0,0 +1,10 @@
+from praisonaiagents import Agent, Task, PraisonAIAgents
+from praisonaiagents.tools import wiki_search, wiki_summary, wiki_page, wiki_random, wiki_language
+
+agent = Agent(
+    instructions="You are a Wikipedia Agent",
+    tools=[wiki_search, wiki_summary, wiki_page, wiki_random, wiki_language],
+    llm="openai/gpt-4o-mini",
+    verbose=10
+)
+agent.start("history of AI")
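The example passes verbose=10; per the llm.py changes below, a non-boolean verbose of 10 or higher triggers the initialization dump, while the per-call debug blocks fire only when the root logger's effective level is DEBUG. A minimal sketch of wiring that up before constructing the agent (stdlib logging only; the format string is illustrative):

import logging

# The debug blocks added in this commit check the root logger's effective
# level, so it must be set to DEBUG before the agent runs.
logging.basicConfig(
    level=logging.DEBUG,
    format="%(asctime)s %(levelname)s %(name)s: %(message)s",
)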

src/praisonai-agents/praisonaiagents/agent/agent.py

Lines changed: 67 additions & 1 deletion
@@ -714,6 +714,22 @@ def _chat_completion(self, messages, temperature=0.2, tools=None, stream=True, r
         return None

     def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False):
+        # Log all parameter values when in debug mode
+        if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+            param_info = {
+                "prompt": str(prompt)[:100] + "..." if isinstance(prompt, str) and len(str(prompt)) > 100 else str(prompt),
+                "temperature": temperature,
+                "tools": [t.__name__ if hasattr(t, "__name__") else str(t) for t in tools] if tools else None,
+                "output_json": str(output_json.__class__.__name__) if output_json else None,
+                "output_pydantic": str(output_pydantic.__class__.__name__) if output_pydantic else None,
+                "reasoning_steps": reasoning_steps,
+                "agent_name": self.name,
+                "agent_role": self.role,
+                "agent_goal": self.goal
+            }
+            logging.debug(f"Agent.chat parameters: {json.dumps(param_info, indent=2, default=str)}")
+
+        start_time = time.time()
         reasoning_steps = reasoning_steps or self.reasoning_steps
         # Search for existing knowledge if any knowledge is provided
         if self.knowledge:
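The block above guards the dict construction and json.dumps call behind an effective-level check, so non-debug runs skip the serialization cost entirely. A self-contained sketch of the same gating pattern (function and field names here are illustrative, not from the repo):

import json
import logging
import time

def chat_with_debug(prompt: str, temperature: float = 0.2) -> None:
    # Build and serialize the parameter snapshot only when DEBUG is active.
    if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
        param_info = {
            # Long prompts are truncated so a single log line stays readable.
            "prompt": prompt[:100] + "..." if len(prompt) > 100 else prompt,
            "temperature": temperature,
        }
        logging.debug(f"chat parameters: {json.dumps(param_info, indent=2, default=str)}")
    start_time = time.time()
    # ... the real LLM call would go here ...
    logging.debug(f"completed in {time.time() - start_time:.2f} seconds")

logging.basicConfig(level=logging.DEBUG)
chat_with_debug("x" * 500)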
@@ -757,6 +773,11 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd
                 self.chat_history.append({"role": "user", "content": prompt})
                 self.chat_history.append({"role": "assistant", "content": response_text})

+                # Log completion time if in debug mode
+                if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+                    total_time = time.time() - start_time
+                    logging.debug(f"Agent.chat completed in {total_time:.2f} seconds")
+
                 return response_text
             except Exception as e:
                 display_error(f"Error in LLM chat: {e}")
@@ -944,6 +965,13 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd
             display_error(f"Error in chat: {e}", console=self.console)
             return None

+        # Log completion time if in debug mode
+        if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+            total_time = time.time() - start_time
+            logging.debug(f"Agent.chat completed in {total_time:.2f} seconds")
+
+        return response_text
+
     def clean_json_output(self, output: str) -> str:
         """Clean and extract JSON from response text."""
         cleaned = output.strip()
@@ -958,6 +986,22 @@ def clean_json_output(self, output: str) -> str:

     async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False):
         """Async version of chat method. TODO: Requires Syncing with chat method."""
+        # Log all parameter values when in debug mode
+        if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+            param_info = {
+                "prompt": str(prompt)[:100] + "..." if isinstance(prompt, str) and len(str(prompt)) > 100 else str(prompt),
+                "temperature": temperature,
+                "tools": [t.__name__ if hasattr(t, "__name__") else str(t) for t in tools] if tools else None,
+                "output_json": str(output_json.__class__.__name__) if output_json else None,
+                "output_pydantic": str(output_pydantic.__class__.__name__) if output_pydantic else None,
+                "reasoning_steps": reasoning_steps,
+                "agent_name": self.name,
+                "agent_role": self.role,
+                "agent_goal": self.goal
+            }
+            logging.debug(f"Agent.achat parameters: {json.dumps(param_info, indent=2, default=str)}")
+
+        start_time = time.time()
         reasoning_steps = reasoning_steps or self.reasoning_steps
         try:
             # Search for existing knowledge if any knowledge is provided
@@ -996,9 +1040,15 @@ async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None
                 self.chat_history.append({"role": "user", "content": prompt})
                 self.chat_history.append({"role": "assistant", "content": response_text})

+                if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+                    total_time = time.time() - start_time
+                    logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
                 return response_text
             except Exception as e:
                 display_error(f"Error in LLM chat: {e}")
+                if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+                    total_time = time.time() - start_time
+                    logging.debug(f"Agent.achat failed in {total_time:.2f} seconds: {str(e)}")
                 return None

             # For OpenAI client
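Here the elapsed time is computed and logged separately on the success and failure paths. An equivalent shape that covers both paths with one timing block (a sketch of an alternative, not the commit's code) uses try/finally:

import logging
import time

def timed_call() -> "str | None":
    start_time = time.time()
    try:
        return "response"  # stand-in for the real LLM call
    except Exception as e:
        logging.debug(f"call failed: {e}")
        return None
    finally:
        # Runs after both the return above and the except branch.
        if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
            logging.debug(f"call finished in {time.time() - start_time:.2f} seconds")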
@@ -1081,7 +1131,11 @@ async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None
                         temperature=temperature,
                         tools=formatted_tools
                     )
-                    return await self._achat_completion(response, tools)
+                    result = await self._achat_completion(response, tools)
+                    if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+                        total_time = time.time() - start_time
+                        logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
+                    return result
                 elif output_json or output_pydantic:
                     response = await async_client.chat.completions.create(
                         model=self.llm,
@@ -1090,19 +1144,31 @@ async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None
                         response_format={"type": "json_object"}
                     )
                     # Return the raw response
+                    if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+                        total_time = time.time() - start_time
+                        logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
                     return response.choices[0].message.content
                 else:
                     response = await async_client.chat.completions.create(
                         model=self.llm,
                         messages=messages,
                         temperature=temperature
                     )
+                    if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+                        total_time = time.time() - start_time
+                        logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
                     return response.choices[0].message.content
             except Exception as e:
                 display_error(f"Error in chat completion: {e}")
+                if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+                    total_time = time.time() - start_time
+                    logging.debug(f"Agent.achat failed in {total_time:.2f} seconds: {str(e)}")
                 return None
         except Exception as e:
             display_error(f"Error in achat: {e}")
+            if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+                total_time = time.time() - start_time
+                logging.debug(f"Agent.achat failed in {total_time:.2f} seconds: {str(e)}")
             return None

     async def _achat_completion(self, response, tools, reasoning_steps=False):

src/praisonai-agents/praisonaiagents/llm/llm.py

Lines changed: 176 additions & 0 deletions
@@ -172,6 +172,36 @@ def __init__(
         # Enable error dropping for cleaner output
         litellm.drop_params = True
         self._setup_event_tracking(events)
+
+        # Log all initialization parameters when in debug mode
+        if not isinstance(verbose, bool) and verbose >= 10:
+            debug_info = {
+                "model": self.model,
+                "timeout": self.timeout,
+                "temperature": self.temperature,
+                "top_p": self.top_p,
+                "n": self.n,
+                "max_tokens": self.max_tokens,
+                "presence_penalty": self.presence_penalty,
+                "frequency_penalty": self.frequency_penalty,
+                "logit_bias": self.logit_bias,
+                "response_format": self.response_format,
+                "seed": self.seed,
+                "logprobs": self.logprobs,
+                "top_logprobs": self.top_logprobs,
+                "api_version": self.api_version,
+                "stop_phrases": self.stop_phrases,
+                "api_key": "***" if self.api_key else None,  # Mask API key for security
+                "base_url": self.base_url,
+                "verbose": self.verbose,
+                "markdown": self.markdown,
+                "self_reflect": self.self_reflect,
+                "max_reflect": self.max_reflect,
+                "min_reflect": self.min_reflect,
+                "reasoning_steps": self.reasoning_steps,
+                "extra_settings": {k: v for k, v in self.extra_settings.items() if k not in ["api_key"]}
+            }
+            logging.debug(f"LLM instance initialized with: {json.dumps(debug_info, indent=2, default=str)}")

     def get_response(
         self,
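Note how api_key is reduced to "***" before the dict is serialized, and extra_settings is filtered for the same key, so credentials never reach the log stream. A generalized sketch of that masking step (the helper name is mine, not the repo's):

import json
import logging

SECRET_KEYS = {"api_key", "token", "password"}

def mask_secrets(config: dict) -> dict:
    # Replace any present secret value with a fixed placeholder.
    return {k: ("***" if k in SECRET_KEYS and v else v) for k, v in config.items()}

logging.basicConfig(level=logging.DEBUG)
logging.debug(json.dumps(mask_secrets({"model": "gpt-4o-mini", "api_key": "sk-abc"}), indent=2))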
@@ -195,6 +225,56 @@ def get_response(
         **kwargs
     ) -> str:
         """Enhanced get_response with all OpenAI-like features"""
+        logging.info(f"Getting response from {self.model}")
+        # Log all self values when in debug mode
+        if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+            debug_info = {
+                "model": self.model,
+                "timeout": self.timeout,
+                "temperature": self.temperature,
+                "top_p": self.top_p,
+                "n": self.n,
+                "max_tokens": self.max_tokens,
+                "presence_penalty": self.presence_penalty,
+                "frequency_penalty": self.frequency_penalty,
+                "logit_bias": self.logit_bias,
+                "response_format": self.response_format,
+                "seed": self.seed,
+                "logprobs": self.logprobs,
+                "top_logprobs": self.top_logprobs,
+                "api_version": self.api_version,
+                "stop_phrases": self.stop_phrases,
+                "api_key": "***" if self.api_key else None,  # Mask API key for security
+                "base_url": self.base_url,
+                "verbose": self.verbose,
+                "markdown": self.markdown,
+                "self_reflect": self.self_reflect,
+                "max_reflect": self.max_reflect,
+                "min_reflect": self.min_reflect,
+                "reasoning_steps": self.reasoning_steps
+            }
+            logging.debug(f"LLM instance configuration: {json.dumps(debug_info, indent=2, default=str)}")
+
+            # Log the parameter values passed to get_response
+            param_info = {
+                "prompt": str(prompt)[:100] + "..." if isinstance(prompt, str) and len(str(prompt)) > 100 else str(prompt),
+                "system_prompt": system_prompt[:100] + "..." if system_prompt and len(system_prompt) > 100 else system_prompt,
+                "chat_history": f"[{len(chat_history)} messages]" if chat_history else None,
+                "temperature": temperature,
+                "tools": [t.__name__ if hasattr(t, "__name__") else str(t) for t in tools] if tools else None,
+                "output_json": str(output_json.__class__.__name__) if output_json else None,
+                "output_pydantic": str(output_pydantic.__class__.__name__) if output_pydantic else None,
+                "verbose": verbose,
+                "markdown": markdown,
+                "self_reflect": self_reflect,
+                "max_reflect": max_reflect,
+                "min_reflect": min_reflect,
+                "agent_name": agent_name,
+                "agent_role": agent_role,
+                "agent_tools": agent_tools,
+                "kwargs": str(kwargs)
+            }
+            logging.debug(f"get_response parameters: {json.dumps(param_info, indent=2, default=str)}")
         try:
             import litellm
             # This below **kwargs** is passed to .completion() directly. so reasoning_steps has to be popped. OR find alternate best way of handling this.
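Both snapshots above truncate long strings to their first 100 characters and summarize chat_history as a message count, keeping each log entry bounded regardless of input size. A standalone sketch of the truncation helper this implies (the function is hypothetical; the diff inlines the expression instead):

def preview(value: object, limit: int = 100) -> str:
    # Stringify any value, truncating with an ellipsis marker past the limit.
    text = str(value)
    return text[:limit] + "..." if len(text) > limit else text

assert preview("a" * 300) == "a" * 100 + "..."
assert preview(42) == "42"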
@@ -552,6 +632,11 @@ def get_response(
         except Exception as error:
             display_error(f"Error in get_response: {str(error)}")
             raise
+
+        # Log completion time if in debug mode
+        if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+            total_time = time.time() - start_time
+            logging.debug(f"get_response completed in {total_time:.2f} seconds")

     async def get_response_async(
         self,
@@ -577,6 +662,56 @@ async def get_response_async(
         """Async version of get_response with identical functionality."""
         try:
             import litellm
+            logging.info(f"Getting async response from {self.model}")
+            # Log all self values when in debug mode
+            if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+                debug_info = {
+                    "model": self.model,
+                    "timeout": self.timeout,
+                    "temperature": self.temperature,
+                    "top_p": self.top_p,
+                    "n": self.n,
+                    "max_tokens": self.max_tokens,
+                    "presence_penalty": self.presence_penalty,
+                    "frequency_penalty": self.frequency_penalty,
+                    "logit_bias": self.logit_bias,
+                    "response_format": self.response_format,
+                    "seed": self.seed,
+                    "logprobs": self.logprobs,
+                    "top_logprobs": self.top_logprobs,
+                    "api_version": self.api_version,
+                    "stop_phrases": self.stop_phrases,
+                    "api_key": "***" if self.api_key else None,  # Mask API key for security
+                    "base_url": self.base_url,
+                    "verbose": self.verbose,
+                    "markdown": self.markdown,
+                    "self_reflect": self.self_reflect,
+                    "max_reflect": self.max_reflect,
+                    "min_reflect": self.min_reflect,
+                    "reasoning_steps": self.reasoning_steps
+                }
+                logging.debug(f"LLM async instance configuration: {json.dumps(debug_info, indent=2, default=str)}")
+
+                # Log the parameter values passed to get_response_async
+                param_info = {
+                    "prompt": str(prompt)[:100] + "..." if isinstance(prompt, str) and len(str(prompt)) > 100 else str(prompt),
+                    "system_prompt": system_prompt[:100] + "..." if system_prompt and len(system_prompt) > 100 else system_prompt,
+                    "chat_history": f"[{len(chat_history)} messages]" if chat_history else None,
+                    "temperature": temperature,
+                    "tools": [t.__name__ if hasattr(t, "__name__") else str(t) for t in tools] if tools else None,
+                    "output_json": str(output_json.__class__.__name__) if output_json else None,
+                    "output_pydantic": str(output_pydantic.__class__.__name__) if output_pydantic else None,
+                    "verbose": verbose,
+                    "markdown": markdown,
+                    "self_reflect": self_reflect,
+                    "max_reflect": max_reflect,
+                    "min_reflect": min_reflect,
+                    "agent_name": agent_name,
+                    "agent_role": agent_role,
+                    "agent_tools": agent_tools,
+                    "kwargs": str(kwargs)
+                }
+                logging.debug(f"get_response_async parameters: {json.dumps(param_info, indent=2, default=str)}")
             reasoning_steps = kwargs.pop('reasoning_steps', self.reasoning_steps)
             litellm.set_verbose = False

@@ -983,6 +1118,11 @@ async def get_response_async(
                 raise LLMContextLengthExceededException(str(error))
             display_error(f"Error in get_response_async: {str(error)}")
             raise
+
+        # Log completion time if in debug mode
+        if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+            total_time = time.time() - start_time
+            logging.debug(f"get_response_async completed in {total_time:.2f} seconds")

     def can_use_tools(self) -> bool:
         """Check if this model can use tool functions"""
@@ -1065,6 +1205,24 @@ def response(

         logger.debug("Using synchronous response function")

+        # Log all self values when in debug mode
+        if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+            debug_info = {
+                "model": self.model,
+                "timeout": self.timeout,
+                "temperature": temperature,
+                "top_p": self.top_p,
+                "n": self.n,
+                "max_tokens": self.max_tokens,
+                "presence_penalty": self.presence_penalty,
+                "frequency_penalty": self.frequency_penalty,
+                "stream": stream,
+                "verbose": verbose,
+                "markdown": markdown,
+                "kwargs": str(kwargs)
+            }
+            logger.debug(f"Response method configuration: {json.dumps(debug_info, indent=2, default=str)}")
+
         # Build messages list
         messages = []
         if system_prompt:
@@ -1150,6 +1308,24 @@ async def aresponse(

         logger.debug("Using asynchronous response function")

+        # Log all self values when in debug mode
+        if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+            debug_info = {
+                "model": self.model,
+                "timeout": self.timeout,
+                "temperature": temperature,
+                "top_p": self.top_p,
+                "n": self.n,
+                "max_tokens": self.max_tokens,
+                "presence_penalty": self.presence_penalty,
+                "frequency_penalty": self.frequency_penalty,
+                "stream": stream,
+                "verbose": verbose,
+                "markdown": markdown,
+                "kwargs": str(kwargs)
+            }
+            logger.debug(f"Async response method configuration: {json.dumps(debug_info, indent=2, default=str)}")
+
         # Build messages list
         messages = []
         if system_prompt:
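The same debug_info block now appears, with minor field differences, in __init__, get_response, get_response_async, response, and aresponse. A follow-up could centralize it in one helper; a sketch of that refactor (the helper is hypothetical, not part of this commit):

import json
import logging

def log_debug_config(label: str, config: dict) -> None:
    # Single choke point for config snapshots: gate, serialize, emit.
    if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
        logging.debug(f"{label}: {json.dumps(config, indent=2, default=str)}")

logging.basicConfig(level=logging.DEBUG)
log_debug_config("Response method configuration", {"model": "gpt-4o-mini", "stream": True})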
