Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
32 changes: 32 additions & 0 deletions src/praisonai-agents/gemini_self_reflection.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
#!/usr/bin/env python3
"""Test script to verify the Gemini JSON parsing fix.

Creates an Agent backed by a Gemini model with self-reflection enabled
and runs a single prompt. Exits with status 0 on success, 1 on failure,
so CI runners can detect a broken run (the original printed the error
and fell through with exit code 0).
"""

import sys
import traceback

from praisonaiagents import Agent


def main() -> int:
    """Run the self-reflection smoke test; return a process exit code."""
    # Minimal configuration to isolate the JSON-parsing issue.
    llm_config = {
        "model": "gemini/gemini-1.5-flash-latest",
        "temperature": 0.7,
        "max_tokens": 500,
    }

    # Create agent with self-reflection enabled.
    agent = Agent(
        instructions="You are a helpful assistant. Be concise and clear.",
        llm=llm_config,
        verbose=True,
        self_reflect=True,
        max_reflect=2,
        min_reflect=1,
    )

    print("Testing Gemini with self-reflection...")
    try:
        response = agent.start("What is 2+2? Explain briefly.")
    except Exception as e:  # top-level boundary: report and fail the run
        print(f"\nError occurred: {e}")
        traceback.print_exc()
        return 1

    print(f"\nFinal response: {response}")
    print("\nTest completed successfully!")
    return 0


if __name__ == "__main__":
    sys.exit(main())
57 changes: 36 additions & 21 deletions src/praisonai-agents/praisonaiagents/agent/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -1274,27 +1274,40 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd
messages.append({"role": "user", "content": reflection_prompt})

try:
# Check if OpenAI client is available
if self._openai_client is None:
# For custom LLMs, self-reflection with structured output is not supported
if self.verbose:
display_self_reflection(f"Agent {self.name}: Self-reflection with structured output is not supported for custom LLM providers. Skipping reflection.", console=self.console)
# Return the original response without reflection
self.chat_history.append({"role": "user", "content": prompt})
self.chat_history.append({"role": "assistant", "content": response_text})
# Only display interaction if not using custom LLM (to avoid double output) and verbose is True
if self.verbose and not self._using_custom_llm:
display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
return response_text

reflection_response = self._openai_client.sync_client.beta.chat.completions.parse(
model=self.reflect_llm if self.reflect_llm else self.llm,
messages=messages,
temperature=temperature,
response_format=ReflectionOutput
)
# Check if we're using a custom LLM (like Gemini)
if self._using_custom_llm or self._openai_client is None:
# For custom LLMs, we need to handle reflection differently
# Use non-streaming to get complete JSON response
reflection_response = self._chat_completion(messages, temperature=temperature, tools=None, stream=False, reasoning_steps=False)

if not reflection_response or not reflection_response.choices:
raise Exception("No response from reflection request")
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

medium

Using the generic Exception class is generally discouraged as it can catch unexpected errors and make debugging more difficult. It's better to use a more specific exception type to allow for more granular error handling. In this case, ValueError would be more appropriate, as the error condition relates to an invalid or missing response from the reflection request.

Suggested change
raise Exception("No response from reflection request")
raise ValueError("No response from reflection request")


reflection_text = reflection_response.choices[0].message.content.strip()

# Clean the JSON output
cleaned_json = self.clean_json_output(reflection_text)

# Parse the JSON manually
reflection_data = json.loads(cleaned_json)

# Create a reflection output object manually
class CustomReflectionOutput:
def __init__(self, data):
self.reflection = data.get('reflection', '')
self.satisfactory = data.get('satisfactory', 'no').lower()

reflection_output = CustomReflectionOutput(reflection_data)
Comment on lines +1294 to +1300
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

medium

Defining a class inside a method is generally not recommended as it can be inefficient (the class is redefined on every call) and can harm readability.

For better consistency and maintainability, I suggest using the existing ReflectionOutput Pydantic model, which is already used for the OpenAI-specific path. This ensures a consistent data structure for reflection output across all LLM providers and leverages Pydantic's validation capabilities.

Suggested change
# Create a reflection output object manually
class CustomReflectionOutput:
def __init__(self, data):
self.reflection = data.get('reflection', '')
self.satisfactory = data.get('satisfactory', 'no').lower()
reflection_output = CustomReflectionOutput(reflection_data)
# Create a reflection output object using the existing ReflectionOutput model
# for consistency and validation.
reflection_data['satisfactory'] = reflection_data.get('satisfactory', 'no').lower()
if 'reflection' not in reflection_data:
reflection_data['reflection'] = ''
reflection_output = ReflectionOutput.model_validate(reflection_data)

Comment on lines +1277 to +1300
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🛠️ Refactor suggestion

Refactor inline class definition and improve error handling.

The inline CustomReflectionOutput class should be defined at the module level for better maintainability and reusability. Additionally, the JSON parsing should validate required fields and provide more specific error messages.

Define the class at the module level near other imports:

class CustomReflectionOutput:
    """Custom reflection output for non-OpenAI LLMs."""
    def __init__(self, data: dict):
        if 'reflection' not in data:
            raise ValueError("Missing required field 'reflection' in reflection response")
        if 'satisfactory' not in data:
            raise ValueError("Missing required field 'satisfactory' in reflection response")
        self.reflection = data['reflection']
        self.satisfactory = data.get('satisfactory', 'no').lower()

Then update the reflection parsing:

-# Create a reflection output object manually
-class CustomReflectionOutput:
-    def __init__(self, data):
-        self.reflection = data.get('reflection', '')
-        self.satisfactory = data.get('satisfactory', 'no').lower()
-
-reflection_output = CustomReflectionOutput(reflection_data)
+try:
+    reflection_output = CustomReflectionOutput(reflection_data)
+except ValueError as e:
+    raise Exception(f"Invalid reflection response format: {e}")
🤖 Prompt for AI Agents
In src/praisonai-agents/praisonaiagents/agent/agent.py around lines 1277 to
1300, move the inline CustomReflectionOutput class definition to the module
level near other imports for better maintainability. Update the class
constructor to validate that the required fields 'reflection' and 'satisfactory'
exist in the input data and raise ValueError with clear messages if missing.
Then, in the reflection parsing code, instantiate this module-level class and
handle JSON parsing errors with specific error messages to improve error
handling.

else:
# Use OpenAI's structured output for OpenAI models
reflection_response = self._openai_client.sync_client.beta.chat.completions.parse(
model=self.reflect_llm if self.reflect_llm else self.llm,
messages=messages,
temperature=temperature,
response_format=ReflectionOutput
)

reflection_output = reflection_response.choices[0].message.parsed
reflection_output = reflection_response.choices[0].message.parsed

if self.verbose:
display_self_reflection(f"Agent {self.name} self reflection (using {self.reflect_llm if self.reflect_llm else self.llm}): reflection='{reflection_output.reflection}' satisfactory='{reflection_output.satisfactory}'", console=self.console)
Expand Down Expand Up @@ -1337,7 +1350,9 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd

logging.debug(f"{self.name} reflection count {reflection_count + 1}, continuing reflection process")
messages.append({"role": "user", "content": "Now regenerate your response using the reflection you made"})
response = self._chat_completion(messages, temperature=temperature, tools=None, stream=self.stream)
# For custom LLMs during reflection, always use non-streaming to ensure complete responses
use_stream = self.stream if not self._using_custom_llm else False
response = self._chat_completion(messages, temperature=temperature, tools=None, stream=use_stream)
response_text = response.choices[0].message.content.strip()
reflection_count += 1
continue # Continue the loop for more reflections
Expand Down
Loading