-
-
Notifications
You must be signed in to change notification settings - Fork 1.1k
fix: handle JSON parsing for Gemini self-reflection #800
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.
Already on GitHub? Sign in to your account
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,32 @@ | ||
| #!/usr/bin/env python3 | ||
| """Test script to verify the Gemini JSON parsing fix.""" | ||
|
|
||
| from praisonaiagents import Agent | ||
|
|
||
| # Test with minimal configuration to isolate the issue | ||
| llm_config = { | ||
| "model": "gemini/gemini-1.5-flash-latest", | ||
| "temperature": 0.7, | ||
| "max_tokens": 500, | ||
| } | ||
|
|
||
| # Create agent with self-reflection enabled | ||
| agent = Agent( | ||
| instructions="You are a helpful assistant. Be concise and clear.", | ||
| llm=llm_config, | ||
| verbose=True, | ||
| self_reflect=True, | ||
| max_reflect=2, | ||
| min_reflect=1 | ||
| ) | ||
|
|
||
| # Test with a simple prompt | ||
| print("Testing Gemini with self-reflection...") | ||
| try: | ||
| response = agent.start("What is 2+2? Explain briefly.") | ||
| print(f"\nFinal response: {response}") | ||
| print("\nTest completed successfully!") | ||
| except Exception as e: | ||
| print(f"\nError occurred: {e}") | ||
| import traceback | ||
| traceback.print_exc() |
| Original file line number | Diff line number | Diff line change | ||||||||||||||||||||||||||
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
|
|
@@ -1274,27 +1274,40 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd | |||||||||||||||||||||||||||
| messages.append({"role": "user", "content": reflection_prompt}) | ||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||
| try: | ||||||||||||||||||||||||||||
| # Check if OpenAI client is available | ||||||||||||||||||||||||||||
| if self._openai_client is None: | ||||||||||||||||||||||||||||
| # For custom LLMs, self-reflection with structured output is not supported | ||||||||||||||||||||||||||||
| if self.verbose: | ||||||||||||||||||||||||||||
| display_self_reflection(f"Agent {self.name}: Self-reflection with structured output is not supported for custom LLM providers. Skipping reflection.", console=self.console) | ||||||||||||||||||||||||||||
| # Return the original response without reflection | ||||||||||||||||||||||||||||
| self.chat_history.append({"role": "user", "content": prompt}) | ||||||||||||||||||||||||||||
| self.chat_history.append({"role": "assistant", "content": response_text}) | ||||||||||||||||||||||||||||
| # Only display interaction if not using custom LLM (to avoid double output) and verbose is True | ||||||||||||||||||||||||||||
| if self.verbose and not self._using_custom_llm: | ||||||||||||||||||||||||||||
| display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console) | ||||||||||||||||||||||||||||
| return response_text | ||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||
| reflection_response = self._openai_client.sync_client.beta.chat.completions.parse( | ||||||||||||||||||||||||||||
| model=self.reflect_llm if self.reflect_llm else self.llm, | ||||||||||||||||||||||||||||
| messages=messages, | ||||||||||||||||||||||||||||
| temperature=temperature, | ||||||||||||||||||||||||||||
| response_format=ReflectionOutput | ||||||||||||||||||||||||||||
| ) | ||||||||||||||||||||||||||||
| # Check if we're using a custom LLM (like Gemini) | ||||||||||||||||||||||||||||
| if self._using_custom_llm or self._openai_client is None: | ||||||||||||||||||||||||||||
| # For custom LLMs, we need to handle reflection differently | ||||||||||||||||||||||||||||
| # Use non-streaming to get complete JSON response | ||||||||||||||||||||||||||||
| reflection_response = self._chat_completion(messages, temperature=temperature, tools=None, stream=False, reasoning_steps=False) | ||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||
| if not reflection_response or not reflection_response.choices: | ||||||||||||||||||||||||||||
| raise Exception("No response from reflection request") | ||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||
| reflection_text = reflection_response.choices[0].message.content.strip() | ||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||
| # Clean the JSON output | ||||||||||||||||||||||||||||
| cleaned_json = self.clean_json_output(reflection_text) | ||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||
| # Parse the JSON manually | ||||||||||||||||||||||||||||
| reflection_data = json.loads(cleaned_json) | ||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||
| # Create a reflection output object manually | ||||||||||||||||||||||||||||
| class CustomReflectionOutput: | ||||||||||||||||||||||||||||
| def __init__(self, data): | ||||||||||||||||||||||||||||
| self.reflection = data.get('reflection', '') | ||||||||||||||||||||||||||||
| self.satisfactory = data.get('satisfactory', 'no').lower() | ||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||
| reflection_output = CustomReflectionOutput(reflection_data) | ||||||||||||||||||||||||||||
|
Comment on lines
+1294
to
+1300
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Defining a class inside a method is generally not recommended, as it can be inefficient (the class is redefined on every call) and can harm readability. For better consistency and maintainability, I suggest using the existing `ReflectionOutput` model instead.
Suggested change
Comment on lines
+1277
to
+1300
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 🛠️ Refactor suggestion Refactor inline class definition and improve error handling. The inline Define the class at the module level near other imports: class CustomReflectionOutput:
"""Custom reflection output for non-OpenAI LLMs."""
def __init__(self, data: dict):
if 'reflection' not in data:
raise ValueError("Missing required field 'reflection' in reflection response")
if 'satisfactory' not in data:
raise ValueError("Missing required field 'satisfactory' in reflection response")
self.reflection = data['reflection']
self.satisfactory = data.get('satisfactory', 'no').lower()Then update the reflection parsing: -# Create a reflection output object manually
-class CustomReflectionOutput:
- def __init__(self, data):
- self.reflection = data.get('reflection', '')
- self.satisfactory = data.get('satisfactory', 'no').lower()
-
-reflection_output = CustomReflectionOutput(reflection_data)
+try:
+ reflection_output = CustomReflectionOutput(reflection_data)
+except ValueError as e:
+ raise Exception(f"Invalid reflection response format: {e}")🤖 Prompt for AI Agents |
||||||||||||||||||||||||||||
| else: | ||||||||||||||||||||||||||||
| # Use OpenAI's structured output for OpenAI models | ||||||||||||||||||||||||||||
| reflection_response = self._openai_client.sync_client.beta.chat.completions.parse( | ||||||||||||||||||||||||||||
| model=self.reflect_llm if self.reflect_llm else self.llm, | ||||||||||||||||||||||||||||
| messages=messages, | ||||||||||||||||||||||||||||
| temperature=temperature, | ||||||||||||||||||||||||||||
| response_format=ReflectionOutput | ||||||||||||||||||||||||||||
| ) | ||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||
| reflection_output = reflection_response.choices[0].message.parsed | ||||||||||||||||||||||||||||
| reflection_output = reflection_response.choices[0].message.parsed | ||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||
| if self.verbose: | ||||||||||||||||||||||||||||
| display_self_reflection(f"Agent {self.name} self reflection (using {self.reflect_llm if self.reflect_llm else self.llm}): reflection='{reflection_output.reflection}' satisfactory='{reflection_output.satisfactory}'", console=self.console) | ||||||||||||||||||||||||||||
|
|
@@ -1337,7 +1350,9 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd | |||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||
| logging.debug(f"{self.name} reflection count {reflection_count + 1}, continuing reflection process") | ||||||||||||||||||||||||||||
| messages.append({"role": "user", "content": "Now regenerate your response using the reflection you made"}) | ||||||||||||||||||||||||||||
| response = self._chat_completion(messages, temperature=temperature, tools=None, stream=self.stream) | ||||||||||||||||||||||||||||
| # For custom LLMs during reflection, always use non-streaming to ensure complete responses | ||||||||||||||||||||||||||||
| use_stream = self.stream if not self._using_custom_llm else False | ||||||||||||||||||||||||||||
| response = self._chat_completion(messages, temperature=temperature, tools=None, stream=use_stream) | ||||||||||||||||||||||||||||
| response_text = response.choices[0].message.content.strip() | ||||||||||||||||||||||||||||
| reflection_count += 1 | ||||||||||||||||||||||||||||
| continue # Continue the loop for more reflections | ||||||||||||||||||||||||||||
|
|
||||||||||||||||||||||||||||
There was a problem hiding this comment.
Choose a reason for hiding this comment.
The reason will be displayed to describe this comment to others. Learn more.
Using the generic `Exception` class is generally discouraged, as it can catch unexpected errors and make debugging more difficult. It's better to use a more specific exception type to allow for more granular error handling. In this case, `ValueError` would be more appropriate, as the error condition relates to an invalid or missing response from the reflection request.