Skip to content

Commit 016f020

Browse files
Merge pull request #800 from MervinPraison/claude/issue-796-20250710_222402
fix: handle JSON parsing for Gemini self-reflection
2 parents b71009b + fc20e4c commit 016f020

File tree

2 files changed

+68
-21
lines changed

2 files changed

+68
-21
lines changed
Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,32 @@
1+
#!/usr/bin/env python3
"""Test script to verify the Gemini JSON parsing fix for self-reflection.

Runs one prompt through an Agent configured with a Gemini custom-LLM
backend and self-reflection enabled — the code path that previously
failed to parse the JSON reflection output. Exits non-zero on failure
so the script can be used as a CI gate.
"""

import sys
import traceback

from praisonaiagents import Agent

# Minimal Gemini configuration to isolate the JSON-parsing issue.
LLM_CONFIG = {
    "model": "gemini/gemini-1.5-flash-latest",
    "temperature": 0.7,
    "max_tokens": 500,
}


def main() -> int:
    """Run one self-reflected prompt; return a process exit code (0 = pass)."""
    # Self-reflection enabled with a small reflect budget: exercises the
    # reflection JSON round-trip without many extra model calls.
    agent = Agent(
        instructions="You are a helpful assistant. Be concise and clear.",
        llm=LLM_CONFIG,
        verbose=True,
        self_reflect=True,
        max_reflect=2,
        min_reflect=1,
    )

    print("Testing Gemini with self-reflection...")
    try:
        response = agent.start("What is 2+2? Explain briefly.")
    except Exception as e:  # broad on purpose: surface any failure mode
        print(f"\nError occurred: {e}")
        traceback.print_exc()
        return 1

    print(f"\nFinal response: {response}")
    print("\nTest completed successfully!")
    return 0


if __name__ == "__main__":
    sys.exit(main())

src/praisonai-agents/praisonaiagents/agent/agent.py

Lines changed: 36 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -1274,27 +1274,40 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd
12741274
messages.append({"role": "user", "content": reflection_prompt})
12751275

12761276
try:
1277-
# Check if OpenAI client is available
1278-
if self._openai_client is None:
1279-
# For custom LLMs, self-reflection with structured output is not supported
1280-
if self.verbose:
1281-
display_self_reflection(f"Agent {self.name}: Self-reflection with structured output is not supported for custom LLM providers. Skipping reflection.", console=self.console)
1282-
# Return the original response without reflection
1283-
self.chat_history.append({"role": "user", "content": prompt})
1284-
self.chat_history.append({"role": "assistant", "content": response_text})
1285-
# Only display interaction if not using custom LLM (to avoid double output) and verbose is True
1286-
if self.verbose and not self._using_custom_llm:
1287-
display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
1288-
return response_text
1289-
1290-
reflection_response = self._openai_client.sync_client.beta.chat.completions.parse(
1291-
model=self.reflect_llm if self.reflect_llm else self.llm,
1292-
messages=messages,
1293-
temperature=temperature,
1294-
response_format=ReflectionOutput
1295-
)
1277+
# Check if we're using a custom LLM (like Gemini)
1278+
if self._using_custom_llm or self._openai_client is None:
1279+
# For custom LLMs, we need to handle reflection differently
1280+
# Use non-streaming to get complete JSON response
1281+
reflection_response = self._chat_completion(messages, temperature=temperature, tools=None, stream=False, reasoning_steps=False)
1282+
1283+
if not reflection_response or not reflection_response.choices:
1284+
raise Exception("No response from reflection request")
1285+
1286+
reflection_text = reflection_response.choices[0].message.content.strip()
1287+
1288+
# Clean the JSON output
1289+
cleaned_json = self.clean_json_output(reflection_text)
1290+
1291+
# Parse the JSON manually
1292+
reflection_data = json.loads(cleaned_json)
1293+
1294+
# Create a reflection output object manually
1295+
class CustomReflectionOutput:
1296+
def __init__(self, data):
1297+
self.reflection = data.get('reflection', '')
1298+
self.satisfactory = data.get('satisfactory', 'no').lower()
1299+
1300+
reflection_output = CustomReflectionOutput(reflection_data)
1301+
else:
1302+
# Use OpenAI's structured output for OpenAI models
1303+
reflection_response = self._openai_client.sync_client.beta.chat.completions.parse(
1304+
model=self.reflect_llm if self.reflect_llm else self.llm,
1305+
messages=messages,
1306+
temperature=temperature,
1307+
response_format=ReflectionOutput
1308+
)
12961309

1297-
reflection_output = reflection_response.choices[0].message.parsed
1310+
reflection_output = reflection_response.choices[0].message.parsed
12981311

12991312
if self.verbose:
13001313
display_self_reflection(f"Agent {self.name} self reflection (using {self.reflect_llm if self.reflect_llm else self.llm}): reflection='{reflection_output.reflection}' satisfactory='{reflection_output.satisfactory}'", console=self.console)
@@ -1337,7 +1350,9 @@ def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pyd
13371350

13381351
logging.debug(f"{self.name} reflection count {reflection_count + 1}, continuing reflection process")
13391352
messages.append({"role": "user", "content": "Now regenerate your response using the reflection you made"})
1340-
response = self._chat_completion(messages, temperature=temperature, tools=None, stream=self.stream)
1353+
# For custom LLMs during reflection, always use non-streaming to ensure complete responses
1354+
use_stream = self.stream if not self._using_custom_llm else False
1355+
response = self._chat_completion(messages, temperature=temperature, tools=None, stream=use_stream)
13411356
response_text = response.choices[0].message.content.strip()
13421357
reflection_count += 1
13431358
continue # Continue the loop for more reflections

0 commit comments

Comments
 (0)