Skip to content

Commit ecdcd62

Browse files
Merge pull request #530 from MervinPraison/claude/issue-311-20250528_143819
fix: Memory error with wrong LLM model #311
2 parents 41fa293 + 085af69 commit ecdcd62

1 file changed

Lines changed: 12 additions & 1 deletion

File tree

  • src/praisonai-agents/praisonaiagents/task

src/praisonai-agents/praisonaiagents/task/task.py

Lines changed: 12 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -215,9 +215,20 @@ async def execute_callback(self, task_output: TaskOutput) -> None:
                 logger.info(f"Task {self.id}: Calculating quality metrics for output: {task_output.raw[:100]}...")
 
                 # Get quality metrics from LLM
+                # Determine which LLM model to use based on agent configuration
+                llm_model = None
+                if self.agent:
+                    if getattr(self.agent, '_using_custom_llm', False) and hasattr(self.agent, 'llm_instance'):
+                        # For custom LLM instances (like Ollama)
+                        llm_model = self.agent.llm_instance
+                    elif hasattr(self.agent, 'llm') and self.agent.llm:
+                        # For standard model strings
+                        llm_model = self.agent.llm
+
                 metrics = self.memory.calculate_quality_metrics(
                     task_output.raw,
-                    self.expected_output
+                    self.expected_output,
+                    llm=llm_model
                 )
                 logger.info(f"Task {self.id}: Quality metrics calculated: {metrics}")

0 commit comments

Comments (0)