We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
1 parent c53c195 · commit 74f7b0f (copy full SHA for 74f7b0f)
1 file changed
openevolve/evaluator.py
@@ -208,7 +208,8 @@ async def evaluate_program(
208
if "combined_score" in eval_result.metrics:
209
# Original combined_score is just accuracy
210
accuracy = eval_result.metrics["combined_score"]
211
- # Combine with LLM average (70% accuracy, 30% LLM quality)
+ # Combine accuracy with LLM average using dynamic weighting:
212
+ # (1 - llm_feedback_weight) * accuracy + llm_feedback_weight * LLM quality
213
eval_result.metrics["combined_score"] = (
214
accuracy * (1-self.config.llm_feedback_weight) + llm_average * self.config.llm_feedback_weight
215
)
0 commit comments