1 parent 74f7b0f commit 7672b73
1 file changed
openevolve/evaluator.py
@@ -208,10 +208,10 @@ async def evaluate_program(
         if "combined_score" in eval_result.metrics:
             # Original combined_score is just accuracy
             accuracy = eval_result.metrics["combined_score"]
-            # Combine accuracy with LLM average using dynamic weighting:
-            # (1 - llm_feedback_weight) * accuracy + llm_feedback_weight * LLM quality
+            # Combine with LLM average using dynamic weighting
             eval_result.metrics["combined_score"] = (
-                accuracy * (1-self.config.llm_feedback_weight) + llm_average * self.config.llm_feedback_weight
+                accuracy * (1 - self.config.llm_feedback_weight) +
+                llm_average * self.config.llm_feedback_weight
             )

         # Store artifacts if enabled and present
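
The change is a pure reformat plus a shortened comment: combined_score starts as raw accuracy, then is blended with the averaged LLM feedback score, weighted by llm_feedback_weight. Below is a minimal, self-contained sketch of that blend. The helper name combine_scores is hypothetical; in the actual code the expression lives inline in openevolve/evaluator.py, reading the weight from self.config.llm_feedback_weight and the scores from eval_result.metrics.

    # Hypothetical standalone version of the inline expression in the diff.
    def combine_scores(accuracy: float, llm_average: float,
                       llm_feedback_weight: float) -> float:
        """Blend base accuracy with the averaged LLM feedback score.

        weight = 0.0 -> pure accuracy; weight = 1.0 -> pure LLM feedback.
        """
        return (accuracy * (1 - llm_feedback_weight)
                + llm_average * llm_feedback_weight)

    # Example: accuracy 0.9, LLM average 0.6, weight 0.1 -> 0.87
    assert abs(combine_scores(0.9, 0.6, 0.1) - 0.87) < 1e-9

Because the weight comes from config rather than being hard-coded, the same evaluator can range from ignoring LLM feedback entirely to relying on it fully without a code change.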