Skip to content

Commit 7672b73

Browse files
committed
Adjust formatting
1 parent 74f7b0f commit 7672b73

1 file changed

Lines changed: 3 additions & 3 deletions

File tree

openevolve/evaluator.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -208,10 +208,10 @@ async def evaluate_program(
208 208
if "combined_score" in eval_result.metrics:
209 209
# Original combined_score is just accuracy
210 210
accuracy = eval_result.metrics["combined_score"]
211-
# Combine accuracy with LLM average using dynamic weighting:
212-
# (1 - llm_feedback_weight) * accuracy + llm_feedback_weight * LLM quality
211+
# Combine with LLM average using dynamic weighting
213 212
eval_result.metrics["combined_score"] = (
214-
accuracy * (1-self.config.llm_feedback_weight) + llm_average * self.config.llm_feedback_weight
213+
accuracy * (1 - self.config.llm_feedback_weight) +
214+
llm_average * self.config.llm_feedback_weight
215 215
)
216 216

217 217
# Store artifacts if enabled and present

0 commit comments

Comments (0)