Skip to content

Commit 7215694

Browse files
codelion authored and claude committed
Fix library API evaluators broken with process-based parallelism
The callable evaluators in evolve_function and evolve_algorithm used closure-based functions stored in globals(), which don't survive across process boundaries. Since the switch to process-based parallelism (c2f668a), subprocess workers cannot access the parent process memory, causing "module has no attribute '_openevolve_evaluator_*'" errors. Fix by serializing evaluators as self-contained code strings instead of closures. Also adds combined_score to returned metrics to prevent the misleading score averaging warning. Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
1 parent 65cbbe8 commit 7215694

File tree

1 file changed

+122
-70
lines changed

1 file changed

+122
-70
lines changed

openevolve/api.py

Lines changed: 122 additions & 70 deletions
Original file line numberDiff line numberDiff line change
@@ -239,13 +239,40 @@ def _prepare_evaluator(
239239

240240
# If it's a callable, create a wrapper module
241241
if callable(evaluator):
242-
# Create a unique global name for this evaluator
243-
evaluator_id = f"_openevolve_evaluator_{uuid.uuid4().hex[:8]}"
242+
# Try to get the source code of the callable so it can be serialized
243+
# into a standalone file that works in subprocesses
244+
try:
245+
func_source = inspect.getsource(evaluator)
246+
# Dedent in case the function was defined inside another scope
247+
import textwrap
248+
249+
func_source = textwrap.dedent(func_source)
250+
func_name = evaluator.__name__
251+
252+
# Build a self-contained evaluator module with the function source
253+
# and an evaluate() entry point that calls it
254+
evaluator_code = f"""
255+
# Auto-generated evaluator from user-provided callable
256+
import importlib.util
257+
import sys
258+
import os
259+
import copy
260+
import json
261+
import time
244262
245-
# Store in globals so the wrapper can find it
246-
globals()[evaluator_id] = evaluator
263+
{func_source}
247264
248-
evaluator_code = f"""
265+
def evaluate(program_path):
266+
'''Wrapper that calls the user-provided evaluator function'''
267+
return {func_name}(program_path)
268+
"""
269+
except (OSError, TypeError):
270+
# If we can't get source (e.g. built-in, lambda, or closure),
271+
# fall back to the globals-based approach
272+
evaluator_id = f"_openevolve_evaluator_{uuid.uuid4().hex[:8]}"
273+
globals()[evaluator_id] = evaluator
274+
275+
evaluator_code = f"""
249276
# Wrapper for user-provided evaluator function
250277
import {__name__} as api_module
251278
@@ -335,57 +362,67 @@ def initial_sort(arr):
335362
lines.insert(func_end + 1, " " * (indent + 4) + "# EVOLVE-BLOCK-END")
336363
func_source = "\n".join(lines)
337364

338-
# Create evaluator that tests the function
339-
def evaluator(program_path):
340-
import importlib.util
341-
import sys
365+
# Create a self-contained evaluator as a code string so it works in subprocesses.
366+
# Closure-based evaluators fail with process-based parallelism because subprocess
367+
# workers cannot access the parent process's memory.
368+
evaluator_code = f"""
369+
import importlib.util
370+
import copy
342371
343-
# Load the evolved program
344-
spec = importlib.util.spec_from_file_location("evolved", program_path)
345-
if spec is None or spec.loader is None:
346-
return {"score": 0.0, "error": "Failed to load program"}
372+
FUNC_NAME = {func_name!r}
373+
TEST_CASES = {test_cases!r}
347374
348-
module = importlib.util.module_from_spec(spec)
375+
def evaluate(program_path):
376+
'''Auto-generated evaluator for evolve_function'''
377+
# Load the evolved program
378+
spec = importlib.util.spec_from_file_location("evolved", program_path)
379+
if spec is None or spec.loader is None:
380+
return {{"combined_score": 0.0, "score": 0.0, "error": "Failed to load program"}}
349381
350-
try:
351-
spec.loader.exec_module(module)
352-
except Exception as e:
353-
return {"score": 0.0, "error": f"Failed to execute program: {str(e)}"}
382+
module = importlib.util.module_from_spec(spec)
354383
355-
if not hasattr(module, func_name):
356-
return {"score": 0.0, "error": f"Function '{func_name}' not found"}
384+
try:
385+
spec.loader.exec_module(module)
386+
except Exception as e:
387+
return {{"combined_score": 0.0, "score": 0.0, "error": f"Failed to execute program: {{str(e)}}"}}
357388
358-
evolved_func = getattr(module, func_name)
359-
correct = 0
360-
total = len(test_cases)
361-
errors = []
389+
if not hasattr(module, FUNC_NAME):
390+
return {{"combined_score": 0.0, "score": 0.0, "error": f"Function '{{FUNC_NAME}}' not found"}}
362391
363-
for input_val, expected in test_cases:
364-
try:
365-
# Handle case where input is a list/mutable - make a copy
366-
if isinstance(input_val, list):
367-
test_input = input_val.copy()
368-
else:
369-
test_input = input_val
370-
371-
result = evolved_func(test_input)
372-
if result == expected:
373-
correct += 1
374-
else:
375-
errors.append(f"Input {input_val}: expected {expected}, got {result}")
376-
except Exception as e:
377-
errors.append(f"Input {input_val}: {str(e)}")
378-
379-
return {
380-
"score": correct / total,
381-
"test_pass_rate": correct / total,
382-
"tests_passed": correct,
383-
"total_tests": total,
384-
"errors": errors[:3], # Limit error details
385-
}
392+
evolved_func = getattr(module, FUNC_NAME)
393+
correct = 0
394+
total = len(TEST_CASES)
395+
errors = []
396+
397+
for input_val, expected in TEST_CASES:
398+
try:
399+
# Handle case where input is a list/mutable - make a copy
400+
if isinstance(input_val, list):
401+
test_input = input_val.copy()
402+
else:
403+
test_input = input_val
404+
405+
result = evolved_func(test_input)
406+
if result == expected:
407+
correct += 1
408+
else:
409+
errors.append(f"Input {{input_val}}: expected {{expected}}, got {{result}}")
410+
except Exception as e:
411+
errors.append(f"Input {{input_val}}: {{str(e)}}")
412+
413+
score = correct / total if total > 0 else 0.0
414+
return {{
415+
"combined_score": score,
416+
"score": score,
417+
"test_pass_rate": score,
418+
"tests_passed": correct,
419+
"total_tests": total,
420+
"errors": errors[:3],
421+
}}
422+
"""
386423

387424
return run_evolution(
388-
initial_program=func_source, evaluator=evaluator, iterations=iterations, **kwargs
425+
initial_program=func_source, evaluator=evaluator_code, iterations=iterations, **kwargs
389426
)
390427

391428

@@ -447,36 +484,51 @@ def benchmark_sort(instance):
447484
lines.append(" " * (indent + 4) + "# EVOLVE-BLOCK-END")
448485
class_source = "\n".join(lines)
449486

450-
# Create evaluator
451-
def evaluator(program_path):
452-
import importlib.util
487+
# Create a self-contained evaluator as a code string so it works in subprocesses.
488+
import textwrap
453489

454-
# Load the evolved program
455-
spec = importlib.util.spec_from_file_location("evolved", program_path)
456-
if spec is None or spec.loader is None:
457-
return {"score": 0.0, "error": "Failed to load program"}
490+
class_name = algorithm_class.__name__
491+
benchmark_source = textwrap.dedent(inspect.getsource(benchmark))
458492

459-
module = importlib.util.module_from_spec(spec)
493+
evaluator_code = f"""
494+
import importlib.util
460495
461-
try:
462-
spec.loader.exec_module(module)
463-
except Exception as e:
464-
return {"score": 0.0, "error": f"Failed to execute program: {str(e)}"}
496+
CLASS_NAME = {class_name!r}
465497
466-
if not hasattr(module, algorithm_class.__name__):
467-
return {"score": 0.0, "error": f"Class '{algorithm_class.__name__}' not found"}
498+
{benchmark_source}
468499
469-
AlgorithmClass = getattr(module, algorithm_class.__name__)
500+
def evaluate(program_path):
501+
'''Auto-generated evaluator for evolve_algorithm'''
502+
spec = importlib.util.spec_from_file_location("evolved", program_path)
503+
if spec is None or spec.loader is None:
504+
return {{"combined_score": 0.0, "score": 0.0, "error": "Failed to load program"}}
470505
471-
try:
472-
instance = AlgorithmClass()
473-
metrics = benchmark(instance)
474-
return metrics if isinstance(metrics, dict) else {"score": metrics}
475-
except Exception as e:
476-
return {"score": 0.0, "error": str(e)}
506+
module = importlib.util.module_from_spec(spec)
507+
508+
try:
509+
spec.loader.exec_module(module)
510+
except Exception as e:
511+
return {{"combined_score": 0.0, "score": 0.0, "error": f"Failed to execute program: {{str(e)}}"}}
512+
513+
if not hasattr(module, CLASS_NAME):
514+
return {{"combined_score": 0.0, "score": 0.0, "error": f"Class '{{CLASS_NAME}}' not found"}}
515+
516+
AlgorithmClass = getattr(module, CLASS_NAME)
517+
518+
try:
519+
instance = AlgorithmClass()
520+
metrics = {benchmark.__name__}(instance)
521+
if not isinstance(metrics, dict):
522+
metrics = {{"score": metrics}}
523+
if "combined_score" not in metrics:
524+
metrics["combined_score"] = metrics.get("score", 0.0)
525+
return metrics
526+
except Exception as e:
527+
return {{"combined_score": 0.0, "score": 0.0, "error": str(e)}}
528+
"""
477529

478530
return run_evolution(
479-
initial_program=class_source, evaluator=evaluator, iterations=iterations, **kwargs
531+
initial_program=class_source, evaluator=evaluator_code, iterations=iterations, **kwargs
480532
)
481533

482534

0 commit comments

Comments (0)