# config.yaml — forked from algorithmicsuperintelligence/openevolve
# 44 lines (39 loc) · 1.41 KB
# Configuration for function minimization example
max_iterations: 100
checkpoint_interval: 10
log_level: "INFO"

# LLM configuration
llm:
  # primary_model: "gemini-2.0-flash-lite"
  primary_model: "llama3.1-8b"
  primary_model_weight: 0.8
  # secondary_model: "gemini-2.0-flash"
  secondary_model: "llama-4-scout-17b-16e-instruct"
  secondary_model_weight: 0.2
  # api_base: "https://generativelanguage.googleapis.com/v1beta/openai/"
  api_base: "https://api.cerebras.ai/v1"
  temperature: 0.7
  top_p: 0.95
  max_tokens: 4096

# Prompt configuration
prompt:
  system_message: "You are an expert programmer specializing in optimization algorithms. Your task is to improve a function minimization algorithm to find the global minimum of a complex function with many local minima. The function is f(x, y) = sin(x) * cos(y) + sin(x*y) + (x^2 + y^2)/20. Focus on improving the search_algorithm function to reliably find the global minimum, escaping local minima that might trap simple algorithms."
  num_top_programs: 3
  use_template_stochasticity: true

# Database configuration
database:
  population_size: 50
  archive_size: 20
  num_islands: 3
  elite_selection_ratio: 0.2
  exploitation_ratio: 0.7

# Evaluator configuration
evaluator:
  timeout: 60
  cascade_evaluation: true
  cascade_thresholds: [0.5, 0.75]
  parallel_evaluations: 4
  use_llm_feedback: false

# Evolution settings
diff_based_evolution: true
allow_full_rewrites: false