Skip to content

Commit 3ae4ca1

Browse files
committed
feat(awa): improve adaptive weight adjustment and evaluation metrics
1 parent 9d8dc42 commit 3ae4ca1

4 files changed

Lines changed: 185 additions & 88 deletions

File tree

scripts/experiment_awa.py

Lines changed: 36 additions & 44 deletions
Original file line number · Diff line number · Diff line change
@@ -34,7 +34,7 @@
3434
RiskObjective,
3535
DiversificationObjective,
3636
)
37-
from src.portfolio.metrics import calculate_igd, calculate_spacing
37+
from src.portfolio.metrics import calculate_igd, calculate_spacing, calculate_spread
3838

3939

4040
def run_awa_experiment():
@@ -75,8 +75,8 @@ def run_awa_experiment():
7575
)
7676
ref_front = np.column_stack([ref_metrics["Return"], ref_metrics["Risk"]])
7777

78-
# ── Experiment 1: Distribution Uniformity (Spacing) ──────────────────────
79-
print("Running Experiment 1: Distribution Uniformity...")
78+
# ── Experiment 1 & 3 Combined: Distribution and Quality Comparison ──────
79+
print("Running Quality Comparison (IGD, Spacing, Spread)...")
8080
pop_size = NUM_PARETO_POINTS
8181
gens = MOEAD_GENERATIONS
8282

@@ -91,7 +91,12 @@ def run_awa_experiment():
9191
**data_kwargs,
9292
)
9393
front1 = np.column_stack([metrics1["Return"], metrics1["Risk"]])
94-
spacing1 = calculate_spacing(front1)
94+
95+
m_dict1 = {
96+
"IGD": calculate_igd(front1, ref_front),
97+
"Spacing": calculate_spacing(front1),
98+
"Spread": calculate_spread(front1, ref_front),
99+
}
95100

96101
# MOEA/D-AWA
97102
print(" Evaluating MOEA/D-AWA...")
@@ -105,33 +110,33 @@ def run_awa_experiment():
105110
**data_kwargs,
106111
)
107112
front2 = np.column_stack([metrics2["Return"], metrics2["Risk"]])
108-
spacing2 = calculate_spacing(front2)
109113

110-
print(f" Spacing - MOEA/D: {spacing1:.6f}")
111-
print(f" Spacing - MOEA/D-AWA: {spacing2:.6f}")
114+
m_dict2 = {
115+
"IGD": calculate_igd(front2, ref_front),
116+
"Spacing": calculate_spacing(front2),
117+
"Spread": calculate_spread(front2, ref_front),
118+
}
112119

113-
# Plot Comparison
114-
plt.figure(figsize=(10, 6))
115-
plt.scatter(
116-
front1[:, 1],
117-
front1[:, 0],
118-
label=f"Standard MOEA/D (Spacing: {spacing1:.6f})",
119-
alpha=0.6,
120+
print(
121+
f" MOEA/D - IGD: {m_dict1['IGD']:.6f}, Spacing: {m_dict1['Spacing']:.6f}, Spread: {m_dict1['Spread']:.6f}"
120122
)
121-
plt.scatter(
122-
front2[:, 1],
123-
front2[:, 0],
124-
label=f"MOEA/D-AWA (Spacing: {spacing2:.6f})",
125-
alpha=0.6,
123+
print(
124+
f" MOEA/D-AWA - IGD: {m_dict2['IGD']:.6f}, Spacing: {m_dict2['Spacing']:.6f}, Spread: {m_dict2['Spread']:.6f}"
126125
)
127-
plt.xlabel("Risk (Standard Deviation)")
128-
plt.ylabel("Return")
129-
plt.title("Experiment 1: Pareto Front Distribution Quality")
130-
plt.legend()
131-
plt.grid(True)
132-
plt.savefig(os.path.join(run_folder, "exp1_uniformity.png"))
133-
plt.close()
134126

127+
# Plot Comparison (Consolidated)
128+
from src.visualization import plot_two_variants_comparison_2d
129+
130+
plot_two_variants_comparison_2d(
131+
ref_metrics,
132+
metrics1,
133+
metrics2,
134+
name1="Standard MOEA/D",
135+
name2="MOEA/D-AWA",
136+
metrics_dict1=m_dict1,
137+
metrics_dict2=m_dict2,
138+
save_path=os.path.join(run_folder, "awa_comparison_2d.png"),
139+
)
135140
# ── Experiment 2: Performance vs Generations (Adaptive Progress) ──────────
136141
print("Running Experiment 2: Convergence Progress...")
137142
# Compare IGD history
@@ -160,29 +165,16 @@ def run_awa_experiment():
160165
gens_list = np.arange(0, gens + 1, 20)
161166

162167
plt.figure(figsize=(10, 6))
163-
plt.plot(gens_list, igd1, "o-", label="Standard MOEA/D")
164-
plt.plot(gens_list, igd2, "s-", label="MOEA/D-AWA")
168+
plt.plot(gens_list, igd1, "o-", label="Standard MOEA/D", color="#1f77b4")
169+
plt.plot(gens_list, igd2, "s-", label="MOEA/D-AWA", color="#ff7f0e")
165170
plt.xlabel("Generations")
166171
plt.ylabel("IGD")
167-
plt.title("Experiment 2: Convergence Progress with Weight Adaptation")
172+
plt.title("Convergence Progress with Weight Adaptation", fontsize=14)
168173
plt.legend()
169-
plt.grid(True)
170-
plt.savefig(os.path.join(run_folder, "exp2_convergence.png"))
174+
plt.grid(True, linestyle="--", alpha=0.6)
175+
plt.savefig(os.path.join(run_folder, "awa_convergence_progress.png"), dpi=150)
171176
plt.close()
172177

173-
# ── Experiment 3: 2D Pareto Comparison ──────────────────────────────────
174-
print("Running Experiment 3: 2D Pareto Comparison...")
175-
from src.visualization import plot_two_variants_comparison_2d
176-
177-
plot_two_variants_comparison_2d(
178-
ref_metrics,
179-
metrics1,
180-
metrics2,
181-
name1="Standard MOEA/D",
182-
name2="MOEA/D-AWA",
183-
save_path=os.path.join(run_folder, "exp3_2d_comparison.png"),
184-
)
185-
186178
# ── Experiment 4: 3D Pareto Comparison ──────────────────────────────────
187179
print("Running Experiment 4: 3D Pareto Comparison...")
188180
prob_3d = PortfolioProblem(

src/portfolio/metrics.py

Lines changed: 61 additions & 5 deletions
Original file line number · Diff line number · Diff line change
@@ -111,17 +111,25 @@ def calculate_spacing(obtained_front):
111111
Measures the uniformity of the distribution of solutions in the obtained front.
112112
Lower is better (0 means perfectly uniform).
113113
114+
This implementation normalizes the front to [0, 1] internally to ensure
115+
scale independence between different objectives.
116+
114117
S = sqrt(1/(n-1) * sum((d_i - d_mean)^2))
115118
where d_i is the distance to the nearest neighbor.
116119
"""
117120
if len(obtained_front) < 2:
118121
return 0.0
119122

120-
# Calculate pairwise distances
121-
n = len(obtained_front)
122-
dists = np.sqrt(
123-
np.sum((obtained_front[:, None] - obtained_front[None, :]) ** 2, axis=2)
124-
)
123+
# 1. Normalization
124+
z_min = np.min(obtained_front, axis=0)
125+
z_max = np.max(obtained_front, axis=0)
126+
span = z_max - z_min
127+
span[span < 1e-10] = 1e-10
128+
front_norm = (obtained_front - z_min) / span
129+
130+
# 2. Calculate pairwise distances
131+
n = len(front_norm)
132+
dists = np.sqrt(np.sum((front_norm[:, None] - front_norm[None, :]) ** 2, axis=2))
125133

126134
# Fill diagonal with infinity to ignore self-distance
127135
np.fill_diagonal(dists, np.inf)
@@ -132,3 +140,51 @@ def calculate_spacing(obtained_front):
132140

133141
s = np.sqrt(np.sum((d - d_mean) ** 2) / (n - 1))
134142
return s
143+
144+
145+
def calculate_spread(obtained_front, reference_front):
146+
"""
147+
Calculates the Spread (Delta) metric for 2D fronts.
148+
Measures both the uniformity and the extent of the distribution.
149+
Lower is better (0 means perfect uniformity and full coverage of extreme points).
150+
151+
reference_front: array of shape (N_ref, 2)
152+
obtained_front: array of shape (N_obt, 2)
153+
"""
154+
if len(obtained_front) < 2:
155+
return 1.0
156+
157+
# 1. Normalization based on reference front bounds
158+
z_min = np.min(reference_front, axis=0)
159+
z_max = np.max(reference_front, axis=0)
160+
span = z_max - z_min
161+
span[span < 1e-10] = 1e-10
162+
163+
ref_norm = (reference_front - z_min) / span
164+
obt_norm = (obtained_front - z_min) / span
165+
166+
# 2. Find extreme points in reference front (min risk, max return)
167+
# Objective 0 (Return) max, Objective 1 (Risk) min
168+
# In normalized space:
169+
# Extreme 1: Max Return (1.0), likely high risk
170+
# Extreme 2: Min Risk (0.0), likely low return
171+
ext1_ref = ref_norm[np.argmax(ref_norm[:, 0])]
172+
ext2_ref = ref_norm[np.argmin(ref_norm[:, 1])]
173+
174+
# 3. Sort obtained points by first objective for neighbor distances
175+
idx = np.argsort(obt_norm[:, 0])
176+
pts = obt_norm[idx]
177+
178+
# 4. Distances to extremes
179+
df = np.sqrt(np.sum((pts[0] - ext1_ref) ** 2))
180+
dl = np.sqrt(np.sum((pts[-1] - ext2_ref) ** 2))
181+
182+
# 5. Distances between consecutive points
183+
d = np.sqrt(np.sum((pts[1:] - pts[:-1]) ** 2, axis=1))
184+
d_mean = np.mean(d)
185+
186+
# 6. Delta calculation
187+
sum_diff = np.sum(np.abs(d - d_mean))
188+
delta = (df + dl + sum_diff) / (df + dl + (len(pts) - 1) * d_mean)
189+
190+
return delta

src/portfolio/moead_awa.py

Lines changed: 40 additions & 11 deletions
Original file line number · Diff line number · Diff line change
@@ -175,35 +175,64 @@ def repair(w):
175175
ep_f_norm = ep_f_norm[eff_idx]
176176

177177
if len(ep_f_norm) > num_points:
178-
# Identify sparse and dense regions for weight adjustment
179-
n_add = int(0.05 * num_points) + 1
178+
# Identify sparse regions in EP and dense regions in current population
179+
# We adjust a smaller percentage to ensure stability (3% instead of 5%)
180+
n_add = max(1, int(0.03 * num_points))
180181

181-
# Sparsity in EP
182+
# 1. Sparsity in EP (Distance to 2nd nearest neighbor)
182183
dists_ep = np.sqrt(
183184
np.sum((ep_f_norm[:, None] - ep_f_norm[None, :]) ** 2, axis=2)
184185
)
185186
dists_ep.sort(axis=1)
187+
# Use a combination of nearest and 2nd nearest for robustness
186188
sparsity = dists_ep[:, 1] + dists_ep[:, 2]
187189
top_sparse_idx = np.argsort(sparsity)[-n_add:]
188190

189-
# Density in current population
191+
# 2. Density in current population
190192
dists_pop = np.sqrt(
191193
np.sum((f_norm[:, None] - f_norm[None, :]) ** 2, axis=2)
192194
)
193195
dists_pop.sort(axis=1)
194196
pop_density = 1.0 / (dists_pop[:, 1] + 1e-10)
195-
to_remove = np.argsort(pop_density)[-n_add:]
196197

197-
for k, idx in enumerate(top_sparse_idx):
198-
f_sparse = ep_f_norm[idx] + 1e-10
199-
new_wv = (1.0 / f_sparse) / np.sum(1.0 / f_sparse)
198+
# Sort by density (highest first)
199+
potential_to_remove = np.argsort(pop_density)[::-1]
200+
201+
# 3. Filter to_remove: Avoid removing extreme weight vectors
202+
# (those close to axis boundaries like [1, 0] or [0, 1])
203+
to_remove = []
204+
for idx in potential_to_remove:
205+
# Check if it's an extreme weight vector (any component > 0.9)
206+
if np.any(weight_vectors[idx] > 0.9):
207+
continue
208+
to_remove.append(idx)
209+
if len(to_remove) >= n_add:
210+
break
211+
212+
# 4. Perform adjustment
213+
for k, sparse_idx in enumerate(top_sparse_idx):
214+
if k >= len(to_remove):
215+
break
216+
217+
f_sparse = ep_f_norm[sparse_idx]
218+
219+
# Generate new weight vector based on reference type
220+
if reference_type == "ideal":
221+
# Tchebycheff: lambda_i * f_i = const => lambda_i = 1/f_i
222+
new_wv = 1.0 / (f_sparse + 1e-6)
223+
else:
224+
# Nadir-based: (1 - f_i) / lambda_i = const => lambda_i = 1 - f_i
225+
new_wv = 1.0 - f_sparse + 1e-6
226+
227+
new_wv /= np.sum(new_wv)
200228

201229
replace_idx = to_remove[k]
202230
weight_vectors[replace_idx] = new_wv
203-
population[replace_idx] = ep_weights[idx]
204-
f_phys[replace_idx] = ep_f_phys[idx]
205-
f_norm[replace_idx] = ep_f_norm[idx]
231+
population[replace_idx] = ep_weights[sparse_idx]
232+
f_phys[replace_idx] = ep_f_phys[sparse_idx]
233+
f_norm[replace_idx] = ep_f_norm[sparse_idx]
206234

235+
# Re-calculate neighbors for all subproblems after weight changes
207236
neighbors = self._get_neighbors(weight_vectors, T)
208237

209238
if kwargs.get("record_history", False):

0 commit comments

Comments (0)