Skip to content

Commit 86ab229

Browse files
authored
Fix objective sensitivity when parameters appear in the objective (#340)
* fix parametric obj * format * split fd test * add math * typo
1 parent 97c3629 commit 86ab229

File tree

2 files changed

+44
-3
lines changed

2 files changed

+44
-3
lines changed

src/NonLinearProgram/nlp_utilities.jl

Lines changed: 10 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -489,9 +489,16 @@ function _compute_sensitivity(model::Model; tol = 1e-6)
489489
# Dual bounds upper
490490
∂s[((num_w+num_cons+num_lower+1):end), :] *= -_sense_multiplier
491491

492-
# dual wrt parameter
492+
grad = _compute_gradient(model)
493+
# `grad` = [∇ₓf(x,p); ∇ₚf(x,p)] where `x` is the primal vars, `p` is the params,
494+
# and `f(x,p)` is the objective function. we extract the components
495+
# so we can form `∇ₚfᵒ(x,p) = ∇ₓf(x,p) * ∇ₚxᵒ(p) + ∇ₚf(x,p) * ∇ₚpᵒ(p)`
496+
# where `ᵒ` denotes "optimal". note that parameters are fixed, so
497+
# pᵒ(p) = p and ∇ₚpᵒ(p) = 𝐈ₚ.
493498
primal_idx = [i.value for i in model.cache.primal_vars]
494-
df_dx = _compute_gradient(model)[primal_idx]
495-
df_dp = df_dx'∂s[1:num_vars, :]
499+
params_idx = [i.value for i in model.cache.params]
500+
df_dx = grad[primal_idx] # ∇ₓf(x,p)
501+
df_dp_direct = grad[params_idx] # ∇ₚf(x,p)
502+
df_dp = df_dx'∂s[1:num_vars, :] + df_dp_direct' # ∇ₚfᵒ(x,p) = ∇ₓf(x,p) * ∇ₚxᵒ(p) + ∇ₚf(x,p) * 𝐈ₚ
496503
return ∂s, df_dp
497504
end

test/nlp_program.jl

Lines changed: 34 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -740,6 +740,40 @@ function test_ObjectiveSensitivity_model2()
740740
@test isapprox(dp, -1.5; atol = 1e-4)
741741
end
742742

743+
function test_ObjectiveSensitivity_direct_param_contrib()
744+
model = DiffOpt.nonlinear_diff_model(Ipopt.Optimizer)
745+
set_silent(model)
746+
747+
p_val = 3.0
748+
@variable(model, p in MOI.Parameter(p_val))
749+
@variable(model, x >= 1)
750+
@objective(model, Min, p^2 * x^2)
751+
752+
optimize!(model)
753+
@assert is_solved_and_feasible(model)
754+
755+
Δp = 0.1
756+
DiffOpt.set_forward_parameter(model, p, Δp)
757+
DiffOpt.forward_differentiate!(model)
758+
759+
df_dp = MOI.get(model, DiffOpt.ForwardObjectiveSensitivity())
760+
@test isapprox(df_dp, 2 * p_val * Δp, atol = 1e-8) # ≈ 0.6 for p=3
761+
762+
ε = 1e-6
763+
df_dp_fdpos = begin
764+
set_parameter_value(p, p_val + ε)
765+
optimize!(model)
766+
Δp * objective_value(model)
767+
end
768+
df_dp_fdneg = begin
769+
set_parameter_value(p, p_val - ε)
770+
optimize!(model)
771+
Δp * objective_value(model)
772+
end
773+
df_dp_fd = (df_dp_fdpos - df_dp_fdneg) / (2ε)
774+
775+
@test isapprox(df_dp, df_dp_fd, atol = 1e-4)
776+
end
743777
function test_ObjectiveSensitivity_subset_parameters()
744778
# Model with 10 parameters, differentiate only w.r.t. 3rd and 7th
745779
model = Model(() -> DiffOpt.diff_optimizer(Ipopt.Optimizer))

0 commit comments

Comments (0)