Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
167 changes: 107 additions & 60 deletions src/NLPModelsIpopt.jl
Original file line number Diff line number Diff line change
Expand Up @@ -56,30 +56,99 @@ const ipopt_internal_statuses = Dict(

Returns an `IpoptSolver` structure to solve the problem `nlp` with `ipopt`.
"""
mutable struct IpoptSolver <: AbstractOptimizationSolver
problem::IpoptProblem
# Solver wrapper around a low-level Ipopt problem handle.
# Type parameters `F, G, GF, JG, H, I` mirror those of `IpoptProblem` (the
# objective/constraint/gradient/Jacobian/Hessian/intermediate callback types),
# keeping the field concretely typed for dispatch and specialization.
mutable struct IpoptSolver{F, G, GF, JG, H, I} <: AbstractOptimizationSolver
problem::IpoptProblem{F, G, GF, JG, H, I}  # underlying Ipopt problem (bounds, callbacks, options)
set_callback::Bool  # true when a user intermediate callback was installed at construction
wrapped_callback::Bool  # true when the callback is managed (re-wrapped) by this wrapper; affects reset! behavior
end

function IpoptSolver(nlp::AbstractNLPModel)
"""
    create_solver_callback(callback, stats)

Wrap a user-supplied Ipopt intermediate callback so that, on every Ipopt
iteration, `stats` is kept up to date (primal/dual residuals and iteration
counter) before control is handed to `callback`. Returns the wrapping
callback, whose return value is exactly that of `callback` (Ipopt continues
while it is truthy).
"""
function create_solver_callback(callback, stats)
  return function (
    alg_mod,
    iter_count,
    obj_value,
    inf_pr,
    inf_du,
    mu,
    d_norm,
    regularization_size,
    alpha_du,
    alpha_pr,
    ls_trials,
    extra...,
  )
    # Record current residuals and iteration count in the execution stats
    # before delegating, so `stats` is fresh even if `callback` stops the run.
    set_residuals!(stats, inf_pr, inf_du)
    set_iter!(stats, Int(iter_count))
    return callback(
      alg_mod,
      iter_count,
      obj_value,
      inf_pr,
      inf_du,
      mu,
      d_norm,
      regularization_size,
      alpha_du,
      alpha_pr,
      ls_trials,
      extra...,
    )
  end
end

function IpoptSolver(
nlp::AbstractNLPModel,
stats::GenericExecutionStats = GenericExecutionStats(nlp);
callback = nothing,
wrapped_callback=true
)
@assert get_grad_available(nlp.meta) && (get_ncon(nlp.meta) == 0 || get_jac_available(nlp.meta))
eval_f, eval_g, eval_grad_f, eval_jac_g, eval_h = set_callbacks(nlp)
if callback !== nothing
callback = create_solver_callback(callback, stats)
end
if wrapped_callback
problem = CreateIpoptProblem(
get_nvar(nlp.meta),
get_lvar(nlp.meta),
get_uvar(nlp.meta),
get_ncon(nlp.meta),
get_lcon(nlp.meta),
get_ucon(nlp.meta),
get_nnzj(nlp.meta),
get_nnzh(nlp.meta),
eval_f,
eval_g,
eval_grad_f,
eval_jac_g,
eval_h,
)
if callback !== nothing
SetIntermediateCallback(problem, callback)
end
else
if callback === nothing
callback = create_solver_callback((args...)->true, stats)
end
problem = CreateIpoptProblem(
get_nvar(nlp.meta),
get_lvar(nlp.meta),
get_uvar(nlp.meta),
get_ncon(nlp.meta),
get_lcon(nlp.meta),
get_ucon(nlp.meta),
get_nnzj(nlp.meta),
get_nnzh(nlp.meta),
eval_f,
eval_g,
eval_grad_f,
eval_jac_g,
eval_h,
callback
)
end

problem = CreateIpoptProblem(
get_nvar(nlp.meta),
get_lvar(nlp.meta),
get_uvar(nlp.meta),
get_ncon(nlp.meta),
get_lcon(nlp.meta),
get_ucon(nlp.meta),
get_nnzj(nlp.meta),
get_nnzh(nlp.meta),
eval_f,
eval_g,
eval_grad_f,
eval_jac_g,
eval_h,
)
return IpoptSolver(problem)
return IpoptSolver(problem, callback !== nothing, wrapped_callback)
end

"""
Expand All @@ -103,7 +172,11 @@ function SolverCore.reset!(solver::IpoptSolver, nlp::AbstractNLPModel)
problem.eval_grad_f = eval_grad_f
problem.eval_jac_g = eval_jac_g
problem.eval_h = eval_h
problem.intermediate = nothing
if !solver.wrapped_callback
@warn "when using unwrapped callback, reset! does not reset the callback"
else
problem.intermediate.f = (args...) -> Cint(1)
end

# TODO: reset problem.ipopt_problem
return problem
Expand All @@ -114,7 +187,11 @@ function SolverCore.reset!(solver::IpoptSolver)

problem.obj_val = Inf
problem.status = -1 # Use -1 to indicate not solved yet
problem.intermediate = nothing
if !solver.wrapped_callback
@warn "when using unwrapped callback, reset! does not reset the callback"
else
problem.intermediate.f = (args...) -> Cint(1)
end

return solver
end
Expand Down Expand Up @@ -187,9 +264,9 @@ solver = IpoptSolver(nlp);
stats = solve!(solver, nlp, print_level = 0)
```
"""
function ipopt(nlp::AbstractNLPModel; kwargs...)
solver = IpoptSolver(nlp)
function ipopt(nlp::AbstractNLPModel; callback = nothing, wrapped_callback=false, kwargs...)
stats = GenericExecutionStats(nlp)
solver = IpoptSolver(nlp, stats; callback = callback, wrapped_callback=wrapped_callback)
return solve!(solver, nlp, stats; kwargs...)
end

Expand All @@ -212,9 +289,9 @@ nls = ADNLSModel(x -> [x[1] - 1, x[2] - 2], [0.0, 0.0], 2)
stats = ipopt(nls, print_level = 0)
```
"""
function ipopt(ff_nls::FeasibilityFormNLS; kwargs...)
solver = IpoptSolver(ff_nls)
function ipopt(ff_nls::FeasibilityFormNLS; callback = nothing, kwargs...)
stats = GenericExecutionStats(ff_nls)
solver = IpoptSolver(ff_nls, stats; callback = callback, wrapped_callback=false)
stats = solve!(solver, ff_nls, stats; kwargs...)

return stats
Expand Down Expand Up @@ -242,13 +319,17 @@ function SolverCore.solve!(
solver::IpoptSolver,
nlp::AbstractNLPModel,
stats::GenericExecutionStats;
callback = (args...) -> true,
callback=(args...) -> Cint(1),
kwargs...,
)
problem = solver.problem
SolverCore.reset!(stats)
kwargs = Dict(kwargs)

if !solver.set_callback
SetIntermediateCallback(problem, create_solver_callback(callback, stats))
end

# Use L-BFGS if the sparse hessian of the Lagrangian is not available
if !get_hess_available(nlp.meta)
AddIpoptStrOption(problem, "hessian_approximation", "limited-memory")
Expand Down Expand Up @@ -297,40 +378,6 @@ function SolverCore.solve!(
AddIpoptNumOption(problem, "obj_scaling_factor", -1.0)
end

# Callback
function solver_callback(
alg_mod,
iter_count,
obj_value,
inf_pr,
inf_du,
mu,
d_norm,
regularization_size,
alpha_du,
alpha_pr,
ls_trials,
args...;
stats = stats,
)
set_residuals!(stats, inf_pr, inf_du)
set_iter!(stats, Int(iter_count))
return callback(
alg_mod,
iter_count,
obj_value,
inf_pr,
inf_du,
mu,
d_norm,
regularization_size,
alpha_du,
alpha_pr,
ls_trials,
args...,
)
end
SetIntermediateCallback(problem, solver_callback)

real_time = time()
status = IpoptSolve(problem)
Expand Down
2 changes: 1 addition & 1 deletion test/runtests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,7 @@ end
return iter_count < 1
end
nlp = ADNLPModel(x -> (x[1] - 1)^2 + 100 * (x[2] - x[1]^2)^2, [-1.2; 1.0])
stats = ipopt(nlp, tol = 1e-12, callback = callback, print_level = 0)
stats = ipopt(nlp, tol = 1e-12, callback = callback, wrapped_callback=false, print_level = 0)
@test stats.status == :user
@test stats.solver_specific[:internal_msg] == :User_Requested_Stop
@test stats.iter == 1
Expand Down
Loading