From f79c7f7e7720b51b652b0dc13aae9a38c61d5dde Mon Sep 17 00:00:00 2001 From: Archim J Date: Wed, 1 Apr 2026 19:17:31 -0400 Subject: [PATCH 1/3] set intermediate callback in problem constructor --- src/NLPModelsIpopt.jl | 87 ++++++++++++++++++++++--------------------- 1 file changed, 45 insertions(+), 42 deletions(-) diff --git a/src/NLPModelsIpopt.jl b/src/NLPModelsIpopt.jl index cd92290..1f2ae54 100644 --- a/src/NLPModelsIpopt.jl +++ b/src/NLPModelsIpopt.jl @@ -56,14 +56,51 @@ const ipopt_internal_statuses = Dict( Returns an `IpoptSolver` structure to solve the problem `nlp` with `ipopt`. """ -mutable struct IpoptSolver <: AbstractOptimizationSolver - problem::IpoptProblem +mutable struct IpoptSolver{F, G, GF, JG, H, I} <: AbstractOptimizationSolver + problem::IpoptProblem{F, G, GF, JG, H, I} end -function IpoptSolver(nlp::AbstractNLPModel) +function IpoptSolver( + nlp::AbstractNLPModel, + stats::GenericExecutionStats = GenericExecutionStats(nlp); + callback = (args...) -> true, +) @assert get_grad_available(nlp.meta) && (get_ncon(nlp.meta) == 0 || get_jac_available(nlp.meta)) eval_f, eval_g, eval_grad_f, eval_jac_g, eval_h = set_callbacks(nlp) + # Callback + function solver_callback( + alg_mod, + iter_count, + obj_value, + inf_pr, + inf_du, + mu, + d_norm, + regularization_size, + alpha_du, + alpha_pr, + ls_trials, + args... + ) + set_residuals!(stats, inf_pr, inf_du) + set_iter!(stats, Int(iter_count)) + return callback( + alg_mod, + iter_count, + obj_value, + inf_pr, + inf_du, + mu, + d_norm, + regularization_size, + alpha_du, + alpha_pr, + ls_trials, + args..., + ) + end + problem = CreateIpoptProblem( get_nvar(nlp.meta), get_lvar(nlp.meta), @@ -78,6 +115,7 @@ function IpoptSolver(nlp::AbstractNLPModel) eval_grad_f, eval_jac_g, eval_h, + solver_callback ) return IpoptSolver(problem) end @@ -187,9 +225,9 @@ solver = IpoptSolver(nlp); stats = solve!(solver, nlp, print_level = 0) ``` """ -function ipopt(nlp::AbstractNLPModel; kwargs...) 
- solver = IpoptSolver(nlp) +function ipopt(nlp::AbstractNLPModel; callback = (args...) -> true, kwargs...) stats = GenericExecutionStats(nlp) + solver = IpoptSolver(nlp, stats; callback = callback) return solve!(solver, nlp, stats; kwargs...) end @@ -212,9 +250,9 @@ nls = ADNLSModel(x -> [x[1] - 1, x[2] - 2], [0.0, 0.0], 2) stats = ipopt(nls, print_level = 0) ``` """ -function ipopt(ff_nls::FeasibilityFormNLS; kwargs...) - solver = IpoptSolver(ff_nls) +function ipopt(ff_nls::FeasibilityFormNLS; callback = (args...) -> true, kwargs...) stats = GenericExecutionStats(ff_nls) + solver = IpoptSolver(ff_nls, stats; callback = callback) stats = solve!(solver, ff_nls, stats; kwargs...) return stats @@ -242,7 +280,6 @@ function SolverCore.solve!( solver::IpoptSolver, nlp::AbstractNLPModel, stats::GenericExecutionStats; - callback = (args...) -> true, kwargs..., ) problem = solver.problem @@ -297,40 +334,6 @@ function SolverCore.solve!( AddIpoptNumOption(problem, "obj_scaling_factor", -1.0) end - # Callback - function solver_callback( - alg_mod, - iter_count, - obj_value, - inf_pr, - inf_du, - mu, - d_norm, - regularization_size, - alpha_du, - alpha_pr, - ls_trials, - args...; - stats = stats, - ) - set_residuals!(stats, inf_pr, inf_du) - set_iter!(stats, Int(iter_count)) - return callback( - alg_mod, - iter_count, - obj_value, - inf_pr, - inf_du, - mu, - d_norm, - regularization_size, - alpha_du, - alpha_pr, - ls_trials, - args..., - ) - end - SetIntermediateCallback(problem, solver_callback) real_time = time() status = IpoptSolve(problem) From d3d819bf6c41f8c052940a5a16fb54273931e0eb Mon Sep 17 00:00:00 2001 From: Archim J Date: Wed, 1 Apr 2026 20:34:52 -0400 Subject: [PATCH 2/3] changes --- src/NLPModelsIpopt.jl | 121 ++++++++++++++++++++++++------------------ 1 file changed, 70 insertions(+), 51 deletions(-) diff --git a/src/NLPModelsIpopt.jl b/src/NLPModelsIpopt.jl index 1f2ae54..514f849 100644 --- a/src/NLPModelsIpopt.jl +++ b/src/NLPModelsIpopt.jl @@ -60,63 +60,82 @@ 
 mutable struct IpoptSolver{F, G, GF, JG, H, I} <: AbstractOptimizationSolver
   problem::IpoptProblem{F, G, GF, JG, H, I}
 end
 
+function define_intermediate_callback(stats::GenericExecutionStats, callback)
+end
+
 function IpoptSolver(
   nlp::AbstractNLPModel,
   stats::GenericExecutionStats = GenericExecutionStats(nlp);
-  callback = (args...) -> true,
+  callback = nothing,
 )
   @assert get_grad_available(nlp.meta) && (get_ncon(nlp.meta) == 0 || get_jac_available(nlp.meta))
   eval_f, eval_g, eval_grad_f, eval_jac_g, eval_h = set_callbacks(nlp)
-
-  # Callback
-  function solver_callback(
-    alg_mod,
-    iter_count,
-    obj_value,
-    inf_pr,
-    inf_du,
-    mu,
-    d_norm,
-    regularization_size,
-    alpha_du,
-    alpha_pr,
-    ls_trials,
-    args...
-  )
-    set_residuals!(stats, inf_pr, inf_du)
-    set_iter!(stats, Int(iter_count))
-    return callback(
-      alg_mod,
-      iter_count,
-      obj_value,
-      inf_pr,
-      inf_du,
-      mu,
-      d_norm,
-      regularization_size,
-      alpha_du,
-      alpha_pr,
-      ls_trials,
-      args...,
-    )
+  if callback === nothing
+    problem = CreateIpoptProblem(
+      get_nvar(nlp.meta),
+      get_lvar(nlp.meta),
+      get_uvar(nlp.meta),
+      get_ncon(nlp.meta),
+      get_lcon(nlp.meta),
+      get_ucon(nlp.meta),
+      get_nnzj(nlp.meta),
+      get_nnzh(nlp.meta),
+      eval_f,
+      eval_g,
+      eval_grad_f,
+      eval_jac_g,
+      eval_h,
+    )
+  else
+    function solver_callback(
+      alg_mod,
+      iter_count,
+      obj_value,
+      inf_pr,
+      inf_du,
+      mu,
+      d_norm,
+      regularization_size,
+      alpha_du,
+      alpha_pr,
+      ls_trials,
+      args...
+ ) + set_residuals!(stats, inf_pr, inf_du) + set_iter!(stats, Int(iter_count)) + return callback( + alg_mod, + iter_count, + obj_value, + inf_pr, + inf_du, + mu, + d_norm, + regularization_size, + alpha_du, + alpha_pr, + ls_trials, + args..., + ) + end + problem = CreateIpoptProblem( + get_nvar(nlp.meta), + get_lvar(nlp.meta), + get_uvar(nlp.meta), + get_ncon(nlp.meta), + get_lcon(nlp.meta), + get_ucon(nlp.meta), + get_nnzj(nlp.meta), + get_nnzh(nlp.meta), + eval_f, + eval_g, + eval_grad_f, + eval_jac_g, + eval_h, + solver_callback + ) end - problem = CreateIpoptProblem( - get_nvar(nlp.meta), - get_lvar(nlp.meta), - get_uvar(nlp.meta), - get_ncon(nlp.meta), - get_lcon(nlp.meta), - get_ucon(nlp.meta), - get_nnzj(nlp.meta), - get_nnzh(nlp.meta), - eval_f, - eval_g, - eval_grad_f, - eval_jac_g, - eval_h, - solver_callback - ) return IpoptSolver(problem) end @@ -225,7 +244,7 @@ solver = IpoptSolver(nlp); stats = solve!(solver, nlp, print_level = 0) ``` """ -function ipopt(nlp::AbstractNLPModel; callback = (args...) -> true, kwargs...) +function ipopt(nlp::AbstractNLPModel; callback = nothing, kwargs...) stats = GenericExecutionStats(nlp) solver = IpoptSolver(nlp, stats; callback = callback) return solve!(solver, nlp, stats; kwargs...) @@ -250,7 +269,7 @@ nls = ADNLSModel(x -> [x[1] - 1, x[2] - 2], [0.0, 0.0], 2) stats = ipopt(nls, print_level = 0) ``` """ -function ipopt(ff_nls::FeasibilityFormNLS; callback = (args...) -> true, kwargs...) +function ipopt(ff_nls::FeasibilityFormNLS; callback = nothing, kwargs...) stats = GenericExecutionStats(ff_nls) solver = IpoptSolver(ff_nls, stats; callback = callback) stats = solve!(solver, ff_nls, stats; kwargs...) From 8565589081d14e9a2a0daba197d90af7ebb0b140 Mon Sep 17 00:00:00 2001 From: Archim J Date: Thu, 2 Apr 2026 11:56:19 -0400 Subject: [PATCH 3/3] added optional wrapped_callback for backwards compatibility. 
(defaults to allocating intermediate callback)
---
 src/NLPModelsIpopt.jl | 103 ++++++++++++++++++++++++++----------------
 test/runtests.jl | 2 +-
 2 files changed, 65 insertions(+), 40 deletions(-)

diff --git a/src/NLPModelsIpopt.jl b/src/NLPModelsIpopt.jl
index 514f849..7e233f4 100644
--- a/src/NLPModelsIpopt.jl
+++ b/src/NLPModelsIpopt.jl
@@ -58,19 +58,56 @@ Returns an `IpoptSolver` structure to solve the problem `nlp` with `ipopt`.
 """
 mutable struct IpoptSolver{F, G, GF, JG, H, I} <: AbstractOptimizationSolver
   problem::IpoptProblem{F, G, GF, JG, H, I}
+  set_callback::Bool
+  wrapped_callback::Bool
 end
 
-function define_intermediate_callback(stats::GenericExecutionStats, callback)
+function create_solver_callback(callback, stats)
+  function solver_callback(
+    alg_mod,
+    iter_count,
+    obj_value,
+    inf_pr,
+    inf_du,
+    mu,
+    d_norm,
+    regularization_size,
+    alpha_du,
+    alpha_pr,
+    ls_trials,
+    args...
+  )
+    set_residuals!(stats, inf_pr, inf_du)
+    set_iter!(stats, Int(iter_count))
+    return callback(
+      alg_mod,
+      iter_count,
+      obj_value,
+      inf_pr,
+      inf_du,
+      mu,
+      d_norm,
+      regularization_size,
+      alpha_du,
+      alpha_pr,
+      ls_trials,
+      args...,
+    )
+  end
 end
 
 function IpoptSolver(
   nlp::AbstractNLPModel,
   stats::GenericExecutionStats = GenericExecutionStats(nlp);
   callback = nothing,
+  wrapped_callback=true
 )
   @assert get_grad_available(nlp.meta) && (get_ncon(nlp.meta) == 0 || get_jac_available(nlp.meta))
   eval_f, eval_g, eval_grad_f, eval_jac_g, eval_h = set_callbacks(nlp)
-  if callback === nothing
+  if callback !== nothing
+    callback = create_solver_callback(callback, stats)
+  end
+  if wrapped_callback
     problem = CreateIpoptProblem(
       get_nvar(nlp.meta),
       get_lvar(nlp.meta),
       get_uvar(nlp.meta),
       get_ncon(nlp.meta),
       get_lcon(nlp.meta),
       get_ucon(nlp.meta),
       get_nnzj(nlp.meta),
       get_nnzh(nlp.meta),
       eval_f,
       eval_g,
       eval_grad_f,
       eval_jac_g,
       eval_h,
     )
@@ -86,37 +123,12 @@ function IpoptSolver(
       eval_jac_g,
       eval_h,
     )
+    if callback !== nothing
+      SetIntermediateCallback(problem, callback)
+    end
   else
-    function solver_callback(
-      alg_mod,
-      iter_count,
-      obj_value,
-      inf_pr,
-      inf_du,
-      mu,
-      d_norm,
-      regularization_size,
-      alpha_du,
-      alpha_pr,
-      ls_trials,
-
args... - ) - set_residuals!(stats, inf_pr, inf_du) - set_iter!(stats, Int(iter_count)) - return callback( - alg_mod, - iter_count, - obj_value, - inf_pr, - inf_du, - mu, - d_norm, - regularization_size, - alpha_du, - alpha_pr, - ls_trials, - args..., - ) + if callback === nothing + callback = create_solver_callback((args...)->true, stats) end problem = CreateIpoptProblem( get_nvar(nlp.meta), @@ -132,11 +144,11 @@ function IpoptSolver( eval_grad_f, eval_jac_g, eval_h, - solver_callback + callback ) end - return IpoptSolver(problem) + return IpoptSolver(problem, callback !== nothing, wrapped_callback) end """ @@ -160,7 +172,11 @@ function SolverCore.reset!(solver::IpoptSolver, nlp::AbstractNLPModel) problem.eval_grad_f = eval_grad_f problem.eval_jac_g = eval_jac_g problem.eval_h = eval_h - problem.intermediate = nothing + if !solver.wrapped_callback + @warn "when using unwrapped callback, reset! does not reset the callback" + else + problem.intermediate.f = (args...) -> Cint(1) + end # TODO: reset problem.ipopt_problem return problem @@ -171,7 +187,11 @@ function SolverCore.reset!(solver::IpoptSolver) problem.obj_val = Inf problem.status = -1 # Use -1 to indicate not solved yet - problem.intermediate = nothing + if !solver.wrapped_callback + @warn "when using unwrapped callback, reset! does not reset the callback" + else + problem.intermediate.f = (args...) -> Cint(1) + end return solver end @@ -244,9 +264,9 @@ solver = IpoptSolver(nlp); stats = solve!(solver, nlp, print_level = 0) ``` """ -function ipopt(nlp::AbstractNLPModel; callback = nothing, kwargs...) +function ipopt(nlp::AbstractNLPModel; callback = nothing, wrapped_callback=false, kwargs...) stats = GenericExecutionStats(nlp) - solver = IpoptSolver(nlp, stats; callback = callback) + solver = IpoptSolver(nlp, stats; callback = callback, wrapped_callback=wrapped_callback) return solve!(solver, nlp, stats; kwargs...) 
end @@ -271,7 +291,7 @@ stats = ipopt(nls, print_level = 0) """ function ipopt(ff_nls::FeasibilityFormNLS; callback = nothing, kwargs...) stats = GenericExecutionStats(ff_nls) - solver = IpoptSolver(ff_nls, stats; callback = callback) + solver = IpoptSolver(ff_nls, stats; callback = callback, wrapped_callback=false) stats = solve!(solver, ff_nls, stats; kwargs...) return stats @@ -299,12 +319,17 @@ function SolverCore.solve!( solver::IpoptSolver, nlp::AbstractNLPModel, stats::GenericExecutionStats; + callback=(args...) -> Cint(1), kwargs..., ) problem = solver.problem SolverCore.reset!(stats) kwargs = Dict(kwargs) + if !solver.set_callback + SetIntermediateCallback(problem, create_solver_callback(callback, stats)) + end + # Use L-BFGS if the sparse hessian of the Lagrangian is not available if !get_hess_available(nlp.meta) AddIpoptStrOption(problem, "hessian_approximation", "limited-memory") diff --git a/test/runtests.jl b/test/runtests.jl index 50d993c..c24f442 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -57,7 +57,7 @@ end return iter_count < 1 end nlp = ADNLPModel(x -> (x[1] - 1)^2 + 100 * (x[2] - x[1]^2)^2, [-1.2; 1.0]) - stats = ipopt(nlp, tol = 1e-12, callback = callback, print_level = 0) + stats = ipopt(nlp, tol = 1e-12, callback = callback, wrapped_callback=false, print_level = 0) @test stats.status == :user @test stats.solver_specific[:internal_msg] == :User_Requested_Stop @test stats.iter == 1