diff --git a/src/NLPModels.jl b/src/NLPModels.jl index 17299f9b..5dcc6572 100644 --- a/src/NLPModels.jl +++ b/src/NLPModels.jl @@ -37,7 +37,7 @@ Base type for a nonlinear least-squares model. """ abstract type AbstractNLSModel{T, S} <: AbstractNLPModel{T, S} end -for f in ["utils", "api", "counters", "meta", "show", "tools"] +for f in ["utils", "api", "defaults", "counters", "meta", "show", "tools"] include("nlp/$f.jl") include("nls/$f.jl") end diff --git a/src/nlp/api.jl b/src/nlp/api.jl index 22863b45..79bcb17a 100644 --- a/src/nlp/api.jl +++ b/src/nlp/api.jl @@ -19,6 +19,14 @@ export varscale, lagscale, conscale f = obj(nlp, x) Evaluate ``f(x)``, the objective function of `nlp` at `x`. + +For `nls::AbstractNLSModel`, the objective is defined as ``f(x) = \\frac{1}{2}\\|F(x)\\|^2`` +where ``F(x)`` is the residual. Additional signatures for NLS models: + + f = obj(nls, x, Fx; recompute::Bool=true) + +where `Fx` is overwritten with the value of the residual ``F(x)``. +If `recompute` is `true`, then `Fx` is updated with the residual at `x`. """ function obj end @@ -27,16 +35,19 @@ function obj end Evaluate ``∇f(x)``, the gradient of the objective function at `x`. """ -function grad(nlp::AbstractNLPModel{T, S}, x::AbstractVector) where {T, S} - @lencheck nlp.meta.nvar x - g = S(undef, nlp.meta.nvar) - return grad!(nlp, x, g) -end +function grad end """ g = grad!(nlp, x, g) Evaluate ``∇f(x)``, the gradient of the objective function at `x` in place. + +For `nls::AbstractNLSModel`, additional signature: + + g = grad!(nls, x, g, Fx; recompute::Bool=true) + +where `Fx` is overwritten with the value of the residual ``F(x)``. +If `recompute` is `true`, then `Fx` is updated with the residual at `x`. """ function grad! end @@ -45,48 +56,21 @@ function grad! end Evaluate ``c(x)``, the constraints at `x`. """ -function cons(nlp::AbstractNLPModel{T, S}, x::AbstractVector) where {T, S} - @lencheck nlp.meta.nvar x - c = S(undef, nlp.meta.ncon) - return cons!(nlp, x, c) -end +function cons end """ c = cons!(nlp, x, c) Evaluate ``c(x)``, the constraints at `x` in place. """ -function cons!(nlp::AbstractNLPModel, x::AbstractVector, cx::AbstractVector) - @lencheck nlp.meta.nvar x - @lencheck nlp.meta.ncon cx - increment!(nlp, :neval_cons) - if nlp.meta.nlin > 0 - if nlp.meta.nnln == 0 - cons_lin!(nlp, x, cx) - else - cons_lin!(nlp, x, view(cx, nlp.meta.lin)) - end - end - if nlp.meta.nnln > 0 - if nlp.meta.nlin == 0 - cons_nln!(nlp, x, cx) - else - cons_nln!(nlp, x, view(cx, nlp.meta.nln)) - end - end - return cx -end +function cons! end """ c = cons_lin(nlp, x) Evaluate the linear constraints at `x`. """ -function cons_lin(nlp::AbstractNLPModel{T, S}, x::AbstractVector) where {T, S} - @lencheck nlp.meta.nvar x - c = S(undef, nlp.meta.nlin) - return cons_lin!(nlp, x, c) -end +function cons_lin end """ c = cons_lin!(nlp, x, c) @@ -100,11 +84,7 @@ function cons_lin! end Evaluate the nonlinear constraints at `x`. """ -function cons_nln(nlp::AbstractNLPModel{T, S}, x::AbstractVector) where {T, S} - @lencheck nlp.meta.nvar x - c = S(undef, nlp.meta.nnln) - return cons_nln!(nlp, x, c) -end +function cons_nln end """ c = cons_nln!(nlp, x, c) @@ -115,11 +95,7 @@ function cons_nln! end function jth_con end -function jth_congrad(nlp::AbstractNLPModel{T, S}, x::AbstractVector, j::Integer) where {T, S} - @lencheck nlp.meta.nvar x - g = S(undef, nlp.meta.nvar) - return jth_congrad!(nlp, x, j, g) -end +function jth_congrad end function jth_congrad! 
end @@ -130,106 +106,64 @@ function jth_sparse_congrad end Evaluate ``f(x)`` and ``c(x)`` at `x`. """ -function objcons(nlp::AbstractNLPModel{T, S}, x::AbstractVector) where {T, S} - @lencheck nlp.meta.nvar x - c = S(undef, nlp.meta.ncon) - return objcons!(nlp, x, c) -end +function objcons end """ f, c = objcons!(nlp, x, c) Evaluate ``f(x)`` and ``c(x)`` at `x`. `c` is overwritten with the value of ``c(x)``. + +For `nls::AbstractNLSModel`, additional signature: + + f, c = objcons!(nls, x, c, Fx; recompute::Bool=true) + +where `Fx` is overwritten with the value of the residual ``F(x)``. +If `recompute` is `true`, then `Fx` is updated with the residual at `x`. """ -function objcons!(nlp::AbstractNLPModel, x::AbstractVector, c::AbstractVector) - @lencheck nlp.meta.nvar x - @lencheck nlp.meta.ncon c - f = obj(nlp, x) - cons!(nlp, x, c) - return f, c -end +function objcons! end """ f, g = objgrad(nlp, x) Evaluate ``f(x)`` and ``∇f(x)`` at `x`. """ -function objgrad(nlp::AbstractNLPModel{T, S}, x::AbstractVector) where {T, S} - @lencheck nlp.meta.nvar x - g = S(undef, nlp.meta.nvar) - return objgrad!(nlp, x, g) -end +function objgrad end """ f, g = objgrad!(nlp, x, g) Evaluate ``f(x)`` and ``∇f(x)`` at `x`. `g` is overwritten with the value of ``∇f(x)``. + +For `nls::AbstractNLSModel`, additional signature: + + f, g = objgrad!(nls, x, g, Fx; recompute::Bool=true) + +where `Fx` is overwritten with the value of the residual ``F(x)``. +If `recompute` is `true`, then `Fx` is updated with the residual at `x`. """ -function objgrad!(nlp::AbstractNLPModel, x::AbstractVector, g::AbstractVector) - @lencheck nlp.meta.nvar x g - f = obj(nlp, x) - grad!(nlp, x, g) - return f, g -end +function objgrad! end """ (rows,cols) = jac_structure(nlp) Return the structure of the constraints Jacobian in sparse coordinate format. """ -function jac_structure(nlp::AbstractNLPModel) - rows = Vector{Int}(undef, nlp.meta.nnzj) - cols = Vector{Int}(undef, nlp.meta.nnzj) - jac_structure!(nlp, rows, cols) -end +function jac_structure end """ jac_structure!(nlp, rows, cols) Return the structure of the constraints Jacobian in sparse coordinate format in place. """ -function jac_structure!( - nlp::AbstractNLPModel, - rows::AbstractVector{T}, - cols::AbstractVector{T}, -) where {T} - @lencheck nlp.meta.nnzj rows cols - lin_ind = 1:(nlp.meta.lin_nnzj) - if nlp.meta.nlin > 0 - if nlp.meta.nnln == 0 - jac_lin_structure!(nlp, rows, cols) - else - jac_lin_structure!(nlp, view(rows, lin_ind), view(cols, lin_ind)) - for i in lin_ind - rows[i] += count(x < nlp.meta.lin[rows[i]] for x in nlp.meta.nln) - end - end - end - if nlp.meta.nnln > 0 - if nlp.meta.nlin == 0 - jac_nln_structure!(nlp, rows, cols) - else - nln_ind = (nlp.meta.lin_nnzj + 1):(nlp.meta.lin_nnzj + nlp.meta.nln_nnzj) - jac_nln_structure!(nlp, view(rows, nln_ind), view(cols, nln_ind)) - for i in nln_ind - rows[i] += count(x < nlp.meta.nln[rows[i]] for x in nlp.meta.lin) - end - end - end - return rows, cols -end +function jac_structure! end """ (rows,cols) = jac_lin_structure(nlp) Return the structure of the linear constraints Jacobian in sparse coordinate format. """ -function jac_lin_structure(nlp::AbstractNLPModel) - rows = Vector{Int}(undef, nlp.meta.lin_nnzj) - cols = Vector{Int}(undef, nlp.meta.lin_nnzj) - jac_lin_structure!(nlp, rows, cols) -end +function jac_lin_structure end """ jac_lin_structure!(nlp, rows, cols) @@ -243,11 +177,7 @@ function jac_lin_structure! end Return the structure of the nonlinear constraints Jacobian in sparse coordinate format. 
""" -function jac_nln_structure(nlp::AbstractNLPModel) - rows = Vector{Int}(undef, nlp.meta.nln_nnzj) - cols = Vector{Int}(undef, nlp.meta.nln_nnzj) - jac_nln_structure!(nlp, rows, cols) -end +function jac_nln_structure end """ jac_nln_structure!(nlp, rows, cols) @@ -263,26 +193,6 @@ Evaluate ``J(x)``, the constraints Jacobian at `x` in sparse coordinate format, rewriting `vals`. """ function jac_coord!(nlp::AbstractNLPModel, x::AbstractVector, vals::AbstractVector) - @lencheck nlp.meta.nvar x - @lencheck nlp.meta.nnzj vals - increment!(nlp, :neval_jac) - if nlp.meta.nlin > 0 - if nlp.meta.nnln == 0 - jac_lin_coord!(nlp, vals) - else - lin_ind = 1:(nlp.meta.lin_nnzj) - jac_lin_coord!(nlp, view(vals, lin_ind)) - end - end - if nlp.meta.nnln > 0 - if nlp.meta.nlin == 0 - jac_nln_coord!(nlp, x, vals) - else - nln_ind = (nlp.meta.lin_nnzj + 1):(nlp.meta.lin_nnzj + nlp.meta.nln_nnzj) - jac_nln_coord!(nlp, x, view(vals, nln_ind)) - end - end - return vals end """ @@ -575,11 +485,6 @@ function jtprod!( v::AbstractVector, Jtv::AbstractVector, ) - @lencheck nlp.meta.nnzj rows cols vals - @lencheck nlp.meta.ncon v - @lencheck nlp.meta.nvar Jtv - increment!(nlp, :neval_jtprod) - coo_prod!(cols, rows, vals, v, Jtv) end """ @@ -625,11 +530,6 @@ function jtprod_lin!( v::AbstractVector, Jtv::AbstractVector, ) - @lencheck nlp.meta.lin_nnzj rows cols vals - @lencheck nlp.meta.nlin v - @lencheck nlp.meta.nvar Jtv - increment!(nlp, :neval_jtprod_lin) - coo_prod!(cols, rows, vals, v, Jtv) end """ @@ -665,11 +565,6 @@ function jtprod_nln!( v::AbstractVector, Jtv::AbstractVector, ) - @lencheck nlp.meta.nln_nnzj rows cols vals - @lencheck nlp.meta.nnln v - @lencheck nlp.meta.nvar Jtv - increment!(nlp, :neval_jtprod_nln) - coo_prod!(cols, rows, vals, v, Jtv) end """ @@ -737,29 +632,7 @@ function jac_op!( vals::AbstractVector{T}, Jv::AbstractVector, Jtv::AbstractVector, -) where {T, S} - @lencheck nlp.meta.nnzj rows cols vals - @lencheck nlp.meta.ncon Jv - @lencheck nlp.meta.nvar Jtv - prod! = @closure (res, v, α, β) -> begin # res = α * J * v + β * res - jprod!(nlp, rows, cols, vals, v, Jv) - if β == 0 - res .= α .* Jv - else - res .= α .* Jv .+ β .* res - end - return res - end - ctprod! = @closure (res, v, α, β) -> begin - jtprod!(nlp, rows, cols, vals, v, Jtv) - if β == 0 - res .= α .* Jtv - else - res .= α .* Jtv .+ β .* res - end - return res - end - return LinearOperator{T}(nlp.meta.ncon, nlp.meta.nvar, false, false, prod!, ctprod!, ctprod!) + ) where {T, S} end """ @@ -789,28 +662,7 @@ function jac_lin_op!( nlp::AbstractNLPModel{T, S}, Jv::AbstractVector, Jtv::AbstractVector, -) where {T, S} - @lencheck nlp.meta.nlin Jv - @lencheck nlp.meta.nvar Jtv - prod! = @closure (res, v, α, β) -> begin # res = α * J * v + β * res - jprod_lin!(nlp, v, Jv) - if β == 0 - res .= α .* Jv - else - res .= α .* Jv .+ β .* res - end - return res - end - ctprod! = @closure (res, v, α, β) -> begin - jtprod_lin!(nlp, v, Jtv) - if β == 0 - res .= α .* Jtv - else - res .= α .* Jtv .+ β .* res - end - return res - end - return LinearOperator{T}(nlp.meta.nlin, nlp.meta.nvar, false, false, prod!, ctprod!, ctprod!) + ) where {T, S} end @deprecate jac_lin_op!( @@ -834,29 +686,7 @@ function jac_lin_op!( vals::AbstractVector{T}, Jv::AbstractVector, Jtv::AbstractVector, -) where {T, S} - @lencheck nlp.meta.lin_nnzj rows cols vals - @lencheck nlp.meta.nlin Jv - @lencheck nlp.meta.nvar Jtv - prod! 
= @closure (res, v, α, β) -> begin # res = α * J * v + β * res - jprod_lin!(nlp, rows, cols, vals, v, Jv) - if β == 0 - res .= α .* Jv - else - res .= α .* Jv .+ β .* res - end - return res - end - ctprod! = @closure (res, v, α, β) -> begin - jtprod_lin!(nlp, rows, cols, vals, v, Jtv) - if β == 0 - res .= α .* Jtv - else - res .= α .* Jtv .+ β .* res - end - return res - end - return LinearOperator{T}(nlp.meta.nlin, nlp.meta.nvar, false, false, prod!, ctprod!, ctprod!) + ) where {T, S} end """ @@ -886,28 +716,7 @@ function jac_nln_op!( x::AbstractVector{T}, Jv::AbstractVector, Jtv::AbstractVector, -) where {T, S} - @lencheck nlp.meta.nvar x Jtv - @lencheck nlp.meta.nnln Jv - prod! = @closure (res, v, α, β) -> begin # res = α * J * v + β * res - jprod_nln!(nlp, x, v, Jv) - if β == 0 - res .= α .* Jv - else - res .= α .* Jv .+ β .* res - end - return res - end - ctprod! = @closure (res, v, α, β) -> begin - jtprod_nln!(nlp, x, v, Jtv) - if β == 0 - res .= α .* Jtv - else - res .= α .* Jtv .+ β .* res - end - return res - end - return LinearOperator{T}(nlp.meta.nnln, nlp.meta.nvar, false, false, prod!, ctprod!, ctprod!) + ) where {T, S} end """ @@ -924,29 +733,7 @@ function jac_nln_op!( vals::AbstractVector{T}, Jv::AbstractVector, Jtv::AbstractVector, -) where {T, S} - @lencheck nlp.meta.nln_nnzj rows cols vals - @lencheck nlp.meta.nnln Jv - @lencheck nlp.meta.nvar Jtv - prod! = @closure (res, v, α, β) -> begin # res = α * J * v + β * res - jprod_nln!(nlp, rows, cols, vals, v, Jv) - if β == 0 - res .= α .* Jv - else - res .= α .* Jv .+ β .* res - end - return res - end - ctprod! = @closure (res, v, α, β) -> begin - jtprod_nln!(nlp, rows, cols, vals, v, Jtv) - if β == 0 - res .= α .* Jtv - else - res .= α .* Jtv .+ β .* res - end - return res - end - return LinearOperator{T}(nlp.meta.nnln, nlp.meta.nvar, false, false, prod!, ctprod!, ctprod!) + ) where {T, S} end """ @@ -1234,10 +1021,6 @@ function hprod!( v::AbstractVector, Hv::AbstractVector, ) - @lencheck nlp.meta.nnzh rows cols vals - @lencheck nlp.meta.nvar v Hv - increment!(nlp, :neval_hprod) - coo_sym_prod!(cols, rows, vals, v, Hv) end """ @@ -1303,17 +1086,6 @@ function hess_op!( Hv::AbstractVector; obj_weight::Real = one(T), ) where {T, S} - @lencheck nlp.meta.nvar x Hv - prod! = @closure (res, v, α, β) -> begin - hprod!(nlp, x, v, Hv; obj_weight = obj_weight) - if β == 0 - res .= α .* Hv - else - res .= α .* Hv .+ β .* res - end - return res - end - return LinearOperator{T}(nlp.meta.nvar, nlp.meta.nvar, true, true, prod!, prod!, prod!) end """ @@ -1333,18 +1105,6 @@ function hess_op!( vals::AbstractVector, Hv::AbstractVector, ) where {T, S} - @lencheck nlp.meta.nnzh rows cols vals - @lencheck nlp.meta.nvar Hv - prod! = @closure (res, v, α, β) -> begin - hprod!(nlp, rows, cols, vals, v, Hv) - if β == 0 - res .= α .* Hv - else - res .= α .* Hv .+ β .* res - end - return res - end - return LinearOperator{T}(nlp.meta.nvar, nlp.meta.nvar, true, true, prod!, prod!, prod!) end """ @@ -1364,18 +1124,6 @@ function hess_op!( Hv::AbstractVector; obj_weight::Real = one(T), ) where {T, S} - @lencheck nlp.meta.nvar x Hv - @lencheck nlp.meta.ncon y - prod! = @closure (res, v, α, β) -> begin - hprod!(nlp, x, y, v, Hv; obj_weight = obj_weight) - if β == 0 - res .= α .* Hv - else - res .= α .* Hv .+ β .* res - end - return res - end - return LinearOperator{T}(nlp.meta.nvar, nlp.meta.nvar, true, true, prod!, prod!, prod!) 
end """ diff --git a/src/nlp/defaults.jl b/src/nlp/defaults.jl new file mode 100644 index 00000000..e569c775 --- /dev/null +++ b/src/nlp/defaults.jl @@ -0,0 +1,623 @@ +# Default implementations for NLPModel API functions +function grad(nlp::AbstractNLPModel{T, S}, x::AbstractVector) where {T, S} + @lencheck nlp.meta.nvar x + g = S(undef, nlp.meta.nvar) + return grad!(nlp, x, g) +end + +function cons(nlp::AbstractNLPModel{T, S}, x::AbstractVector) where {T, S} + @lencheck nlp.meta.nvar x + c = S(undef, nlp.meta.ncon) + return cons!(nlp, x, c) +end + +function cons!(nlp::AbstractNLPModel, x::AbstractVector, cx::AbstractVector) + @lencheck nlp.meta.nvar x + @lencheck nlp.meta.ncon cx + increment!(nlp, :neval_cons) + if nlp.meta.nlin > 0 + if nlp.meta.nnln == 0 + cons_lin!(nlp, x, cx) + else + cons_lin!(nlp, x, view(cx, nlp.meta.lin)) + end + end + if nlp.meta.nnln > 0 + if nlp.meta.nlin == 0 + cons_nln!(nlp, x, cx) + else + cons_nln!(nlp, x, view(cx, nlp.meta.nln)) + end + end + return cx +end + +function cons_lin(nlp::AbstractNLPModel{T, S}, x::AbstractVector) where {T, S} + @lencheck nlp.meta.nvar x + c = S(undef, nlp.meta.nlin) + return cons_lin!(nlp, x, c) +end + +function cons_nln(nlp::AbstractNLPModel{T, S}, x::AbstractVector) where {T, S} + @lencheck nlp.meta.nvar x + c = S(undef, nlp.meta.nnln) + return cons_nln!(nlp, x, c) +end + +function jth_congrad(nlp::AbstractNLPModel{T, S}, x::AbstractVector, j::Integer) where {T, S} + @lencheck nlp.meta.nvar x + g = S(undef, nlp.meta.nvar) + return jth_congrad!(nlp, x, j, g) +end + +function objcons(nlp::AbstractNLPModel{T, S}, x::AbstractVector) where {T, S} + @lencheck nlp.meta.nvar x + c = S(undef, nlp.meta.ncon) + return objcons!(nlp, x, c) +end + +function objcons!(nlp::AbstractNLPModel, x::AbstractVector, c::AbstractVector) + @lencheck nlp.meta.nvar x + @lencheck nlp.meta.ncon c + f = obj(nlp, x) + cons!(nlp, x, c) + return f, c +end + +function objgrad(nlp::AbstractNLPModel{T, S}, x::AbstractVector) where {T, S} + @lencheck nlp.meta.nvar x + g = S(undef, nlp.meta.nvar) + return objgrad!(nlp, x, g) +end + +function objgrad!(nlp::AbstractNLPModel, x::AbstractVector, g::AbstractVector) + @lencheck nlp.meta.nvar x g + f = obj(nlp, x) + grad!(nlp, x, g) + return f, g +end + +function jac_structure(nlp::AbstractNLPModel) + rows = Vector{Int}(undef, nlp.meta.nnzj) + cols = Vector{Int}(undef, nlp.meta.nnzj) + jac_structure!(nlp, rows, cols) +end + +function jac_structure!( + nlp::AbstractNLPModel, + rows::AbstractVector{T}, + cols::AbstractVector{T}, +) where {T} + @lencheck nlp.meta.nnzj rows cols + lin_ind = 1:(nlp.meta.lin_nnzj) + if nlp.meta.nlin > 0 + if nlp.meta.nnln == 0 + jac_lin_structure!(nlp, rows, cols) + else + jac_lin_structure!(nlp, view(rows, lin_ind), view(cols, lin_ind)) + for i in lin_ind + rows[i] += count(x < nlp.meta.lin[rows[i]] for x in nlp.meta.nln) + end + end + end + if nlp.meta.nnln > 0 + if nlp.meta.nlin == 0 + jac_nln_structure!(nlp, rows, cols) + else + nln_ind = (nlp.meta.lin_nnzj + 1):(nlp.meta.lin_nnzj + nlp.meta.nln_nnzj) + jac_nln_structure!(nlp, view(rows, nln_ind), view(cols, nln_ind)) + for i in nln_ind + rows[i] += count(x < nlp.meta.nln[rows[i]] for x in nlp.meta.lin) + end + end + end + return rows, cols +end + +function jac_lin_structure(nlp::AbstractNLPModel) + rows = Vector{Int}(undef, nlp.meta.lin_nnzj) + cols = Vector{Int}(undef, nlp.meta.lin_nnzj) + jac_lin_structure!(nlp, rows, cols) +end + +function jac_nln_structure(nlp::AbstractNLPModel) + rows = Vector{Int}(undef, nlp.meta.nln_nnzj) + cols = 
Vector{Int}(undef, nlp.meta.nln_nnzj) + jac_nln_structure!(nlp, rows, cols) +end + +function jac_coord!(nlp::AbstractNLPModel, x::AbstractVector, vals::AbstractVector) + @lencheck nlp.meta.nvar x + @lencheck nlp.meta.nnzj vals + increment!(nlp, :neval_jac) + if nlp.meta.nlin > 0 + if nlp.meta.nnln == 0 + jac_lin_coord!(nlp, vals) + else + lin_ind = 1:(nlp.meta.lin_nnzj) + jac_lin_coord!(nlp, view(vals, lin_ind)) + end + end + if nlp.meta.nnln > 0 + if nlp.meta.nlin == 0 + jac_nln_coord!(nlp, x, vals) + else + nln_ind = (nlp.meta.lin_nnzj + 1):(nlp.meta.lin_nnzj + nlp.meta.nln_nnzj) + jac_nln_coord!(nlp, x, view(vals, nln_ind)) + end + end + return vals +end + +function jac_coord(nlp::AbstractNLPModel{T, S}, x::AbstractVector) where {T, S} + @lencheck nlp.meta.nvar x + vals = S(undef, nlp.meta.nnzj) + return jac_coord!(nlp, x, vals) +end + +function jac(nlp::AbstractNLPModel, x::AbstractVector) + @lencheck nlp.meta.nvar x + rows, cols = jac_structure(nlp) + vals = jac_coord(nlp, x) + sparse(rows, cols, vals, nlp.meta.ncon, nlp.meta.nvar) +end + +function jprod(nlp::AbstractNLPModel{T, S}, x::AbstractVector, v::AbstractVector) where {T, S} + @lencheck nlp.meta.nvar x v + Jv = S(undef, nlp.meta.ncon) + return jprod!(nlp, x, v, Jv) +end + +function jprod!(nlp::AbstractNLPModel, x::AbstractVector, v::AbstractVector, Jv::AbstractVector) + @lencheck nlp.meta.nvar x v + @lencheck nlp.meta.ncon Jv + increment!(nlp, :neval_jprod) + if nlp.meta.nlin > 0 + if nlp.meta.nnln == 0 + jprod_lin!(nlp, v, Jv) + else + jprod_lin!(nlp, v, view(Jv, nlp.meta.lin)) + end + end + if nlp.meta.nnln > 0 + if nlp.meta.nlin == 0 + jprod_nln!(nlp, x, v, Jv) + else + jprod_nln!(nlp, x, v, view(Jv, nlp.meta.nln)) + end + end + return Jv +end + +function jprod!( + nlp::AbstractNLPModel, + rows::AbstractVector{<:Integer}, + cols::AbstractVector{<:Integer}, + vals::AbstractVector, + v::AbstractVector, + Jv::AbstractVector, +) + @lencheck nlp.meta.nnzj rows cols vals + @lencheck nlp.meta.nvar v + @lencheck nlp.meta.ncon Jv + increment!(nlp, :neval_jprod) + coo_prod!(rows, cols, vals, v, Jv) +end + +function jprod_lin(nlp::AbstractNLPModel{T, S}, v::AbstractVector) where {T, S} + @lencheck nlp.meta.nvar v + Jv = S(undef, nlp.meta.nlin) + return jprod_lin!(nlp, v, Jv) +end + +function jprod_lin!( + nlp::AbstractNLPModel, + rows::AbstractVector{<:Integer}, + cols::AbstractVector{<:Integer}, + vals::AbstractVector, + v::AbstractVector, + Jv::AbstractVector, +) + @lencheck nlp.meta.lin_nnzj rows cols vals + @lencheck nlp.meta.nvar v + @lencheck nlp.meta.nlin Jv + increment!(nlp, :neval_jprod_lin) + coo_prod!(rows, cols, vals, v, Jv) +end + +function jprod_nln(nlp::AbstractNLPModel{T, S}, x::AbstractVector, v::AbstractVector) where {T, S} + @lencheck nlp.meta.nvar x v + Jv = S(undef, nlp.meta.nnln) + return jprod_nln!(nlp, x, v, Jv) +end + +function jprod_nln!( + nlp::AbstractNLPModel, + rows::AbstractVector{<:Integer}, + cols::AbstractVector{<:Integer}, + vals::AbstractVector, + v::AbstractVector, + Jv::AbstractVector, +) + @lencheck nlp.meta.nln_nnzj rows cols vals + @lencheck nlp.meta.nvar v + @lencheck nlp.meta.nnln Jv + increment!(nlp, :neval_jprod_nln) + coo_prod!(rows, cols, vals, v, Jv) +end + + +function hprod!( + nlp::AbstractNLPModel, + rows::AbstractVector{<:Integer}, + cols::AbstractVector{<:Integer}, + vals::AbstractVector, + v::AbstractVector, + Hv::AbstractVector, +) + @lencheck nlp.meta.nnzh rows cols vals + @lencheck nlp.meta.nvar v Hv + increment!(nlp, :neval_hprod) + coo_sym_prod!(cols, rows, vals, v, Hv) +end + 
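# --- Editor's note (illustration only, not part of the patch) ---
# Minimal sketch of what the api.jl/defaults.jl split provides: a concrete model only
# implements the in-place kernels (here `obj` and `grad!`), and the allocating wrappers
# such as `grad`, `objgrad` and `objgrad!` are inherited from the defaults in this file.
# `SimpleQuadratic` is a hypothetical model written for this note only.
using NLPModels

struct SimpleQuadratic{T, S} <: AbstractNLPModel{T, S}
  meta::NLPModelMeta{T, S}
  counters::Counters
end

SimpleQuadratic(n::Int) = SimpleQuadratic(NLPModelMeta(n; x0 = zeros(n)), Counters())

function NLPModels.obj(nlp::SimpleQuadratic, x::AbstractVector)
  increment!(nlp, :neval_obj)
  return sum(abs2, x) / 2
end

function NLPModels.grad!(nlp::SimpleQuadratic, x::AbstractVector, g::AbstractVector)
  increment!(nlp, :neval_grad)
  g .= x
  return g
end

nlp = SimpleQuadratic(3)
g = grad(nlp, ones(3))            # allocated by the default `grad`, filled by `grad!`
f, g = objgrad!(nlp, ones(3), g)  # default `objgrad!` composes `obj` and `grad!`
# --- end of editor's note ---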
+ +function hess_op!( + nlp::AbstractNLPModel{T, S}, + x::AbstractVector, + Hv::AbstractVector; + obj_weight::Real = one(T), +) where {T, S} + @lencheck nlp.meta.nvar x Hv + prod! = @closure (res, v, α, β) -> begin + hprod!(nlp, x, v, Hv; obj_weight = obj_weight) + if β == 0 + res .= α .* Hv + else + res .= α .* Hv .+ β .* res + end + return res + end + return LinearOperator{T}(nlp.meta.nvar, nlp.meta.nvar, true, true, prod!, prod!, prod!) +end + + +function hess_op!( + nlp::AbstractNLPModel{T, S}, + rows::AbstractVector{<:Integer}, + cols::AbstractVector{<:Integer}, + vals::AbstractVector, + Hv::AbstractVector, +) where {T, S} + @lencheck nlp.meta.nnzh rows cols vals + @lencheck nlp.meta.nvar Hv + prod! = @closure (res, v, α, β) -> begin + hprod!(nlp, rows, cols, vals, v, Hv) + if β == 0 + res .= α .* Hv + else + res .= α .* Hv .+ β .* res + end + return res + end + return LinearOperator{T}(nlp.meta.nvar, nlp.meta.nvar, true, true, prod!, prod!, prod!) +end + + +function hess_op!( + nlp::AbstractNLPModel{T, S}, + x::AbstractVector, + y::AbstractVector, + Hv::AbstractVector; + obj_weight::Real = one(T), +) where {T, S} + @lencheck nlp.meta.nvar x Hv + @lencheck nlp.meta.ncon y + prod! = @closure (res, v, α, β) -> begin + hprod!(nlp, x, y, v, Hv; obj_weight = obj_weight) + if β == 0 + res .= α .* Hv + else + res .= α .* Hv .+ β .* res + end + return res + end + return LinearOperator{T}(nlp.meta.nvar, nlp.meta.nvar, true, true, prod!, prod!, prod!) +end + + +function jtprod(nlp::AbstractNLPModel{T, S}, x::AbstractVector, v::AbstractVector) where {T, S} + @lencheck nlp.meta.nvar x + @lencheck nlp.meta.ncon v + Jtv = S(undef, nlp.meta.nvar) + return jtprod!(nlp, x, v, Jtv) +end + +function jtprod!(nlp::AbstractNLPModel, x::AbstractVector, v::AbstractVector, Jtv::AbstractVector) + @lencheck nlp.meta.nvar x Jtv + @lencheck nlp.meta.ncon v + increment!(nlp, :neval_jtprod) + if nlp.meta.nnln == 0 + (nlp.meta.nlin > 0) && jtprod_lin!(nlp, v, Jtv) + elseif nlp.meta.nlin == 0 + (nlp.meta.nnln > 0) && jtprod_nln!(nlp, x, v, Jtv) + elseif nlp.meta.nlin >= nlp.meta.nnln + jtprod_lin!(nlp, view(v, nlp.meta.lin), Jtv) + if nlp.meta.nnln > 0 + Jtv .+= jtprod_nln(nlp, x, view(v, nlp.meta.nln)) + end + else + jtprod_nln!(nlp, x, view(v, nlp.meta.nln), Jtv) + if nlp.meta.nlin > 0 + Jtv .+= jtprod_lin(nlp, view(v, nlp.meta.lin)) + end + end + return Jtv +end + +function jtprod!( + nlp::AbstractNLPModel, + rows::AbstractVector{<:Integer}, + cols::AbstractVector{<:Integer}, + vals::AbstractVector, + v::AbstractVector, + Jtv::AbstractVector, +) + @lencheck nlp.meta.nnzj rows cols vals + @lencheck nlp.meta.ncon v + @lencheck nlp.meta.nvar Jtv + increment!(nlp, :neval_jtprod) + coo_prod!(cols, rows, vals, v, Jtv) +end + +function jtprod_lin(nlp::AbstractNLPModel{T, S}, v::AbstractVector) where {T, S} + @lencheck nlp.meta.nlin v + Jtv = S(undef, nlp.meta.nvar) + return jtprod_lin!(nlp, v, Jtv) +end + +function jtprod_lin! 
end + +function jtprod_lin!( + nlp::AbstractNLPModel, + rows::AbstractVector{<:Integer}, + cols::AbstractVector{<:Integer}, + vals::AbstractVector, + v::AbstractVector, + Jtv::AbstractVector, +) + @lencheck nlp.meta.lin_nnzj rows cols vals + @lencheck nlp.meta.nlin v + @lencheck nlp.meta.nvar Jtv + increment!(nlp, :neval_jtprod_lin) + coo_prod!(cols, rows, vals, v, Jtv) +end + +function jtprod_nln(nlp::AbstractNLPModel{T, S}, x::AbstractVector, v::AbstractVector) where {T, S} + @lencheck nlp.meta.nvar x + @lencheck nlp.meta.nnln v + Jtv = S(undef, nlp.meta.nvar) + return jtprod_nln!(nlp, x, v, Jtv) +end + +function jtprod_nln! end + +function jtprod_nln!( + nlp::AbstractNLPModel, + rows::AbstractVector{<:Integer}, + cols::AbstractVector{<:Integer}, + vals::AbstractVector, + v::AbstractVector, + Jtv::AbstractVector, +) + @lencheck nlp.meta.nln_nnzj rows cols vals + @lencheck nlp.meta.nnln v + @lencheck nlp.meta.nvar Jtv + increment!(nlp, :neval_jtprod_nln) + coo_prod!(cols, rows, vals, v, Jtv) +end + + +function jac_op(nlp::AbstractNLPModel{T, S}, x::AbstractVector) where {T, S} + @lencheck nlp.meta.nvar x + Jv = S(undef, nlp.meta.ncon) + Jtv = S(undef, nlp.meta.nvar) + return jac_op!(nlp, x, Jv, Jtv) +end + +function jac_op!( + nlp::AbstractNLPModel{T, S}, + x::AbstractVector{T}, + Jv::AbstractVector, + Jtv::AbstractVector, +) where {T, S} + @lencheck nlp.meta.nvar x Jtv + @lencheck nlp.meta.ncon Jv + prod! = @closure (res, v, α, β) -> begin # res = α * J * v + β * res + jprod!(nlp, x, v, Jv) + if β == 0 + res .= α .* Jv + else + res .= α .* Jv .+ β .* res + end + return res + end + ctprod! = @closure (res, v, α, β) -> begin + jtprod!(nlp, x, v, Jtv) + if β == 0 + res .= α .* Jtv + else + res .= α .* Jtv .+ β .* res + end + return res + end + return LinearOperator{T}(nlp.meta.ncon, nlp.meta.nvar, false, false, prod!, ctprod!, ctprod!) +end + +function jac_op!( + nlp::AbstractNLPModel{T, S}, + rows::AbstractVector{<:Integer}, + cols::AbstractVector{<:Integer}, + vals::AbstractVector{T}, + Jv::AbstractVector, + Jtv::AbstractVector, +) where {T, S} + @lencheck nlp.meta.nnzj rows cols vals + @lencheck nlp.meta.ncon Jv + @lencheck nlp.meta.nvar Jtv + prod! = @closure (res, v, α, β) -> begin # res = α * J * v + β * res + jprod!(nlp, rows, cols, vals, v, Jv) + if β == 0 + res .= α .* Jv + else + res .= α .* Jv .+ β .* res + end + return res + end + ctprod! = @closure (res, v, α, β) -> begin + jtprod!(nlp, rows, cols, vals, v, Jtv) + if β == 0 + res .= α .* Jtv + else + res .= α .* Jtv .+ β .* res + end + return res + end + return LinearOperator{T}(nlp.meta.ncon, nlp.meta.nvar, false, false, prod!, ctprod!, ctprod!) +end + +function jac_lin_op(nlp::AbstractNLPModel{T, S}) where {T, S} + Jv = S(undef, nlp.meta.nlin) + Jtv = S(undef, nlp.meta.nvar) + return jac_lin_op!(nlp, Jv, Jtv) +end + +function jac_lin_op!( + nlp::AbstractNLPModel{T, S}, + Jv::AbstractVector, + Jtv::AbstractVector, +) where {T, S} + @lencheck nlp.meta.nlin Jv + @lencheck nlp.meta.nvar Jtv + prod! = @closure (res, v, α, β) -> begin # res = α * J * v + β * res + jprod_lin!(nlp, v, Jv) + if β == 0 + res .= α .* Jv + else + res .= α .* Jv .+ β .* res + end + return res + end + ctprod! = @closure (res, v, α, β) -> begin + jtprod_lin!(nlp, v, Jtv) + if β == 0 + res .= α .* Jtv + else + res .= α .* Jtv .+ β .* res + end + return res + end + return LinearOperator{T}(nlp.meta.nlin, nlp.meta.nvar, false, false, prod!, ctprod!, ctprod!) 
+end + +function jac_lin_op!( + nlp::AbstractNLPModel{T, S}, + rows::AbstractVector{<:Integer}, + cols::AbstractVector{<:Integer}, + vals::AbstractVector{T}, + Jv::AbstractVector, + Jtv::AbstractVector, +) where {T, S} + @lencheck nlp.meta.lin_nnzj rows cols vals + @lencheck nlp.meta.nlin Jv + @lencheck nlp.meta.nvar Jtv + prod! = @closure (res, v, α, β) -> begin # res = α * J * v + β * res + jprod_lin!(nlp, rows, cols, vals, v, Jv) + if β == 0 + res .= α .* Jv + else + res .= α .* Jv .+ β .* res + end + return res + end + ctprod! = @closure (res, v, α, β) -> begin + jtprod_lin!(nlp, rows, cols, vals, v, Jtv) + if β == 0 + res .= α .* Jtv + else + res .= α .* Jtv .+ β .* res + end + return res + end + return LinearOperator{T}(nlp.meta.nlin, nlp.meta.nvar, false, false, prod!, ctprod!, ctprod!) +end + +function jac_nln_op(nlp::AbstractNLPModel{T, S}, x::AbstractVector) where {T, S} + @lencheck nlp.meta.nvar x + Jv = S(undef, nlp.meta.nnln) + Jtv = S(undef, nlp.meta.nvar) + return jac_nln_op!(nlp, x, Jv, Jtv) +end + +function jac_nln_op!( + nlp::AbstractNLPModel{T, S}, + x::AbstractVector{T}, + Jv::AbstractVector, + Jtv::AbstractVector, +) where {T, S} + @lencheck nlp.meta.nvar x Jtv + @lencheck nlp.meta.nnln Jv + prod! = @closure (res, v, α, β) -> begin # res = α * J * v + β * res + jprod_nln!(nlp, x, v, Jv) + if β == 0 + res .= α .* Jv + else + res .= α .* Jv .+ β .* res + end + return res + end + ctprod! = @closure (res, v, α, β) -> begin + jtprod_nln!(nlp, x, v, Jtv) + if β == 0 + res .= α .* Jtv + else + res .= α .* Jtv .+ β .* res + end + return res + end + return LinearOperator{T}(nlp.meta.nnln, nlp.meta.nvar, false, false, prod!, ctprod!, ctprod!) +end + +function jac_nln_op!( + nlp::AbstractNLPModel{T, S}, + rows::AbstractVector{<:Integer}, + cols::AbstractVector{<:Integer}, + vals::AbstractVector{T}, + Jv::AbstractVector, + Jtv::AbstractVector, +) where {T, S} + @lencheck nlp.meta.nln_nnzj rows cols vals + @lencheck nlp.meta.nnln Jv + @lencheck nlp.meta.nvar Jtv + prod! = @closure (res, v, α, β) -> begin # res = α * J * v + β * res + jprod_nln!(nlp, rows, cols, vals, v, Jv) + if β == 0 + res .= α .* Jv + else + res .= α .* Jv .+ β .* res + end + return res + end + ctprod! = @closure (res, v, α, β) -> begin + jtprod_nln!(nlp, rows, cols, vals, v, Jtv) + if β == 0 + res .= α .* Jtv + else + res .= α .* Jtv .+ β .* res + end + return res + end + return LinearOperator{T}(nlp.meta.nnln, nlp.meta.nvar, false, false, prod!, ctprod!, ctprod!) +end + + diff --git a/src/nls/api.jl b/src/nls/api.jl index 6cf0ee0c..f8f05636 100644 --- a/src/nls/api.jl +++ b/src/nls/api.jl @@ -11,11 +11,7 @@ export hprod_residual, hprod_residual!, hess_op_residual, hess_op_residual! Computes ``F(x)``, the residual at x. """ -function residual(nls::AbstractNLSModel{T, S}, x::AbstractVector) where {T, S} - @lencheck nls.meta.nvar x - Fx = S(undef, nls_meta(nls).nequ) - residual!(nls, x, Fx) -end +function residual end """ Fx = residual!(nls, x, Fx) @@ -29,12 +25,7 @@ function residual! end Computes ``J(x)``, the Jacobian of the residual at x. """ -function jac_residual(nls::AbstractNLSModel, x::AbstractVector) - @lencheck nls.meta.nvar x - rows, cols = jac_structure_residual(nls) - vals = jac_coord_residual(nls, x) - sparse(rows, cols, vals, nls.nls_meta.nequ, nls.meta.nvar) -end +function jac_residual end """ (rows,cols) = jac_structure_residual!(nls, rows, cols) @@ -48,11 +39,7 @@ function jac_structure_residual! end Returns the structure of the constraint's Jacobian in sparse coordinate format. 
""" -function jac_structure_residual(nls::AbstractNLSModel) - rows = Vector{Int}(undef, nls.nls_meta.nnzj) - cols = Vector{Int}(undef, nls.nls_meta.nnzj) - jac_structure_residual!(nls, rows, cols) -end +function jac_structure_residual end """ vals = jac_coord_residual!(nls, x, vals) @@ -67,26 +54,14 @@ function jac_coord_residual! end Computes the Jacobian of the residual at `x` in sparse coordinate format. """ -function jac_coord_residual(nls::AbstractNLSModel{T, S}, x::AbstractVector) where {T, S} - @lencheck nls.meta.nvar x - vals = S(undef, nls.nls_meta.nnzj) - jac_coord_residual!(nls, x, vals) -end +function jac_coord_residual end """ Jv = jprod_residual(nls, x, v) Computes the product of the Jacobian of the residual at x and a vector, i.e., ``J(x)v``. """ -function jprod_residual( - nls::AbstractNLSModel{T, S}, - x::AbstractVector, - v::AbstractVector, -) where {T, S} - @lencheck nls.meta.nvar x v - Jv = S(undef, nls_meta(nls).nequ) - jprod_residual!(nls, x, v, Jv) -end +function jprod_residual end """ Jv = jprod_residual!(nls, x, v, Jv) @@ -101,36 +76,14 @@ function jprod_residual! end Computes the product of the Jacobian of the residual given by `(rows, cols, vals)` and a vector, i.e., ``J(x)v``, storing it in `Jv`. """ -function jprod_residual!( - nls::AbstractNLSModel, - rows::AbstractVector{<:Integer}, - cols::AbstractVector{<:Integer}, - vals::AbstractVector, - v::AbstractVector, - Jv::AbstractVector, -) - @lencheck nls.nls_meta.nnzj rows cols vals - @lencheck nls.meta.nvar v - @lencheck nls.nls_meta.nequ Jv - increment!(nls, :neval_jprod_residual) - coo_prod!(rows, cols, vals, v, Jv) -end +function jprod_residual! end """ Jtv = jtprod_residual(nls, x, v) Computes the product of the transpose of the Jacobian of the residual at x and a vector, i.e., ``J(x)^Tv``. """ -function jtprod_residual( - nls::AbstractNLSModel{T, S}, - x::AbstractVector, - v::AbstractVector, -) where {T, S} - @lencheck nls.meta.nvar x - @lencheck nls.nls_meta.nequ v - Jtv = S(undef, nls_meta(nls).nvar) - jtprod_residual!(nls, x, v, Jtv) -end +function jtprod_residual end """ Jtv = jtprod_residual!(nls, x, v, Jtv) @@ -145,32 +98,14 @@ function jtprod_residual! end Computes the product of the transpose of the Jacobian of the residual given by `(rows, cols, vals)` and a vector, i.e., ``J(x)^Tv``, storing it in `Jv`. """ -function jtprod_residual!( - nls::AbstractNLSModel, - rows::AbstractVector{<:Integer}, - cols::AbstractVector{<:Integer}, - vals::AbstractVector, - v::AbstractVector, - Jtv::AbstractVector, -) - @lencheck nls.nls_meta.nnzj rows cols vals - @lencheck nls.nls_meta.nequ v - @lencheck nls.meta.nvar Jtv - increment!(nls, :neval_jtprod_residual) - coo_prod!(cols, rows, vals, v, Jtv) -end +function jtprod_residual! end """ Jx = jac_op_residual(nls, x) Computes ``J(x)``, the Jacobian of the residual at x, in linear operator form. """ -function jac_op_residual(nls::AbstractNLSModel{T, S}, x::AbstractVector) where {T, S} - @lencheck nls.meta.nvar x - Jv = S(undef, nls_meta(nls).nequ) - Jtv = S(undef, nls.meta.nvar) - return jac_op_residual!(nls, x, Jv, Jtv) -end +function jac_op_residual end """ Jx = jac_op_residual!(nls, x, Jv, Jtv) @@ -178,42 +113,7 @@ end Computes ``J(x)``, the Jacobian of the residual at x, in linear operator form. The vectors `Jv` and `Jtv` are used as preallocated storage for the operations. 
""" -function jac_op_residual!( - nls::AbstractNLSModel{T, S}, - x::AbstractVector, - Jv::AbstractVector, - Jtv::AbstractVector, -) where {T, S} - @lencheck nls.meta.nvar x Jtv - @lencheck nls.nls_meta.nequ Jv - prod! = @closure (res, v, α, β) -> begin - jprod_residual!(nls, x, v, Jv) - if β == 0 - res .= α .* Jv - else - res .= α .* Jv .+ β .* res - end - return res - end - ctprod! = @closure (res, v, α, β) -> begin - jtprod_residual!(nls, x, v, Jtv) - if β == 0 - res .= α .* Jtv - else - res .= α .* Jtv .+ β .* res - end - return res - end - return LinearOperator{T}( - nls_meta(nls).nequ, - nls_meta(nls).nvar, - false, - false, - prod!, - ctprod!, - ctprod!, - ) -end +function jac_op_residual! end """ Jx = jac_op_residual!(nls, rows, cols, vals, Jv, Jtv) @@ -221,45 +121,7 @@ end Computes ``J(x)``, the Jacobian of the residual given by `(rows, cols, vals)`, in linear operator form. The vectors `Jv` and `Jtv` are used as preallocated storage for the operations. """ -function jac_op_residual!( - nls::AbstractNLSModel{T, S}, - rows::AbstractVector{<:Integer}, - cols::AbstractVector{<:Integer}, - vals::AbstractVector, - Jv::AbstractVector, - Jtv::AbstractVector, -) where {T, S} - @lencheck nls.nls_meta.nnzj rows cols vals - @lencheck nls.nls_meta.nequ Jv - @lencheck nls.meta.nvar Jtv - prod! = @closure (res, v, α, β) -> begin - jprod_residual!(nls, rows, cols, vals, v, Jv) - if β == 0 - res .= α .* Jv - else - res .= α .* Jv .+ β .* res - end - return res - end - ctprod! = @closure (res, v, α, β) -> begin - jtprod_residual!(nls, rows, cols, vals, v, Jtv) - if β == 0 - res .= α .* Jtv - else - res .= α .* Jtv .+ β .* res - end - return res - end - return LinearOperator{T}( - nls_meta(nls).nequ, - nls_meta(nls).nvar, - false, - false, - prod!, - ctprod!, - ctprod!, - ) -end +function jac_op_residual! end """ H = hess_residual(nls, x, v) @@ -268,24 +130,14 @@ Computes the linear combination of the Hessians of the residuals at `x` with coe `v`. A `Symmetric` object wrapping the lower triangle is returned. """ -function hess_residual(nls::AbstractNLSModel, x::AbstractVector, v::AbstractVector) - @lencheck nls.meta.nvar x - @lencheck nls.nls_meta.nequ v - rows, cols = hess_structure_residual(nls) - vals = hess_coord_residual(nls, x, v) - Symmetric(sparse(rows, cols, vals, nls.meta.nvar, nls.meta.nvar), :L) -end +function hess_residual end """ (rows,cols) = hess_structure_residual(nls) Returns the structure of the residual Hessian. """ -function hess_structure_residual(nls::AbstractNLSModel) - rows = Vector{Int}(undef, nls.nls_meta.nnzh) - cols = Vector{Int}(undef, nls.nls_meta.nnzh) - hess_structure_residual!(nls, rows, cols) -end +function hess_structure_residual end """ hess_structure_residual!(nls, rows, cols) @@ -308,29 +160,14 @@ function hess_coord_residual! end Computes the linear combination of the Hessians of the residuals at `x` with coefficients `v` in sparse coordinate format. """ -function hess_coord_residual( - nls::AbstractNLSModel{T, S}, - x::AbstractVector, - v::AbstractVector, -) where {T, S} - @lencheck nls.meta.nvar x - @lencheck nls.nls_meta.nequ v - vals = S(undef, nls.nls_meta.nnzh) - hess_coord_residual!(nls, x, v, vals) -end +function hess_coord_residual end """ Hj = jth_hess_residual(nls, x, j) Computes the Hessian of the j-th residual at x. 
""" -function jth_hess_residual(nls::AbstractNLSModel{T, S}, x::AbstractVector, j::Int) where {T, S} - @lencheck nls.meta.nvar x - @rangecheck 1 nls.nls_meta.nequ j - rows, cols = hess_structure_residual(nls) - vals = jth_hess_residual_coord(nls, x, j) - return Symmetric(sparse(rows, cols, vals, nls.meta.nvar, nls.meta.nvar), :L) -end +function jth_hess_residual end """ vals = jth_hess_residual_coord(nls, x, j) @@ -338,16 +175,7 @@ end Evaluate the Hessian of j-th residual at `x` in sparse coordinate format. Only the lower triangle is returned. """ -function jth_hess_residual_coord( - nls::AbstractNLSModel{T, S}, - x::AbstractVector, - j::Int, -) where {T, S} - @lencheck nls.meta.nvar x - @rangecheck 1 nls.nls_meta.nequ j - vals = S(undef, nls.nls_meta.nnzh) - return jth_hess_residual_coord!(nls, x, j, vals) -end +function jth_hess_residual_coord end """ vals = jth_hess_residual_coord!(nls, x, j, vals) @@ -355,37 +183,14 @@ end Evaluate the Hessian of j-th residual at `x` in sparse coordinate format, with `vals` of length `nls.nls_meta.nnzh`, in place. Only the lower triangle is returned. """ -function jth_hess_residual_coord!( - nls::AbstractNLSModel{T, S}, - x::AbstractVector, - j::Int, - vals::AbstractVector, -) where {T, S} - @lencheck nls.meta.nvar x - @rangecheck 1 nls.nls_meta.nequ j - @lencheck nls.nls_meta.nnzh vals - increment!(nls, :neval_jhess_residual) - decrement!(nls, :neval_hess_residual) - v = [i == j ? one(T) : zero(T) for i = 1:(nls.nls_meta.nequ)] - return hess_coord_residual!(nls, x, v, vals) -end +function jth_hess_residual_coord! end """ Hiv = hprod_residual(nls, x, i, v) Computes the product of the Hessian of the i-th residual at x, times the vector v. """ -function hprod_residual( - nls::AbstractNLSModel{T, S}, - x::AbstractVector, - i::Int, - v::AbstractVector, -) where {T, S} - @lencheck nls.meta.nvar x v - @rangecheck 1 nls.nls_meta.nequ i - Hv = S(undef, nls.meta.nvar) - hprod_residual!(nls, x, i, v, Hv) -end +function hprod_residual end """ Hiv = hprod_residual!(nls, x, i, v, Hiv) @@ -399,137 +204,20 @@ function hprod_residual! end Computes the Hessian of the i-th residual at x, in linear operator form. """ -function hess_op_residual(nls::AbstractNLSModel{T, S}, x::AbstractVector, i::Int) where {T, S} - @lencheck nls.meta.nvar x - @rangecheck 1 nls.nls_meta.nequ i - Hiv = S(undef, nls.meta.nvar) - return hess_op_residual!(nls, x, i, Hiv) -end +function hess_op_residual end """ Hop = hess_op_residual!(nls, x, i, Hiv) Computes the Hessian of the i-th residual at x, in linear operator form. The vector `Hiv` is used as preallocated storage for the operation. """ -function hess_op_residual!( - nls::AbstractNLSModel{T, S}, - x::AbstractVector, - i::Int, - Hiv::AbstractVector, -) where {T, S} - @lencheck nls.meta.nvar x Hiv - @rangecheck 1 nls.nls_meta.nequ i - prod! = @closure (res, v, α, β) -> begin - hprod_residual!(nls, x, i, v, Hiv) - if β == 0 - res .= α .* Hiv - else - res .= α .* Hiv .+ β .* res - end - return res - end - return LinearOperator{T}(nls_meta(nls).nvar, nls_meta(nls).nvar, true, true, prod!, prod!, prod!) -end - -""" - f = obj(nls, x) - f = obj(nls, x, Fx; recompute::Bool=true) - -Evaluate `f(x)`, the objective function of `nls::AbstractNLSModel`. `Fx` is overwritten with the value of the residual `F(x)`. -If `recompute` is `true`, then `Fx` is updated with the residual at `x`. 
-""" -function obj(nls::AbstractNLSModel, x::AbstractVector, Fx::AbstractVector; recompute::Bool = true) - @lencheck nls.meta.nvar x - increment!(nls, :neval_obj) - recompute && residual!(nls, x, Fx) - return dot(Fx, Fx) / 2 -end - -function obj(nls::AbstractNLSModel{T, S}, x::AbstractVector) where {T, S} - @lencheck nls.meta.nvar x - Fx = S(undef, nls.nls_meta.nequ) - return obj(nls, x, Fx) -end - -""" - f, c = objcons!(nls, x, c) - f, c = objcons!(nls, x, c, Fx; recompute::Bool=true) - -In-place evaluation of constraints and objective for AbstractNLSModel. -`Fx` is overwritten with the value of the residual `F(x)`. -If `recompute` is `true`, then `Fx` is updated with the residual at `x`. -""" -function objcons!(nls::AbstractNLSModel{T, S}, x::AbstractVector, c::AbstractVector) where {T, S} - @lencheck nls.meta.nvar x - @lencheck nls.meta.ncon c - Fx = S(undef, nls.nls_meta.nequ) - return objcons!(nls, x, c, Fx) -end - -function objcons!( - nls::AbstractNLSModel, - x::AbstractVector, - c::AbstractVector, - Fx::AbstractVector; - recompute::Bool = true, -) - cons!(nls, x, c) - return obj(nls, x, Fx; recompute = recompute), c -end - -""" - g = grad!(nls, x, g) - g = grad!(nls, x, g, Fx; recompute::Bool=true) - -Evaluate `∇f(x)`, the gradient of the objective function of `nls::AbstractNLSModel` at `x` in place. `Fx` is overwritten with the value of the residual `F(x)`. -If `recompute` is `true`, then `Fx` is updated with the residual at `x`. -""" -function grad!( - nls::AbstractNLSModel, - x::AbstractVector, - g::AbstractVector, - Fx::AbstractVector; - recompute::Bool = true, -) - @lencheck nls.meta.nvar x g - increment!(nls, :neval_grad) - recompute && residual!(nls, x, Fx) - return jtprod_residual!(nls, x, Fx, g) -end - -function grad!(nls::AbstractNLSModel{T, S}, x::AbstractVector, g::AbstractVector) where {T, S} - @lencheck nls.meta.nvar x g - increment!(nls, :neval_grad) - Fx = S(undef, nls.nls_meta.nequ) - return grad!(nls, x, g, Fx) -end - -""" - f, g = objgrad!(nls, x, g) - f, g = objgrad!(nls, x, g, Fx; recompute::Bool=true) - -Evaluate f(x) and ∇f(x) of `nls::AbstractNLSModel` at `x`. `Fx` is overwritten with the value of the residual `F(x)`. -If `recompute` is `true`, then `Fx` is updated with the residual at `x`. -""" -function objgrad!( - nls::AbstractNLSModel, - x::AbstractVector, - g::AbstractVector, - Fx::AbstractVector; - recompute::Bool = true, -) - @lencheck nls.meta.nvar x g - increment!(nls, :neval_obj) - increment!(nls, :neval_grad) - recompute && residual!(nls, x, Fx) - jtprod_residual!(nls, x, Fx, g) - return dot(Fx, Fx) / 2, g -end - -function objgrad!(nls::AbstractNLSModel{T, S}, x::AbstractVector, g::AbstractVector) where {T, S} - @lencheck nls.meta.nvar x g - increment!(nls, :neval_obj) - increment!(nls, :neval_grad) - Fx = S(undef, nls.nls_meta.nequ) - return objgrad!(nls, x, g, Fx) -end +function hess_op_residual! end + +function obj end + +function objcons! end + +function grad! end + +function objgrad! end + diff --git a/src/nls/defaults.jl b/src/nls/defaults.jl new file mode 100644 index 00000000..5b932d63 --- /dev/null +++ b/src/nls/defaults.jl @@ -0,0 +1,333 @@ +# Default implementations for AbstractNLSModel APIs. 
+ +function residual(nls::AbstractNLSModel{T, S}, x::AbstractVector) where {T, S} + @lencheck nls.meta.nvar x + Fx = S(undef, nls_meta(nls).nequ) + residual!(nls, x, Fx) +end + +function jac_residual(nls::AbstractNLSModel, x::AbstractVector) + @lencheck nls.meta.nvar x + rows, cols = jac_structure_residual(nls) + vals = jac_coord_residual(nls, x) + sparse(rows, cols, vals, nls.nls_meta.nequ, nls.meta.nvar) +end + +function jac_structure_residual(nls::AbstractNLSModel) + rows = Vector{Int}(undef, nls.nls_meta.nnzj) + cols = Vector{Int}(undef, nls.nls_meta.nnzj) + jac_structure_residual!(nls, rows, cols) +end + +function jac_coord_residual(nls::AbstractNLSModel{T, S}, x::AbstractVector) where {T, S} + @lencheck nls.meta.nvar x + vals = S(undef, nls.nls_meta.nnzj) + jac_coord_residual!(nls, x, vals) +end + +function jprod_residual( + nls::AbstractNLSModel{T, S}, + x::AbstractVector, + v::AbstractVector, +) where {T, S} + @lencheck nls.meta.nvar x v + Jv = S(undef, nls_meta(nls).nequ) + jprod_residual!(nls, x, v, Jv) +end + +function jprod_residual!( + nls::AbstractNLSModel, + rows::AbstractVector{<:Integer}, + cols::AbstractVector{<:Integer}, + vals::AbstractVector, + v::AbstractVector, + Jv::AbstractVector, +) + @lencheck nls.nls_meta.nnzj rows cols vals + @lencheck nls.meta.nvar v + @lencheck nls.nls_meta.nequ Jv + increment!(nls, :neval_jprod_residual) + coo_prod!(rows, cols, vals, v, Jv) +end + +function jtprod_residual( + nls::AbstractNLSModel{T, S}, + x::AbstractVector, + v::AbstractVector, +) where {T, S} + @lencheck nls.meta.nvar x + @lencheck nls.nls_meta.nequ v + Jtv = S(undef, nls_meta(nls).nvar) + jtprod_residual!(nls, x, v, Jtv) +end + +function jtprod_residual!( + nls::AbstractNLSModel, + rows::AbstractVector{<:Integer}, + cols::AbstractVector{<:Integer}, + vals::AbstractVector, + v::AbstractVector, + Jtv::AbstractVector, +) + @lencheck nls.nls_meta.nnzj rows cols vals + @lencheck nls.nls_meta.nequ v + @lencheck nls.meta.nvar Jtv + increment!(nls, :neval_jtprod_residual) + coo_prod!(cols, rows, vals, v, Jtv) +end + +function jac_op_residual(nls::AbstractNLSModel{T, S}, x::AbstractVector) where {T, S} + @lencheck nls.meta.nvar x + Jv = S(undef, nls_meta(nls).nequ) + Jtv = S(undef, nls.meta.nvar) + return jac_op_residual!(nls, x, Jv, Jtv) +end + +function jac_op_residual!( + nls::AbstractNLSModel{T, S}, + x::AbstractVector, + Jv::AbstractVector, + Jtv::AbstractVector, +) where {T, S} + @lencheck nls.meta.nvar x Jtv + @lencheck nls.nls_meta.nequ Jv + prod! = @closure (res, v, α, β) -> begin + jprod_residual!(nls, x, v, Jv) + if β == 0 + res .= α .* Jv + else + res .= α .* Jv .+ β .* res + end + return res + end + ctprod! = @closure (res, v, α, β) -> begin + jtprod_residual!(nls, x, v, Jtv) + if β == 0 + res .= α .* Jtv + else + res .= α .* Jtv .+ β .* res + end + return res + end + return LinearOperator{T}( + nls_meta(nls).nequ, + nls_meta(nls).nvar, + false, + false, + prod!, + ctprod!, + ctprod!, + ) +end + +function jac_op_residual!( + nls::AbstractNLSModel{T, S}, + rows::AbstractVector{<:Integer}, + cols::AbstractVector{<:Integer}, + vals::AbstractVector, + Jv::AbstractVector, + Jtv::AbstractVector, +) where {T, S} + @lencheck nls.nls_meta.nnzj rows cols vals + @lencheck nls.nls_meta.nequ Jv + @lencheck nls.meta.nvar Jtv + prod! = @closure (res, v, α, β) -> begin + jprod_residual!(nls, rows, cols, vals, v, Jv) + if β == 0 + res .= α .* Jv + else + res .= α .* Jv .+ β .* res + end + return res + end + ctprod! 
= @closure (res, v, α, β) -> begin + jtprod_residual!(nls, rows, cols, vals, v, Jtv) + if β == 0 + res .= α .* Jtv + else + res .= α .* Jtv .+ β .* res + end + return res + end + return LinearOperator{T}( + nls_meta(nls).nequ, + nls_meta(nls).nvar, + false, + false, + prod!, + ctprod!, + ctprod!, + ) +end + +function hess_residual(nls::AbstractNLSModel, x::AbstractVector, v::AbstractVector) + @lencheck nls.meta.nvar x + @lencheck nls.nls_meta.nequ v + rows, cols = hess_structure_residual(nls) + vals = hess_coord_residual(nls, x, v) + Symmetric(sparse(rows, cols, vals, nls.meta.nvar, nls.meta.nvar), :L) +end + +function hess_structure_residual(nls::AbstractNLSModel) + rows = Vector{Int}(undef, nls.nls_meta.nnzh) + cols = Vector{Int}(undef, nls.nls_meta.nnzh) + hess_structure_residual!(nls, rows, cols) +end + +function hess_coord_residual( + nls::AbstractNLSModel{T, S}, + x::AbstractVector, + v::AbstractVector, +) where {T, S} + @lencheck nls.meta.nvar x + @lencheck nls.nls_meta.nequ v + vals = S(undef, nls.nls_meta.nnzh) + hess_coord_residual!(nls, x, v, vals) +end + +function jth_hess_residual(nls::AbstractNLSModel{T, S}, x::AbstractVector, j::Int) where {T, S} + @lencheck nls.meta.nvar x + @rangecheck 1 nls.nls_meta.nequ j + rows, cols = hess_structure_residual(nls) + vals = jth_hess_residual_coord(nls, x, j) + return Symmetric(sparse(rows, cols, vals, nls.meta.nvar, nls.meta.nvar), :L) +end + +function jth_hess_residual_coord( + nls::AbstractNLSModel{T, S}, + x::AbstractVector, + j::Int, +) where {T, S} + @lencheck nls.meta.nvar x + @rangecheck 1 nls.nls_meta.nequ j + vals = S(undef, nls.nls_meta.nnzh) + return jth_hess_residual_coord!(nls, x, j, vals) +end + +function jth_hess_residual_coord!( + nls::AbstractNLSModel{T, S}, + x::AbstractVector, + j::Int, + vals::AbstractVector, +) where {T, S} + @lencheck nls.meta.nvar x + @rangecheck 1 nls.nls_meta.nequ j + @lencheck nls.nls_meta.nnzh vals + increment!(nls, :neval_jhess_residual) + decrement!(nls, :neval_hess_residual) + v = [i == j ? one(T) : zero(T) for i = 1:(nls.nls_meta.nequ)] + return hess_coord_residual!(nls, x, v, vals) +end + +function hprod_residual( + nls::AbstractNLSModel{T, S}, + x::AbstractVector, + i::Int, + v::AbstractVector, +) where {T, S} + @lencheck nls.meta.nvar x v + @rangecheck 1 nls.nls_meta.nequ i + Hv = S(undef, nls.meta.nvar) + hprod_residual!(nls, x, i, v, Hv) +end + +function hess_op_residual(nls::AbstractNLSModel{T, S}, x::AbstractVector, i::Int) where {T, S} + @lencheck nls.meta.nvar x + @rangecheck 1 nls.nls_meta.nequ i + Hiv = S(undef, nls.meta.nvar) + return hess_op_residual!(nls, x, i, Hiv) +end + +function hess_op_residual!( + nls::AbstractNLSModel{T, S}, + x::AbstractVector, + i::Int, + Hiv::AbstractVector, +) where {T, S} + @lencheck nls.meta.nvar x Hiv + @rangecheck 1 nls.nls_meta.nequ i + prod! = @closure (res, v, α, β) -> begin + hprod_residual!(nls, x, i, v, Hiv) + if β == 0 + res .= α .* Hiv + else + res .= α .* Hiv .+ β .* res + end + return res + end + return LinearOperator{T}(nls_meta(nls).nvar, nls_meta(nls).nvar, true, true, prod!, prod!, prod!) 
+end + +function obj(nls::AbstractNLSModel, x::AbstractVector, Fx::AbstractVector; recompute::Bool = true) + @lencheck nls.meta.nvar x + increment!(nls, :neval_obj) + recompute && residual!(nls, x, Fx) + return dot(Fx, Fx) / 2 +end + +function obj(nls::AbstractNLSModel{T, S}, x::AbstractVector) where {T, S} + @lencheck nls.meta.nvar x + Fx = S(undef, nls.nls_meta.nequ) + return obj(nls, x, Fx) +end + +function objcons!(nls::AbstractNLSModel{T, S}, x::AbstractVector, c::AbstractVector) where {T, S} + @lencheck nls.meta.nvar x + @lencheck nls.meta.ncon c + Fx = S(undef, nls.nls_meta.nequ) + return objcons!(nls, x, c, Fx) +end + +function objcons!( + nls::AbstractNLSModel, + x::AbstractVector, + c::AbstractVector, + Fx::AbstractVector; + recompute::Bool = true, +) + cons!(nls, x, c) + return obj(nls, x, Fx; recompute = recompute), c +end + +function grad!( + nls::AbstractNLSModel, + x::AbstractVector, + g::AbstractVector, + Fx::AbstractVector; + recompute::Bool = true, +) + @lencheck nls.meta.nvar x g + increment!(nls, :neval_grad) + recompute && residual!(nls, x, Fx) + return jtprod_residual!(nls, x, Fx, g) +end + +function grad!(nls::AbstractNLSModel{T, S}, x::AbstractVector, g::AbstractVector) where {T, S} + @lencheck nls.meta.nvar x g + increment!(nls, :neval_grad) + Fx = S(undef, nls.nls_meta.nequ) + return grad!(nls, x, g, Fx) +end + +function objgrad!( + nls::AbstractNLSModel, + x::AbstractVector, + g::AbstractVector, + Fx::AbstractVector; + recompute::Bool = true, +) + @lencheck nls.meta.nvar x g + increment!(nls, :neval_obj) + increment!(nls, :neval_grad) + recompute && residual!(nls, x, Fx) + jtprod_residual!(nls, x, Fx, g) + return dot(Fx, Fx) / 2, g +end + +function objgrad!(nls::AbstractNLSModel{T, S}, x::AbstractVector, g::AbstractVector) where {T, S} + @lencheck nls.meta.nvar x g + increment!(nls, :neval_obj) + increment!(nls, :neval_grad) + Fx = S(undef, nls.nls_meta.nequ) + return objgrad!(nls, x, g, Fx) +end
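For reference, a short usage sketch of the residual-reusing signatures documented above (`obj`, `grad!`, `objcons!` and `objgrad!` taking a preallocated `Fx`). It assumes an existing `nls::AbstractNLSModel`; the variable names are illustrative and not part of the patch.

x  = nls.meta.x0
Fx = similar(x, nls_meta(nls).nequ)
g  = similar(x)

f = obj(nls, x, Fx)                      # fills Fx with F(x) and returns ½‖F(x)‖²
grad!(nls, x, g, Fx; recompute = false)  # reuses the Fx just computed: g ← J(x)ᵀ Fx
f, g = objgrad!(nls, x, g, Fx)           # recompute defaults to true, so Fx is refreshed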