@@ -43,6 +43,7 @@ See also: [`DRLS`](@ref).
 - `gamma`: stepsize to use, chosen appropriately based on Lf and mf by default.
 - `max_backtracks=20`: maximum number of line-search backtracks.
 - `directions=LBFGS(5)`: strategy to use to compute line-search directions.
+- `monotonicity=1`: parameter controlling the averaging scheme for the nonmonotone linesearch; monotonicity ∈ (0, 1], and the default value 1 gives the monotone scheme (see the sketch after this hunk).
 
 # References
 1. Themelis, Stella, Patrinos, "Douglas-Rachford splitting and ADMM for nonconvex optimization: Accelerated and Newton-type linesearch algorithms", Computational Optimization and Applications, vol. 82, no. 2, pp. 395-440 (2022).
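To make the new parameter concrete, here is a minimal standalone sketch of the averaging rule (the `update_merit` helper is hypothetical, not part of the package): with `monotonicity = 1` the merit is simply the latest DRE value, i.e. the monotone scheme, while `monotonicity < 1` turns the merit into an exponential moving average of past DRE values, so individual iterations are allowed to increase the DRE.

```julia
# Hypothetical helper mirroring the averaging rule added in this commit.
update_merit(merit, dre, monotonicity) = (1 - monotonicity) * merit + monotonicity * dre

let merit = 10.0                    # merit starts at the DRE of the initial point
    for dre in (9.0, 11.0, 8.0)     # DRE values observed along three iterations
        merit = update_merit(merit, dre, 0.5)
        @show merit                 # 9.5, then 10.25, then 9.125
    end
end
```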
@@ -61,6 +62,7 @@ Base.@kwdef struct DRLSIteration{R,Tx,Tf,Tg,Tmf,TLf,D}
     dre_sign::Int = mf === nothing || mf <= 0 ? 1 : -1
     max_backtracks::Int = 20
     directions::D = LBFGS(5)
+    monotonicity::R = real(eltype(x0))(1)
 end
 
 Base.IteratorSize(::Type{<:DRLSIteration}) = Base.IsInfinite()
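For reference, a hedged usage sketch of the new keyword: the problem data `A`, `b` and the value `0.5` are purely illustrative, the smooth and nonsmooth terms are built with ProximalOperators, and all remaining keywords keep the defaults shown in the struct above.

```julia
using LinearAlgebra
using ProximalOperators: LeastSquares, NormL1

A = [1.0 2.0; 3.0 4.0]
b = [1.0, -1.0]

iter = DRLSIteration(
    f = LeastSquares(A, b),    # smooth term
    g = NormL1(0.1),           # nonsmooth term
    x0 = zeros(2),
    Lf = opnorm(A' * A),       # Lipschitz constant of ∇f, used to pick gamma
    monotonicity = 0.5,        # values < 1 activate the nonmonotone linesearch
)

# the iteration is declared infinite, so cap it explicitly when experimenting
for state in Iterators.take(iter, 100)
    # state.res holds the fixed-point residual one would monitor for stopping
end
```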
@@ -80,6 +82,7 @@ Base.@kwdef mutable struct DRLSState{R,Tx,TH}
     f_u::R
     g_v::R
     H::TH
+    merit::R = zero(gamma)
     tau::R = zero(gamma)
     u0::Tx = similar(x)
     u1::Tx = similar(x)
@@ -116,6 +119,8 @@ function Base.iterate(iter::DRLSIteration)
         g_v = g_v,
         H = initialize(iter.directions, x),
     )
+    # initialize merit
+    state.merit = DRE(state)
     return state, state
 end
 
@@ -141,7 +146,8 @@ update_direction_state!(iter::DRLSIteration, state::DRLSState) =
     update_direction_state!(acceleration_style(typeof(iter.directions)), iter, state)
 
 function Base.iterate(iter::DRLSIteration{R,Tx,Tf}, state::DRLSState) where {R,Tx,Tf}
-    DRE_curr = DRE(state)
+    # retrieve merit and set threshold
+    DRE_curr = state.merit
     threshold = iter.dre_sign * DRE_curr - iter.c / iter.gamma * norm(state.res)^2
 
     set_next_direction!(iter, state)
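Written out, the test this threshold encodes appears to be the following sufficient-decrease condition (reconstructed from the code, not quoted from the paper; $\Phi_k$ denotes the averaged merit, $r^k = u^k - v^k$ the residual, $m$ the `monotonicity` parameter, and the sign flips when `mf > 0`):

$$
\pm\,\mathrm{DRE}_\gamma(x^{k+1}) \le \pm\,\Phi_k - \frac{c}{\gamma}\,\|r^k\|^2,
\qquad
\Phi_{k+1} = (1 - m)\,\Phi_k + m\,\mathrm{DRE}_\gamma(x^{k+1}),
$$

so $m = 1$ gives $\Phi_k = \mathrm{DRE}_\gamma(x^k)$ and recovers the monotone linesearch.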
@@ -190,6 +196,8 @@ function Base.iterate(iter::DRLSIteration{R,Tx,Tf}, state::DRLSState) where {R,Tx,Tf}
         state.res .= state.u .- state.v
         state.xbar .= state.x .- iter.lambda * state.res
     end
+    # update merit with averaging rule
+    state.merit = (1 - iter.monotonicity) * state.merit + iter.monotonicity * DRE(state)
 
     return state, state
 end