Commit 70e2743 — "Expose backtracking factor, rename increase factor" (#92)

- Expose the backtracking factor as `reduce_gamma`.
- Rename the increase factor introduced in #91 from `regret_gamma` to `increase_gamma`.
- Format code.

1 parent: f58002a

File tree: 6 files changed, +58 additions, −17 deletions.

src/algorithms/fast_forward_backward.jl

Lines changed: 6 additions & 3 deletions
```diff
@@ -33,7 +33,8 @@ See also: [`FastForwardBackward`](@ref).
 - `gamma=nothing`: stepsize, defaults to `1/Lf` if `Lf` is set, and `nothing` otherwise.
 - `adaptive=true`: makes `gamma` adaptively adjust during the iterations; this is by default `gamma === nothing`.
 - `minimum_gamma=1e-7`: lower bound to `gamma` in case `adaptive == true`.
-- `regret_gamma=1.0`: factor to enlarge `gamma` in case `adaptive == true`, before backtracking.
+- `reduce_gamma=0.5`: factor by which to reduce `gamma` in case `adaptive == true`, during backtracking.
+- `increase_gamma=1.0`: factor by which to increase `gamma` in case `adaptive == true`, before backtracking.
 - `extrapolation_sequence=nothing`: sequence (iterator) of extrapolation coefficients to use for acceleration.

 # References
@@ -49,7 +50,8 @@ Base.@kwdef struct FastForwardBackwardIteration{R,Tx,Tf,Tg,TLf,Tgamma,Textr}
     gamma::Tgamma = Lf === nothing ? nothing : (1 / Lf)
     adaptive::Bool = gamma === nothing
     minimum_gamma::R = real(eltype(x0))(1e-7)
-    regret_gamma::R = real(eltype(x0))(1.0)
+    reduce_gamma::R = real(eltype(x0))(0.5)
+    increase_gamma::R = real(eltype(x0))(1.0)
     extrapolation_sequence::Textr = nothing
 end

@@ -107,7 +109,7 @@ function Base.iterate(
     state::FastForwardBackwardState{R,Tx},
 ) where {R,Tx}
     state.gamma = if iter.adaptive == true
-        state.gamma *= iter.regret_gamma
+        state.gamma *= iter.increase_gamma
         gamma, state.g_z = backtrack_stepsize!(
             state.gamma,
             iter.f,
@@ -123,6 +125,7 @@ function Base.iterate(
             state.z,
             nothing,
             minimum_gamma = iter.minimum_gamma,
+            reduce_gamma = iter.reduce_gamma,
         )
         gamma
     else
```

src/algorithms/forward_backward.jl

Lines changed: 6 additions & 3 deletions
```diff
@@ -28,7 +28,8 @@ See also: [`ForwardBackward`](@ref).
 - `gamma=nothing`: stepsize to use, defaults to `1/Lf` if not set (but `Lf` is).
 - `adaptive=false`: forces the method stepsize to be adaptively adjusted.
 - `minimum_gamma=1e-7`: lower bound to `gamma` in case `adaptive == true`.
-- `regret_gamma=1.0`: factor to enlarge `gamma` in case `adaptive == true`, before backtracking.
+- `reduce_gamma=0.5`: factor by which to reduce `gamma` in case `adaptive == true`, during backtracking.
+- `increase_gamma=1.0`: factor by which to increase `gamma` in case `adaptive == true`, before backtracking.

 # References
 1. Lions, Mercier, “Splitting algorithms for the sum of two nonlinear operators,” SIAM Journal on Numerical Analysis, vol. 16, pp. 964–979 (1979).
@@ -42,7 +43,8 @@ Base.@kwdef struct ForwardBackwardIteration{R,Tx,Tf,Tg,TLf,Tgamma}
     gamma::Tgamma = Lf === nothing ? nothing : (1 / Lf)
     adaptive::Bool = gamma === nothing
     minimum_gamma::R = real(eltype(x0))(1e-7)
-    regret_gamma::R = real(eltype(x0))(1.0)
+    reduce_gamma::R = real(eltype(x0))(0.5)
+    increase_gamma::R = real(eltype(x0))(1.0)
 end

 Base.IteratorSize(::Type{<:ForwardBackwardIteration}) = Base.IsInfinite()
@@ -87,7 +89,7 @@ function Base.iterate(
     state::ForwardBackwardState{R,Tx},
 ) where {R,Tx}
     if iter.adaptive == true
-        state.gamma *= iter.regret_gamma
+        state.gamma *= iter.increase_gamma
         state.gamma, state.g_z, state.f_x = backtrack_stepsize!(
             state.gamma,
             iter.f,
@@ -103,6 +105,7 @@ function Base.iterate(
             state.z,
             state.grad_f_z,
             minimum_gamma = iter.minimum_gamma,
+            reduce_gamma = iter.reduce_gamma,
         )
         state.x, state.z = state.z, state.x
         state.grad_f_x, state.grad_f_z = state.grad_f_z, state.grad_f_x
```

src/utilities/fb_tools.jl

Lines changed: 15 additions & 4 deletions
```diff
@@ -37,15 +37,16 @@ function backtrack_stepsize!(
     res,
     Az,
     grad_f_Az = nothing;
-    alpha = 1,
-    minimum_gamma = 1e-7,
+    alpha = R(1),
+    minimum_gamma = R(1e-7),
+    reduce_gamma = R(0.5),
 ) where {R}
     f_Az_upp = f_model(f_Ax, At_grad_f_Ax, res, alpha / gamma)
     _mul!(Az, A, z)
     f_Az, cl = value_and_gradient_closure(f, Az)
     tol = 10 * eps(R) * (1 + abs(f_Az))
     while f_Az > f_Az_upp + tol && gamma >= minimum_gamma
-        gamma /= 2
+        gamma *= reduce_gamma
         y .= x .- gamma .* At_grad_f_Ax
         g_z = prox!(z, g, y, gamma)
         res .= x .- z
@@ -63,7 +64,16 @@ function backtrack_stepsize!(
     return gamma, g_z, f_Az, f_Az_upp
 end

-function backtrack_stepsize!(gamma, f, A, g, x; alpha = 1, minimum_gamma = 1e-7)
+function backtrack_stepsize!(
+    gamma::R,
+    f,
+    A,
+    g,
+    x;
+    alpha = R(1),
+    minimum_gamma = R(1e-7),
+    reduce_gamma = R(0.5),
+) where {R}
     Ax = A * x
     f_Ax, cl = value_and_gradient_closure(f, Ax)
     grad_f_Ax = cl()
@@ -86,5 +96,6 @@ function backtrack_stepsize!(gamma, f, A, g, x; alpha = 1, minimum_gamma = 1e-7)
         grad_f_Ax;
         alpha = alpha,
         minimum_gamma = minimum_gamma,
+        reduce_gamma = reduce_gamma,
     )
 end
```

test/problems/test_lasso_small.jl

Lines changed: 10 additions & 2 deletions
```diff
@@ -68,7 +68,11 @@ using ProximalAlgorithms:
 @testset "ForwardBackward (adaptive step, regret)" begin
     x0 = zeros(T, n)
     x0_backup = copy(x0)
-    solver = ProximalAlgorithms.ForwardBackward(tol = TOL, adaptive = true, regret_gamma=R(1.01))
+    solver = ProximalAlgorithms.ForwardBackward(
+        tol = TOL,
+        adaptive = true,
+        increase_gamma = R(1.01),
+    )
     x, it = @inferred solver(x0 = x0, f = fA_autodiff, g = g)
     @test eltype(x) == T
     @test norm(x - x_star, Inf) <= TOL
@@ -101,7 +105,11 @@ using ProximalAlgorithms:
 @testset "FastForwardBackward (adaptive step, regret)" begin
     x0 = zeros(T, n)
     x0_backup = copy(x0)
-    solver = ProximalAlgorithms.FastForwardBackward(tol = TOL, adaptive = true, regret_gamma=R(1.01))
+    solver = ProximalAlgorithms.FastForwardBackward(
+        tol = TOL,
+        adaptive = true,
+        increase_gamma = R(1.01),
+    )
     x, it = @inferred solver(x0 = x0, f = fA_autodiff, g = g)
     @test eltype(x) == T
     @test norm(x - x_star, Inf) <= TOL
```

test/problems/test_lasso_small_strongly_convex.jl

Lines changed: 11 additions & 3 deletions
```diff
@@ -70,7 +70,7 @@ using ProximalAlgorithms
     @test it < 110
     @test x0 == x0_backup
 end
-
+
 @testset "ForwardBackward (adaptive step)" begin
     solver = ProximalAlgorithms.ForwardBackward(tol = TOL, adaptive = true)
     y, it = solver(x0 = x0, f = fA_autodiff, g = g)
@@ -81,7 +81,11 @@ using ProximalAlgorithms
 end

 @testset "ForwardBackward (adaptive step, regret)" begin
-    solver = ProximalAlgorithms.ForwardBackward(tol = TOL, adaptive = true, regret_gamma=T(1.01))
+    solver = ProximalAlgorithms.ForwardBackward(
+        tol = TOL,
+        adaptive = true,
+        increase_gamma = T(1.01),
+    )
     y, it = solver(x0 = x0, f = fA_autodiff, g = g)
     @test eltype(y) == T
     @test norm(y - x_star, Inf) <= TOL
@@ -108,7 +112,11 @@ using ProximalAlgorithms
 end

 @testset "FastForwardBackward (adaptive step, regret)" begin
-    solver = ProximalAlgorithms.FastForwardBackward(tol = TOL, adaptive = true, regret_gamma=T(1.01))
+    solver = ProximalAlgorithms.FastForwardBackward(
+        tol = TOL,
+        adaptive = true,
+        increase_gamma = T(1.01),
+    )
     y, it = solver(x0 = x0, f = fA_autodiff, g = g)
     @test eltype(y) == T
     @test norm(y - x_star, Inf) <= TOL
```

test/problems/test_sparse_logistic_small.jl

Lines changed: 10 additions & 2 deletions
```diff
@@ -49,7 +49,11 @@ using LinearAlgebra
 @testset "ForwardBackward (adaptive step, regret)" begin
     x0 = zeros(T, n)
     x0_backup = copy(x0)
-    solver = ProximalAlgorithms.ForwardBackward(tol = TOL, adaptive = true, regret_gamma=R(1.01))
+    solver = ProximalAlgorithms.ForwardBackward(
+        tol = TOL,
+        adaptive = true,
+        increase_gamma = R(1.01),
+    )
     x, it = solver(x0 = x0, f = fA_autodiff, g = g)
     @test eltype(x) == T
     @test norm(x - x_star, Inf) <= 1e-4
@@ -71,7 +75,11 @@ using LinearAlgebra
 @testset "FastForwardBackward (adaptive step, regret)" begin
     x0 = zeros(T, n)
     x0_backup = copy(x0)
-    solver = ProximalAlgorithms.FastForwardBackward(tol = TOL, adaptive = true, regret_gamma=R(1.01))
+    solver = ProximalAlgorithms.FastForwardBackward(
+        tol = TOL,
+        adaptive = true,
+        increase_gamma = R(1.01),
+    )
     x, it = solver(x0 = x0, f = fA_autodiff, g = g)
     @test eltype(x) == T
     @test norm(x - x_star, Inf) <= 1e-4
```

Comments: 0