This repository was archived by the owner on Sep 28, 2024. It is now read-only.

Commit 98cda1c: Apply format
Parent: 9fddf63
File tree: 4 files changed, 32 additions & 30 deletions


example/Burgers/src/Burgers.jl: 7 additions & 8 deletions

@@ -67,7 +67,7 @@ function train(; cuda = true, η₀ = 1.0f-3, λ = 1.0f-4, epochs = 500)
     return learner
 end

-function train_nomad(; n=300, cuda=true, learning_rate=0.001, epochs=400)
+function train_nomad(; n = 300, cuda = true, learning_rate = 0.001, epochs = 400)
     if cuda && has_cuda()
         @info "Training on GPU"
         device = gpu
@@ -76,32 +76,31 @@ function train_nomad(; n=300, cuda=true, learning_rate=0.001, epochs=400)
         device = cpu
     end

-    x, y = get_data_don(n=n)
+    x, y = get_data_don(n = n)

     xtrain = x[1:280, :]'
     ytrain = y[1:280, :]

-    xval = x[end-19:end, :]' |> device
-    yval = y[end-19:end, :] |> device
+    xval = x[(end - 19):end, :]' |> device
+    yval = y[(end - 19):end, :] |> device

     # grid = collect(range(0, 1, length=1024)') |> device
     grid = rand(collect(0:0.001:1), (280, 1024)) |> device
     gridval = rand(collect(0:0.001:1), (20, 1024)) |> device

-
     opt = ADAM(learning_rate)

-    m = NOMAD((1024,1024), (2048,1024), gelu, gelu) |> device
+    m = NOMAD((1024, 1024), (2048, 1024), gelu, gelu) |> device

     loss(X, y, sensor) = Flux.Losses.mse(m(X, sensor), y)
     evalcb() = @show(loss(xval, yval, gridval))

     data = [(xtrain, ytrain, grid)] |> device
-    Flux.@epochs epochs Flux.train!(loss, params(m), data, opt, cb=evalcb)
+    Flux.@epochs epochs Flux.train!(loss, params(m), data, opt, cb = evalcb)
     ỹ = m(xval |> device, gridval |> device)

     diffvec = vec(abs.(cpu(yval) .- cpu(ỹ)))
-    mean_diff = sum(diffvec)/length(diffvec)
+    mean_diff = sum(diffvec) / length(diffvec)
     return mean_diff
 end
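For reference, the reformatted train_nomad entry point is keyword-only and returns the mean absolute validation error. A minimal usage sketch, assuming the Burgers example project above is activated and the Burgers' equation dataset used by get_data_don is available locally, might look like:

using Burgers

# Short CPU run just to exercise the function; the return value is the mean
# absolute error of the trained NOMAD on the 20-sample validation split.
err = Burgers.train_nomad(; n = 300, cuda = false, learning_rate = 0.001, epochs = 10)
@show err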

example/Burgers/test/runtests.jl: 2 additions & 2 deletions

@@ -15,6 +15,6 @@ using Test
 end

 @testset "Burger: NOMAD Training Accuracy" begin
-    ϵ = Burgers.train_nomad(; cuda=true, epochs=100)
+    ϵ = Burgers.train_nomad(; cuda = true, epochs = 100)
     @test ϵ < 0.4 # epoch=100 returns 0.233
-end
+end

src/NOMAD.jl: 10 additions & 10 deletions

@@ -49,18 +49,18 @@ Approximator net: (Chain(Dense(2 => 128), Dense(128 => 64)))
 Decoder net: (Chain(Dense(72 => 24), Dense(24 => 12)))
 """
 function NOMAD(architecture_approximator::Tuple, architecture_decoder::Tuple,
-               act_approximator = identity, act_decoder=true;
-               init_approximator = Flux.glorot_uniform,
-               init_decoder = Flux.glorot_uniform,
-               bias_approximator=true, bias_decoder=true)
-
+               act_approximator = identity, act_decoder = true;
+               init_approximator = Flux.glorot_uniform,
+               init_decoder = Flux.glorot_uniform,
+               bias_approximator = true, bias_decoder = true)
     approximator_net = construct_subnet(architecture_approximator, act_approximator;
-                                        init=init_approximator, bias=bias_approximator)
+                                        init = init_approximator, bias = bias_approximator)

     decoder_net = construct_subnet(architecture_decoder, act_decoder;
-                                   init=init_decoder, bias=bias_decoder)
+                                   init = init_decoder, bias = bias_decoder)

-    return NOMAD{typeof(approximator_net), typeof(decoder_net)}(approximator_net, decoder_net)
+    return NOMAD{typeof(approximator_net), typeof(decoder_net)}(approximator_net,
+                                                                decoder_net)
 end

 Flux.@functor NOMAD
@@ -69,12 +69,12 @@ function (a::NOMAD)(x::AbstractArray, y::AbstractVecOrMat)
     # Assign the parameters
     approximator, decoder = a.approximator_net, a.decoder_net

-    return decoder(cat(approximator(x), y', dims=1))'
+    return decoder(cat(approximator(x), y', dims = 1))'
 end

 # Print nicely
 function Base.show(io::IO, l::NOMAD)
-    print(io, "NOMAD with\nApproximator net: (",l.approximator_net)
+    print(io, "NOMAD with\nApproximator net: (", l.approximator_net)
     print(io, ")\n")
     print(io, "Decoder net: (", l.decoder_net)
     print(io, ")\n")
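The changes above only reflow the NOMAD constructor, forward pass, and show method; behaviour is unchanged. As a sketch of how this API is exercised, mirroring the shapes used in test/nomad.jl below and assuming Flux and the package defining NOMAD are both loaded:

using Flux   # provides σ and glorot_normal; NOMAD is assumed to be in scope from this package

# Approximator maps the 16 sensor values through 16 -> 22 -> 16; the decoder
# then sees those 16 outputs concatenated with the 16 sensor locations,
# i.e. a 32 -> 16 map.
model = NOMAD((16, 22, 16), (32, 16), σ, tanh;
              init_approximator = Flux.glorot_normal, bias_decoder = false)

a = rand(16)                                   # function values at the sensors
sensors = collect(range(0, 1, length = 16)')   # 1×16 evaluation grid
y = model(a, sensors)                          # decoder(cat(approximator(a), sensors', dims = 1))'
size(y)                                        # (1, 16)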

test/nomad.jl: 13 additions & 10 deletions

@@ -1,28 +1,31 @@
 @testset "NOMAD" begin
     @testset "proper construction" begin
-        nomad = NOMAD((32,64,72), (24,48,72), σ, tanh)
+        nomad = NOMAD((32, 64, 72), (24, 48, 72), σ, tanh)
         # approximator net
-        @test size(nomad.approximator_net.layers[end].weight) == (72,64)
+        @test size(nomad.approximator_net.layers[end].weight) == (72, 64)
         @test size(nomad.approximator_net.layers[end].bias) == (72,)
         # decoder net
-        @test size(nomad.decoder_net.layers[end].weight) == (72,48)
+        @test size(nomad.decoder_net.layers[end].weight) == (72, 48)
         @test size(nomad.decoder_net.layers[end].bias) == (72,)
     end

     # Accept only Int as architecture parameters
-    @test_throws MethodError NOMAD((32.5,64,72), (24,48,72), σ, tanh)
-    @test_throws MethodError NOMAD((32,64,72), (24.1,48,72))
+    @test_throws MethodError NOMAD((32.5, 64, 72), (24, 48, 72), σ, tanh)
+    @test_throws MethodError NOMAD((32, 64, 72), (24.1, 48, 72))

     # Just the first 16 datapoints from the Burgers' equation dataset
     a = [0.83541104, 0.83479851, 0.83404712, 0.83315711, 0.83212979, 0.83096755,
-        0.82967374, 0.82825263, 0.82670928, 0.82504949, 0.82327962, 0.82140651,
-        0.81943734, 0.81737952, 0.8152405, 0.81302771]
-    sensors = collect(range(0, 1, length=16)')
-    model = NOMAD((length(a), 22, length(a)), (length(a) + length(sensors), length(sensors)), σ, tanh; init_approximator=Flux.glorot_normal, bias_decoder=false)
+         0.82967374, 0.82825263, 0.82670928, 0.82504949, 0.82327962, 0.82140651,
+         0.81943734, 0.81737952, 0.8152405, 0.81302771]
+    sensors = collect(range(0, 1, length = 16)')
+    model = NOMAD((length(a), 22, length(a)),
+                  (length(a) + length(sensors), length(sensors)), σ, tanh;
+                  init_approximator = Flux.glorot_normal, bias_decoder = false)
     y = model(a, sensors)
     @test size(y) == (1, 16)
     # Check if model description is printed, when defined
-    @test repr(model) == "NOMAD with\nApproximator net: (Chain(Dense(16 => 22, σ), Dense(22 => 16, σ)))\nDecoder net: (Chain(Dense(32 => 16, tanh; bias=false)))\n"
+    @test repr(model) ==
+          "NOMAD with\nApproximator net: (Chain(Dense(16 => 22, σ), Dense(22 => 16, σ)))\nDecoder net: (Chain(Dense(32 => 16, tanh; bias=false)))\n"

     mgrad = Flux.Zygote.gradient(() -> sum(model(a, sensors)), Flux.params(model))
     @info mgrad.grads
