@@ -8,19 +8,17 @@ import Flux: params
 include("Burgers_deeponet.jl")
 
 function register_burgers()
-    register(DataDep(
-        "Burgers",
-        """
-        Burgers' equation dataset from
-        [fourier_neural_operator](https://github.com/zongyi-li/fourier_neural_operator)
-        """,
-        "http://www.med.cgu.edu.tw/NeuralOperators/Burgers_R10.zip",
-        "9cbbe5070556c777b1ba3bacd49da5c36ea8ed138ba51b6ee76a24b971066ecd",
-        post_fetch_method=unpack
-    ))
+    register(DataDep("Burgers",
+                     """
+                     Burgers' equation dataset from
+                     [fourier_neural_operator](https://github.com/zongyi-li/fourier_neural_operator)
+                     """,
+                     "http://www.med.cgu.edu.tw/NeuralOperators/Burgers_R10.zip",
+                     "9cbbe5070556c777b1ba3bacd49da5c36ea8ed138ba51b6ee76a24b971066ecd",
+                     post_fetch_method = unpack))
 end
 
-function get_data(; n=2048, Δsamples=2^3, grid_size=div(2^13, Δsamples), T=Float32)
+function get_data(; n = 2048, Δsamples = 2^3, grid_size = div(2^13, Δsamples), T = Float32)
     file = matopen(joinpath(datadep"Burgers", "burgers_data_R10.mat"))
     x_data = T.(collect(read(file, "a")[1:n, 1:Δsamples:end]'))
     y_data = T.(collect(read(file, "u")[1:n, 1:Δsamples:end]'))
@@ -33,19 +31,19 @@ function get_data(; n=2048, Δsamples=2^3, grid_size=div(2^13, Δsamples), T=Flo
     return x_loc_data, reshape(y_data, 1, :, n)
 end
 
-function get_dataloader(; ratio::Float64=0.9, batchsize=100)
-    𝐱, 𝐲 = get_data(n=2048)
-    data_train, data_test = splitobs((𝐱, 𝐲), at=ratio)
+function get_dataloader(; ratio::Float64 = 0.9, batchsize = 100)
+    𝐱, 𝐲 = get_data(n = 2048)
+    data_train, data_test = splitobs((𝐱, 𝐲), at = ratio)
 
-    loader_train = DataLoader(data_train, batchsize=batchsize, shuffle=true)
-    loader_test = DataLoader(data_test, batchsize=batchsize, shuffle=false)
+    loader_train = DataLoader(data_train, batchsize = batchsize, shuffle = true)
+    loader_test = DataLoader(data_test, batchsize = batchsize, shuffle = false)
 
     return loader_train, loader_test
 end
 
 __init__() = register_burgers()
 
-function train(; cuda=true, η₀=1f-3, λ=1f-4, epochs=500)
+function train(; cuda = true, η₀ = 1.0f-3, λ = 1.0f-4, epochs = 500)
     if cuda && CUDA.has_cuda()
         device = gpu
         CUDA.allowscalar(false)
@@ -55,22 +53,21 @@ function train(; cuda=true, η₀=1f-3, λ=1f-4, epochs=500)
         @info "Training on CPU"
     end
 
-    model = FourierNeuralOperator(ch=(2, 64, 64, 64, 64, 64, 128, 1), modes=(16, ), σ=gelu)
+    model = FourierNeuralOperator(ch = (2, 64, 64, 64, 64, 64, 128, 1), modes = (16,),
+                                  σ = gelu)
     data = get_dataloader()
     optimiser = Flux.Optimiser(WeightDecay(λ), Flux.ADAM(η₀))
     loss_func = l₂loss
 
-    learner = Learner(
-        model, data, optimiser, loss_func,
-        ToDevice(device, device),
-    )
+    learner = Learner(model, data, optimiser, loss_func,
+                      ToDevice(device, device))
 
     fit!(learner, epochs)
 
     return learner
 end
 
-function train_nomad(; n=300, cuda=true, learning_rate=0.001, epochs=400)
+function train_nomad(; n = 300, cuda = true, learning_rate = 0.001, epochs = 400)
     if cuda && has_cuda()
         @info "Training on GPU"
         device = gpu
@@ -79,32 +76,31 @@ function train_nomad(; n=300, cuda=true, learning_rate=0.001, epochs=400)
         device = cpu
     end
 
-    x, y = get_data_don(n=n)
+    x, y = get_data_don(n = n)
 
     xtrain = x[1:280, :]'
     ytrain = y[1:280, :]
 
-    xval = x[end-19:end, :]' |> device
-    yval = y[end-19:end, :] |> device
+    xval = x[(end - 19):end, :]' |> device
+    yval = y[(end - 19):end, :] |> device
 
     # grid = collect(range(0, 1, length=1024)') |> device
     grid = rand(collect(0:0.001:1), (280, 1024)) |> device
     gridval = rand(collect(0:0.001:1), (20, 1024)) |> device
 
-
     opt = ADAM(learning_rate)
 
-    m = NOMAD((1024,1024), (2048,1024), gelu, gelu) |> device
+    m = NOMAD((1024, 1024), (2048, 1024), gelu, gelu) |> device
 
     loss(X, y, sensor) = Flux.Losses.mse(m(X, sensor), y)
     evalcb() = @show(loss(xval, yval, gridval))
 
     data = [(xtrain, ytrain, grid)] |> device
-    Flux.@epochs epochs Flux.train!(loss, params(m), data, opt, cb=evalcb)
+    Flux.@epochs epochs Flux.train!(loss, params(m), data, opt, cb = evalcb)
     ỹ = m(xval |> device, gridval |> device)
 
     diffvec = vec(abs.(cpu(yval) .- cpu(ỹ)))
-    mean_diff = sum(diffvec)/length(diffvec)
+    mean_diff = sum(diffvec) / length(diffvec)
     return mean_diff
 end
 
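For reference, a minimal usage sketch of the two training entry points touched by this diff; it assumes the example is loaded as a module named Burgers, and the cuda = false flags and reduced epoch counts are illustrative choices rather than values from the diff:

using Burgers  # assumed module name for the example package shown above

# Build the train/test DataLoaders; DataDeps fetches Burgers_R10 on first use.
loader_train, loader_test = Burgers.get_dataloader(ratio = 0.9, batchsize = 100)

# Fit the Fourier neural operator (pass cuda = true to train on a GPU instead).
learner = Burgers.train(cuda = false, epochs = 10)

# Fit the NOMAD model and return the mean absolute error on the validation split.
mean_diff = Burgers.train_nomad(cuda = false, epochs = 20)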