Skip to content

Commit dd45cb3

Browse files
committed
Get up-to-date with main branch
2 parents 14a7b20 + d1b988a commit dd45cb3

11 files changed

+150
-50
lines changed

CMakeLists.txt

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -102,7 +102,7 @@ string(REGEX REPLACE "^ | $" "" LIBS "${LIBS}")
102102

103103
# tests
104104
enable_testing()
105-
foreach(execid input1d_layer input3d_layer dense_layer conv2d_layer maxpool2d_layer dense_network)
105+
foreach(execid input1d_layer input3d_layer dense_layer conv2d_layer maxpool2d_layer dense_network conv2d_network)
106106
add_executable(test_${execid} test/test_${execid}.f90)
107107
target_link_libraries(test_${execid} neural ${LIBS})
108108
add_test(test_${execid} bin/test_${execid})
@@ -111,5 +111,4 @@ endforeach()
111111
foreach(execid mnist simple sine)
112112
add_executable(${execid} example/${execid}.f90)
113113
target_link_libraries(${execid} neural ${LIBS})
114-
#add_test(example_${execid} bin/example_${execid})
115114
endforeach()

README.md

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,14 @@ Read the paper [here](https://arxiv.org/abs/1902.06714).
2121
* Data-based parallelism
2222
* Several activation functions
2323

24+
### Available layer types
25+
26+
| Layer type | Constructor name | Rank of output array | Forward pass | Backward pass |
27+
|------------|------------------|----------------------|--------------|---------------|
28+
| Input | `input` | 1, 3 | n/a | n/a |
29+
| Dense (fully-connected) | `dense` | 1 | ✅ | ✅ |
30+
| Convolutional (2-d) | `conv2d` | 3 | ✅ | ❌ |
31+
2432
## Getting started
2533

2634
Get the code:
@@ -172,6 +180,10 @@ Most Linux OSs have it out of the box.
172180
The dataset will be downloaded only the first time you run the example in any
173181
given directory.
174182

183+
If you're using Windows OS or don't have curl for any other reason,
184+
download [mnist.tar.gz](https://github.com/modern-fortran/neural-fortran/files/8498876/mnist.tar.gz)
185+
directly and unpack in the directory in which you will run the example program.
186+
175187
## API documentation
176188

177189
API documentation can be generated with [FORD](https://github.com/Fortran-FOSS-Programmers/ford/).

fpm.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
name = "neural-fortran"
2-
version = "0.3.0"
2+
version = "0.4.0"
33
license = "MIT"
44
author = "Milan Curcic"
55
maintainer = "milancurcic@hey.com"

src/nf_conv2d_layer.f90

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@ module nf_conv2d_layer
1313
integer :: width
1414
integer :: height
1515
integer :: channels
16-
integer :: window_size
16+
integer :: kernel_size
1717
integer :: filters
1818

1919
real, allocatable :: biases(:) ! size(filters)
@@ -29,10 +29,11 @@ module nf_conv2d_layer
2929
end type conv2d_layer
3030

3131
interface conv2d_layer
32-
pure module function conv2d_layer_cons(window_size, filters, activation) result(res)
32+
pure module function conv2d_layer_cons(filters, kernel_size, activation) &
33+
result(res)
3334
!! `conv2d_layer` constructor function
34-
integer, intent(in) :: window_size
3535
integer, intent(in) :: filters
36+
integer, intent(in) :: kernel_size
3637
character(*), intent(in) :: activation
3738
type(conv2d_layer) :: res
3839
end function conv2d_layer_cons

src/nf_conv2d_layer_submodule.f90

Lines changed: 19 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -6,13 +6,13 @@
66

77
contains
88

9-
pure module function conv2d_layer_cons(window_size, filters, activation) result(res)
9+
pure module function conv2d_layer_cons(filters, kernel_size, activation) result(res)
1010
implicit none
11-
integer, intent(in) :: window_size
1211
integer, intent(in) :: filters
12+
integer, intent(in) :: kernel_size
1313
character(*), intent(in) :: activation
1414
type(conv2d_layer) :: res
15-
res % window_size = window_size
15+
res % kernel_size = kernel_size
1616
res % filters = filters
1717
call res % set_activation(activation)
1818
end function conv2d_layer_cons
@@ -24,21 +24,21 @@ module subroutine init(self, input_shape)
2424
integer, intent(in) :: input_shape(:)
2525

2626
self % channels = input_shape(1)
27-
self % width = input_shape(2) - self % window_size + 1
28-
self % height = input_shape(3) - self % window_size + 1
27+
self % width = input_shape(2) - self % kernel_size + 1
28+
self % height = input_shape(3) - self % kernel_size + 1
2929

3030
! Output of shape filters x width x height
3131
allocate(self % output(self % filters, self % width, self % height))
3232
self % output = 0
3333

3434
! Kernel of shape filters x channels x width x height
3535
allocate(self % kernel(self % filters, self % channels, &
36-
self % window_size, self % window_size))
36+
self % kernel_size, self % kernel_size))
3737

3838
! Initialize the kernel with random values with a normal distribution.
3939
self % kernel = randn(self % filters, self % channels, &
40-
self % window_size, self % window_size) &
41-
/ self % window_size**2 !TODO window_width * window_height
40+
self % kernel_size, self % kernel_size) &
41+
/ self % kernel_size**2 !TODO kernel_width * kernel_height
4242

4343
allocate(self % biases(self % filters))
4444
self % biases = 0
@@ -54,7 +54,6 @@ pure module subroutine forward(self, input)
5454
integer :: istart, iend
5555
integer :: jstart, jend
5656
integer :: i, j, n
57-
integer :: ii, jj
5857
integer :: iws, iwe, jws, jwe
5958
integer :: half_window
6059

@@ -64,34 +63,34 @@ pure module subroutine forward(self, input)
6463
input_height = size(input, dim=3)
6564

6665
! Half-window is 1 for kernel size 3; 2 for kernel size 5; etc.
67-
half_window = self % window_size / 2
66+
half_window = self % kernel_size / 2
6867

6968
! Determine the start and end indices for the width and height dimensions
7069
! of the input that correspond to the center of each window.
71-
istart = half_window + 1 ! TODO window_width
72-
jstart = half_window + 1 ! TODO window_height
70+
istart = half_window + 1 ! TODO kernel_width
71+
jstart = half_window + 1 ! TODO kernel_height
7372
iend = input_width - istart + 1
7473
jend = input_height - jstart + 1
7574

7675
convolution: do concurrent(i = istart:iend, j = jstart:jend)
7776

7877
! Start and end indices of the input data on the filter window
7978
! iws and jws are also coincidentally the indices of the output matrix
80-
iws = i - half_window ! TODO window_width
81-
iwe = i + half_window ! TODO window_width
82-
jws = j - half_window ! TODO window_height
83-
jwe = j + half_window ! TODO window_height
79+
iws = i - half_window ! TODO kernel_width
80+
iwe = i + half_window ! TODO kernel_width
81+
jws = j - half_window ! TODO kernel_height
82+
jwe = j + half_window ! TODO kernel_height
8483

85-
! This computes the inner tensor product, sum(w_ij * x_ij), for each filter,
86-
! and we add bias b_n to it.
84+
! This computes the inner tensor product, sum(w_ij * x_ij), for each
85+
! filter, and we add bias b_n to it.
8786
inner_product: do concurrent(n = 1:self % filters)
8887
self % output(n,iws,jws) = &
8988
sum(self % kernel(n,:,:,:) * input(:,iws:iwe,jws:jwe)) &
9089
+ self % biases(n)
9190
end do inner_product
9291

93-
! TODO We may need to store self % output before we activate it for the backward
94-
! TODO pass, just like we do for the dense layer.
92+
! TODO We may need to store self % output before we activate it for the
93+
! TODO backward pass, just like we do for the dense layer.
9594

9695
! Activate
9796
self % output(:,iws,jws) = self % activation(self % output(:,iws,jws))

src/nf_layer_constructors.f90

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -84,7 +84,7 @@ pure module function dense(layer_size, activation) result(res)
8484
!! Resulting layer instance
8585
end function dense
8686

87-
pure module function conv2d(window_size, filters, activation) result(res)
87+
pure module function conv2d(filters, kernel_size, activation) result(res)
8888
!! 2-d convolutional layer constructor.
8989
!!
9090
!! This layer is for building 2-d convolutional network.
@@ -98,13 +98,13 @@ pure module function conv2d(window_size, filters, activation) result(res)
9898
!! ```
9999
!! use nf, only :: conv2d, layer
100100
!! type(layer) :: conv2d_layer
101-
!! conv2d_layer = dense(window_size=3, filters=32)
102-
!! conv2d_layer = dense(window_size=3, filters=32, activation='relu')
101+
!! conv2d_layer = conv2d(filters=32, kernel_size=3)
102+
!! conv2d_layer = conv2d(filters=32, kernel_size=3, activation='relu')
103103
!! ```
104-
integer, intent(in) :: window_size
105-
!! Width of the convolution window, commonly 3 or 5
106104
integer, intent(in) :: filters
107105
!! Number of filters in the output of the layer
106+
integer, intent(in) :: kernel_size
107+
!! Width of the convolution window, commonly 3 or 5
108108
character(*), intent(in), optional :: activation
109109
!! Activation function (default 'sigmoid')
110110
type(layer) :: res

src/nf_layer_constructors_submodule.f90

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -52,9 +52,9 @@ pure module function dense(layer_size, activation) result(res)
5252
end function dense
5353

5454

55-
pure module function conv2d(window_size, filters, activation) result(res)
56-
integer, intent(in) :: window_size
55+
pure module function conv2d(filters, kernel_size, activation) result(res)
5756
integer, intent(in) :: filters
57+
integer, intent(in) :: kernel_size
5858
character(*), intent(in), optional :: activation
5959
type(layer) :: res
6060

@@ -68,7 +68,7 @@ pure module function conv2d(window_size, filters, activation) result(res)
6868

6969
allocate( &
7070
res % p, &
71-
source=conv2d_layer(window_size, filters, res % activation) &
71+
source=conv2d_layer(filters, kernel_size, res % activation) &
7272
)
7373

7474
end function conv2d

src/nf_network.f90

Lines changed: 40 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -11,14 +11,22 @@ module nf_network
1111
public :: network
1212

1313
type :: network
14+
1415
type(layer), allocatable :: layers(:)
16+
1517
contains
18+
1619
procedure :: backward
17-
procedure :: forward
1820
procedure :: output
1921
procedure :: print_info
2022
procedure :: train
2123
procedure :: update
24+
25+
procedure, private :: forward_1d
26+
procedure, private :: forward_3d
27+
28+
generic :: forward => forward_1d, forward_3d
29+
2230
end type network
2331

2432
interface network
@@ -32,29 +40,50 @@ module function network_cons(layers) result(res)
3240
end function network_cons
3341
end interface network
3442

35-
interface
43+
interface forward
3644

37-
pure module subroutine backward(self, output)
38-
!! Apply one backward pass through the network.
45+
pure module subroutine forward_1d(self, input)
46+
!! Apply a forward pass through the network.
47+
!!
3948
!! This changes the state of layers on the network.
4049
!! Typically used only internally from the `train` method,
4150
!! but can be invoked by the user when creating custom optimizers.
51+
!!
52+
!! This specific subroutine is for 1-d input data.
4253
class(network), intent(in out) :: self
4354
!! Network instance
44-
real, intent(in) :: output(:)
45-
!! Output data
46-
end subroutine backward
55+
real, intent(in) :: input(:)
56+
!! 1-d input data
57+
end subroutine forward_1d
4758

48-
pure module subroutine forward(self, input)
59+
pure module subroutine forward_3d(self, input)
4960
!! Apply a forward pass through the network.
61+
!!
5062
!! This changes the state of layers on the network.
5163
!! Typically used only internally from the `train` method,
5264
!! but can be invoked by the user when creating custom optimizers.
65+
!!
66+
!! This specific subroutine is for 3-d input data.
5367
class(network), intent(in out) :: self
5468
!! Network instance
55-
real, intent(in) :: input(:)
56-
!! Input data
57-
end subroutine forward
69+
real, intent(in) :: input(:,:,:)
70+
!! 3-d input data
71+
end subroutine forward_3d
72+
73+
end interface forward
74+
75+
interface
76+
77+
pure module subroutine backward(self, output)
78+
!! Apply one backward pass through the network.
79+
!! This changes the state of layers on the network.
80+
!! Typically used only internally from the `train` method,
81+
!! but can be invoked by the user when creating custom optimizers.
82+
class(network), intent(in out) :: self
83+
!! Network instance
84+
real, intent(in) :: output(:)
85+
!! Output data
86+
end subroutine backward
5887

5988
module function output(self, input) result(res)
6089
!! Return the output of the network given the input array.

src/nf_network_submodule.f90

Lines changed: 20 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22

33
use nf_dense_layer, only: dense_layer
44
use nf_input1d_layer, only: input1d_layer
5+
use nf_input3d_layer, only: input3d_layer
56
use nf_layer, only: layer
67
use nf_loss, only: quadratic_derivative
78
use nf_optimizers, only: sgd
@@ -80,7 +81,7 @@ pure module subroutine backward(self, output)
8081
end subroutine backward
8182

8283

83-
pure module subroutine forward(self, input)
84+
pure module subroutine forward_1d(self, input)
8485
class(network), intent(in out) :: self
8586
real, intent(in) :: input(:)
8687
integer :: n
@@ -94,7 +95,24 @@ pure module subroutine forward(self, input)
9495
call self % layers(n) % forward(self % layers(n - 1))
9596
end do
9697

97-
end subroutine forward
98+
end subroutine forward_1d
99+
100+
101+
pure module subroutine forward_3d(self, input)
102+
class(network), intent(in out) :: self
103+
real, intent(in) :: input(:,:,:)
104+
integer :: n
105+
106+
! Set the input array into the input layer
107+
select type(input_layer => self % layers(1) % p); type is(input3d_layer)
108+
call input_layer % set(input)
109+
end select
110+
111+
do n = 2, size(self % layers)
112+
call self % layers(n) % forward(self % layers(n - 1))
113+
end do
114+
115+
end subroutine forward_3d
98116

99117

100118
module function output(self, input) result(res)

test/test_conv2d_layer.f90

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -7,12 +7,12 @@ program test_conv2d_layer
77
implicit none
88

99
type(layer) :: conv_layer, input_layer
10-
integer, parameter :: window_size = 3, filters = 32
10+
integer, parameter :: filters = 32, kernel_size = 3
1111
real, allocatable :: sample_input(:,:,:), output(:,:,:)
1212
real, parameter :: tolerance = 1e-7
1313
logical :: ok = .true.
1414

15-
conv_layer = conv2d(window_size, filters)
15+
conv_layer = conv2d(filters, kernel_size)
1616

1717
if (.not. conv_layer % name == 'conv2d') then
1818
ok = .false.
@@ -52,7 +52,7 @@ program test_conv2d_layer
5252
sample_input = 0
5353

5454
input_layer = input([1, 3, 3])
55-
conv_layer = conv2d(window_size, filters)
55+
conv_layer = conv2d(filters, kernel_size)
5656
call conv_layer % init(input_layer)
5757

5858
select type(this_layer => input_layer % p); type is(input3d_layer)

0 commit comments

Comments
 (0)