Commit d1b988a

Merge pull request #65 from milancurcic/conv2d-forward-pass

Forward pass for the conv2d layer

2 parents 1c7b11f + 0929f49, commit d1b988a

17 files changed: +493 -105 lines

CMakeLists.txt

Lines changed: 2 additions & 2 deletions
@@ -68,6 +68,7 @@ add_library(neural
   src/nf_base_layer.f90
   src/nf_base_layer_submodule.f90
   src/nf_conv2d_layer.f90
+  src/nf_conv2d_layer_submodule.f90
   src/nf_datasets_mnist.f90
   src/nf_datasets_mnist_submodule.f90
   src/nf_dense_layer.f90
@@ -99,7 +100,7 @@ string(REGEX REPLACE "^ | $" "" LIBS "${LIBS}")
 
 # tests
 enable_testing()
-foreach(execid input1d_layer dense_layer dense_network)
+foreach(execid input1d_layer input3d_layer dense_layer conv2d_layer dense_network conv2d_network)
   add_executable(test_${execid} test/test_${execid}.f90)
   target_link_libraries(test_${execid} neural ${LIBS})
   add_test(test_${execid} bin/test_${execid})
@@ -108,5 +109,4 @@ endforeach()
 foreach(execid mnist simple sine)
   add_executable(${execid} example/${execid}.f90)
   target_link_libraries(${execid} neural ${LIBS})
-  #add_test(example_${execid} bin/example_${execid})
 endforeach()
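The test sources registered here (e.g. test_conv2d_layer) are among the files not shown on this page. A minimal sketch of what such a shape test could look like, assuming the `conv2d` and `input` constructors and the `get_output` generic introduced in this PR:

    program test_conv2d_layer_sketch
      ! Hedged sketch, not the actual test from this commit: build a conv2d
      ! layer on a 3 x 32 x 32 input and check the shape of its output.
      use nf, only: conv2d, input, layer
      implicit none
      type(layer) :: input_layer, conv_layer
      real, allocatable :: output(:,:,:)

      input_layer = input(3, 32, 32)  ! assumed rank-3 input constructor
      conv_layer = conv2d(filters=16, kernel_size=3)

      call conv_layer % init(input_layer)
      call conv_layer % get_output(output)

      ! A valid convolution of a 32 x 32 input with a 3 x 3 kernel yields
      ! 30 x 30 (width = 32 - 3 + 1), with one slice per filter.
      if (all(shape(output) == [16, 30, 30])) then
        print *, 'conv2d output shape: PASS'
      else
        print *, 'conv2d output shape: FAIL'
      end if
    end program test_conv2d_layer_sketch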

README.md

Lines changed: 8 additions & 0 deletions
@@ -21,6 +21,14 @@ Read the paper [here](https://arxiv.org/abs/1902.06714).
 * Data-based parallelism
 * Several activation functions
 
+### Available layer types
+
+| Layer type | Constructor name | Rank of output array | Forward pass | Backward pass |
+|------------|------------------|----------------------|--------------|---------------|
+| Input | `input` | 1, 3 | n/a | n/a |
+| Dense (fully-connected) | `dense` | 1 | ✓ | ✓ |
+| Convolutional (2-d) | `conv2d` | 3 | ✓ | ✗ |
+
 ## Getting started
 
 Get the code:
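The constructors in the new table map directly onto code. A minimal sketch of assembling layers into a network, assuming the `network` constructor takes an array of `layer` instances as in the existing dense examples:

    program convnet_sketch
      ! Hedged sketch: a rank-3 input feeding a conv2d layer. Only the
      ! forward pass is available for conv2d as of this commit.
      use nf, only: network, conv2d, input
      implicit none
      type(network) :: net
      net = network([ &
        input(3, 32, 32), &  ! assumed rank-3 input constructor
        conv2d(filters=16, kernel_size=3, activation='relu') &
      ])
    end program convnet_sketch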

fpm.toml

Lines changed: 1 addition & 1 deletion
@@ -1,5 +1,5 @@
 name = "neural-fortran"
-version = "0.3.0"
+version = "0.4.0"
 license = "MIT"
 author = "Milan Curcic"
 maintainer = "milancurcic@hey.com"

src/nf.f90

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 module nf
   use nf_datasets_mnist, only: label_digits, load_mnist
   use nf_layer, only: layer
-  use nf_layer_constructors, only: dense, input
+  use nf_layer_constructors, only: conv2d, dense, input
   use nf_network, only: network
 end module nf

src/nf_conv2d_layer.f90

Lines changed: 44 additions & 54 deletions
@@ -1,7 +1,6 @@
 module nf_conv2d_layer
 
-  !! This is a placeholder module that will later define a concrete conv2d
-  !! layer type.
+  !! This module provides a 2-d convolutional `conv2d_layer` type.
 
   use nf_base_layer, only: base_layer
   implicit none
@@ -14,12 +13,12 @@ module nf_conv2d_layer
     integer :: width
     integer :: height
     integer :: channels
-    integer :: window_size
+    integer :: kernel_size
     integer :: filters
 
-    real, allocatable :: biases(:) ! as many as there are filters
-    real, allocatable :: kernel(:,:,:,:)
-    real, allocatable :: output(:,:,:)
+    real, allocatable :: biases(:) ! size(filters)
+    real, allocatable :: kernel(:,:,:,:) ! filters x channels x window x window
+    real, allocatable :: output(:,:,:) ! filters x output_width x output_height
 
   contains
 
@@ -30,55 +29,46 @@ module nf_conv2d_layer
   end type conv2d_layer
 
   interface conv2d_layer
-    module procedure :: conv2d_layer_cons
+    pure module function conv2d_layer_cons(filters, kernel_size, activation) &
+      result(res)
+      !! `conv2d_layer` constructor function
+      integer, intent(in) :: filters
+      integer, intent(in) :: kernel_size
+      character(*), intent(in) :: activation
+      type(conv2d_layer) :: res
+    end function conv2d_layer_cons
   end interface conv2d_layer
 
-contains
-
-  pure function conv2d_layer_cons(window_size, filters, activation) result(res)
-    integer, intent(in) :: window_size
-    integer, intent(in) :: filters
-    character(*), intent(in) :: activation
-    type(conv2d_layer) :: res
-    res % window_size = window_size
-    res % filters = filters
-    call res % set_activation(activation)
-  end function conv2d_layer_cons
-
-
-  subroutine init(self, input_shape)
-    class(conv2d_layer), intent(in out) :: self
-    integer, intent(in) :: input_shape(:)
-
-    self % width = input_shape(1) - self % window_size + 1
-    self % height = input_shape(2) - self % window_size + 1
-    self % channels = input_shape(3)
-
-    allocate(self % output(self % width, self % height, self % filters))
-    self % output = 0
-
-    allocate(self % kernel(self % window_size, self % window_size, &
-      self % channels, self % filters))
-    self % kernel = 0 ! TODO 4-d randn
-
-    allocate(self % biases(self % filters))
-    self % biases = 0
-
-  end subroutine init
-
-
-  subroutine forward(self, input)
-    class(conv2d_layer), intent(in out) :: self
-    real, intent(in) :: input(:,:,:)
-    print *, 'Warning: conv2d forward pass not implemented'
-  end subroutine forward
-
-
-  subroutine backward(self, input, gradient)
-    class(conv2d_layer), intent(in out) :: self
-    real, intent(in) :: input(:,:,:)
-    real, intent(in) :: gradient(:,:,:)
-    print *, 'Warning: conv2d backward pass not implemented'
-  end subroutine backward
+  interface
+
+    module subroutine init(self, input_shape)
+      !! Initialize the layer data structures.
+      !!
+      !! This is a deferred procedure from the `base_layer` abstract type.
+      class(conv2d_layer), intent(in out) :: self
+        !! A `conv2d_layer` instance
+      integer, intent(in) :: input_shape(:)
+        !! Input layer dimensions
+    end subroutine init
+
+    pure module subroutine forward(self, input)
+      !! Apply a forward pass on the `conv2d` layer.
+      class(conv2d_layer), intent(in out) :: self
+        !! A `conv2d_layer` instance
+      real, intent(in) :: input(:,:,:)
+        !! Input data
+    end subroutine forward
+
+    module subroutine backward(self, input, gradient)
+      !! Apply a backward pass on the `conv2d` layer.
+      class(conv2d_layer), intent(in out) :: self
+        !! A `conv2d_layer` instance
+      real, intent(in) :: input(:,:,:)
+        !! Input data (previous layer)
+      real, intent(in) :: gradient(:,:,:)
+        !! Gradient (next layer)
+    end subroutine backward
+
+  end interface
 
 end module nf_conv2d_layer
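To make the declared shapes concrete, here is a hedged sketch (not part of this commit) that constructs and initializes the type directly:

    program conv2d_shapes
      ! Illustrates the allocations performed by init for a 1 x 28 x 28 input.
      use nf_conv2d_layer, only: conv2d_layer
      implicit none
      type(conv2d_layer) :: conv
      conv = conv2d_layer(filters=32, kernel_size=3, activation='sigmoid')
      call conv % init([1, 28, 28])  ! channels x width x height
      print *, shape(conv % kernel)  ! 32 1 3 3 (filters x channels x window x window)
      print *, shape(conv % biases)  ! 32
      print *, shape(conv % output)  ! 32 26 26 (since 28 - 3 + 1 = 26)
    end program conv2d_shapes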

src/nf_conv2d_layer_submodule.f90

Lines changed: 111 additions & 0 deletions
@@ -0,0 +1,111 @@
+submodule(nf_conv2d_layer) nf_conv2d_layer_submodule
+
+  use nf_random, only: randn
+
+  implicit none
+
+contains
+
+  pure module function conv2d_layer_cons(filters, kernel_size, activation) result(res)
+    implicit none
+    integer, intent(in) :: filters
+    integer, intent(in) :: kernel_size
+    character(*), intent(in) :: activation
+    type(conv2d_layer) :: res
+    res % kernel_size = kernel_size
+    res % filters = filters
+    call res % set_activation(activation)
+  end function conv2d_layer_cons
+
+
+  module subroutine init(self, input_shape)
+    implicit none
+    class(conv2d_layer), intent(in out) :: self
+    integer, intent(in) :: input_shape(:)
+
+    self % channels = input_shape(1)
+    self % width = input_shape(2) - self % kernel_size + 1
+    self % height = input_shape(3) - self % kernel_size + 1
+
+    ! Output of shape filters x width x height
+    allocate(self % output(self % filters, self % width, self % height))
+    self % output = 0
+
+    ! Kernel of shape filters x channels x width x height
+    allocate(self % kernel(self % filters, self % channels, &
+      self % kernel_size, self % kernel_size))
+
+    ! Initialize the kernel with random values with a normal distribution.
+    self % kernel = randn(self % filters, self % channels, &
+      self % kernel_size, self % kernel_size) &
+      / self % kernel_size**2 !TODO kernel_width * kernel_height
+
+    allocate(self % biases(self % filters))
+    self % biases = 0
+
+  end subroutine init
+
+
+  pure module subroutine forward(self, input)
+    implicit none
+    class(conv2d_layer), intent(in out) :: self
+    real, intent(in) :: input(:,:,:)
+    integer :: input_width, input_height, input_channels
+    integer :: istart, iend
+    integer :: jstart, jend
+    integer :: i, j, n
+    integer :: iws, iwe, jws, jwe
+    integer :: half_window
+
+    ! Input dimensions are channels x width x height
+    input_channels = size(input, dim=1)
+    input_width = size(input, dim=2)
+    input_height = size(input, dim=3)
+
+    ! Half-window is 1 for window size 3; 2 for window size 5; etc.
+    half_window = self % kernel_size / 2
+
+    ! Determine the start and end indices for the width and height dimensions
+    ! of the input that correspond to the center of each window.
+    istart = half_window + 1 ! TODO kernel_width
+    jstart = half_window + 1 ! TODO kernel_height
+    iend = input_width - istart + 1
+    jend = input_height - jstart + 1
+
+    convolution: do concurrent(i = istart:iend, j = jstart:jend)
+
+      ! Start and end indices of the input data on the filter window
+      ! iws and jws are also coincidentally the indices of the output matrix
+      iws = i - half_window ! TODO kernel_width
+      iwe = i + half_window ! TODO kernel_width
+      jws = j - half_window ! TODO kernel_height
+      jwe = j + half_window ! TODO kernel_height
+
+      ! This computes the inner tensor product, sum(w_ij * x_ij), for each
+      ! filter, and we add bias b_n to it.
+      inner_product: do concurrent(n = 1:self % filters)
+        self % output(n,iws,jws) = &
+          sum(self % kernel(n,:,:,:) * input(:,iws:iwe,jws:jwe)) &
+          + self % biases(n)
+      end do inner_product
+
+      ! TODO We may need to store self % output before we activate it for the
+      ! TODO backward pass, just like we do for the dense layer.
+
+      ! Activate
+      self % output(:,iws,jws) = self % activation(self % output(:,iws,jws))
+
+    end do convolution
+
+  end subroutine forward
+
+
+  module subroutine backward(self, input, gradient)
+    implicit none
+    class(conv2d_layer), intent(in out) :: self
+    real, intent(in) :: input(:,:,:)
+    real, intent(in) :: gradient(:,:,:)
+    print *, 'Warning: conv2d backward pass not implemented'
+  end subroutine backward
+
+end submodule nf_conv2d_layer_submodule
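The core of the forward pass above is a valid cross-correlation: each output element is sum(kernel * window) plus the filter's bias, then activated. A self-contained sketch of the same centered-window index arithmetic on a single channel and filter (array names x, w, b, y are hypothetical, not from this commit):

    program conv_window_sketch
      implicit none
      integer, parameter :: width = 5, kernel_size = 3
      integer :: i, j, iws, iwe, jws, jwe, half_window
      real :: x(width,width), w(kernel_size,kernel_size), b
      real :: y(width-kernel_size+1, width-kernel_size+1)

      call random_number(x)
      call random_number(w)
      b = 0.1
      half_window = kernel_size / 2  ! 1 for a 3 x 3 window

      ! (i, j) is the window center; (iws, jws) doubles as the output index,
      ! exactly as in the forward subroutine above.
      do concurrent(i = half_window+1:width-half_window, &
                    j = half_window+1:width-half_window)
        iws = i - half_window; iwe = i + half_window
        jws = j - half_window; jwe = j + half_window
        y(iws,jws) = sum(w * x(iws:iwe,jws:jwe)) + b
      end do

      print *, y  ! the (width - kernel_size + 1)**2 valid outputs
    end program conv_window_sketch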

src/nf_layer.f90

Lines changed: 18 additions & 3 deletions
@@ -26,11 +26,17 @@ module nf_layer
 
     procedure :: backward
     procedure :: forward
-    procedure :: get_output
     procedure :: init
     procedure :: print_info
     procedure :: update
 
+    ! Specific output subroutines for different array ranks,
+    ! available via generic `get_output`.
+    procedure, private :: get_output_1d
+    procedure, private :: get_output_3d
+
+    generic :: get_output => get_output_1d, get_output_3d
+
   end type layer
 
   interface
@@ -59,13 +65,22 @@ pure module subroutine forward(self, input)
       !! Input layer instance
     end subroutine forward
 
-    pure module subroutine get_output(self, output)
+    pure module subroutine get_output_1d(self, output)
       !! Returns the output values (activations) from this layer.
       class(layer), intent(in) :: self
         !! Layer instance
       real, allocatable, intent(out) :: output(:)
         !! Output values from this layer
-    end subroutine get_output
+    end subroutine get_output_1d
+
+    pure module subroutine get_output_3d(self, output)
+      !! Returns the output values (activations) from a layer with a 3-d output
+      !! (e.g. input3d, conv2d)
+      class(layer), intent(in) :: self
+        !! Layer instance
+      real, allocatable, intent(out) :: output(:,:,:)
+        !! Output values from this layer
+    end subroutine get_output_3d
 
     impure elemental module subroutine init(self, input)
       !! Initialize the layer, using information from the input layer,
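Because the two specifics differ only in the rank of `output`, the compiler resolves the generic at the call site. A hedged usage sketch (assuming the rank-1 `input` and `dense` constructors behave as in the existing examples):

    program generic_output_sketch
      use nf, only: dense, input, layer
      implicit none
      type(layer) :: input_layer, dense_layer
      real, allocatable :: out1(:)

      input_layer = input(4)  ! assumed rank-1 input constructor
      dense_layer = dense(3)
      call dense_layer % init(input_layer)

      call dense_layer % get_output(out1)  ! rank-1 actual -> get_output_1d
      print *, size(out1)                  ! 3
    end program generic_output_sketch

A rank-3 actual argument would resolve to `get_output_3d` instead, as in the conv2d test sketch earlier on this page.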

src/nf_layer_constructors.f90

Lines changed: 5 additions & 5 deletions
@@ -84,7 +84,7 @@ pure module function dense(layer_size, activation) result(res)
       !! Resulting layer instance
     end function dense
 
-    pure module function conv2d(window_size, filters, activation) result(res)
+    pure module function conv2d(filters, kernel_size, activation) result(res)
       !! 2-d convolutional layer constructor.
       !!
       !! This layer is for building 2-d convolutional networks.
@@ -98,13 +98,13 @@ pure module function conv2d(window_size, filters, activation) result(res)
       !! ```
       !! use nf, only: conv2d, layer
       !! type(layer) :: conv2d_layer
-      !! conv2d_layer = dense(window_size=3, filters=32)
-      !! conv2d_layer = dense(window_size=3, filters=32, activation='relu')
+      !! conv2d_layer = conv2d(filters=32, kernel_size=3)
+      !! conv2d_layer = conv2d(filters=32, kernel_size=3, activation='relu')
       !! ```
-      integer, intent(in) :: window_size
-        !! Width of the convolution window, commonly 3 or 5
       integer, intent(in) :: filters
        !! Number of filters in the output of the layer
+      integer, intent(in) :: kernel_size
+        !! Width of the convolution window, commonly 3 or 5
       character(*), intent(in), optional :: activation
         !! Activation function (default 'sigmoid')
       type(layer) :: res

src/nf_layer_constructors_submodule.f90

Lines changed: 3 additions & 3 deletions
@@ -51,9 +51,9 @@ pure module function dense(layer_size, activation) result(res)
     end function dense
 
 
-    pure module function conv2d(window_size, filters, activation) result(res)
-      integer, intent(in) :: window_size
+    pure module function conv2d(filters, kernel_size, activation) result(res)
       integer, intent(in) :: filters
+      integer, intent(in) :: kernel_size
       character(*), intent(in), optional :: activation
       type(layer) :: res
 
@@ -67,7 +67,7 @@ pure module function conv2d(window_size, filters, activation) result(res)
 
       allocate( &
        res % p, &
-        source=conv2d_layer(window_size, filters, res % activation) &
+        source=conv2d_layer(filters, kernel_size, res % activation) &
      )
 
     end function conv2d
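The sourced allocation here clones the polymorphic `p` component from a concrete `conv2d_layer` value; this is how the generic `layer` container holds any layer type. A generic, self-contained sketch of the pattern (the `base`/`concrete` types are hypothetical, not from this repo):

    program sourced_allocation_sketch
      implicit none

      type :: base
      end type base

      type, extends(base) :: concrete
        integer :: n = 42
      end type concrete

      class(base), allocatable :: p

      ! p takes its dynamic type and value from the source expression.
      allocate(p, source=concrete(n=7))

      select type (p)
        type is (concrete)
          print *, p % n  ! prints 7
      end select
    end program sourced_allocation_sketch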
