66
77contains
88
!> Constructor for a 2-d convolutional layer.
!> Returns a conv2d_layer with the requested number of filters and
!> (square) kernel size, and with the named activation function set.
!> Kernel weights and biases are allocated later, in init().
pure module function conv2d_layer_cons(filters, kernel_size, activation) result(res)
  implicit none
  integer, intent(in) :: filters      !! number of output filters (channels)
  integer, intent(in) :: kernel_size  !! width and height of the square kernel window
  character(*), intent(in) :: activation  !! name of the activation function
  type(conv2d_layer) :: res
  res % kernel_size = kernel_size
  res % filters = filters
  call res % set_activation(activation)
end function conv2d_layer_cons
@@ -24,21 +24,21 @@ module subroutine init(self, input_shape)
2424 integer , intent (in ) :: input_shape(:)
2525
2626 self % channels = input_shape(1 )
27- self % width = input_shape(2 ) - self % window_size + 1
28- self % height = input_shape(3 ) - self % window_size + 1
27+ self % width = input_shape(2 ) - self % kernel_size + 1
28+ self % height = input_shape(3 ) - self % kernel_size + 1
2929
3030 ! Output of shape filters x width x height
3131 allocate (self % output(self % filters, self % width, self % height))
3232 self % output = 0
3333
3434 ! Kernel of shape filters x channels x width x height
3535 allocate (self % kernel(self % filters, self % channels, &
36- self % window_size , self % window_size ))
36+ self % kernel_size , self % kernel_size ))
3737
3838 ! Initialize the kernel with random values with a normal distribution.
3939 self % kernel = randn(self % filters, self % channels, &
40- self % window_size , self % window_size ) &
41- / self % window_size ** 2 ! TODO window_width * window_height
40+ self % kernel_size , self % kernel_size ) &
41+ / self % kernel_size ** 2 ! TODO kernel_width * kernel_height
4242
4343 allocate (self % biases(self % filters))
4444 self % biases = 0
@@ -54,7 +54,6 @@ pure module subroutine forward(self, input)
5454 integer :: istart, iend
5555 integer :: jstart, jend
5656 integer :: i, j, n
57- integer :: ii, jj
5857 integer :: iws, iwe, jws, jwe
5958 integer :: half_window
6059
@@ -64,23 +63,23 @@ pure module subroutine forward(self, input)
6463 input_height = size (input, dim= 3 )
6564
6665 ! Half-window is 1 for window size 3; 2 for window size 5; etc.
67- half_window = self % window_size / 2
66+ half_window = self % kernel_size / 2
6867
6968 ! Determine the start and end indices for the width and height dimensions
7069 ! of the input that correspond to the center of each window.
71- istart = half_window + 1 ! TODO window_width
72- jstart = half_window + 1 ! TODO window_height
70+ istart = half_window + 1 ! TODO kernel_width
71+ jstart = half_window + 1 ! TODO kernel_height
7372 iend = input_width - istart + 1
7473 jend = input_height - jstart + 1
7574
7675 convolution: do concurrent(i = istart:iend, j = jstart:jend)
7776
7877 ! Start and end indices of the input data on the filter window
7978 ! iws and jws are also coincidentally the indices of the output matrix
80- iws = i - half_window ! TODO window_width
81- iwe = i + half_window ! TODO window_width
82- jws = j - half_window ! TODO window_height
83- jwe = j + half_window ! TODO window_height
79+ iws = i - half_window ! TODO kernel_width
80+ iwe = i + half_window ! TODO kernel_width
81+ jws = j - half_window ! TODO kernel_height
82+ jwe = j + half_window ! TODO kernel_height
8483
8584 ! This computes the inner tensor product, sum(w_ij * x_ij), for each
8685 ! filter, and we add bias b_n to it.
0 commit comments