@@ -49,7 +49,7 @@ def forward(self, x):
 class SelectiveKernel(nn.Module):
 
     def __init__(self, in_channels, out_channels=None, kernel_size=None, stride=1, dilation=1, groups=1,
-                 rd_ratio=1./16, rd_channels=None, min_rd_channels=32, rd_divisor=8, keep_3x3=True, split_input=True,
+                 rd_ratio=1./16, rd_channels=None, rd_divisor=8, keep_3x3=True, split_input=True,
                  drop_block=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, aa_layer=None):
         """ Selective Kernel Convolution Module
 
@@ -68,7 +68,6 @@ def __init__(self, in_channels, out_channels=None, kernel_size=None, stride=1, d
             dilation (int): dilation for module as a whole, impacts dilation of each branch
             groups (int): number of groups for each branch
             rd_ratio (int, float): reduction factor for attention features
-            min_rd_channels (int): minimum attention feature channels
             keep_3x3 (bool): keep all branch convolution kernels as 3x3, changing larger kernels for dilations
             split_input (bool): split input channels evenly across each convolution branch, keeps param count lower,
                 can be viewed as grouping by path, output expands to module out_channels count
@@ -103,8 +102,7 @@ def __init__(self, in_channels, out_channels=None, kernel_size=None, stride=1, d
             ConvBnAct(in_channels, out_channels, kernel_size=k, dilation=d, **conv_kwargs)
             for k, d in zip(kernel_size, dilation)])
 
-        attn_channels = rd_channels or make_divisible(
-            out_channels * rd_ratio, min_value=min_rd_channels, divisor=rd_divisor)
+        attn_channels = rd_channels or make_divisible(out_channels * rd_ratio, divisor=rd_divisor)
         self.attn = SelectiveKernelAttn(out_channels, self.num_paths, attn_channels)
         self.drop_block = drop_block
 
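For reference, the practical effect of dropping min_rd_channels can be seen from how the attention width is now derived. The snippet below is a minimal sketch: it assumes make_divisible behaves like the usual timm rounding helper (round to the nearest multiple of divisor, never dropping below min_value, which defaults to divisor when not given), and the example channel counts are illustrative only.

# Sketch of the attention-channel computation before and after this change.
# make_divisible body is an assumption matching the common timm helper.
def make_divisible(v, divisor=8, min_value=None, round_limit=0.9):
    min_value = min_value or divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    if new_v < round_limit * v:  # don't round down by more than ~10%
        new_v += divisor
    return new_v

out_channels, rd_ratio = 64, 1. / 16  # hypothetical example values

# New behaviour: only the divisor floors the result -> 8 attention channels.
print(make_divisible(out_channels * rd_ratio, divisor=8))                # 8

# Old behaviour: min_rd_channels=32 forced at least 32 attention channels.
print(make_divisible(out_channels * rd_ratio, min_value=32, divisor=8))  # 32

So for narrow modules the attention bottleneck can now shrink below the previous 32-channel floor; callers who want the old behaviour can still pass rd_channels explicitly.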