@@ -54,7 +54,7 @@ def __init__(
             use_act: bool = True,
             use_scale_branch: bool = True,
             num_conv_branches: int = 1,
-            act_layer: nn.Module = nn.GELU,
+            act_layer: Type[nn.Module] = nn.GELU,
     ) -> None:
         """Construct a MobileOneBlock module.

@@ -426,7 +426,7 @@ def _fuse_bn(
 def convolutional_stem(
         in_chs: int,
         out_chs: int,
-        act_layer: nn.Module = nn.GELU,
+        act_layer: Type[nn.Module] = nn.GELU,
         inference_mode: bool = False
 ) -> nn.Sequential:
     """Build convolutional stem with MobileOne blocks.
@@ -545,7 +545,7 @@ def __init__(
             stride: int,
             in_chs: int,
             embed_dim: int,
-            act_layer: nn.Module = nn.GELU,
+            act_layer: Type[nn.Module] = nn.GELU,
             lkc_use_act: bool = False,
             use_se: bool = False,
             inference_mode: bool = False,
@@ -718,7 +718,7 @@ def __init__(
             in_chs: int,
             hidden_channels: Optional[int] = None,
             out_chs: Optional[int] = None,
-            act_layer: nn.Module = nn.GELU,
+            act_layer: Type[nn.Module] = nn.GELU,
             drop: float = 0.0,
     ) -> None:
         """Build convolutional FFN module.
@@ -890,7 +890,7 @@ def __init__(
             dim: int,
             kernel_size: int = 3,
             mlp_ratio: float = 4.0,
-            act_layer: nn.Module = nn.GELU,
+            act_layer: Type[nn.Module] = nn.GELU,
             proj_drop: float = 0.0,
             drop_path: float = 0.0,
             layer_scale_init_value: float = 1e-5,
@@ -947,8 +947,8 @@ def __init__(
             self,
             dim: int,
             mlp_ratio: float = 4.0,
-            act_layer: nn.Module = nn.GELU,
-            norm_layer: nn.Module = nn.BatchNorm2d,
+            act_layer: Type[nn.Module] = nn.GELU,
+            norm_layer: Type[nn.Module] = nn.BatchNorm2d,
             proj_drop: float = 0.0,
             drop_path: float = 0.0,
             layer_scale_init_value: float = 1e-5,
@@ -1007,8 +1007,8 @@ def __init__(
             pos_emb_layer: Optional[nn.Module] = None,
             kernel_size: int = 3,
             mlp_ratio: float = 4.0,
-            act_layer: nn.Module = nn.GELU,
-            norm_layer: nn.Module = nn.BatchNorm2d,
+            act_layer: Type[nn.Module] = nn.GELU,
+            norm_layer: Type[nn.Module] = nn.BatchNorm2d,
             proj_drop_rate: float = 0.0,
             drop_path_rate: float = 0.0,
             layer_scale_init_value: Optional[float] = 1e-5,
@@ -1121,8 +1121,8 @@ def __init__(
             fork_feat: bool = False,
             cls_ratio: float = 2.0,
             global_pool: str = 'avg',
-            norm_layer: nn.Module = nn.BatchNorm2d,
-            act_layer: nn.Module = nn.GELU,
+            norm_layer: Type[nn.Module] = nn.BatchNorm2d,
+            act_layer: Type[nn.Module] = nn.GELU,
             inference_mode: bool = False,
     ) -> None:
         super().__init__()
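
Every hunk applies the same typing fix: `act_layer` (and `norm_layer`) receive an activation or norm *class* such as `nn.GELU` or `nn.BatchNorm2d`, which each block instantiates itself, so the accurate annotation is `Type[nn.Module]` rather than `nn.Module` (which would describe an already-constructed instance). The diff assumes `Type` is imported from `typing` at the top of the file. A minimal sketch of the pattern, with a hypothetical `ConvBlock` standing in for the modules touched here:

from typing import Type

import torch
import torch.nn as nn


class ConvBlock(nn.Module):
    """Hypothetical block illustrating the annotation; not part of the diff."""

    def __init__(self, dim: int, act_layer: Type[nn.Module] = nn.GELU) -> None:
        super().__init__()
        self.conv = nn.Conv2d(dim, dim, kernel_size=3, padding=1)
        # act_layer is the class itself and is instantiated here,
        # which is why Type[nn.Module] is the correct annotation.
        self.act = act_layer()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.act(self.conv(x))


# Any nn.Module subclass can be passed as the factory:
block = ConvBlock(64, act_layer=nn.ReLU)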