@@ -80,6 +80,7 @@ def __init__(
             in_chans: int = 3,
             stem_size: int = 32,
             fix_stem: bool = False,
+            stem_kernel_size: int = 3,
             output_stride: int = 32,
             pad_type: str = '',
             act_layer: Optional[LayerType] = None,
@@ -104,7 +105,7 @@ def __init__(
         # Stem
         if not fix_stem:
             stem_size = round_chs_fn(stem_size)
-        self.conv_stem = create_conv2d(in_chans, stem_size, 3, stride=2, padding=pad_type)
+        self.conv_stem = create_conv2d(in_chans, stem_size, stem_kernel_size, stride=2, padding=pad_type)
         self.bn1 = norm_act_layer(stem_size, inplace=True)

         # Middle stages (IR/ER/DS Blocks)
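
The new stem_kernel_size argument replaces the hard-coded 3x3 stem above. A minimal sketch of what it controls, assuming timm's create_conv2d helper and its default ('') same-style padding (the import path differs in older timm releases):

    from timm.layers import create_conv2d  # older releases: from timm.models.layers import create_conv2d

    # 3x3 stride-2 stem (the previous fixed behaviour) vs. the 5x5 stem used by the EdgeTPU-v2 S/M variants below
    stem_3x3 = create_conv2d(3, 32, 3, stride=2, padding='')
    stem_5x5 = create_conv2d(3, 64, 5, stride=2, padding='')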
@@ -478,6 +479,34 @@ def _gen_mnasnet_small(variant, channel_multiplier=1.0, pretrained=False, **kwar
     return model


+def _gen_mobilenet_v1(
+        variant, channel_multiplier=1.0, depth_multiplier=1.0, fix_stem_head=False, pretrained=False, **kwargs):
+    """ Generate MobileNet-V1 network
+    Ref impl: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.py
+    Paper: https://arxiv.org/abs/1704.04861
+    """
+    arch_def = [
+        ['dsa_r1_k3_s1_c64'],
+        ['dsa_r2_k3_s2_c128'],
+        ['dsa_r2_k3_s2_c256'],
+        ['dsa_r6_k3_s2_c512'],
+        ['dsa_r2_k3_s2_c1024'],
+    ]
+    round_chs_fn = partial(round_channels, multiplier=channel_multiplier)
+    model_kwargs = dict(
+        block_args=decode_arch_def(arch_def, depth_multiplier=depth_multiplier, fix_first_last=fix_stem_head),
+        num_features=1024 if fix_stem_head else max(1024, round_chs_fn(1024)),
+        stem_size=32,
+        fix_stem=fix_stem_head,
+        round_chs_fn=round_chs_fn,
+        norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
+        act_layer=resolve_act_layer(kwargs, 'relu6'),
+        **kwargs
+    )
+    model = _create_effnet(variant, pretrained, **model_kwargs)
+    return model
+
+
 def _gen_mobilenet_v2(
         variant, channel_multiplier=1.0, depth_multiplier=1.0, fix_stem_head=False, pretrained=False, **kwargs):
     """ Generate MobileNet-V2 network
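
The 'dsa_*' strings above are timm block-definition shorthand: 'dsa' decodes to a depthwise-separable block with an activation after the pointwise conv and no residual (matching the original MobileNet-V1), while r is the repeat count, k the kernel size, s the stride (applied by the builder only at the first block of each stage) and c the output channels. A small sketch of how they expand, assuming the decoder currently lives in timm.models._efficientnet_builder (the module path has moved between timm versions):

    from timm.models._efficientnet_builder import decode_arch_def

    arch_def = [
        ['dsa_r1_k3_s1_c64'],   # stage 0: one 3x3 depthwise-separable block, 64 channels
        ['dsa_r2_k3_s2_c128'],  # stage 1: two blocks, stride 2, 128 channels
    ]
    block_args = decode_arch_def(arch_def)
    # a per-stage list of per-block kwarg dicts consumed by EfficientNetBuilder
    print(len(block_args), [len(stage) for stage in block_args])  # 2 [1, 2]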
@@ -1056,6 +1085,95 @@ def _gen_tinynet(
     return model


+def _gen_mobilenet_edgetpu(variant, channel_multiplier=1.0, depth_multiplier=1.0, pretrained=False, **kwargs):
+    """
+    Based on definitions in: https://github.com/tensorflow/models/tree/d2427a562f401c9af118e47af2f030a0a5599f55/official/projects/edgetpu/vision
+    """
+    if 'edgetpu_v2' in variant:
+        stem_size = 64
+        stem_kernel_size = 5
+        group_size = 64
+        num_features = 1280
+        act_layer = resolve_act_layer(kwargs, 'relu')
+
+        def _arch_def(chs: List[int], group_size: int):
+            return [
+                # stage 0, 112x112 in
+                [f'cn_r1_k1_s1_c{chs[0]}'],  # NOTE with expansion==1, the official impl block ends with just the 1x1 pwl
+                # stage 1, 112x112 in
+                [f'er_r1_k3_s2_e8_c{chs[1]}', f'er_r1_k3_s1_e4_gs{group_size}_c{chs[1]}'],
+                # stage 2, 56x56 in
+                [
+                    f'er_r1_k3_s2_e8_c{chs[2]}',
+                    f'er_r1_k3_s1_e4_gs{group_size}_c{chs[2]}',
+                    f'er_r1_k3_s1_e4_c{chs[2]}',
+                    f'er_r1_k3_s1_e4_gs{group_size}_c{chs[2]}',
+                ],
+                # stage 3, 28x28 in
+                [f'er_r1_k3_s2_e8_c{chs[3]}', f'ir_r3_k3_s1_e4_c{chs[3]}'],
+                # stage 4, 14x14 in
+                [f'ir_r1_k3_s1_e8_c{chs[4]}', f'ir_r3_k3_s1_e4_c{chs[4]}'],
+                # stage 5, 14x14 in
+                [f'ir_r1_k3_s2_e8_c{chs[5]}', f'ir_r3_k3_s1_e4_c{chs[5]}'],
+                # stage 6, 7x7 in
+                [f'ir_r1_k3_s1_e8_c{chs[6]}'],
+            ]
+
+        if 'edgetpu_v2_xs' in variant:
+            stem_size = 32
+            stem_kernel_size = 3
+            channels = [16, 32, 48, 96, 144, 160, 192]
+        elif 'edgetpu_v2_s' in variant:
+            channels = [24, 48, 64, 128, 160, 192, 256]
+        elif 'edgetpu_v2_m' in variant:
+            channels = [32, 64, 80, 160, 192, 240, 320]
+            num_features = 1344
+        elif 'edgetpu_v2_l' in variant:
+            stem_kernel_size = 7
+            group_size = 128
+            channels = [32, 64, 96, 192, 240, 256, 384]
+            num_features = 1408
+        else:
+            assert False
+
+        arch_def = _arch_def(channels, group_size)
+    else:
+        # v1
+        stem_size = 32
+        stem_kernel_size = 3
+        num_features = 1280
+        act_layer = resolve_act_layer(kwargs, 'relu')
+        arch_def = [
+            # stage 0, 112x112 in
+            ['cn_r1_k1_s1_c16'],
+            # stage 1, 112x112 in
+            ['er_r1_k3_s2_e8_c32', 'er_r3_k3_s1_e4_c32'],
+            # stage 2, 56x56 in
+            ['er_r1_k3_s2_e8_c48', 'er_r3_k3_s1_e4_c48'],
+            # stage 3, 28x28 in
+            ['ir_r1_k3_s2_e8_c96', 'ir_r3_k3_s1_e4_c96'],
+            # stage 4, 14x14 in
+            ['ir_r1_k3_s1_e8_c96_noskip', 'ir_r3_k3_s1_e4_c96'],
+            # stage 5, 14x14 in
+            ['ir_r1_k5_s2_e8_c160', 'ir_r3_k5_s1_e4_c160'],
+            # stage 6, 7x7 in
+            ['ir_r1_k3_s1_e8_c192'],
+        ]
+
+    model_kwargs = dict(
+        block_args=decode_arch_def(arch_def, depth_multiplier),
+        num_features=num_features,
+        stem_size=stem_size,
+        stem_kernel_size=stem_kernel_size,
+        round_chs_fn=partial(round_channels, multiplier=channel_multiplier),
+        norm_layer=kwargs.pop('norm_layer', None) or partial(nn.BatchNorm2d, **resolve_bn_args(kwargs)),
+        act_layer=act_layer,
+        **kwargs,
+    )
+    model = _create_effnet(variant, pretrained, **model_kwargs)
+    return model
+
+
 def _cfg(url='', **kwargs):
     return {
         'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
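
The generator above only assembles model_kwargs; _create_effnet builds the actual network. A hedged usage sketch, assuming the variant names registered later in this diff and no pretrained weights:

    import torch
    import timm

    model = timm.create_model('mobilenet_edgetpu_v2_s', pretrained=False)
    x = torch.randn(1, 3, 224, 224)
    print(model(x).shape)  # torch.Size([1, 1000])

    # per-stage feature maps through timm's standard features_only path
    feats = timm.create_model('mobilenet_edgetpu_v2_s', pretrained=False, features_only=True)
    print([f.shape for f in feats(x)])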
@@ -1086,6 +1204,9 @@ def _cfg(url='', **kwargs):
         url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_small_lamb-aff75073.pth',
         hf_hub_id='timm/'),

+    'mobilenet_100.untrained': _cfg(),
+    'mobilenet_125.untrained': _cfg(),
+
     'mobilenetv2_035.untrained': _cfg(),
     'mobilenetv2_050.lamb_in1k': _cfg(
         url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv2_050-3d30d450.pth',
@@ -1395,7 +1516,6 @@ def _cfg(url='', **kwargs):
         hf_hub_id='timm/',
         input_size=(3, 456, 456), pool_size=(15, 15), crop_pct=0.934),

-
     'tf_efficientnet_es.in1k': _cfg(
         url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_es-ca1afbfe.pth',
         hf_hub_id='timm/',
@@ -1584,6 +1704,23 @@ def _cfg(url='', **kwargs):
         input_size=(3, 106, 106), pool_size=(4, 4),  # int(224 * 0.475)
         url='https://github.com/huawei-noah/CV-Backbones/releases/download/v1.2.0/tinynet_e.pth',
         hf_hub_id='timm/'),
+
+    'mobilenet_edgetpu_100.untrained': _cfg(
+        # hf_hub_id='timm/',
+        input_size=(3, 224, 224), crop_pct=0.9),
+    'mobilenet_edgetpu_v2_xs.untrained': _cfg(
+        # hf_hub_id='timm/',
+        input_size=(3, 224, 224), crop_pct=0.9),
+    'mobilenet_edgetpu_v2_s.untrained': _cfg(
+        # hf_hub_id='timm/',
+        input_size=(3, 224, 224), crop_pct=0.9),
+    'mobilenet_edgetpu_v2_m.untrained': _cfg(
+        # hf_hub_id='timm/',
+        input_size=(3, 224, 224), crop_pct=0.9),
+    'mobilenet_edgetpu_v2_l.untrained': _cfg(
+        # hf_hub_id='timm/',
+        input_size=(3, 224, 224), crop_pct=0.9),
+
 })


@@ -1650,6 +1787,20 @@ def mnasnet_small(pretrained=False, **kwargs) -> EfficientNet:
     return model


+@register_model
+def mobilenet_100(pretrained=False, **kwargs) -> EfficientNet:
+    """ MobileNet V1 """
+    model = _gen_mobilenet_v1('mobilenet_100', 1.0, pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def mobilenet_125(pretrained=False, **kwargs) -> EfficientNet:
+    """ MobileNet V1 w/ 1.25 channel multiplier """
+    model = _gen_mobilenet_v1('mobilenet_125', 1.25, pretrained=pretrained, **kwargs)
+    return model
+
+
 @register_model
 def mobilenetv2_035(pretrained=False, **kwargs) -> EfficientNet:
     """ MobileNet V2 w/ 0.35 channel multiplier """
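
A quick sanity check for the new MobileNet-V1 entry points (hedged: the count is approximate and assumes the default 1000-class head):

    import timm

    for name in ('mobilenet_100', 'mobilenet_125'):
        m = timm.create_model(name, pretrained=False)
        n_params = sum(p.numel() for p in m.parameters())
        print(f'{name}: {n_params / 1e6:.1f}M params')  # mobilenet_100 lands near the ~4.2M reported in the V1 paper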
@@ -2510,6 +2661,41 @@ def tinynet_e(pretrained=False, **kwargs) -> EfficientNet:
     return model


+@register_model
+def mobilenet_edgetpu_100(pretrained=False, **kwargs) -> EfficientNet:
+    """ MobileNet-EdgeTPU-v1 100. """
+    model = _gen_mobilenet_edgetpu('mobilenet_edgetpu_100', pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def mobilenet_edgetpu_v2_xs(pretrained=False, **kwargs) -> EfficientNet:
+    """ MobileNet-EdgeTPU-v2 Extra Small. """
+    model = _gen_mobilenet_edgetpu('mobilenet_edgetpu_v2_xs', pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def mobilenet_edgetpu_v2_s(pretrained=False, **kwargs) -> EfficientNet:
+    """ MobileNet-EdgeTPU-v2 Small. """
+    model = _gen_mobilenet_edgetpu('mobilenet_edgetpu_v2_s', pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def mobilenet_edgetpu_v2_m(pretrained=False, **kwargs) -> EfficientNet:
+    """ MobileNet-EdgeTPU-v2 Medium. """
+    model = _gen_mobilenet_edgetpu('mobilenet_edgetpu_v2_m', pretrained=pretrained, **kwargs)
+    return model
+
+
+@register_model
+def mobilenet_edgetpu_v2_l(pretrained=False, **kwargs) -> EfficientNet:
+    """ MobileNet-EdgeTPU-v2 Large. """
+    model = _gen_mobilenet_edgetpu('mobilenet_edgetpu_v2_l', pretrained=pretrained, **kwargs)
+    return model
+
+
 register_model_deprecations(__name__, {
     'tf_efficientnet_b0_ap': 'tf_efficientnet_b0.ap_in1k',
     'tf_efficientnet_b1_ap': 'tf_efficientnet_b1.ap_in1k',