Skip to content

Commit 91e6e17

Browse files
committed
add test_models
1 parent 77788f4 commit 91e6e17

File tree

3 files changed

+12
-14
lines changed

3 files changed

+12
-14
lines changed

tests/test_models.py

Lines changed: 3 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -53,13 +53,14 @@
5353
'vision_transformer', 'vision_transformer_sam', 'vision_transformer_hybrid', 'vision_transformer_relpos',
5454
'beit', 'mvitv2', 'eva', 'cait', 'xcit', 'volo', 'twins', 'deit', 'swin_transformer', 'swin_transformer_v2',
5555
'swin_transformer_v2_cr', 'maxxvit', 'efficientnet', 'mobilenetv3', 'levit', 'efficientformer', 'resnet',
56-
'regnet', 'byobnet', 'byoanet', 'mlp_mixer', 'hiera', 'fastvit', 'hieradet_sam2', 'aimv2*'
56+
'regnet', 'byobnet', 'byoanet', 'mlp_mixer', 'hiera', 'fastvit', 'hieradet_sam2', 'aimv2*', 'swiftformer',
57+
'starnet', 'shvit',
5758
]
5859

5960
# transformer / hybrid models don't support full set of spatial / feature APIs and/or have spatial output.
6061
NON_STD_FILTERS = [
6162
'vit_*', 'tnt_*', 'pit_*', 'coat_*', 'cait_*', '*mixer_*', 'gmlp_*', 'resmlp_*', 'twins_*',
62-
'convit_*', 'levit*', 'visformer*', 'deit*', 'xcit_*', 'crossvit_*', 'beit*', 'aimv2*',
63+
'convit_*', 'levit*', 'visformer*', 'deit*', 'xcit_*', 'crossvit_*', 'beit*', 'aimv2*', 'swiftformer_*',
6364
'poolformer_*', 'volo_*', 'sequencer2d_*', 'mvitv2*', 'gcvit*', 'efficientformer*', 'sam_hiera*',
6465
'eva_*', 'flexivit*', 'eva02*', 'samvit_*', 'efficientvit_m*', 'tiny_vit_*', 'hiera_*', 'vitamin*', 'test_vit*',
6566
]

timm/models/shvit.py

Lines changed: 7 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -235,7 +235,7 @@ def __init__(
235235
PatchMerging(prev_dim, dim, act_layer),
236236
Residule(Conv2d_BN(dim, dim, 3, 1, 1, groups=dim)),
237237
Residule(FFN(dim, int(dim * 2), act_layer)),
238-
) if prev_dim is not None else nn.Identity()
238+
) if prev_dim != dim else nn.Identity()
239239

240240
self.block = nn.Sequential(*[
241241
BasicBlock(dim, qk_dim, pdim, type, norm_layer, act_layer) for _ in range(depth)
@@ -269,19 +269,20 @@ def __init__(
269269
self.feature_info = []
270270

271271
# Patch embedding
272+
stem_chs = embed_dim[0]
272273
self.patch_embed = nn.Sequential(
273-
Conv2d_BN(in_chans, embed_dim[0] // 8, 3, 2, 1),
274+
Conv2d_BN(in_chans, stem_chs // 8, 3, 2, 1),
274275
act_layer(),
275-
Conv2d_BN(embed_dim[0] // 8, embed_dim[0] // 4, 3, 2, 1),
276+
Conv2d_BN(stem_chs // 8, stem_chs // 4, 3, 2, 1),
276277
act_layer(),
277-
Conv2d_BN(embed_dim[0] // 4, embed_dim[0] // 2, 3, 2, 1),
278+
Conv2d_BN(stem_chs // 4, stem_chs // 2, 3, 2, 1),
278279
act_layer(),
279-
Conv2d_BN(embed_dim[0] // 2, embed_dim[0], 3, 2, 1)
280+
Conv2d_BN(stem_chs // 2, stem_chs, 3, 2, 1)
280281
)
281282

282283
# Build SHViT blocks
283284
blocks = []
284-
prev_chs = None
285+
prev_chs = stem_chs
285286
for i in range(len(embed_dim)):
286287
blocks.append(StageBlock(
287288
prev_dim=prev_chs,

timm/models/swiftformer.py

Lines changed: 2 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -497,11 +497,7 @@ def forward_features(self, x: torch.Tensor) -> torch.Tensor:
497497
x = self.norm(x)
498498
return x
499499

500-
def forward_head(
501-
self,
502-
x: torch.Tensor,
503-
pre_logits: bool = False,
504-
) -> Union[Tuple[torch.Tensor, torch.Tensor], torch.Tensor]:
500+
def forward_head(self, x: torch.Tensor, pre_logits: bool = False):
505501
if self.global_pool == 'avg':
506502
x = x.mean(dim=(2, 3))
507503
x = self.head_drop(x)
@@ -515,7 +511,7 @@ def forward_head(
515511
# during standard train/finetune, inference average the classifier predictions
516512
return (x + x_dist) / 2
517513

518-
def forward(self, x: torch.Tensor) -> Union[Tuple[torch.Tensor, torch.Tensor], torch.Tensor]:
514+
def forward(self, x: torch.Tensor):
519515
x = self.forward_features(x)
520516
x = self.forward_head(x)
521517
return x

0 commit comments

Comments (0)