We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent f77c04f · commit 2e38d53 — Copy full SHA for 2e38d53
timm/models/vision_transformer.py
@@ -241,7 +241,6 @@ def __init__(
241
self.fast_attn = hasattr(torch.nn.functional, 'scaled_dot_product_attention') # FIXME
242
mlp_hidden_dim = int(mlp_ratio * dim)
243
in_proj_out_dim = mlp_hidden_dim + 3 * dim
244
- out_proj_in_dim = mlp_hidden_dim + dim
245
246
self.in_norm = norm_layer(dim)
247
self.in_proj = nn.Linear(dim, in_proj_out_dim, bias=qkv_bias)
0 commit comments