
Commit 7c4ed4d

Add EVA-large models
1 parent 6a92587 commit 7c4ed4d

File tree

2 files changed: +48 -0 lines changed

README.md

Lines changed: 11 additions & 0 deletions
@@ -21,6 +21,17 @@ And a big thanks to all GitHub sponsors who helped with some of my costs before
 
 ## What's New
 
+# Dec 8, 2022
+* Add 'EVA l' to `vision_transformer.py`, MAE style ViT-L/14 MIM pretrain w/ EVA-CLIP targets, FT on ImageNet-1k (w/ ImageNet-22k intermediate for some)
+* original source: https://github.com/baaivision/EVA
+
+| model | top1 | param_count | gmac | macts | hub |
+|:------------------------------------------|-----:|------------:|------:|------:|:----------------------------------------|
+| eva_large_patch14_336.in22k_ft_in22k_in1k | 89.2 | 304.5 | 191.1 | 270.2 | [link](https://huggingface.co/BAAI/EVA) |
+| eva_large_patch14_336.in22k_ft_in1k | 88.7 | 304.5 | 191.1 | 270.2 | [link](https://huggingface.co/BAAI/EVA) |
+| eva_large_patch14_196.in22k_ft_in22k_in1k | 88.6 | 304.1 | 61.6 | 63.5 | [link](https://huggingface.co/BAAI/EVA) |
+| eva_large_patch14_196.in22k_ft_in1k | 87.9 | 304.1 | 61.6 | 63.5 | [link](https://huggingface.co/BAAI/EVA) |
+
 # Dec 6, 2022
 * Add 'EVA g', BEiT style ViT-g/14 model weights w/ both MIM pretrain and CLIP pretrain to `beit.py`.
 * original source: https://github.com/baaivision/EVA
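Not part of the commit itself, but as a quick sanity check of the entries above: a minimal usage sketch, assuming a timm build that includes this commit and network access to the BAAI/EVA Hugging Face hub for the pretrained checkpoints.

```python
import timm
import torch

# Sketch only (not in this commit). Assumes a timm build containing this commit
# and access to the BAAI/EVA Hugging Face hub for the pretrained weights.
model = timm.create_model('eva_large_patch14_196', pretrained=True)
model.eval()

# The 196px configs expect 3x196x196 inputs (crop_pct=1.0 in their default cfg).
x = torch.randn(1, 3, 196, 196)
with torch.no_grad():
    logits = model(x)
print(logits.shape)  # torch.Size([1, 1000]) -- ImageNet-1k fine-tuned head
```

Per the table, the 336px variants cost roughly 3x the GMACs of the 196px ones for a 0.6-0.8 point top-1 gain.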

timm/models/vision_transformer.py

Lines changed: 37 additions & 0 deletions
@@ -933,6 +933,25 @@ def _cfg(url='', **kwargs):
     'vit_small_patch16_36x1_224': _cfg(url=''),
     'vit_small_patch16_18x2_224': _cfg(url=''),
     'vit_base_patch16_18x2_224': _cfg(url=''),
+
+    # EVA fine-tuned weights from MAE style MIM - EVA-CLIP target pretrain
+    # https://github.com/baaivision/EVA/blob/7ecf2c0a370d97967e86d047d7af9188f78d2df3/eva/README.md#eva-l-learning-better-mim-representations-from-eva-clip
+    'eva_large_patch14_196.in22k_ft_in22k_in1k': _cfg(
+        hf_hub_id='BAAI/EVA', hf_hub_filename='eva_l_psz14_196px_21k_to_1k_ft_88p6.pt',
+        mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
+        input_size=(3, 196, 196), crop_pct=1.0),
+    'eva_large_patch14_336.in22k_ft_in22k_in1k': _cfg(
+        hf_hub_id='BAAI/EVA', hf_hub_filename='eva_l_psz14_336px_21k_to_1k_ft_89p2.pt',
+        mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
+        input_size=(3, 336, 336), crop_pct=1.0, crop_mode='squash'),
+    'eva_large_patch14_196.in22k_ft_in1k': _cfg(
+        hf_hub_id='BAAI/EVA', hf_hub_filename='eva_l_psz14_196px_1k_ft_88p0.pt',
+        mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
+        input_size=(3, 196, 196), crop_pct=1.0),
+    'eva_large_patch14_336.in22k_ft_in1k': _cfg(
+        hf_hub_id='BAAI/EVA', hf_hub_filename='eva_l_psz14_336px_1k_ft_88p65.pt',
+        mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD,
+        input_size=(3, 336, 336), crop_pct=1.0, crop_mode='squash'),
 })
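The `_cfg` entries above carry the preprocessing metadata (OPENAI CLIP mean/std, input size, crop_pct, crop_mode) alongside the Hugging Face hub location of each checkpoint. A hedged sketch of how such a cfg typically feeds evaluation preprocessing via timm's data helpers, assuming a timm version whose data-config resolution understands all of these fields:

```python
import timm
from timm.data import resolve_data_config, create_transform

# Sketch only (not in this commit). Assumes a timm version whose data-config
# resolution picks up all of the cfg fields above (crop_mode in particular).
model = timm.create_model('eva_large_patch14_336', pretrained=False)

# Pull mean/std, input_size, crop_pct, etc. from the model's default cfg ...
config = resolve_data_config({}, model=model)
# ... and turn them into an eval transform (resize/crop + normalize).
transform = create_transform(**config)
print(config)  # expect OPENAI CLIP mean/std, (3, 336, 336) input, crop_pct=1.0
```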

@@ -1354,3 +1373,21 @@ def vit_base_patch16_18x2_224(pretrained=False, **kwargs):
         patch_size=16, embed_dim=768, depth=18, num_heads=12, init_values=1e-5, block_fn=ParallelBlock, **kwargs)
     model = _create_vision_transformer('vit_base_patch16_18x2_224', pretrained=pretrained, **model_kwargs)
     return model
+
+
+@register_model
+def eva_large_patch14_196(pretrained=False, **kwargs):
+    """ EVA-large model https://arxiv.org/abs/2211.07636 via MAE MIM pretrain"""
+    model_kwargs = dict(
+        patch_size=14, embed_dim=1024, depth=24, num_heads=16, global_pool='avg', **kwargs)
+    model = _create_vision_transformer('eva_large_patch14_196', pretrained=pretrained, **model_kwargs)
+    return model
+
+
+@register_model
+def eva_large_patch14_336(pretrained=False, **kwargs):
+    """ EVA-large model https://arxiv.org/abs/2211.07636 via MAE MIM pretrain"""
+    model_kwargs = dict(
+        patch_size=14, embed_dim=1024, depth=24, num_heads=16, global_pool='avg', **kwargs)
+    model = _create_vision_transformer('eva_large_patch14_336', pretrained=pretrained, **model_kwargs)
+    return model
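`@register_model` is what makes the two new entry points discoverable through timm's registry. A small sketch (not part of the commit) of how they would surface, assuming a build that includes this change:

```python
import timm

# Sketch only (not in this commit). The registry exposes the new names for
# listing and creation once this module is imported.
print(timm.list_models('eva_large*'))
# expected: ['eva_large_patch14_196', 'eva_large_patch14_336']

# Build without weights to inspect the architecture: ViT-L/14, avg-pooled head.
model = timm.create_model('eva_large_patch14_196', pretrained=False)
n_params = sum(p.numel() for p in model.parameters())
print(f'{n_params / 1e6:.1f}M params')  # ~304M, matching the README table
```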
