python hub.py

# Batch sizes swept per model; large models use a truncated sweep to fit GPU memory.
batch_sizes=(1 2 4 8 16 32 64 128 256)
large_model_batch_sizes=(1 2 4 8 16 32 64)
# Backends passed one at a time to perf_run.py; models without a TorchScript
# artifact use the list that omits ts_trt.
backends=("torch" "ts_trt" "dynamo" "torch_compile" "inductor")
backends_no_torchscript=("torch" "dynamo" "torch_compile" "inductor")
1012
1113
# Benchmark VGG16 model: one perf_run.py invocation per (batch size, backend),
# each writing its own CSV report.
echo "Benchmarking VGG16 model"
for bs in "${batch_sizes[@]}"
do
  for backend in "${backends[@]}"
  do
    python perf_run.py --model "${MODELS_DIR}/vgg16_scripted.jit.pt" \
                       --model_torch vgg16 \
                       --precision fp16 --inputs="(${bs}, 3, 224, 224)" \
                       --batch_size "${bs}" \
                       --truncate \
                       --backends "${backend}" \
                       --report "vgg16_perf_bs${bs}_backend_${backend}.csv"
  done
done
2429
2530# Benchmark AlexNet model
2631echo " Benchmarking AlexNet model"
2732for bs in ${batch_sizes[@]}
2833do
29- python perf_run.py --model ${MODELS_DIR} /alexnet_scripted.jit.pt \
30- --model_torch alexnet \
31- --precision fp32,fp16 --inputs=" (${bs} , 3, 227, 227)" \
32- --batch_size ${bs} \
33- --truncate \
34- --backends torch,ts_trt,dynamo,torch_compile,inductor \
35- --report " alexnet_perf_bs${bs} .txt"
34+ for backend in ${backends[@]}
35+ do
36+ python perf_run.py --model ${MODELS_DIR} /alexnet_scripted.jit.pt \
37+ --model_torch alexnet \
38+ --precision fp16 --inputs=" (${bs} , 3, 227, 227)" \
39+ --batch_size ${bs} \
40+ --truncate \
41+ --backends ${backend} \
42+ --report " alexnet_perf_bs${bs} _backend_${backend} .csv"
43+ done
3644done
3745
3846# Benchmark Resnet50 model
3947echo " Benchmarking Resnet50 model"
4048for bs in ${batch_sizes[@]}
4149do
42- python perf_run.py --model ${MODELS_DIR} /resnet50_scripted.jit.pt \
43- --model_torch resnet50 \
44- --precision fp32,fp16 --inputs=" (${bs} , 3, 224, 224)" \
45- --batch_size ${bs} \
46- --truncate \
47- --backends torch,ts_trt,dynamo,torch_compile,inductor \
48- --report " resnet50_perf_bs${bs} .txt"
50+ for backend in ${backends[@]}
51+ do
52+ python perf_run.py --model ${MODELS_DIR} /resnet50_scripted.jit.pt \
53+ --model_torch resnet50 \
54+ --precision fp16 --inputs=" (${bs} , 3, 224, 224)" \
55+ --batch_size ${bs} \
56+ --truncate \
57+ --backends ${backend} \
58+ --report " resnet50_perf_bs${bs} _backend_${backend} .csv"
59+ done
4960done
5061
5162# Benchmark VIT model
5263echo " Benchmarking VIT model"
5364for bs in ${batch_sizes[@]}
5465do
55- python perf_run.py --model ${MODELS_DIR} /vit_scripted.jit.pt \
56- --model_torch vit \
57- --precision fp32,fp16 --inputs=" (${bs} , 3, 224, 224)" \
58- --batch_size ${bs} \
59- --truncate \
60- --backends torch,ts_trt,dynamo,torch_compile,inductor \
61- --report " vit_perf_bs${bs} .txt"
66+ for backend in ${backends[@]}
67+ do
68+ python perf_run.py --model ${MODELS_DIR} /vit_scripted.jit.pt \
69+ --model_torch vit \
70+ --precision fp16 --inputs=" (${bs} , 3, 224, 224)" \
71+ --batch_size ${bs} \
72+ --truncate \
73+ --backends ${backend} \
74+ --report " vit_perf_bs${bs} _backend_${backend} .csv"
75+ done
6276done
6377
6478# Benchmark VIT Large model
6579echo " Benchmarking VIT Large model"
6680for bs in ${large_model_batch_sizes[@]}
6781do
68- python perf_run.py --model ${MODELS_DIR} /vit_large_scripted.jit.pt \
69- --model_torch vit_large \
70- --precision fp32,fp16 --inputs=" (${bs} , 3, 224, 224)" \
71- --truncate \
72- --batch_size ${bs} \
73- --backends torch,ts_trt,dynamo,torch_compile,inductor \
74- --report " vit_large_perf_bs${bs} .txt"
82+ for backend in ${backends[@]}
83+ do
84+ python perf_run.py --model ${MODELS_DIR} /vit_large_scripted.jit.pt \
85+ --model_torch vit_large \
86+ --precision fp16 --inputs=" (${bs} , 3, 224, 224)" \
87+ --batch_size ${bs} \
88+ --truncate \
89+ --backends ${backend} \
90+ --report " vit_large_perf_bs${bs} _backend_${backend} .csv"
91+ done
7592done
7693
7794# Benchmark EfficientNet-B0 model
7895echo " Benchmarking EfficientNet-B0 model"
7996for bs in ${batch_sizes[@]}
8097do
81- python perf_run.py --model ${MODELS_DIR} /efficientnet_b0_scripted.jit.pt \
82- --model_torch efficientnet_b0 \
83- --precision fp32,fp16 --inputs=" (${bs} , 3, 224, 224)" \
84- --batch_size ${bs} \
85- --truncate \
86- --backends torch,ts_trt,dynamo,torch_compile,inductor \
87- --report " efficientnet_b0_perf_bs${bs} .txt"
98+ for backend in ${backends[@]}
99+ do
100+ python perf_run.py --model ${MODELS_DIR} /efficientnet_b0_scripted.jit.pt \
101+ --model_torch efficientnet_b0 \
102+ --precision fp16 --inputs=" (${bs} , 3, 224, 224)" \
103+ --batch_size ${bs} \
104+ --truncate \
105+ --backends ${backend} \
106+ --report " efficientnet_b0_perf_bs${bs} _backend_${backend} .csv"
107+ done
88108done
89109
# Benchmark Stable Diffusion UNet model: loaded via --model_torch only (no
# TorchScript artifact), so the ts_trt backend is excluded. Takes three inputs:
# latents, timestep, and text-encoder hidden states. Reduced batch sweep.
echo "Benchmarking SD UNet model"
for bs in "${large_model_batch_sizes[@]}"
do
  for backend in "${backends_no_torchscript[@]}"
  do
    python perf_run.py --model_torch sd_unet \
                       --precision fp16 --inputs="(${bs}, 4, 64, 64);(${bs});(${bs}, 1, 768)" \
                       --batch_size "${bs}" \
                       --truncate \
                       --backends "${backend}" \
                       --report "sd_unet_perf_bs${bs}_backend_${backend}.csv"
  done
done
101124
# Benchmark BERT model: two int32 inputs of shape (bs, 128) — token ids and
# attention mask per the traced model's signature.
echo "Benchmarking Huggingface BERT base model"
for bs in "${batch_sizes[@]}"
do
  for backend in "${backends[@]}"
  do
    python perf_run.py --model "${MODELS_DIR}/bert_base_uncased_traced.jit.pt" \
                       --model_torch "bert_base_uncased" \
                       --precision fp16 --inputs="(${bs}, 128)@int32;(${bs}, 128)@int32" \
                       --batch_size "${bs}" \
                       --truncate \
                       --backends "${backend}" \
                       --report "bert_base_perf_bs${bs}_backend_${backend}.csv"
  done
done
114140
# Collect and concatenate all results: accumulate_results.py gathers the
# per-(batch, backend) CSV reports produced above into a combined summary.
echo "Concatenating all results"
python accumulate_results.py
0 commit comments