Skip to content
This repository was archived by the owner on Oct 25, 2024. It is now read-only.

Commit 3735c84

Browse files
authored
limit onnx version to 1.15 until the next onnxruntime release, which matches onnx 1.16 (#1431)
1 parent 20ae003 commit 3735c84

File tree

3 files changed

+5
-3
lines changed

3 files changed

+5
-3
lines changed

.github/workflows/script/unitTest/run_unit_test_optimize.sh

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,8 @@ function pytest() {
2424
ut_log_name=${LOG_DIR}/${JOB_NAME}.log
2525
export GLOG_minloglevel=2
2626

27+
# temporarily add limitation for onnx before onnxruntime next release to match onnx=1.16.1
28+
pip install onnx==1.15.0
2729
itrex_path=$(python -c 'import intel_extension_for_transformers; import os; print(os.path.dirname(intel_extension_for_transformers.__file__))')
2830
find . -name "test*.py" | sed 's,\.\/,coverage run --source='"${itrex_path}"' --append ,g' | sed 's/$/ --verbose/' >run.sh
2931
coverage erase

examples/huggingface/pytorch/text-generation/quantization/run_benchmark.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -140,7 +140,7 @@ function run_benchmark {
140140
elif [ "${topology}" = "baichuan2_13b" ]; then
141141
model_name_or_path="baichuan-inc/Baichuan2-13B-Base"
142142
extra_cmd=$extra_cmd" --trust_remote_code"
143-
pip install transformers==4.33
143+
pip install transformers==4.35.2
144144
elif [ "${topology}" = "qwen_7b" ]; then
145145
model_name_or_path="Qwen/Qwen-7B"
146146
extra_cmd=$extra_cmd" --trust_remote_code"

tests/requirements.txt

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -13,8 +13,8 @@ intel-tensorflow==2.14.0
1313
mlflow
1414
neural-speed==1.0a0
1515
nlpaug==1.1.9
16-
onnx==1.15.0
17-
onnxruntime==1.17.1
16+
onnx
17+
onnxruntime
1818
optimum-intel
1919
peft==0.6.2
2020
py-cpuinfo

0 commit comments

Comments (0)