
Commit 880db73

removed HF_HOME as we are now matching huggingface cache implementation in fms
Signed-off-by: Joshua Rosenkranz <jmrosenk@us.ibm.com>
1 parent fed4a67 · commit 880db73

3 files changed (+0, -21 lines)

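The rationale in the commit message is that fms now defers to the standard Hugging Face cache resolution, so the tests no longer need to pin or restore HF_HOME themselves. As background, here is a simplified sketch of the lookup order huggingface_hub applies when HF_HOME is left untouched (the helper name is hypothetical, and recent versions also consult XDG_CACHE_HOME):

import os

def resolve_hf_hub_cache() -> str:
    # Hypothetical mirror of huggingface_hub's cache resolution, simplified.
    if "HF_HUB_CACHE" in os.environ:  # an explicit hub cache wins
        return os.environ["HF_HUB_CACHE"]
    hf_home = os.environ.get(  # otherwise derive the hub cache from HF_HOME
        "HF_HOME",
        os.path.join(os.path.expanduser("~"), ".cache", "huggingface"),
    )
    return os.path.join(hf_home, "hub")

With the fallback to /tmp/models/hf_cache gone, an unset HF_HOME simply resolves to the user's default cache, which is the behavior fms now matches.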
tests/models/test_decoders.py

Lines changed: 0 additions & 9 deletions
@@ -34,8 +34,6 @@
 except ImportError:
     GPTQ_ENABLED = False

-ORIGINAL_HF_HOME = os.environ.get("HF_HOME", None)
-
 # Add models to test here
 LLAMA_3p1_8B_INSTRUCT = "meta-llama/Llama-3.1-8B-Instruct"
 GRANITE_3p2_8B_INSTRUCT = "ibm-granite/granite-3.2-8b-instruct"
@@ -159,10 +157,6 @@ def reset_compiler():
     torch.compiler.reset()
     torch._dynamo.reset()
     os.environ.pop("COMPILATION_MODE", None)
-    if ORIGINAL_HF_HOME is None:
-        os.environ.pop("HF_HOME", None)
-    else:
-        os.environ["HF_HOME"] = ORIGINAL_HF_HOME


 # TODO: Currently, gptq does not have the same level of support as non-gptq models for get_model. This method provides the extra requirements for gptq for get_model,
@@ -304,9 +298,6 @@ def test_common_shapes(model_path, batch_size, seq_length, max_new_tokens):
     torch.manual_seed(42)
     os.environ["COMPILATION_MODE"] = "offline_decoder"

-    if "HF_HOME" not in os.environ:
-        os.environ["HF_HOME"] = "/tmp/models/hf_cache"
-
     dprint(
         f"testing model={model_path}, batch_size={batch_size}, seq_length={seq_length}, max_new_tokens={max_new_tokens}, micro_model={USE_MICRO_MODELS}"
     )

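The deletions above remove module-level bookkeeping (ORIGINAL_HF_HOME) that saved and restored HF_HOME around each test. If a test ever needs a scratch cache again, a minimal sketch using pytest's built-in monkeypatch and tmp_path fixtures would scope the change to one test with no manual restore (illustrative only, not part of this commit):

def test_with_scratch_cache(monkeypatch, tmp_path):
    # monkeypatch undoes the env change automatically on teardown
    monkeypatch.setenv("HF_HOME", str(tmp_path / "hf_cache"))
    # ... load and exercise the model; downloads land in the scratch cache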
tests/models/test_encoders.py

Lines changed: 0 additions & 9 deletions
@@ -10,8 +10,6 @@
 import os
 import numpy as np

-ORIGINAL_HF_HOME = os.environ.get("HF_HOME", None)
-
 # Add models to test here
 ROBERTA_SQUAD_V2 = "deepset/roberta-base-squad2"

@@ -81,20 +79,13 @@ def reset_compiler():
     torch.compiler.reset()
     torch._dynamo.reset()
     os.environ.pop('COMPILATION_MODE', None)
-    if ORIGINAL_HF_HOME is None:
-        os.environ.pop('HF_HOME', None)
-    else:
-        os.environ['HF_HOME'] = ORIGINAL_HF_HOME

 encoder_paths = ["deepset/roberta-base-squad2"]
 common_encoder_shapes = list(itertools.product(encoder_paths, common_batch_sizes, common_seq_lengths))

 @pytest.mark.parametrize("model_path,batch_size,seq_length", common_encoder_shapes)
 def test_common_shapes(model_path, batch_size, seq_length):
     os.environ["COMPILATION_MODE"] = "offline"
-
-    if "HF_HOME" not in os.environ:
-        os.environ["HF_HOME"] = "/tmp/models/hf_cache"

     dprint(f"testing model={model_path}, batch_size={batch_size}, seq_length={seq_length}")

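With the override removed, the encoder tests populate whatever cache huggingface_hub resolves on its own. As an illustrative check (not from this commit), huggingface_hub's scan_cache_dir reports what a run actually downloaded into the resolved cache:

from huggingface_hub import scan_cache_dir

cache_info = scan_cache_dir()  # scans HF_HUB_CACHE, or HF_HOME/hub by default
for repo in cache_info.repos:
    print(repo.repo_id, repo.size_on_disk)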
tests/models/test_model_expectations.py

Lines changed: 0 additions & 3 deletions
@@ -13,9 +13,6 @@

 os.environ["COMPILATION_MODE"] = "offline"

-if "HF_HOME" not in os.environ:
-    os.environ["HF_HOME"] = "/tmp/models/hf_cache"
-
 model_dir = os.environ.get("FMS_TESTING_MODEL_DIR", "/tmp/models")
 LLAMA_3p1_8B_INSTRUCT = "meta-llama/Llama-3.1-8B-Instruct"
 GRANITE_3p2_8B_INSTRUCT = "ibm-granite/granite-3.2-8b-instruct"

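Note that while HF_HOME is no longer forced here, the expectations tests still read FMS_TESTING_MODEL_DIR to locate local model copies, as the surviving context line shows. A hypothetical way to drive that from a script (paths illustrative):

import os
import subprocess

# Point the expectations tests at a local model directory for one run.
env = {**os.environ, "FMS_TESTING_MODEL_DIR": "/data/models"}
subprocess.run(
    ["pytest", "tests/models/test_model_expectations.py"], env=env, check=True
)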