tests/entrypoints/offline_mode/test_offline_mode.py (10 additions, 0 deletions)

@@ -23,6 +23,16 @@
"max_num_seqs": 64,
"tensor_parallel_size": 1,
},
{
"model": "Qwen/Qwen3-0.6B",
"enforce_eager": True,
"gpu_memory_utilization": 0.50,
"max_model_len": 64,
"max_num_batched_tokens": 64,
"max_num_seqs": 64,
"tensor_parallel_size": 1,
"tokenizer": "Qwen/Qwen3-4B",
},
{
"model": "mistralai/Mistral-7B-Instruct-v0.1",
"enforce_eager": True,
Expand Down
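The added config exercises offline startup when the tokenizer repo differs from the model repo, so both IDs must be rewritten to local cache paths. A minimal sketch of the same scenario through the public `LLM` API (the model/tokenizer names come from the test above; the remaining engine settings are illustrative, not required):

```python
import os

# HF_HUB_OFFLINE must be set before vLLM (and huggingface_hub) are
# imported, since huggingface_hub reads it into a constant at import time.
os.environ["HF_HUB_OFFLINE"] = "1"

from vllm import LLM

# Both repos must already exist in the local HF cache; with offline mode
# on, EngineArgs.__post_init__ rewrites each ID to its cached path.
llm = LLM(
    model="Qwen/Qwen3-0.6B",
    tokenizer="Qwen/Qwen3-4B",
    enforce_eager=True,
    max_model_len=64,
)
print(llm.generate("Hello")[0].outputs[0].text)
```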
vllm/engine/arg_utils.py (17 additions, 6 deletions)

@@ -581,15 +581,26 @@ def __post_init__(self):
         from vllm.plugins import load_general_plugins
 
         load_general_plugins()
-        # when use hf offline,replace model id to local model path
+        # when use hf offline,replace model and tokenizer id to local model path
         if huggingface_hub.constants.HF_HUB_OFFLINE:
             model_id = self.model
             self.model = get_model_path(self.model, self.revision)
-            logger.info(
-                "HF_HUB_OFFLINE is True, replace model_id [%s] to model_path [%s]",
-                model_id,
-                self.model,
-            )
+            if model_id is not self.model:
+                logger.info(
+                    "HF_HUB_OFFLINE is True, replace model_id [%s] to model_path [%s]",
+                    model_id,
+                    self.model,
+                )
+            if self.tokenizer is not None:
+                tokenizer_id = self.tokenizer
+                self.tokenizer = get_model_path(self.tokenizer, self.tokenizer_revision)
+                if tokenizer_id is not self.tokenizer:
+                    logger.info(
+                        "HF_HUB_OFFLINE is True, replace tokenizer_id [%s] "
+                        "to tokenizer_path [%s]",
+                        tokenizer_id,
+                        self.tokenizer,
+                    )
 
     @staticmethod
     def add_cli_args(parser: FlexibleArgumentParser) -> FlexibleArgumentParser:
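`get_model_path` is now called for both the model and the tokenizer, but its body is not part of this diff; the `is not` checks only log when the resolver returned a different object, keeping startup logs quiet when an ID is already a local path. A rough sketch of what such a resolver typically does, assuming the standard `huggingface_hub` cache helpers (the function name and fallback behavior here are assumptions, not vLLM's actual implementation):

```python
import os
from typing import Optional

from huggingface_hub import snapshot_download
from huggingface_hub.utils import LocalEntryNotFoundError


def resolve_local_path(model: str, revision: Optional[str] = None) -> str:
    """Hypothetical resolver: map a repo ID to its local HF cache path."""
    # Filesystem paths pass through untouched.
    if os.path.exists(model):
        return model
    try:
        # local_files_only=True never touches the network, matching the
        # HF_HUB_OFFLINE contract; returns the cached snapshot directory.
        return snapshot_download(model, revision=revision, local_files_only=True)
    except LocalEntryNotFoundError:
        # Nothing cached: return the ID unchanged (same object, so the
        # caller's `is not` check suppresses the log line).
        return model
```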