[CI][Misc] Use offline mode for model downloads (#7179)
### What this PR does / why we need it?
1. For all parts of the current test modules that download models via
modelscope, pass the `local_files_only` parameter to enable offline
mode; this ensures that CI will not fail due to network instability
(the pattern is sketched below).
2. Install modelscope from a fixed commit until its next release.
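The change applies the same pattern at every download site. A minimal sketch follows; the helper name `maybe_offline_download` is hypothetical and used only for illustration — the actual diff passes `local_files_only` inline at each `snapshot_download` call:
```python
# Sketch of the pattern, assuming modelscope is installed and the model is
# already present in the local cache whenever HF_HUB_OFFLINE=1 is exported.
import huggingface_hub
from modelscope import snapshot_download  # type: ignore[import-untyped]


def maybe_offline_download(repo_id: str) -> str:
    # Hypothetical helper: when CI exports HF_HUB_OFFLINE=1, the constant is
    # True and modelscope resolves the repo from the local cache only.
    return snapshot_download(
        repo_id,
        local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE,
    )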
### Does this PR introduce _any_ user-facing change?
No. This is a CI/test-only change.
### How was this patch tested?
Verified that both the `HF_HUB_OFFLINE` env var and the `local_files_only` argument work:
1) Set the env var:
```shell
export HF_HUB_OFFLINE=1
```
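As a quick sanity check (not part of this PR), `huggingface_hub` evaluates the variable once at import time, so the constant can be forwarded directly as `local_files_only`:
```python
import huggingface_hub

# With HF_HUB_OFFLINE=1 exported above, this prints True; with the variable
# unset it prints False, and downloads remain allowed.
print(huggingface_hub.constants.HF_HUB_OFFLINE)
```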
2) Run the script:
```python
from transformers import PretrainedConfig
import huggingface_hub
from modelscope.utils.hf_util import patch_hub
patch_hub()
model="Qwen/Qwen3-0.6B"
kwargs = {}
config_dict, _ = PretrainedConfig.get_config_dict(
model,
trust_remote_code=True,
local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE,
**kwargs,
)
print(config_dict)
```
it works well:
```shell
2026-03-06 06:40:12,546 - modelscope - WARNING - We can not confirm the cached file is for revision: master
The argument `trust_remote_code` is to be used with Auto classes. It has no effect here and is ignored.
{'architectures': ['Qwen3ForCausalLM'], 'attention_bias': False, 'attention_dropout': 0.0, 'bos_token_id': 151643, 'eos_token_id': 151645, 'head_dim': 128, 'hidden_act': 'silu', 'hidden_size': 1024, 'initializer_range': 0.02, 'intermediate_size': 3072, 'max_position_embeddings': 40960, 'max_window_layers': 28, 'model_type': 'qwen3', 'num_attention_heads': 16, 'num_hidden_layers': 28, 'num_key_value_heads': 8, 'rms_norm_eps': 1e-06, 'rope_scaling': None, 'rope_theta': 1000000, 'sliding_window': None, 'tie_word_embeddings': True, 'torch_dtype': 'bfloat16', 'transformers_version': '4.51.0', 'use_cache': True, 'use_sliding_window': False, 'vocab_size': 151936, '_commit_hash': None}
```
3) Test with a model repo that is not cached locally while the env
`HF_HUB_OFFLINE` is set:
```python
from transformers import PretrainedConfig
import huggingface_hub
from modelscope.utils.hf_util import patch_hub
patch_hub()
model="FireRedTeam/FireRed-OCR"
kwargs = {}
config_dict, _ = PretrainedConfig.get_config_dict(
model,
trust_remote_code=True,
local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE,
**kwargs,
)
print(config_dict)
```
and the result is as expected:
```shell
File "/workspace/demo.py", line 12, in <module>
config_dict, _ = PretrainedConfig.get_config_dict(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/python3.11.14/lib/python3.11/site-packages/modelscope/utils/hf_util/patcher.py", line 189, in patch_get_config_dict
model_dir = get_model_dir(pretrained_model_name_or_path,
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/python3.11.14/lib/python3.11/site-packages/modelscope/utils/hf_util/patcher.py", line 164, in get_model_dir
model_dir = snapshot_download(
^^^^^^^^^^^^^^^^^^
File "/usr/local/python3.11.14/lib/python3.11/site-packages/modelscope/hub/snapshot_download.py", line 137, in snapshot_download
return _snapshot_download(
^^^^^^^^^^^^^^^^^^^
File "/usr/local/python3.11.14/lib/python3.11/site-packages/modelscope/hub/snapshot_download.py", line 283, in _snapshot_download
raise ValueError(
ValueError: Cannot find the requested files in the cached path and outgoing traffic has been disabled. To enable look-ups and downloads online, set 'local_files_only' to False
```
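Offline mode only resolves models that are already cached, so CI must warm the cache while the runner still has network access before exporting `HF_HUB_OFFLINE=1`. A hedged sketch of such a warm-up step (the repo list is illustrative, not taken from this PR):
```python
# Hypothetical warm-up script run before HF_HUB_OFFLINE=1 is exported:
# download (or reuse) each snapshot so later offline lookups succeed.
from modelscope import snapshot_download  # type: ignore[import-untyped]

for repo_id in ["Qwen/Qwen3-0.6B"]:  # illustrative repo list
    snapshot_download(repo_id)
```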
- vLLM version: v0.16.0
- vLLM main: 15d76f74e2
---------
Signed-off-by: wangli <wangli858794774@gmail.com>
```diff
@@ -34,6 +34,7 @@ import traceback
 from pathlib import Path
 from typing import Any, TypeVar
 
+import huggingface_hub
 import numpy as np
 import openai
 import psutil
@@ -1024,7 +1025,10 @@ class HfRunner:
 
 @pytest.fixture(scope="session")
 def ilama_lora_files():
-    return snapshot_download(repo_id="vllm-ascend/ilama-text2sql-spider")
+    return snapshot_download(
+        repo_id="vllm-ascend/ilama-text2sql-spider",
+        local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE,
+    )
 
 
 @pytest.fixture(scope="session")
```

```diff
@@ -26,6 +26,7 @@ import sys
 from pathlib import Path
 from unittest.mock import patch
 
+import huggingface_hub
 import pytest
 import torch_npu
 from modelscope import snapshot_download  # type: ignore
@@ -165,7 +166,10 @@ def test_qwen3_external_launcher_with_sleepmode():
 def test_qwen3_external_launcher_with_sleepmode_level2():
     script = Path(__file__).parent.parent.parent.parent.parent / "examples" / "offline_external_launcher.py"
     env = os.environ.copy()
-    model_path = snapshot_download("Qwen/Qwen3-8B")
+    model_path = snapshot_download(
+        "Qwen/Qwen3-8B",
+        local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE,
+    )
     # TODO: Add moe model test
     cmd = [
         sys.executable,
```

```diff
@@ -1,13 +1,14 @@
 import torch
 from modelscope import snapshot_download  # type: ignore[import-untyped]
 from transformers import AutoModelForSequenceClassification
+import huggingface_hub
 
 from tests.e2e.conftest import HfRunner, VllmRunner
 
 
 def test_qwen_pooling_classify_correctness() -> None:
 
-    model_name = snapshot_download("Howeee/Qwen2.5-1.5B-apeach")
+    model_name = snapshot_download("Howeee/Qwen2.5-1.5B-apeach", local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE,)
 
     prompts = [
         "Hello, my name is",
```

```diff
@@ -18,6 +18,7 @@
 #
 import pytest
 from modelscope import snapshot_download  # type: ignore[import-untyped]
+import huggingface_hub
 
 from tests.e2e.conftest import HfRunner, VllmRunner
 from tests.e2e.utils import check_embeddings_close
@@ -32,7 +33,7 @@ MODELS = [
 def test_embed_models_correctness(model: str):
     queries = ['What is the capital of China?', 'Explain gravity']
 
-    model_name = snapshot_download(model)
+    model_name = snapshot_download(model, local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE,)
     with VllmRunner(
         model_name,
         runner="pooling",
@@ -60,7 +61,7 @@ def test_embed_models_correctness(model: str):
 def test_bge_m3_correctness():
     queries = ['What is the capital of China?', 'Explain gravity']
 
-    model_name = snapshot_download("BAAI/bge-m3")
+    model_name = snapshot_download("BAAI/bge-m3", local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE,)
     with VllmRunner(
         model_name,
         runner="pooling",
```

```diff
@@ -3,6 +3,7 @@
 import pytest
 import torch
 import torch.nn.functional as F
+import huggingface_hub
 from modelscope import snapshot_download  # type: ignore[import-untyped]
 
 from tests.e2e.conftest import HfRunner, VllmRunner
@@ -31,7 +32,7 @@ DTYPE = "half"
 
 @pytest.fixture(scope="module", params=CROSS_ENCODER_MODELS)
 def model_name(request):
-    yield snapshot_download(request.param)
+    yield snapshot_download(request.param, local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE,)
 
 def test_cross_encoder_score_1_to_1(model_name):
     text_pair = [TEXTS_1[0], TEXTS_2[0]]
@@ -100,7 +101,7 @@ def test_cross_encoder_score_N_to_N(model_name):
 
 @pytest.fixture(scope="module", params=EMBEDDING_MODELS)
 def emb_model_name(request):
-    yield snapshot_download(request.param)
+    yield snapshot_download(request.param, local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE,)
 
 
 def test_embedding_score_1_to_1(emb_model_name):
```