From 7fe0469e279cc9446048809d90575efb8c960bde Mon Sep 17 00:00:00 2001
From: Li Wang
Date: Fri, 13 Mar 2026 08:52:24 +0800
Subject: [PATCH] [CI][Misc] Use offline mode for model downloads (#7179)

### What this PR does / why we need it?
1. For every part of the current test modules that downloads a model via ModelScope, pass the `local_files_only` parameter to enable offline mode; this ensures CI does not fail due to network instability.
2. Install modelscope from a pinned commit until its next release.

### Does this PR introduce _any_ user-facing change?
No.

### How was this patch tested?
Check that the `HF_HUB_OFFLINE` env var and the `local_files_only` argument behave as expected:

1) Set the env:
```shell
export HF_HUB_OFFLINE=1
```

2) Run the script:
```python
from transformers import PretrainedConfig
import huggingface_hub
from modelscope.utils.hf_util import patch_hub

patch_hub()

model = "Qwen/Qwen3-0.6B"
kwargs = {}
config_dict, _ = PretrainedConfig.get_config_dict(
    model,
    trust_remote_code=True,
    local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE,
    **kwargs,
)
print(config_dict)
```

It works as expected:
```shell
2026-03-06 06:40:12,546 - modelscope - WARNING - We can not confirm the cached file is for revision: master
The argument `trust_remote_code` is to be used with Auto classes. It has no effect here and is ignored.
{'architectures': ['Qwen3ForCausalLM'], 'attention_bias': False, 'attention_dropout': 0.0, 'bos_token_id': 151643, 'eos_token_id': 151645, 'head_dim': 128, 'hidden_act': 'silu', 'hidden_size': 1024, 'initializer_range': 0.02, 'intermediate_size': 3072, 'max_position_embeddings': 40960, 'max_window_layers': 28, 'model_type': 'qwen3', 'num_attention_heads': 16, 'num_hidden_layers': 28, 'num_key_value_heads': 8, 'rms_norm_eps': 1e-06, 'rope_scaling': None, 'rope_theta': 1000000, 'sliding_window': None, 'tie_word_embeddings': True, 'torch_dtype': 'bfloat16', 'transformers_version': '4.51.0', 'use_cache': True, 'use_sliding_window': False, 'vocab_size': 151936, '_commit_hash': None}
```

3) Test a model repo that is not cached locally while the env `HF_HUB_OFFLINE` is set:
```python
from transformers import PretrainedConfig
import huggingface_hub
from modelscope.utils.hf_util import patch_hub

patch_hub()

model = "FireRedTeam/FireRed-OCR"
kwargs = {}
config_dict, _ = PretrainedConfig.get_config_dict(
    model,
    trust_remote_code=True,
    local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE,
    **kwargs,
)
print(config_dict)
```

The result is as expected:
```shell
  File "/workspace/demo.py", line 12, in <module>
    config_dict, _ = PretrainedConfig.get_config_dict(
                     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/usr/local/python3.11.14/lib/python3.11/site-packages/modelscope/utils/hf_util/patcher.py", line 189, in patch_get_config_dict
    model_dir = get_model_dir(pretrained_model_name_or_path,
                ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/usr/local/python3.11.14/lib/python3.11/site-packages/modelscope/utils/hf_util/patcher.py", line 164, in get_model_dir
    model_dir = snapshot_download(
                ^^^^^^^^^^^^^^^^^^
  File "/usr/local/python3.11.14/lib/python3.11/site-packages/modelscope/hub/snapshot_download.py", line 137, in snapshot_download
    return _snapshot_download(
           ^^^^^^^^^^^^^^^^^^^
  File "/usr/local/python3.11.14/lib/python3.11/site-packages/modelscope/hub/snapshot_download.py", line 283, in _snapshot_download
    raise ValueError(
ValueError: Cannot find the requested files in the cached path and outgoing traffic has been disabled. To enable look-ups and downloads online, set 'local_files_only' to False
```
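For reference, the pattern applied throughout the diff below can be exercised standalone. This is a minimal sketch (the model ID is illustrative); `huggingface_hub.constants.HF_HUB_OFFLINE` is the boolean that `huggingface_hub` derives from the `HF_HUB_OFFLINE` environment variable at import time:

```python
import huggingface_hub
from modelscope import snapshot_download  # type: ignore

# With HF_HUB_OFFLINE=1 exported, local_files_only becomes True and
# snapshot_download resolves the repo from the local cache without any
# network traffic; an uncached repo raises ValueError immediately instead
# of hanging on a flaky connection.
model_path = snapshot_download(
    "Qwen/Qwen3-0.6B",  # illustrative model ID
    local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE,
)
print(model_path)
```

When the env var is unset, `local_files_only` is `False` and the call downloads as before, so local development is unaffected.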

- vLLM version: v0.16.0
- vLLM main: https://github.com/vllm-project/vllm/commit/15d76f74e2fdb12a95ea00f0ca283acf6219a2b7

---------

Signed-off-by: wangli
---
 .github/workflows/_e2e_test.yaml                      | 7 +++++++
 tests/e2e/conftest.py                                 | 6 +++++-
 tests/e2e/multicard/2-cards/test_external_launcher.py | 6 +++++-
 tests/e2e/singlecard/pooling/test_classification.py   | 3 ++-
 tests/e2e/singlecard/pooling/test_embedding.py        | 5 +++--
 tests/e2e/singlecard/pooling/test_scoring.py          | 5 +++--
 tools/send_mm_request.py                              | 7 ++++++-
 7 files changed, 31 insertions(+), 8 deletions(-)

diff --git a/.github/workflows/_e2e_test.yaml b/.github/workflows/_e2e_test.yaml
index 904e4eb3..01ce509f 100644
--- a/.github/workflows/_e2e_test.yaml
+++ b/.github/workflows/_e2e_test.yaml
@@ -86,6 +86,7 @@ jobs:
         pip install uc-manager
         uv pip install -r requirements-dev.txt
         uv pip install -v -e .
+        uv pip install git+https://github.com/modelscope/modelscope.git@dbbcbf631fe6d10cc6446df2ad2fef24039fe7fe

     - name: Run vllm-project/vllm-ascend test
       env:
@@ -177,6 +178,7 @@
         pip install uc-manager
         uv pip install -r requirements-dev.txt
         uv pip install -v -e .
+        uv pip install git+https://github.com/modelscope/modelscope.git@dbbcbf631fe6d10cc6446df2ad2fef24039fe7fe
     - name: Run e2e test
       env:
         VLLM_WORKER_MULTIPROC_METHOD: spawn
@@ -265,6 +267,7 @@
         pip install uc-manager
         uv pip install -r requirements-dev.txt
         uv pip install -v -e .
+        uv pip install git+https://github.com/modelscope/modelscope.git@dbbcbf631fe6d10cc6446df2ad2fef24039fe7fe
     - name: Run vllm-project/vllm-ascend test (light)
       env:
         VLLM_WORKER_MULTIPROC_METHOD: spawn
@@ -353,6 +356,7 @@
         pip install uc-manager
         uv pip install -r requirements-dev.txt
         uv pip install -v -e .
+        uv pip install git+https://github.com/modelscope/modelscope.git@dbbcbf631fe6d10cc6446df2ad2fef24039fe7fe
     - name: Run vllm-project/vllm-ascend test (full)
       env:
         VLLM_WORKER_MULTIPROC_METHOD: spawn
@@ -448,6 +452,7 @@
         pip install uc-manager
         uv pip install -r requirements-dev.txt
         uv pip install -v -e .
+        uv pip install git+https://github.com/modelscope/modelscope.git@dbbcbf631fe6d10cc6446df2ad2fef24039fe7fe

     - name: Run vllm-project/vllm-ascend test for V1 Engine
       env:
@@ -529,6 +534,7 @@
         pip install uc-manager
         uv pip install -r requirements-dev.txt
         uv pip install -v -e .
+        uv pip install git+https://github.com/modelscope/modelscope.git@dbbcbf631fe6d10cc6446df2ad2fef24039fe7fe

     - name: Run vllm-project/vllm-ascend test
       env:
@@ -590,6 +596,7 @@
         pip install uc-manager
         uv pip install -r requirements-dev.txt
         uv pip install -v -e .
+        uv pip install git+https://github.com/modelscope/modelscope.git@dbbcbf631fe6d10cc6446df2ad2fef24039fe7fe

     - name: Run vllm-project/vllm-ascend test
       env:

diff --git a/tests/e2e/conftest.py b/tests/e2e/conftest.py
index edef3664..d124b9ce 100644
--- a/tests/e2e/conftest.py
+++ b/tests/e2e/conftest.py
@@ -34,6 +34,7 @@ import traceback
 from pathlib import Path
 from typing import Any, TypeVar
 
+import huggingface_hub
 import numpy as np
 import openai
 import psutil
@@ -1024,7 +1025,10 @@ class HfRunner:
 
 @pytest.fixture(scope="session")
 def ilama_lora_files():
-    return snapshot_download(repo_id="vllm-ascend/ilama-text2sql-spider")
+    return snapshot_download(
+        repo_id="vllm-ascend/ilama-text2sql-spider",
+        local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE,
+    )
 
 
 @pytest.fixture(scope="session")
diff --git a/tests/e2e/multicard/2-cards/test_external_launcher.py b/tests/e2e/multicard/2-cards/test_external_launcher.py
index e74b3585..0ce41945 100644
--- a/tests/e2e/multicard/2-cards/test_external_launcher.py
+++ b/tests/e2e/multicard/2-cards/test_external_launcher.py
@@ -26,6 +26,7 @@ import sys
 from pathlib import Path
 from unittest.mock import patch
 
+import huggingface_hub
 import pytest
 import torch_npu
 from modelscope import snapshot_download  # type: ignore
@@ -165,7 +166,10 @@ def test_qwen3_external_launcher_with_sleepmode():
 def test_qwen3_external_launcher_with_sleepmode_level2():
     script = Path(__file__).parent.parent.parent.parent.parent / "examples" / "offline_external_launcher.py"
     env = os.environ.copy()
-    model_path = snapshot_download("Qwen/Qwen3-8B")
+    model_path = snapshot_download(
+        "Qwen/Qwen3-8B",
+        local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE,
+    )
     # TODO: Add moe model test
     cmd = [
         sys.executable,
diff --git a/tests/e2e/singlecard/pooling/test_classification.py b/tests/e2e/singlecard/pooling/test_classification.py
index 8bdd3660..e41ca01d 100644
--- a/tests/e2e/singlecard/pooling/test_classification.py
+++ b/tests/e2e/singlecard/pooling/test_classification.py
@@ -1,13 +1,14 @@
 import torch
 from modelscope import snapshot_download  # type: ignore[import-untyped]
 from transformers import AutoModelForSequenceClassification
+import huggingface_hub
 
 from tests.e2e.conftest import HfRunner, VllmRunner
 
 
 def test_qwen_pooling_classify_correctness() -> None:
-    model_name = snapshot_download("Howeee/Qwen2.5-1.5B-apeach")
+    model_name = snapshot_download("Howeee/Qwen2.5-1.5B-apeach", local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE,)
 
     prompts = [
         "Hello, my name is",
diff --git a/tests/e2e/singlecard/pooling/test_embedding.py b/tests/e2e/singlecard/pooling/test_embedding.py
index 54eae677..50dc9ee9 100644
--- a/tests/e2e/singlecard/pooling/test_embedding.py
+++ b/tests/e2e/singlecard/pooling/test_embedding.py
@@ -18,6 +18,7 @@
 #
 import pytest
 from modelscope import snapshot_download  # type: ignore[import-untyped]
+import huggingface_hub
 
 from tests.e2e.conftest import HfRunner, VllmRunner
 from tests.e2e.utils import check_embeddings_close
@@ -32,7 +33,7 @@ MODELS = [
 def test_embed_models_correctness(model: str):
     queries = ['What is the capital of China?', 'Explain gravity']
 
-    model_name = snapshot_download(model)
+    model_name = snapshot_download(model, local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE,)
     with VllmRunner(
             model_name,
             runner="pooling",
@@ -60,7 +61,7 @@ def test_bge_m3_correctness():
     queries = ['What is the capital of China?', 'Explain gravity']
 
-    model_name = snapshot_download("BAAI/bge-m3")
model_name = snapshot_download("BAAI/bge-m3", local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE,) with VllmRunner( model_name, runner="pooling", diff --git a/tests/e2e/singlecard/pooling/test_scoring.py b/tests/e2e/singlecard/pooling/test_scoring.py index fb81b6f2..2913a361 100644 --- a/tests/e2e/singlecard/pooling/test_scoring.py +++ b/tests/e2e/singlecard/pooling/test_scoring.py @@ -3,6 +3,7 @@ import pytest import torch import torch.nn.functional as F +import huggingface_hub from modelscope import snapshot_download # type: ignore[import-untyped] from tests.e2e.conftest import HfRunner, VllmRunner @@ -31,7 +32,7 @@ DTYPE = "half" @pytest.fixture(scope="module", params=CROSS_ENCODER_MODELS) def model_name(request): - yield snapshot_download(request.param) + yield snapshot_download(request.param, local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE,) def test_cross_encoder_score_1_to_1(model_name): text_pair = [TEXTS_1[0], TEXTS_2[0]] @@ -100,7 +101,7 @@ def test_cross_encoder_score_N_to_N(model_name): @pytest.fixture(scope="module", params=EMBEDDING_MODELS) def emb_model_name(request): - yield snapshot_download(request.param) + yield snapshot_download(request.param, local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE,) def test_embedding_score_1_to_1(emb_model_name): diff --git a/tools/send_mm_request.py b/tools/send_mm_request.py index 8b3033ab..164fba61 100644 --- a/tools/send_mm_request.py +++ b/tools/send_mm_request.py @@ -1,10 +1,15 @@ import base64 import os +import huggingface_hub import requests from modelscope import snapshot_download # type: ignore -mm_dir = snapshot_download("vllm-ascend/mm_request", repo_type="dataset") +mm_dir = snapshot_download( + "vllm-ascend/mm_request", + repo_type="dataset", + local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE, +) image_path = os.path.join(mm_dir, "test_mm2.jpg") with open(image_path, "rb") as image_file: image_data = base64.b64encode(image_file.read()).decode("utf-8")