### What this PR does / why we need it?
1. For all parts of the current test module that download models via
ModelScope, pass the `local_files_only` parameter to enable offline
mode; this ensures that CI will not fail due to network instability
(see the sketch after this list).
2. Pin the modelscope install to a fixed commit until its next release.
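For reference, this is the pattern applied throughout the test module (the same `snapshot_download` call as in the updated file below; the model name here is just an example):
```python
import huggingface_hub
from modelscope import snapshot_download  # type: ignore

# Resolve the model from the local cache when HF_HUB_OFFLINE=1;
# only allow online lookups/downloads when offline mode is off.
model_path = snapshot_download(
    "Qwen/Qwen3-0.6B",
    local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE,
)
```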
### Does this PR introduce _any_ user-facing change?
### How was this patch tested?
Check whether the `HF_HUB_OFFLINE` env var and the `local_files_only` argument work:
1) Set the env var:
```shell
export HF_HUB_OFFLINE=1
```
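Optionally, confirm that `huggingface_hub` picked up the variable before running the script (it exposes it as the same constant used below):
```python
import huggingface_hub

# With HF_HUB_OFFLINE=1 exported, this should print True
print(huggingface_hub.constants.HF_HUB_OFFLINE)
```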
2) Run the script:
```python
from transformers import PretrainedConfig
import huggingface_hub
from modelscope.utils.hf_util import patch_hub

patch_hub()
model = "Qwen/Qwen3-0.6B"
kwargs = {}
config_dict, _ = PretrainedConfig.get_config_dict(
    model,
    trust_remote_code=True,
    local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE,
    **kwargs,
)
print(config_dict)
```
It works as expected:
```shell
2026-03-06 06:40:12,546 - modelscope - WARNING - We can not confirm the cached file is for revision: master
The argument `trust_remote_code` is to be used with Auto classes. It has no effect here and is ignored.
{'architectures': ['Qwen3ForCausalLM'], 'attention_bias': False, 'attention_dropout': 0.0, 'bos_token_id': 151643, 'eos_token_id': 151645, 'head_dim': 128, 'hidden_act': 'silu', 'hidden_size': 1024, 'initializer_range': 0.02, 'intermediate_size': 3072, 'max_position_embeddings': 40960, 'max_window_layers': 28, 'model_type': 'qwen3', 'num_attention_heads': 16, 'num_hidden_layers': 28, 'num_key_value_heads': 8, 'rms_norm_eps': 1e-06, 'rope_scaling': None, 'rope_theta': 1000000, 'sliding_window': None, 'tie_word_embeddings': True, 'torch_dtype': 'bfloat16', 'transformers_version': '4.51.0', 'use_cache': True, 'use_sliding_window': False, 'vocab_size': 151936, '_commit_hash': None}
```
3) Test with a model repo that is not cached locally while the env var
`HF_HUB_OFFLINE` is set:
```python
from transformers import PretrainedConfig
import huggingface_hub
from modelscope.utils.hf_util import patch_hub

patch_hub()
model = "FireRedTeam/FireRed-OCR"
kwargs = {}
config_dict, _ = PretrainedConfig.get_config_dict(
    model,
    trust_remote_code=True,
    local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE,
    **kwargs,
)
print(config_dict)
```
And the expected error is raised instead of a download attempt:
```shell
File "/workspace/demo.py", line 12, in <module>
config_dict, _ = PretrainedConfig.get_config_dict(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/python3.11.14/lib/python3.11/site-packages/modelscope/utils/hf_util/patcher.py", line 189, in patch_get_config_dict
model_dir = get_model_dir(pretrained_model_name_or_path,
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/python3.11.14/lib/python3.11/site-packages/modelscope/utils/hf_util/patcher.py", line 164, in get_model_dir
model_dir = snapshot_download(
^^^^^^^^^^^^^^^^^^
File "/usr/local/python3.11.14/lib/python3.11/site-packages/modelscope/hub/snapshot_download.py", line 137, in snapshot_download
return _snapshot_download(
^^^^^^^^^^^^^^^^^^^
File "/usr/local/python3.11.14/lib/python3.11/site-packages/modelscope/hub/snapshot_download.py", line 283, in _snapshot_download
raise ValueError(
ValueError: Cannot find the requested files in the cached path and outgoing traffic has been disabled. To enable look-ups and downloads online, set 'local_files_only' to False
```
- vLLM version: v0.16.0
- vLLM main: 15d76f74e2
---------
Signed-off-by: wangli <wangli858794774@gmail.com>
The updated test module (246 lines, 6.4 KiB, Python):
```python
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Compare the outputs of vLLM with and without aclgraph.

Run `pytest tests/multicard/test_external_launcher.py`.
"""

import os
import subprocess
import sys
from pathlib import Path
from unittest.mock import patch

import huggingface_hub
import pytest
import torch_npu
from modelscope import snapshot_download  # type: ignore

from tests.e2e.conftest import wait_until_npu_memory_free

MODELS = ["Qwen/Qwen3-0.6B"]
MOE_MODELS = ["Qwen/Qwen3-30B-A3B"]
DEVICE_NAME = torch_npu.npu.get_device_name(0)[:10]


@pytest.mark.parametrize("model", MODELS)
@patch.dict(os.environ, {"HCCL_BUFFSIZE": "500"})
def test_qwen3_external_launcher(model):
    script = Path(__file__).parent.parent.parent.parent.parent / "examples" / "offline_external_launcher.py"
    env = os.environ.copy()
    # TODO: Change to 2 when ci machine has 4 cards
    cmd = [
        sys.executable,
        str(script),
        "--model",
        model,
        "--tp-size",
        "1",
        "--node-size",
        "1",
        "--node-rank",
        "0",
        "--proc-per-node",
        "2",
        "--trust-remote-code",
    ]

    print(f"Running subprocess: {' '.join(cmd)}")
    proc = subprocess.run(
        cmd,
        env=env,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        timeout=600,
    )
    output = proc.stdout.decode(errors="ignore")

    print(output)

    assert "TP RANKS: [0]" in output
    assert "TP RANKS: [1]" in output
    assert "Generated text:" in output
    assert proc.returncode == 0


@pytest.mark.parametrize("model", MOE_MODELS)
@wait_until_npu_memory_free()
def test_qwen3_moe_external_launcher_ep_tp2(model):
    script = Path(__file__).parent.parent.parent.parent.parent / "examples" / "offline_external_launcher.py"
    env = os.environ.copy()
    # TODO: Change to 2 when ci machine has 4 cards
    cmd = [
        sys.executable,
        str(script),
        "--model",
        model,
        "--tp-size",
        "2",
        "--node-size",
        "1",
        "--node-rank",
        "0",
        "--proc-per-node",
        "2",
        "--trust-remote-code",
        "--enable-expert-parallel",
    ]

    print(f"Running subprocess: {' '.join(cmd)}")
    proc = subprocess.run(
        cmd,
        env=env,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        timeout=600,
    )
    output = proc.stdout.decode(errors="ignore")

    print(output)

    assert "TP RANKS: [0, 1]" in output
    assert "Generated text:" in output
    assert proc.returncode == 0


@patch.dict(os.environ, {"VLLM_ASCEND_ENABLE_NZ": "0"})
@wait_until_npu_memory_free()
def test_qwen3_external_launcher_with_sleepmode():
    script = Path(__file__).parent.parent.parent.parent.parent / "examples" / "offline_external_launcher.py"
    env = os.environ.copy()
    # TODO: Change to 2 when ci machine has 4 cards
    cmd = [
        sys.executable,
        str(script),
        "--model",
        "Qwen/Qwen3-8B",
        "--tp-size",
        "1",
        "--node-size",
        "1",
        "--node-rank",
        "0",
        "--proc-per-node",
        "2",
        "--trust-remote-code",
        "--enable-sleep-mode",
        "--temperature",
        "0",
        "--model-weight-gib",
        "16",
    ]

    print(f"Running subprocess: {' '.join(cmd)}")
    proc = subprocess.run(
        cmd,
        env=env,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        timeout=300,
    )
    output = proc.stdout.decode(errors="ignore")

    print(output)

    assert "Generated text:" in output
    assert "Sleep and wake up successfully!!" in output
    assert proc.returncode == 0


@patch.dict(os.environ, {"VLLM_ASCEND_ENABLE_NZ": "0"})
def test_qwen3_external_launcher_with_sleepmode_level2():
    script = Path(__file__).parent.parent.parent.parent.parent / "examples" / "offline_external_launcher.py"
    env = os.environ.copy()
    model_path = snapshot_download(
        "Qwen/Qwen3-8B",
        local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE,
    )
    # TODO: Add moe model test
    cmd = [
        sys.executable,
        str(script),
        "--model",
        model_path,
        "--tp-size",
        "1",
        "--node-size",
        "1",
        "--node-rank",
        "0",
        "--proc-per-node",
        "2",
        "--trust-remote-code",
        "--enable-sleep-mode",
        "--temperature",
        "0",
        "--model-weight-gib",
        "16",
        "--sleep-mode-level",
        "2",
    ]

    print(f"Running subprocess: {' '.join(cmd)}")
    proc = subprocess.run(
        cmd,
        env=env,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        timeout=300,
    )
    output = proc.stdout.decode(errors="ignore")

    print(output)

    assert "Generated text:" in output
    assert "Sleep and wake up successfully!!" in output
    assert proc.returncode == 0


@pytest.mark.skipif(
    DEVICE_NAME != "Ascend910B",
    reason="This test is only for Ascend910B devices.",
)
@pytest.mark.parametrize("model", MODELS)
@wait_until_npu_memory_free()
@patch.dict(os.environ, {"VLLM_ASCEND_ENABLE_MATMUL_ALLREDUCE": "1", "HCCL_BUFFSIZE": "500"})
def test_qwen3_external_launcher_with_matmul_allreduce(model):
    script = Path(__file__).parent.parent.parent.parent.parent / "examples" / "offline_external_launcher.py"
    env = os.environ.copy()
    cmd = [
        sys.executable,
        str(script),
        "--model",
        model,
        "--trust-remote-code",
    ]

    print(f"Running subprocess: {' '.join(cmd)}")
    proc = subprocess.run(
        cmd,
        env=env,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        timeout=600,
    )

    output = proc.stdout.decode(errors="ignore")
    print(output)

    assert "Generated text:" in output
    assert proc.returncode == 0
```