### What this PR does / why we need it?
1. For every part of the current test module that downloads models via ModelScope, pass the `local_files_only` parameter to force offline mode; this ensures CI will not fail due to network instability (see the sketch below).
2. Install modelscope from a pinned commit until its next release.
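Concretely, every affected download call now threads the offline flag through; a minimal sketch of the pattern (the `Qwen/Qwen3-0.6B` repo id is illustrative):
```python
import huggingface_hub
from modelscope import snapshot_download

# huggingface_hub.constants.HF_HUB_OFFLINE is True when the HF_HUB_OFFLINE
# env var is set, so the download resolves from the local cache only and
# never touches the network.
model_dir = snapshot_download(
    "Qwen/Qwen3-0.6B",  # illustrative repo id
    local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE,
)
```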
### Does this PR introduce _any_ user-facing change?
### How was this patch tested?
Check that offline mode works, both via the `HF_HUB_OFFLINE` env var and via the `local_files_only` argument:
1) Set the env var:
```shell
export HF_HUB_OFFLINE=1
```
2) Run the script:
```python
from transformers import PretrainedConfig
import huggingface_hub
from modelscope.utils.hf_util import patch_hub
patch_hub()
model="Qwen/Qwen3-0.6B"
kwargs = {}
config_dict, _ = PretrainedConfig.get_config_dict(
model,
trust_remote_code=True,
local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE,
**kwargs,
)
print(config_dict)
```
It works as expected:
```shell
2026-03-06 06:40:12,546 - modelscope - WARNING - We can not confirm the cached file is for revision: master
The argument `trust_remote_code` is to be used with Auto classes. It has no effect here and is ignored.
{'architectures': ['Qwen3ForCausalLM'], 'attention_bias': False, 'attention_dropout': 0.0, 'bos_token_id': 151643, 'eos_token_id': 151645, 'head_dim': 128, 'hidden_act': 'silu', 'hidden_size': 1024, 'initializer_range': 0.02, 'intermediate_size': 3072, 'max_position_embeddings': 40960, 'max_window_layers': 28, 'model_type': 'qwen3', 'num_attention_heads': 16, 'num_hidden_layers': 28, 'num_key_value_heads': 8, 'rms_norm_eps': 1e-06, 'rope_scaling': None, 'rope_theta': 1000000, 'sliding_window': None, 'tie_word_embeddings': True, 'torch_dtype': 'bfloat16', 'transformers_version': '4.51.0', 'use_cache': True, 'use_sliding_window': False, 'vocab_size': 151936, '_commit_hash': None}
```
3) Test a model repo that is not cached locally while the env var `HF_HUB_OFFLINE` is set:
```python
from transformers import PretrainedConfig
import huggingface_hub
from modelscope.utils.hf_util import patch_hub
patch_hub()
model="FireRedTeam/FireRed-OCR"
kwargs = {}
config_dict, _ = PretrainedConfig.get_config_dict(
model,
trust_remote_code=True,
local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE,
**kwargs,
)
print(config_dict)
```
The call fails as expected, since the repo is not cached and outgoing traffic is disabled:
```shell
File "/workspace/demo.py", line 12, in <module>
config_dict, _ = PretrainedConfig.get_config_dict(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/python3.11.14/lib/python3.11/site-packages/modelscope/utils/hf_util/patcher.py", line 189, in patch_get_config_dict
model_dir = get_model_dir(pretrained_model_name_or_path,
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/python3.11.14/lib/python3.11/site-packages/modelscope/utils/hf_util/patcher.py", line 164, in get_model_dir
model_dir = snapshot_download(
^^^^^^^^^^^^^^^^^^
File "/usr/local/python3.11.14/lib/python3.11/site-packages/modelscope/hub/snapshot_download.py", line 137, in snapshot_download
return _snapshot_download(
^^^^^^^^^^^^^^^^^^^
File "/usr/local/python3.11.14/lib/python3.11/site-packages/modelscope/hub/snapshot_download.py", line 283, in _snapshot_download
raise ValueError(
ValueError: Cannot find the requested files in the cached path and outgoing traffic has been disabled. To enable look-ups and downloads online, set 'local_files_only' to False
```
- vLLM version: v0.16.0
- vLLM main: 15d76f74e2
---------
Signed-off-by: wangli <wangli858794774@gmail.com>
```python
import base64
import os

import huggingface_hub
import requests
from modelscope import snapshot_download  # type: ignore

# Fetch the test image from the ModelScope dataset; when HF_HUB_OFFLINE is
# set, resolve it from the local cache only.
mm_dir = snapshot_download(
    "vllm-ascend/mm_request",
    repo_type="dataset",
    local_files_only=huggingface_hub.constants.HF_HUB_OFFLINE,
)
image_path = os.path.join(mm_dir, "test_mm2.jpg")
with open(image_path, "rb") as image_file:
    image_data = base64.b64encode(image_file.read()).decode("utf-8")

# Chat-completions payload with the image embedded as a base64 data URL.
data = {
    "messages": [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "What is the content of this image?"},
                {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{image_data}"}},
            ],
        }
    ],
    "eos_token_id": [1, 106],
    "pad_token_id": 0,
    "top_k": 64,
    "top_p": 0.95,
    "max_tokens": 8192,
    "stream": False,
}

headers = {"Accept": "application/json", "Content-Type": "application/json"}


def send_image_request(model, server):
    # POST the payload to the server's OpenAI-compatible chat endpoint and
    # make sure the response is non-empty.
    data["model"] = model
    url = server.url_for("v1", "chat", "completions")
    response = requests.post(url, headers=headers, json=data)
    print("Status Code:", response.status_code)
    response_json = response.json()
    print("Response:", response_json)
    assert response_json["choices"][0]["message"]["content"], "empty response"
```
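For context, a hypothetical call site for `send_image_request` (the `RemoteOpenAIServer` fixture and the model id below are assumptions modeled on vLLM's e2e test utilities, not part of this patch):
```python
# Hypothetical usage sketch -- the server fixture and model id are assumptions.
model = "Qwen/Qwen2.5-VL-3B-Instruct"
with RemoteOpenAIServer(model, ["--max-model-len", "8192"]) as server:
    send_image_request(model, server)
```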