[CI/UT] Unify model usage via ModelScope in CI (#1207)
### What this PR does / why we need it?
Unify model usage in CI via ModelScope: CI jobs and e2e tests now pull models and LoRA adapters from the ModelScope hub (`VLLM_USE_MODELSCOPE: True`) instead of the Hugging Face mirror.

### Does this PR introduce _any_ user-facing change?
No.

### How was this patch tested?
CI passed.

Signed-off-by: hfadzxy <starmoon_zhang@163.com>
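All of the changes below converge on one switch: set `VLLM_USE_MODELSCOPE=True` in the job environment and let vLLM resolve model identifiers through ModelScope rather than the Hugging Face mirror. A minimal sketch of what the CI jobs now rely on (the model id here is only an illustration, not one of the models exercised by these tests):

```python
# Sketch only: how a job that sets VLLM_USE_MODELSCOPE=True behaves.
# The model id below is illustrative; the CI tests use their own models.
import os

os.environ["VLLM_USE_MODELSCOPE"] = "True"  # same switch the workflows set in `env:`

from vllm import LLM, SamplingParams  # import after the env var so vLLM picks it up

llm = LLM(model="Qwen/Qwen2.5-0.5B-Instruct", max_model_len=1024)
outputs = llm.generate(["Hello, my name is"], SamplingParams(max_tokens=16))
print(outputs[0].outputs[0].text)
```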
#### .github/workflows/accuracy_test.yaml (3 changes)

```diff
@@ -125,10 +125,9 @@ jobs:
     container:
       image: m.daocloud.io/quay.io/ascend/cann:8.1.rc1-910b-ubuntu22.04-py3.10
       env:
-        HF_ENDPOINT: https://hf-mirror.com
-        HF_TOKEN: ${{ secrets.HF_TOKEN }}
         DATASET_SOURCE: ModelScope
+        VLLM_USE_MODELSCOPE: True
         USE_MODELSCOPE_HUB: 1
     # 1. If version specified (work_dispatch), do specified branch accuracy test
     # 2. If no version (labeled PR), do accuracy test by default ref:
     # The branch, tag or SHA to checkout. When checking out the repository that
```
#### .github/workflows/nightly_benchmarks.yaml (3 changes)

```diff
@@ -69,8 +69,7 @@ jobs:
         --device /dev/devmm_svm
         --device /dev/hisi_hdc
       env:
-        HF_ENDPOINT: https://hf-mirror.com
-        HF_TOKEN: ${{ secrets.HF_TOKEN }}
+        VLLM_USE_MODELSCOPE: True
        ES_OM_DOMAIN: ${{ secrets.ES_OM_DOMAIN }}
        ES_OM_AUTHORIZATION: ${{ secrets.ES_OM_AUTHORIZATION }}
        VLLM_USE_V1: ${{ matrix.vllm_use_v1 }}
```
#### .github/workflows/vllm_ascend_test.yaml (17 changes)

```diff
@@ -209,6 +209,7 @@ jobs:
       image: m.daocloud.io/quay.io/ascend/cann:8.1.rc1-910b-ubuntu22.04-py3.10
       env:
         VLLM_LOGGING_LEVEL: ERROR
+        VLLM_USE_MODELSCOPE: True
     steps:
       - name: Check npu and CANN info
         run: |
@@ -257,9 +258,7 @@ jobs:
           VLLM_USE_MODELSCOPE: True
         run: |
           pytest -sv tests/e2e/singlecard/test_offline_inference.py
-          # TODO: switch hf to modelscope
-          VLLM_USE_MODELSCOPE=False HF_ENDPOINT=https://hf-mirror.com \
-          pytest -sv tests/e2e/singlecard/test_ilama_lora.py
+          pytest -sv tests/e2e/singlecard/test_ilama_lora.py
           pytest -sv tests/e2e/singlecard/test_guided_decoding.py
           pytest -sv tests/e2e/singlecard/test_camem.py
           pytest -sv tests/e2e/singlecard/test_embedding.py
@@ -277,9 +276,7 @@ jobs:
           VLLM_USE_MODELSCOPE: True
         run: |
           pytest -sv tests/e2e/singlecard/test_offline_inference.py
-          # TODO: switch hf to modelscope
-          VLLM_USE_MODELSCOPE=False HF_ENDPOINT=https://hf-mirror.com \
-          pytest -sv tests/e2e/singlecard/test_ilama_lora.py
+          pytest -sv tests/e2e/singlecard/test_ilama_lora.py
           pytest -sv tests/e2e/singlecard/test_guided_decoding.py
           pytest -sv tests/e2e/singlecard/test_camem.py
           pytest -sv tests/e2e/singlecard/test_prompt_embedding.py
@@ -357,9 +354,7 @@ jobs:
           VLLM_WORKER_MULTIPROC_METHOD: spawn
           VLLM_USE_MODELSCOPE: True
         run: |
-          # TODO: switch hf to modelscope
-          VLLM_USE_MODELSCOPE=False HF_ENDPOINT=https://hf-mirror.com \
-          pytest -sv tests/e2e/multicard/test_ilama_lora_tp2.py
+          pytest -sv tests/e2e/multicard/test_ilama_lora_tp2.py
           # Fixme: run VLLM_USE_MODELSCOPE=True pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py will raise error.
           # To avoid oom, we need to run the test in a single process.
           pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py::test_models_distributed_DeepSeek_multistream_moe
@@ -380,9 +375,7 @@ jobs:
           VLLM_USE_V1: 0
           VLLM_USE_MODELSCOPE: True
         run: |
-          # TODO: switch hf to modelscope
-          VLLM_USE_MODELSCOPE=False HF_ENDPOINT=https://hf-mirror.com \
-          pytest -sv tests/e2e/multicard/test_ilama_lora_tp2.py
+          pytest -sv tests/e2e/multicard/test_ilama_lora_tp2.py
           # Fixme: run VLLM_USE_MODELSCOPE=True pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py will raise error.
           # To avoid oom, we need to run the test in a single process.
           pytest -sv tests/e2e/multicard/test_offline_inference_distributed.py::test_models_distributed_QwQ
```
```diff
@@ -50,9 +50,8 @@ jobs:
       # TODO(yikun): Remove m.daocloud.io prefix when infra proxy ready
       image: m.daocloud.io/quay.io/ascend/cann:8.1.rc1-910b-ubuntu22.04-py3.10
       env:
-        HF_ENDPOINT: https://hf-mirror.com
-        HF_TOKEN: ${{ secrets.HF_TOKEN }}
         VLLM_LOGGING_LEVEL: ERROR
+        VLLM_USE_MODELSCOPE: True
     steps:
       - name: Check npu and CANN info
         run: |
```
#### .github/workflows/vllm_ascend_test_pd.yaml (3 changes)

```diff
@@ -64,8 +64,7 @@ jobs:
         --device /dev/devmm_svm
         --device /dev/hisi_hdc
       env:
-        HF_ENDPOINT: https://hf-mirror.com
-        HF_TOKEN: ${{ secrets.HF_TOKEN }}
+        VLLM_USE_MODELSCOPE: True
     steps:
       - name: Check npu and CANN info
         run: |
```
```diff
@@ -295,7 +295,7 @@ main() {
   export VLLM_LOG_LEVEL="WARNING"
 
   # set env
-  export HF_ENDPOINT="https://hf-mirror.com"
+  export VLLM_USE_MODELSCOPE=True
 
   # prepare for benchmarking
   cd benchmarks || exit 1
```
#### tests/conftest.py

```diff
@@ -25,7 +25,7 @@ from typing import Any, List, Optional, Tuple, TypeVar, Union
 import numpy as np
 import pytest
 import torch
-from huggingface_hub import snapshot_download
+from modelscope import snapshot_download  # type: ignore[import-untyped]
 from PIL import Image
 from torch import nn
 from transformers import (AutoConfig, AutoModelForCausalLM, AutoTokenizer,
@@ -387,7 +387,7 @@ def example_prompts() -> list[str]:
 
 @pytest.fixture(scope="session")
 def ilama_lora_files():
-    return snapshot_download(repo_id="jeeejeee/ilama-text2sql-spider")
+    return snapshot_download(repo_id="vllm-ascend/ilama-text2sql-spider")
 
 
 class HfRunner:
```
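With the fixture pointed at ModelScope, resolving the LoRA adapter locally is a single `snapshot_download` call. A hedged sketch of the same idea outside pytest (`resolve_ilama_lora_dir` is a hypothetical helper, not code from this PR):

```python
# Sketch: resolve the LoRA adapter the same way the updated fixture does.
from modelscope import snapshot_download  # type: ignore


def resolve_ilama_lora_dir() -> str:
    # Downloads (or reuses the cached copy of) the adapter from the
    # vllm-ascend org on ModelScope and returns the local directory path.
    return snapshot_download(repo_id="vllm-ascend/ilama-text2sql-spider")


if __name__ == "__main__":
    print(resolve_ilama_lora_dir())
```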
#### tests/e2e/multicard/test_ilama_lora_tp2.py

```diff
@@ -1,4 +1,5 @@
 import pytest
+from modelscope import snapshot_download  # type: ignore
 
 from tests.conftest import VllmRunner
 from tests.e2e.singlecard.test_ilama_lora import (EXPECTED_LORA_OUTPUT,
@@ -7,7 +8,7 @@ from tests.e2e.singlecard.test_ilama_lora import (EXPECTED_LORA_OUTPUT,
 
 @pytest.mark.parametrize("distributed_executor_backend", ["mp"])
 def test_ilama_lora_tp2(distributed_executor_backend, ilama_lora_files):
-    with VllmRunner(model_name=MODEL_PATH,
+    with VllmRunner(snapshot_download(MODEL_PATH),
                     enable_lora=True,
                     max_loras=4,
                     max_model_len=1024,
```
#### tests/e2e/singlecard/test_ilama_lora.py

```diff
@@ -1,10 +1,11 @@
 # SPDX-License-Identifier: Apache-2.0
 import vllm
+from modelscope import snapshot_download  # type: ignore
 from vllm.lora.request import LoRARequest
 
 from tests.conftest import VllmRunner
 
-MODEL_PATH = "ArthurZ/ilama-3.2-1B"
+MODEL_PATH = "vllm-ascend/ilama-3.2-1B"
 
 PROMPT_TEMPLATE = """I want you to act as a SQL terminal in front of an example database, you need only to return the sql command to me.Below is an instruction that describes a task, Write a response that appropriately completes the request.\n"\n##Instruction:\nconcert_singer contains tables such as stadium, singer, concert, singer_in_concert. Table stadium has columns such as Stadium_ID, Location, Name, Capacity, Highest, Lowest, Average. Stadium_ID is the primary key.\nTable singer has columns such as Singer_ID, Name, Country, Song_Name, Song_release_year, Age, Is_male. Singer_ID is the primary key.\nTable concert has columns such as concert_ID, concert_Name, Theme, Stadium_ID, Year. concert_ID is the primary key.\nTable singer_in_concert has columns such as concert_ID, Singer_ID. concert_ID is the primary key.\nThe Stadium_ID of concert is the foreign key of Stadium_ID of stadium.\nThe Singer_ID of singer_in_concert is the foreign key of Singer_ID of singer.\nThe concert_ID of singer_in_concert is the foreign key of concert_ID of concert.\n\n###Input:\n{query}\n\n###Response:""" # noqa: E501
 
@@ -44,7 +45,7 @@ def do_sample(llm: vllm.LLM, lora_path: str, lora_id: int) -> list[str]:
 
 
 def test_ilama_lora(ilama_lora_files):
-    with VllmRunner(model_name=MODEL_PATH,
+    with VllmRunner(snapshot_download(MODEL_PATH),
                     enable_lora=True,
                     max_loras=4,
                     max_model_len=1024,
```
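The same pattern, shown with plain vLLM instead of the repo's `VllmRunner` test helper: download the weights from ModelScope first, then hand the local path to the engine so nothing is fetched from Hugging Face at load time (a hedged sketch, not the test's actual code):

```python
# Sketch with plain vLLM (the tests use the repo's VllmRunner wrapper instead).
from modelscope import snapshot_download  # type: ignore
from vllm import LLM

MODEL_PATH = "vllm-ascend/ilama-3.2-1B"  # ModelScope model id introduced by this PR

# snapshot_download returns a local directory, so vLLM loads from disk and
# never needs the Hugging Face endpoint or token.
local_model_dir = snapshot_download(MODEL_PATH)
llm = LLM(model=local_model_dir,
          enable_lora=True,
          max_loras=4,
          max_model_len=1024)
```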