#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
# Adapted from vllm-project/vllm/vllm/worker/worker.py
#

import atexit
import functools
import math
import os
from contextlib import contextmanager, nullcontext
from enum import Enum
from threading import Lock
from typing import TYPE_CHECKING, Any, List, Optional, Tuple, Union

import torch
import torch_npu  # noqa: F401
from packaging.version import InvalidVersion, Version
from torch_npu.npu.streams import Event
from vllm.logger import logger

import vllm_ascend.envs as envs_ascend
from vllm_ascend.ascend_config import get_ascend_config

if TYPE_CHECKING:
    from vllm.config import VllmConfig
else:
    VllmConfig = None

ASCEND_QUANTIZATION_METHOD = "ascend"
SOC_VERSION_INFERENCE_SERIES = ["Ascend310P3"]
REGISTERED_ASCEND_OPS = {}

ACL_FORMAT_FRACTAL_ND = 2
ACL_FORMAT_FRACTAL_NZ = 29

_CUSTOM_OP_ENABLED = None
_IS_310P = None
_SLEEP_MODE_ENABLED = None
_CURRENT_STREAM = None
_PREFETCH_STREAM = None
_SHARED_EXPERTS_COMPUTE_STREAM = None
_ASCEND_CUSTOMOP_IS_REIGISTERED = False
_DEFAULT_BUFFER_SIZE = 200
_MIN_DP_BUFFER_SIZE = 50
_IS_MOE_MODEL = None
_IS_VL_MODEL = None
_ENABLE_SP = None
_HAS_LAYER_IDX = None
_ENABLE_NZ = None
_IS_EAGLE_MODE = None
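
# The module-level names above are lazily-initialized, process-wide caches:
# they start out as None (or a default value) and are filled in on first use
# by the helpers below (is_310p, is_enable_nz, sleep_mode_enabled,
# current_stream, ...), so repeated queries avoid re-reading build info or
# re-creating stream objects.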


def is_310p():
    global _IS_310P
    if _IS_310P is None:
        from vllm_ascend import _build_info  # type: ignore
        _IS_310P = _build_info.__soc_version__.lower().startswith("ascend310p")
    return _IS_310P


def is_enable_nz(dtype: Optional[torch.dtype] = torch.int8,
                 vllm_config: Optional[VllmConfig] = None) -> bool:
    global _ENABLE_NZ, _IS_EAGLE_MODE
    if _ENABLE_NZ is None:
        if not vllm_config:
            raise ValueError(
                "vllm_config must be provided when _ENABLE_NZ is None")
        _ENABLE_NZ = envs_ascend.VLLM_ASCEND_ENABLE_NZ and vllm_config.model_config.hf_config.model_type != "qwen3_next"
        _IS_EAGLE_MODE = (vllm_config.speculative_config is not None
                          and getattr(vllm_config.speculative_config,
                                      'method', None) in ("eagle", "eagle3"))

    if dtype in [torch.float16, torch.bfloat16, torch.float32]:
        return _ENABLE_NZ if _IS_EAGLE_MODE else False
    return _ENABLE_NZ
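
# Illustrative call site (hypothetical names): an int8 quantized weight is
# cast to the NZ layout only when is_enable_nz() allows it, e.g.
#
#     if is_enable_nz(dtype=torch.int8, vllm_config=vllm_config):
#         weight.data = torch_npu.npu_format_cast(weight.data,
#                                                 ACL_FORMAT_FRACTAL_NZ)
#
# For float dtypes the NZ layout is only kept in eagle/eagle3 speculative
# decoding mode, as implemented above.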


def sleep_mode_enabled():
    global _SLEEP_MODE_ENABLED
    if _SLEEP_MODE_ENABLED is None:
        from vllm_ascend import _build_info  # type: ignore
        _SLEEP_MODE_ENABLED = _build_info.__sleep_mode_enabled__
    return _SLEEP_MODE_ENABLED


def _round_up(x: int, align: int):
    # round up x to the next multiple of align, for example, if align is 16,
    # x will be rounded up to 16, 32, 48, etc.
    # input: 15, 16 -> output: 16
    # input: 17, 16 -> output: 32
    # input: 30, 16 -> output: 32
    # input: 33, 16 -> output: 48
    # ...
    return (x + align - 1) // align * align


def _custom_pad(x, pad_dims):
    # pad the input tensor with the per-dimension amounts given in pad_dims
    # (torch.nn.functional.pad convention)
    # input: (13, 30), pad_dims: [0, 2, 0, 3]
    # output: (16, 32)
    return torch.nn.functional.pad(x, pad_dims)


def _custom_reshape(x, target_shape):
    # reshape the input tensor to the shape of target_shape
    # input: (16, 32), target_shape: [1, 16, 2, 16]
    # output: (1, 16, 2, 16)
    return x.reshape(target_shape)


def _custom_transpose(x, dim1, dim2):
    # transpose the input tensor
    # input: (1, 16, 2, 16), dim1: 1, dim2: 2
    # output: (1, 2, 16, 16)
    return x.transpose(dim1, dim2)


def nd_to_nz_2d(in_tensor: torch.Tensor) -> torch.Tensor:
    # in_tensor: (13, 30)
    aux_dims = [1, 0, 0, 16]
    # aux_dims[1]: 16
    aux_dims[1] = _round_up(in_tensor.size(0), 16)
    # aux_dims[2]: 2
    aux_dims[2] = _round_up(in_tensor.size(1), 16) // 16

    # after: aux_dims: [1, 16, 2, 16]

    pad_dims = [0, 0, 0, 0]
    # pad_dims[1]: 2
    pad_dims[1] = _round_up(in_tensor.size(1), 16) - in_tensor.size(1)
    # pad_dims[3]: 3
    pad_dims[3] = _round_up(in_tensor.size(0), 16) - in_tensor.size(0)

    # after: pad_dims: [0, 2, 0, 3]

    # return: (1, 2, 16, 16)
    return _custom_transpose(
        _custom_reshape(_custom_pad(in_tensor, pad_dims), aux_dims), 1,
        2).contiguous()
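
# Shape-only sketch of the conversion above: a (13, 30) ND tensor is padded to
# (16, 32), reshaped to (1, 16, 2, 16) and transposed on dims 1 and 2, giving
# an NZ-blocked (1, 2, 16, 16) result, e.g.
#
#     nd_to_nz_2d(torch.randn(13, 30)).shape  # torch.Size([1, 2, 16, 16])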


def nd_to_nz_spec(mask_tensor: torch.Tensor) -> torch.Tensor:
    num_tokens = mask_tensor.shape[0]
    max_seq_len = mask_tensor.shape[1]

    tokens_pad = (num_tokens + 15) // 16 * 16
    max_seq_len_pad = (max_seq_len + 15) // 16 * 16

    mask_tensor_pad = \
        torch.zeros((1, tokens_pad, max_seq_len_pad), dtype=mask_tensor.dtype, device=mask_tensor.device)
    mask_tensor_pad[0][:num_tokens, :max_seq_len] = mask_tensor
    mask = mask_tensor_pad.reshape(
        (1, tokens_pad, max_seq_len_pad // 16, 16)).permute(0, 2, 1, 3)
    return mask
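
# Intended use (assumed shapes): an attention mask of shape
# (num_tokens, max_seq_len) has both dimensions zero-padded up to multiples of
# 16 and is returned as a (1, max_seq_len_pad // 16, tokens_pad, 16) tensor,
# e.g. a (13, 30) mask becomes a (1, 2, 16, 16) tensor.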


def aligned_16(tensor: torch.Tensor):
    """Pad the first dimension of a tensor up to a multiple of 16 (for 310P)."""

    # Get the size of the current 0th dimension
    n = tensor.size(0)

    # Calculate the aligned size
    n_aligned = ((n + 15) // 16) * 16

    # If already aligned, return the original tensor
    if n == n_aligned:
        return tensor

    # Create a new tensor with shape (n_aligned, H, W) and fill it with zeros
    new_tensor = torch.zeros(n_aligned,
                             *tensor.shape[1:],
                             dtype=tensor.dtype,
                             device=tensor.device)

    # Copy the original tensor to the first N positions of the new tensor
    new_tensor[:n] = tensor

    return new_tensor
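
# Example (illustrative): a (13, 8, 8) tensor is zero-padded to (16, 8, 8),
# while a tensor whose first dimension is already a multiple of 16 is returned
# unchanged:
#
#     aligned_16(torch.ones(13, 8, 8)).shape  # torch.Size([16, 8, 8])
#     aligned_16(torch.ones(32, 8, 8)).shape  # torch.Size([32, 8, 8])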


def try_register_lib(lib_name: str, lib_info: str = ""):
    import importlib
    import importlib.util
    try:
        module_spec = importlib.util.find_spec(lib_name)
        if module_spec is not None:
            importlib.import_module(lib_name)
            if lib_info:
                logger.info(lib_info)
    except Exception:
        pass
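
# Best-effort optional import: if the package is not installed the call is a
# silent no-op. A hypothetical example:
#
#     try_register_lib(
#         "mindie_turbo",
#         "MindIE Turbo is installed. vLLM inference will be accelerated with MindIE Turbo.")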


def enable_custom_op():
    """
    Enable lazy init for vllm_ascend_C to avoid early initialization of CANN's RTS component.
    Ensure that ASCEND_RT_VISIBLE_DEVICES can be dynamically modified before torch.npu.set_device().
    """
    global _CUSTOM_OP_ENABLED
    if _CUSTOM_OP_ENABLED is not None:
        return _CUSTOM_OP_ENABLED
    try:
        # isort: off
        # register custom ops into torch_library here
        import vllm_ascend.vllm_ascend_C  # type: ignore # noqa: F401
        # register the meta implementation for custom kernel if necessary
        import vllm_ascend.meta_registration  # type: ignore # noqa: F401
        # isort: on
        _CUSTOM_OP_ENABLED = True
    except ImportError:
        _CUSTOM_OP_ENABLED = False
        logger.warning(
            "Warning: Failed to register custom ops, all custom ops will be disabled"
        )
    return _CUSTOM_OP_ENABLED
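
# Call sites typically guard optional custom kernels with this helper, e.g.
# (illustrative, with hypothetical op and fallback names):
#
#     if enable_custom_op():
#         out = torch.ops._C.some_custom_op(x)
#     else:
#         out = fallback_impl(x)  # plain PyTorch fallback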


def find_hccl_library() -> str:
    """
    We either use the library file specified by the `HCCL_SO_PATH`
    environment variable, or we find the library file brought by PyTorch.
    After importing `torch`, `libhccl.so` can be found by `ctypes`
    automatically.
    """
    so_file = envs_ascend.HCCL_SO_PATH

    # manually load the hccl library
    if so_file:
        logger.info("Found hccl from environment variable HCCL_SO_PATH=%s",
                    so_file)
    else:
        if torch.version.cann is not None:
            so_file = "libhccl.so"
        else:
            raise ValueError("HCCL only supports Ascend NPU backends.")
        logger.info("Found hccl from library %s", so_file)
    return so_file


def current_stream() -> torch.npu.Stream:
    """
    replace `torch.npu.current_stream()` with `vllm.utils.current_stream()`.
    it turns out that `torch.npu.current_stream()` is quite expensive,
    as it will construct a new stream object at each call.
    here we patch `torch.npu.set_stream` to keep track of the current stream
    directly, so that we can avoid calling `torch.npu.current_stream()`.
    """
    global _CURRENT_STREAM
    if _CURRENT_STREAM is None:
        # when this function is called before any stream is set,
        # we return the default stream.
        _CURRENT_STREAM = torch.npu.current_stream()
    return _CURRENT_STREAM
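
# Prefer this helper over torch.npu.current_stream() in hot paths; for example
# (illustrative), recording an event on the cached stream:
#
#     event = Event()
#     event.record(current_stream())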


def prefetch_stream() -> torch.npu.Stream:
    global _PREFETCH_STREAM
    if _PREFETCH_STREAM is None:
        # lazily create a dedicated side stream for prefetch work
        # the first time this function is called.
        _PREFETCH_STREAM = torch_npu.npu.Stream()
    return _PREFETCH_STREAM


def shared_experts_compute_stream() -> torch.npu.Stream:
    global _SHARED_EXPERTS_COMPUTE_STREAM
    if _SHARED_EXPERTS_COMPUTE_STREAM is None:
        # lazily create a dedicated side stream for shared-experts computation
        # the first time this function is called.
        _SHARED_EXPERTS_COMPUTE_STREAM = torch_npu.npu.Stream()
    return _SHARED_EXPERTS_COMPUTE_STREAM


def adapt_patch(is_global_patch: bool = False):
    if is_global_patch:
        from vllm_ascend.patch import platform  # noqa: F401
    else:
        from vllm_ascend.patch import worker  # noqa: F401


@functools.cache
def vllm_version_is(target_vllm_version: str):
    if envs_ascend.VLLM_VERSION is not None:
        vllm_version = envs_ascend.VLLM_VERSION
    else:
        import vllm
        vllm_version = vllm.__version__
    try:
        return Version(vllm_version) == Version(target_vllm_version)
    except InvalidVersion:
        raise ValueError(
            f"Invalid vllm version {vllm_version} found. A dev version of vllm "
            "is probably installed. Set the environment variable VLLM_VERSION "
            "by hand to control it, and make sure the value follows the "
            "format of x.y.z.")
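
# Typical guard for version-specific code paths (illustrative):
#
#     if vllm_version_is("0.11.0"):
#         ...  # keep the 0.11.0-compatible interface
#     else:
#         ...  # use the newer interface
#
# The result is memoized per target version via functools.cache.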
def get_max_hidden_layers(hf_config) -> int:
    cfg_dict = hf_config.to_dict()
    layer_counts = []

    def _rec_find(d):
        if isinstance(d, dict):
            for k, v in d.items():
                if k == "num_hidden_layers" and isinstance(v, int):
                    layer_counts.append(v)
                else:
                    _rec_find(v)

    _rec_find(cfg_dict)
    if not layer_counts:
        raise ValueError("num_hidden_layers not found in model config.")
    return max(layer_counts)


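# Illustrative note: for multi-modal configs such as Qwen2-Audio, which expose no
# top-level num_hidden_layers, the recursive search above collects the values from
# nested sub-configs (e.g. the text and audio sub-configs) and returns the largest
# one; the exact sub-config names and layer counts depend on the model.

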
def _is_default_capture_sizes(vllm_config: VllmConfig) -> bool:
    """
    Check whether the configured capture sizes are vLLM's defaults.
    """

    cuda_graph_sizes = vllm_config.scheduler_config.cuda_graph_sizes
    if len(cuda_graph_sizes) == 1:
        default_size_capture_list = [1, 2, 4] + [
            i for i in range(8, cuda_graph_sizes[0] + 1, 8)
        ]

        if sorted(default_size_capture_list, reverse=True) == \
                vllm_config.compilation_config.cudagraph_capture_sizes:
            return True

    return False


def update_default_aclgraph_sizes(vllm_config: VllmConfig) -> None:
    """
    Update the default ACL graph capture sizes, so that the new sizes
    are more friendly to Ascend ops and hardware.
    """

    if vllm_config.model_config is None or \
        vllm_config.model_config.enforce_eager or \
        not _is_default_capture_sizes(vllm_config):
        return

    # Modify the default capture sizes for Qwen3-MoE models on DP settings.
    # This is mainly because the performance of _npu_paged_attention may degrade
    # on special shapes.
    # TODO(Angazenn): we will remove this once _npu_paged_attention is fully
    # replaced by npu_fused_infer_attention_score which does not contain such bugs.
    if vllm_config.model_config and vllm_config.model_config.hf_config.model_type == "qwen3_moe" \
        and vllm_config.parallel_config.tensor_parallel_size == 1 \
        and vllm_config.parallel_config.data_parallel_size > 1:
        max_capture_size = vllm_config.scheduler_config.cuda_graph_sizes[0]
        new_cudagraph_capture_sizes = [1, 2, 5, 10, 15, 20] + [
            i for i in range(24, max_capture_size + 1, 8)
        ]

        vllm_config.compilation_config.cudagraph_capture_sizes = new_cudagraph_capture_sizes
        vllm_config.compilation_config.init_with_cudagraph_sizes(
            new_cudagraph_capture_sizes)


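# Worked example (illustrative, assuming max_capture_size == 48): the vLLM default
# capture list [1, 2, 4, 8, 16, 24, 32, 40, 48] is replaced above with
# [1, 2, 5, 10, 15, 20, 24, 32, 40, 48] for Qwen3-MoE under full DP
# (dp_size > 1, tp_size == 1).

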
def update_aclgraph_sizes(vllm_config: VllmConfig) -> None:
    """Update ACL graph capture sizes based on hardware limitations."""
    # NOTE: Currently we can capture at most 1800 graphs, due to a limitation
    # of ACL graph. This number is bounded by the number of streams, which is
    # 2048; 248 streams are kept back as a buffer.
    # Maximum number of graphs that can be captured by ACL Graph.
    # TODO: Find out whether we need to solve allreduce function
    MAX_CAPTURE_SIZE = 1800

    # Store original configuration and temporarily clear it
    compilation_config = vllm_config.compilation_config
    original_sizes, compilation_config.cudagraph_capture_sizes = \
        compilation_config.cudagraph_capture_sizes, None

    # Calculate parallel configuration factor
    hf_config = vllm_config.model_config.hf_config
    if hasattr(hf_config, 'num_hidden_layers'):
        num_hidden_layers = hf_config.num_hidden_layers
    else:
        num_hidden_layers = get_max_hidden_layers(hf_config)
    parallel_config = vllm_config.parallel_config

    # Calculate maximum supported batch sizes considering model architecture
    resources_per_graph = num_hidden_layers + 1
    if vllm_config.speculative_config is not None:
        draft_model_hf_config = vllm_config.speculative_config.draft_model_config.hf_config
        resources_per_graph += draft_model_hf_config.num_hidden_layers + 1

    # TODO: Find out whether we need to take into account the pp_size
    num_comm_groups = sum(size > 1 for size in [
        parallel_config.data_parallel_size,
        parallel_config.tensor_parallel_size,
    ])

    if os.getenv("HCCL_OP_EXPANSION_MODE") == 'AIV':
        # TODO: Find out whether we need to take into account the pp_size
        parallel_factor = 1 + num_comm_groups + int(
            parallel_config.enable_expert_parallel) + int(
                vllm_config.additional_config.get(
                    "multistream_overlap_shared_expert", False))
        if is_moe_model(vllm_config):
            parallel_factor += (parallel_config.data_parallel_size > 1)
        else:
            # When AIV mode is enabled, the allreduce operator of a dense-layer
            # model occupies additional streams, which are buffered here.
            MAX_CAPTURE_SIZE = MAX_CAPTURE_SIZE - parallel_factor * resources_per_graph

        # Calculate maximum supported batch sizes considering model architecture
        # on the A2 hardware device. Assume the following case:
        # MAX_CAPTURE_SIZE = 1920, num_hidden_layers = 48, data_parallel_size is 1,
        # tensor_parallel_size is 4.
        # According to the formula, max_num_batch_sizes = math.floor(1920 / (48 + 1) / 2) = 19
        max_num_batch_sizes = math.floor(MAX_CAPTURE_SIZE /
                                         resources_per_graph / parallel_factor)
        logger.info(
            "Calculated maximum supported batch sizes for ACL graph: %s",
            max_num_batch_sizes)
    else:
        # The above describes an empirical formula applicable to the A2 hardware.
        # Under this configuration, HCCL employs the FFTS+ method for execution
        # unfolding, which adds only 1 concurrent stream without consuming
        # collective-communication execution-unfolding streams.
        # On A3 hardware, HCCL defaults to the AICPU method. This approach may
        # additionally allocate up to rank_size (max 16) - 1 streams per
        # collective-communication domain on the device (worst case).
        # Using the default collective-communication unfolding method on A3 will
        # lead to a significant reduction in the maximum supported sizes.
        # Therefore, the calculation formula has been modified as follows.
        # Assume the following case:
        # MAX_CAPTURE_SIZE = 1920, num_hidden_layers = 48, data_parallel_size is 1,
        # tensor_parallel_size is 4.
        # According to the formula,
        # max_num_batch_sizes = math.floor((1920 - 1 * 40) / (48 + 1) / (1 + 1 * 2)) = 12
        max_num_batch_sizes = math.floor(
            (MAX_CAPTURE_SIZE - num_comm_groups * 40) / resources_per_graph /
            (1 + num_comm_groups * 2))
        logger.info(
            "Calculated maximum supported batch sizes for ACL graph: %s",
            max_num_batch_sizes)
        logger.warning(
            "Currently, communication is performed using the FFTS+ method, which reduces "
            "the number of available streams and, as a result, limits the range of runtime "
            "shapes that can be handled. To both improve communication performance and "
            "increase the number of supported shapes, set HCCL_OP_EXPANSION_MODE=AIV."
        )

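    # Worked example of the uniform sub-sampling below (illustrative): with
    # len(original_sizes) == 9 and max_num_batch_sizes == 4, step == 8 / 3 and
    # indices == [0, 3, 5, 8], so the smallest and largest capture sizes are
    # always retained.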
    # If original sizes exceed the maximum, sample a representative subset
    if max_num_batch_sizes < len(original_sizes):
        # Sample uniformly from the original sizes
        step = (len(original_sizes) - 1) / (max_num_batch_sizes - 1)
        indices = [round(i * step) for i in range(max_num_batch_sizes)]

        # Ensure the first and last elements are preserved
        indices[0], indices[-1] = 0, len(original_sizes) - 1

        sampled_sizes = [original_sizes[i] for i in indices]
        compilation_config.init_with_cudagraph_sizes(sampled_sizes)

        logger.info(
            "Adjusted ACL graph batch sizes for %s model (layers: %d): %d → %d sizes",
            vllm_config.model_config.architectures[0],
            num_hidden_layers,
            len(original_sizes),
            len(compilation_config.
                cudagraph_capture_sizes  # type: ignore[arg-type]
                ))
    else:
        # No adjustment needed
        compilation_config.cudagraph_capture_sizes = original_sizes
        logger.info(
            "No adjustment needed for ACL graph batch sizes: %s model (layers: %d) with %d sizes",
            vllm_config.model_config.architectures[0], num_hidden_layers,
            len(original_sizes))

    # The default or user-defined cudagraph_capture_sizes may not account for the
    # num_speculative_tokens > 1 scenario. The maximum size, cudagraph_capture_sizes[0],
    # should be greater than or equal to (num_speculative_tokens + 1) * max_num_seqs,
    # otherwise the draft model will run in eager mode.
    if vllm_config.speculative_config is not None and \
            vllm_config.speculative_config.num_speculative_tokens > 1:
        num_speculative_tokens = vllm_config.speculative_config.num_speculative_tokens
        max_num_seqs = vllm_config.scheduler_config.max_num_seqs
        original_sizes, compilation_config.cudagraph_capture_sizes = \
            compilation_config.cudagraph_capture_sizes, None
        assert len(original_sizes) > 0
        if original_sizes[0] < (num_speculative_tokens + 1) * max_num_seqs:
            enlarged_sizes = [(num_speculative_tokens + 1) * size
                              for size in original_sizes]
            compilation_config.init_with_cudagraph_sizes(enlarged_sizes)
            logger.info(
                "Adjusted ACL graphs: %s → %s for speculative decoding",
                original_sizes, enlarged_sizes)
        else:
            compilation_config.cudagraph_capture_sizes = original_sizes


# TODO(wxy): Move to ops module
def dispose_tensor(x: torch.Tensor):
    # Free the tensor's storage eagerly by re-pointing it at an empty tensor
    # of the same device and dtype.
    x.set_(torch.empty((0, ), device=x.device, dtype=x.dtype))


class ProfileExecuteDuration:
    _instance = None
    _observations: List[Tuple[str, Event, Event]] = []
    _lock = Lock()

    def __new__(cls):
        with cls._lock:
            if cls._instance is None:
                cls._instance = super().__new__(cls)
                atexit.register(cls._instance.destroy)
            return cls._instance

    def destroy(self):
        with self._lock:
            self._observations.clear()

    @contextmanager
    def capture_async(self, duration_tag: str):
        if not envs_ascend.VLLM_ASCEND_MODEL_EXECUTE_TIME_OBSERVE:
            yield
            return

        observe_start = Event(enable_timing=True)
        observe_start.record()
        try:
            yield
        finally:
            observe_end = Event(enable_timing=True)
            observe_end.record()
            with self._lock:
                self._observations.append(
                    (duration_tag, observe_start, observe_end))

    def pop_captured_sync(self) -> dict:
        """Pop and synchronize all events in the observation list."""
        durations: dict[str, float] = {}
        if not envs_ascend.VLLM_ASCEND_MODEL_EXECUTE_TIME_OBSERVE:
            return durations

        while self._observations:
            with self._lock:
                tag, observe_start, observe_end = self._observations.pop()
                observe_end.synchronize()
                durations[tag] = observe_start.elapsed_time(observe_end)

        return durations


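# A minimal usage sketch (illustrative comment only, active when the
# VLLM_ASCEND_MODEL_EXECUTE_TIME_OBSERVE env var is enabled):
#
#     profiler = ProfileExecuteDuration()
#     with profiler.capture_async("prepare_input"):
#         ...  # work to be timed on the NPU
#     durations = profiler.pop_captured_sync()  # tag -> elapsed time in ms

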
def register_ascend_customop(vllm_config: Optional[VllmConfig] = None):
    """Register Ascend CustomOp.

    NOTE: if the register branch requires the model type, please use
    `vllm.config.get_current_vllm_config`, and ensure this executes after the
    model config is initialized.
    """
    global _ASCEND_CUSTOMOP_IS_REIGISTERED
    if _ASCEND_CUSTOMOP_IS_REIGISTERED:
        return
    from vllm.model_executor.custom_op import CustomOp

    from vllm_ascend.models.layers.mla import AscendMultiHeadLatentAttention
    from vllm_ascend.ops.activation import AscendQuickGELU, AscendSiluAndMul
    from vllm_ascend.ops.common_fused_moe import (AscendFusedMoE,
                                                  AscendSharedFusedMoE)
    from vllm_ascend.ops.layernorm import AscendGemmaRMSNorm, AscendRMSNorm
    from vllm_ascend.ops.linear import (AscendColumnParallelLinear,
                                        AscendMergedColumnParallelLinear,
                                        AscendQKVParallelLinear,
                                        AscendReplicatedLinear,
                                        AscendRowParallelLinear)
    from vllm_ascend.ops.rotary_embedding import (
        AscendDeepseekScalingRotaryEmbedding, AscendMRotaryEmbedding,
        AscendRotaryEmbedding, AscendYaRNRotaryEmbedding)
    from vllm_ascend.ops.vocab_parallel_embedding import (
        AscendLogitsProcessor, AscendParallelLMHead,
        AscendVocabParallelEmbedding)

    global REGISTERED_ASCEND_OPS
    REGISTERED_ASCEND_OPS = {
        "QuickGELU": AscendQuickGELU,
        "SiluAndMul": AscendSiluAndMul,
        "RotaryEmbedding": AscendRotaryEmbedding,
        "MRotaryEmbedding": AscendMRotaryEmbedding,
        "ColumnParallelLinear": AscendColumnParallelLinear,
        "RowParallelLinear": AscendRowParallelLinear,
        "YaRNScalingRotaryEmbedding": AscendYaRNRotaryEmbedding,
        "MergedColumnParallelLinear": AscendMergedColumnParallelLinear,
        "QKVParallelLinear": AscendQKVParallelLinear,
        "ReplicatedLinear": AscendReplicatedLinear,
        "DeepseekScalingRotaryEmbedding": AscendDeepseekScalingRotaryEmbedding,
        "VocabParallelEmbedding": AscendVocabParallelEmbedding,
        "ParallelLMHead": AscendParallelLMHead,
        "LogitsProcessor": AscendLogitsProcessor,
        "RMSNorm": AscendRMSNorm,
        "GemmaRMSNorm": AscendGemmaRMSNorm,
        "FusedMoE": AscendFusedMoE,
        "SharedFusedMoE": AscendSharedFusedMoE,
        "MultiHeadLatentAttention": AscendMultiHeadLatentAttention,
    }

    for name, op_cls in REGISTERED_ASCEND_OPS.items():
        CustomOp.register_oot(_decorated_op_cls=op_cls, name=name)

    # NOTE: Keep this at last to ensure all custom actions are registered
    _ASCEND_CUSTOMOP_IS_REIGISTERED = True


# TODO(zzzzwwjj): Currently there is no clear SOC_VERSION policy for A2 and A3 in CANN.
# So we get the version dynamically. In the future, we should get the version info
# from _build_info like 310p does.
class AscendSocVersion(Enum):
    A2 = 0
    A3 = 1
    UNDEFINED = 2


_ascend_soc_version = None


def init_ascend_soc_version():
    soc_version = torch_npu.npu.get_soc_version()
    global _ascend_soc_version
    if 220 <= soc_version <= 225:
        _ascend_soc_version = AscendSocVersion.A2
    elif 250 <= soc_version <= 255:
        _ascend_soc_version = AscendSocVersion.A3
    else:
        _ascend_soc_version = AscendSocVersion.UNDEFINED


def get_ascend_soc_version():
    global _ascend_soc_version
    assert _ascend_soc_version is not None
    return _ascend_soc_version


def lmhead_tp_enable() -> bool:
    return get_ascend_config().lmhead_tensor_parallel_size is not None


def oproj_tp_enable() -> bool:
    return get_ascend_config().oproj_tensor_parallel_size is not None


def mlp_tp_enable() -> bool:
    return envs_ascend.VLLM_ASCEND_ENABLE_MLP_OPTIMIZE


def matmul_allreduce_enable() -> bool:
    return envs_ascend.VLLM_ASCEND_ENABLE_MATMUL_ALLREDUCE


def dense_optim_enable() -> bool:
    return envs_ascend.VLLM_ASCEND_ENABLE_DENSE_OPTIMIZE


def enable_sp(vllm_config=None) -> bool:
    global _ENABLE_SP
    if _ENABLE_SP is None:
        if vllm_config is None:
            from vllm.config import get_current_vllm_config
            vllm_config = get_current_vllm_config()
        _ENABLE_SP = (
            vllm_config.compilation_config.pass_config.
            enable_sequence_parallelism
            # Flash comm 1 should be enabled by the env VLLM_ASCEND_ENABLE_FLASHCOMM1.
            or envs_ascend.VLLM_ASCEND_ENABLE_FLASHCOMM1
            # The env VLLM_ASCEND_ENABLE_FLASHCOMM is retained here for backward compatibility.
            or bool(int(os.getenv("VLLM_ASCEND_ENABLE_FLASHCOMM", '0'))))

    return _ENABLE_SP


# TODO: remove it after vllm has this func
def shared_expert_dp_enabled() -> bool:
    return get_ascend_config().enable_shared_expert_dp or enable_sp()


def is_moe_model(vllm_config: VllmConfig):
    """Check whether the model is a MoE model based on its config."""
    global _IS_MOE_MODEL
    if _IS_MOE_MODEL is None:
        model_configs = vllm_config.model_config.hf_config.to_dict()
        _IS_MOE_MODEL = _is_contain_expert(model_configs)
    return _IS_MOE_MODEL


def _is_contain_expert(config: Any):
    if isinstance(config, dict):
        for k, v in config.items():
            if "expert" in str(k):
                return True
            if _is_contain_expert(v):
                return True
    return False


def is_vl_model(vllm_config: VllmConfig):
    """Check whether the model is a VL model based on its config."""
    global _IS_VL_MODEL
    if _IS_VL_MODEL is None and vllm_config.model_config:
        model_configs = vllm_config.model_config.hf_config.to_dict()
        _IS_VL_MODEL = "VL" in model_configs["architectures"][0]
    return _IS_VL_MODEL


def weak_ref_tensor(tensor: Any) -> Any:
    """
    Create a weak reference to a tensor.
    The new tensor will share the same data as the original tensor,
    but will not keep the original tensor alive.
    """
    if isinstance(tensor, torch.Tensor):
        return torch.ops._C_ascend.weak_ref_tensor(tensor)
    else:
        return tensor


def weak_ref_tensors(
    tensors: Union[torch.Tensor, list[torch.Tensor], tuple[torch.Tensor]]
) -> Union[torch.Tensor, list[Any], tuple[Any], Any]:
    """
    Convenience function to create weak references to tensors,
    for a single tensor, a list of tensors or a tuple of tensors.

    This function should be used in the following scenario:
    when a tensor is created during graph capture and is held by a method
    that's not part of the graph, we don't really need to store it, but we
    **do need** its buffer pointer. If we don't handle this, it cannot
    be garbage collected, leading to a memory leak. To avoid this,
    we should create a weak reference to the tensor.
    """
    if isinstance(tensors, torch.Tensor):
        return weak_ref_tensor(tensors)
    if isinstance(tensors, list):
        return [weak_ref_tensor(t) for t in tensors]
    if isinstance(tensors, tuple):
        return tuple(weak_ref_tensor(t) for t in tensors)
    raise ValueError("Invalid type for tensors")


def npu_stream_switch(target_stream: torch.npu.Stream,
                      *,
                      enabled: bool = True):
    """
    Switch to the target stream if enabled is True.
    Otherwise, do nothing.
    """
    if not enabled:
        return nullcontext()
    assert target_stream is not None
    return torch.npu.stream(target_stream)


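# A minimal usage sketch (illustrative comment only): overlap auxiliary work with
# the default stream by combining npu_stream_switch with one of the lazily-created
# side streams above, e.g.
#
#     with npu_stream_switch(prefetch_stream(), enabled=do_prefetch):
#         ...  # kernels issued here run on the prefetch stream

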
def create_hccl_pg_options(group_name: str):
    options = torch_npu._C._distributed_c10d.ProcessGroupHCCL.Options()
    hccl_config = get_hccl_config_for_pg_options(group_name)
    if hccl_config is not None:
        options.hccl_config = hccl_config
    return options


def get_hccl_config_for_pg_options(group_name: str) -> Optional[dict]:
    """
    Get HCCL process group options for the given communication group name.

    Args:
        group_name: Name of the communication group

    Returns:
        The HCCL pg_options dict, or None for the mc2 group
    """
    # FIXME: Current mc2 operators only perform communication space partitioning
    # based on the HCCL_BUFFSIZE configuration. Using pg_options with the mc2 group
    # would result in memory misalignment problems.
    if group_name and "mc2" in group_name:
        return None
    hccl_config_map = {
        "dp": {
            "hccl_buffer_size": calculate_dp_buffer_size()
        },
    }
    return hccl_config_map.get(group_name, get_default_buffer_config())


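# Illustrative note: with the mapping above, create_hccl_pg_options("dp") sizes the
# HCCL buffer via calculate_dp_buffer_size(), any group name containing "mc2" gets
# no pg_options at all, and every other group falls back to get_default_buffer_config().

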
def get_default_buffer_config() -> dict:
    return {"hccl_buffer_size": _DEFAULT_BUFFER_SIZE}


def calculate_dp_buffer_size() -> int:
    """
    Formula for the dp buffer size (in MB):
    (dp_size + 2) int32 values, where the extra 2 are the
    with_prefill and enable_dbo flags, rounded up to a whole MB.
    """
    from vllm.config import get_current_vllm_config
    vllm_config = get_current_vllm_config()
    dp_size = vllm_config.parallel_config.data_parallel_size
    int32_size = torch.iinfo(torch.int32).bits // 8
    dp_buffer_size = math.ceil((dp_size + 2) * int32_size / (1024 * 1024))
    return max(dp_buffer_size, _MIN_DP_BUFFER_SIZE)


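# Worked example (illustrative): with dp_size == 16, the payload is
# (16 + 2) * 4 bytes == 72 bytes, so math.ceil(72 / (1024 * 1024)) == 1 MB,
# and the returned value is max(1, _MIN_DP_BUFFER_SIZE).

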
# Currently, on A2, setting the environment variables HCCL_INTRA_PCIE_ENABLE=1
# and HCCL_INTRA_ROCE_ENABLE=0 can reduce cross-machine communication traffic and
# significantly improve communication performance of the MC2 ops dispatch/combine.
def is_hierarchical_communication_enabled():
    return (os.getenv("HCCL_INTRA_ROCE_ENABLE", "") == "0"
            and os.getenv("HCCL_INTRA_PCIE_ENABLE", "") == "1")


def has_layer_idx(model_instance: torch.nn.Module) -> bool:
    if model_instance is None:
        return False

    # Cache the result globally: the answer only depends on the model structure.
    global _HAS_LAYER_IDX
    if _HAS_LAYER_IDX is None:
        _HAS_LAYER_IDX = hasattr(model_instance, "model") and \
            hasattr(model_instance.model, "start_layer")
    return _HAS_LAYER_IDX