cleanup useless torchair logic (#4856)

This PR cleans up leftover torchair logic in the model runner. The MoGE doc is
torchair-only, so it is removed as well.
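
For context, the removed code is torchair-era indirection whose result no longer varies. A minimal sketch of the pattern, using hypothetical stand-in classes (the real removals are `check_ascend_config`, `graph_pad_size`, `_update_graph_pad_size`, and `_update_input_ids_and_positions`, visible in the diffs below):

```python
# Before: a torchair-era hook whose result no longer depends on its inputs.
class RunnerBefore:
    def _update_graph_pad_size(self, with_prefill: bool, padded_num_tokens: int) -> None:
        self.graph_pad_size = -1  # constant in the non-torchair code path

    def build_attn_metadata(self) -> dict:
        return {"graph_pad_size": self.graph_pad_size}


# After: the hook and the attribute are gone; the constant is used directly.
class RunnerAfter:
    def build_attn_metadata(self) -> dict:
        return {"graph_pad_size": -1}
```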

- vLLM version: v0.12.0
- vLLM main:
ad32e3e19c

Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
Co-authored-by: Mengqing Cao <cmq0113@163.com>
wangxiyuan
2025-12-11 11:21:13 +08:00
committed by GitHub
parent c12eb22cbe
commit bb76f7962c
7 changed files with 22 additions and 307 deletions

View File

@@ -13,7 +13,6 @@ single_node_pd_disaggregation_mooncake
multi_npu_qwen3_next
multi_npu
multi_npu_kimi-k2-thinking
multi_npu_moge
Qwen3-Dense
multi_npu_qwen3_moe
multi_npu_quantization

View File

@@ -1,235 +0,0 @@
# Multi-NPU (Pangu-Pro-MoE)
## Run vllm-ascend on Multi-NPU
Run container:
```{code-block} bash
:substitutions:
# Update the vllm-ascend image
export IMAGE=quay.io/ascend/vllm-ascend:|vllm_ascend_version|
docker run --rm \
--name vllm-ascend \
--shm-size=1g \
--device /dev/davinci0 \
--device /dev/davinci1 \
--device /dev/davinci2 \
--device /dev/davinci3 \
--device /dev/davinci_manager \
--device /dev/devmm_svm \
--device /dev/hisi_hdc \
-v /usr/local/dcmi:/usr/local/dcmi \
-v /usr/local/bin/npu-smi:/usr/local/bin/npu-smi \
-v /usr/local/Ascend/driver/lib64/:/usr/local/Ascend/driver/lib64/ \
-v /usr/local/Ascend/driver/version.info:/usr/local/Ascend/driver/version.info \
-v /etc/ascend_install.info:/etc/ascend_install.info \
-v /root/.cache:/root/.cache \
-p 8000:8000 \
-it $IMAGE bash
```
Set up environment variables:
```bash
# Set `max_split_size_mb` to reduce memory fragmentation and avoid out-of-memory errors
export PYTORCH_NPU_ALLOC_CONF=max_split_size_mb:256
```
Download the model:
```bash
git lfs install
git clone https://gitcode.com/ascend-tribe/pangu-pro-moe-model.git
```
### Online Inference on Multi-NPU
Run the following script to start the vLLM server on multi-NPU:
```bash
vllm serve /path/to/pangu-pro-moe-model \
--tensor-parallel-size 4 \
--enable-expert-parallel \
--trust-remote-code \
--max_model_len=1024 \
--enforce-eager
```
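Optionally, you can confirm the server is ready before sending requests. This assumes the standard vLLM OpenAI-compatible server, which exposes a `/health` endpoint:
```bash
# Prints 200 once the server has loaded the model and is ready to serve
curl -s -o /dev/null -w "%{http_code}\n" http://localhost:8000/health
```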
Once your server is started, you can query the model with input prompts:
:::::{tab-set}
::::{tab-item} v1/completions
```{code-block} bash
:substitutions:
export question="你是谁?"
curl http://localhost:8000/v1/completions \
-H "Content-Type: application/json" \
-d '{
"prompt": "[unused9]系统:[unused10][unused9]用户:'${question}'[unused10][unused9]助手:",
"max_tokens": 64,
"top_p": 0.95,
"top_k": 50,
"temperature": 0.6
}'
```
::::
::::{tab-item} v1/chat/completions
```{code-block} bash
:substitutions:
curl http://localhost:8000/v1/chat/completions \
-H "Content-Type: application/json" \
-d '{
"messages": [
{"role": "system", "content": ""},
{"role": "user", "content": "你是谁?"}
],
"max_tokens": "64",
"top_p": "0.95",
"top_k": "50",
"temperature": "0.6",
"add_special_tokens" : true
}'
```
::::
:::::
If the request succeeds, you should see output similar to the following:
```json
{"id":"cmpl-2cd4223228ab4be9a91f65b882e65b32","object":"text_completion","created":1751255067,"model":"/root/.cache/pangu-pro-moe-model","choices":[{"index":0,"text":" [unused16] 好的用户问我是谁我需要根据之前的设定来回答。用户提到我是华为开发的“盘古Reasoner”属于盘古大模型系列作为智能助手帮助解答问题和提供 信息支持。现在用户再次询问,可能是在确认我的身份或者测试我的回答是否一致。\n\n首先我要确保","logprobs":null,"finish_reason":"length","stop_reason":null,"prompt_logprobs":null}],"usage":{"prompt_tokens":15,"total_tokens":79,"completion_tokens":64,"prompt_tokens_details":null},"kv_transfer_params":null}
```
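If you only need the generated text, you can extract it from the response with `jq` (assuming `jq` is installed; the field names match the response shown above):
```bash
curl -s http://localhost:8000/v1/completions \
    -H "Content-Type: application/json" \
    -d '{"prompt": "[unused9]系统:[unused10][unused9]用户:'${question}'[unused10][unused9]助手:", "max_tokens": 64}' \
    | jq -r '.choices[0].text'
```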
### Offline Inference on Multi-NPU
Run the following script to execute offline inference on multi-NPU:
:::::{tab-set}
::::{tab-item} Graph Mode
```{code-block} python
:substitutions:
import gc
from transformers import AutoTokenizer
import torch
import os
from vllm import LLM, SamplingParams
from vllm.distributed.parallel_state import (destroy_distributed_environment,
                                             destroy_model_parallel)

os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn"


def clean_up():
    destroy_model_parallel()
    destroy_distributed_environment()
    gc.collect()
    torch.npu.empty_cache()


if __name__ == "__main__":
    tokenizer = AutoTokenizer.from_pretrained("/path/to/pangu-pro-moe-model", trust_remote_code=True)
    tests = [
        "Hello, my name is",
        "The future of AI is",
    ]
    prompts = []
    for text in tests:
        messages = [
            {"role": "system", "content": ""},  # Optionally customize system content
            {"role": "user", "content": text}
        ]
        prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        prompts.append(prompt)

    sampling_params = SamplingParams(temperature=0.6, top_p=0.95, top_k=40)
    llm = LLM(model="/path/to/pangu-pro-moe-model",
              tensor_parallel_size=4,
              enable_expert_parallel=True,
              distributed_executor_backend="mp",
              max_model_len=1024,
              trust_remote_code=True)

    outputs = llm.generate(prompts, sampling_params)
    for output in outputs:
        prompt = output.prompt
        generated_text = output.outputs[0].text
        print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")

    del llm
    clean_up()
```
::::
::::{tab-item} Eager Mode
```{code-block} python
:substitutions:
import gc
from transformers import AutoTokenizer
import torch
import os
from vllm import LLM, SamplingParams
from vllm.distributed.parallel_state import (destroy_distributed_environment,
                                             destroy_model_parallel)

os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn"


def clean_up():
    destroy_model_parallel()
    destroy_distributed_environment()
    gc.collect()
    torch.npu.empty_cache()


if __name__ == "__main__":
    tokenizer = AutoTokenizer.from_pretrained("/path/to/pangu-pro-moe-model", trust_remote_code=True)
    tests = [
        "Hello, my name is",
        "The future of AI is",
    ]
    prompts = []
    for text in tests:
        messages = [
            {"role": "system", "content": ""},  # Optionally customize system content
            {"role": "user", "content": text}
        ]
        prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        prompts.append(prompt)

    sampling_params = SamplingParams(temperature=0.6, top_p=0.95, top_k=40)
    llm = LLM(model="/path/to/pangu-pro-moe-model",
              tensor_parallel_size=4,
              enable_expert_parallel=True,
              distributed_executor_backend="mp",
              max_model_len=1024,
              trust_remote_code=True,
              enforce_eager=True)

    outputs = llm.generate(prompts, sampling_params)
    for output in outputs:
        prompt = output.prompt
        generated_text = output.outputs[0].text
        print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")

    del llm
    clean_up()
```
::::
:::::
If the script runs successfully, you should see output similar to the following:
```bash
Prompt: 'Hello, my name is', Generated text: ' Daniel and I am an 8th grade student at York Middle School. I'
Prompt: 'The future of AI is', Generated text: ' following you. As the technology advances, a new report from the Institute for the'
```

View File

@@ -229,7 +229,6 @@ class TestNPUPlatform(TestBase):
mock_empty_cache.assert_called_once()
mock_reset_stats.assert_called_once()
@patch("vllm_ascend.ascend_config.check_ascend_config")
@patch("vllm_ascend.ascend_config.init_ascend_config")
@patch("vllm_ascend.utils.update_aclgraph_sizes")
@patch('vllm_ascend.utils.get_ascend_device_type',
@@ -240,7 +239,7 @@ class TestNPUPlatform(TestBase):
)
def test_check_and_update_config_basic_config_update(
self, mock_init_recompute, mock_soc_version, mock_update_acl,
mock_init_ascend, mock_check_ascend):
mock_init_ascend):
mock_init_ascend.return_value = TestNPUPlatform.mock_vllm_ascend_config(
)
vllm_config = TestNPUPlatform.mock_vllm_config()
@@ -263,18 +262,15 @@ class TestNPUPlatform(TestBase):
self.platform.check_and_update_config(vllm_config)
mock_init_ascend.assert_called_once_with(vllm_config)
mock_check_ascend.assert_called_once()
@patch('vllm_ascend.utils.get_ascend_device_type',
return_value=AscendDeviceType._910_93)
@patch("vllm_ascend.ascend_config.check_ascend_config")
@patch("vllm_ascend.ascend_config.init_ascend_config")
@patch(
"vllm_ascend.core.recompute_schedule_config.RecomputeSchedulerConfig.initialize_from_config"
)
def test_check_and_update_config_no_model_config_warning(
self, mock_init_recompute, mock_init_ascend, mock_check_ascend,
mock_soc_version):
self, mock_init_recompute, mock_init_ascend, mock_soc_version):
mock_init_ascend.return_value = TestNPUPlatform.mock_vllm_ascend_config(
)
vllm_config = TestNPUPlatform.mock_vllm_config()
@@ -294,14 +290,12 @@ class TestNPUPlatform(TestBase):
@patch('vllm_ascend.utils.get_ascend_device_type',
return_value=AscendDeviceType._910_93)
@patch("vllm_ascend.ascend_config.check_ascend_config")
@patch("vllm_ascend.ascend_config.init_ascend_config")
@patch(
"vllm_ascend.core.recompute_schedule_config.RecomputeSchedulerConfig.initialize_from_config"
)
def test_check_and_update_config_enforce_eager_mode(
self, mock_init_recompute, mock_init_ascend, mock_check_ascend,
mock_soc_version):
self, mock_init_recompute, mock_init_ascend, mock_soc_version):
mock_init_ascend.return_value = TestNPUPlatform.mock_vllm_ascend_config(
)
vllm_config = TestNPUPlatform.mock_vllm_config()
@@ -333,14 +327,13 @@ class TestNPUPlatform(TestBase):
@patch('vllm_ascend.utils.get_ascend_device_type',
return_value=AscendDeviceType._910_93)
@patch("vllm_ascend.utils.update_default_aclgraph_sizes")
@patch("vllm_ascend.ascend_config.check_ascend_config")
@patch("vllm_ascend.ascend_config.init_ascend_config")
@patch(
"vllm_ascend.core.recompute_schedule_config.RecomputeSchedulerConfig.initialize_from_config"
)
def test_check_and_update_config_unsupported_compilation_level(
self, mock_init_recompute, mock_init_ascend, mock_check_ascend,
mock_update_default, mock_soc_version):
self, mock_init_recompute, mock_init_ascend, mock_update_default,
mock_soc_version):
mock_update_default.return_value = MagicMock()
mock_init_ascend.return_value = TestNPUPlatform.mock_vllm_ascend_config(
)
@@ -374,10 +367,9 @@ class TestNPUPlatform(TestBase):
"Revert me when vllm support setting cudagraph_mode on oot platform")
@patch('vllm_ascend.utils.get_ascend_device_type',
return_value=AscendDeviceType._910_93)
@patch("vllm_ascend.ascend_config.check_ascend_config")
@patch("vllm_ascend.ascend_config.init_ascend_config")
def test_check_and_update_config_unsupported_cudagraph_mode(
self, mock_init_ascend, mock_check_ascend, mock_soc_version):
self, mock_init_ascend, mock_soc_version):
mock_init_ascend.return_value = TestNPUPlatform.mock_vllm_ascend_config(
)
vllm_config = TestNPUPlatform.mock_vllm_config()
@@ -404,14 +396,12 @@ class TestNPUPlatform(TestBase):
@patch('vllm_ascend.utils.get_ascend_device_type',
return_value=AscendDeviceType._910_93)
@patch("vllm_ascend.ascend_config.check_ascend_config")
@patch("vllm_ascend.ascend_config.init_ascend_config")
@patch(
"vllm_ascend.core.recompute_schedule_config.RecomputeSchedulerConfig.initialize_from_config"
)
def test_check_and_update_config_cache_config_block_size(
self, mock_init_recompute, mock_init_ascend, mock_check_ascend,
mock_soc_version):
self, mock_init_recompute, mock_init_ascend, mock_soc_version):
mock_init_ascend.return_value = TestNPUPlatform.mock_vllm_ascend_config(
)
vllm_config = TestNPUPlatform.mock_vllm_config()
@@ -433,14 +423,12 @@ class TestNPUPlatform(TestBase):
@patch('vllm_ascend.utils.get_ascend_device_type',
return_value=AscendDeviceType._910_93)
@patch("vllm_ascend.ascend_config.check_ascend_config")
@patch("vllm_ascend.ascend_config.init_ascend_config")
@patch(
"vllm_ascend.core.recompute_schedule_config.RecomputeSchedulerConfig.initialize_from_config"
)
def test_check_and_update_config_v1_worker_class_selection(
self, mock_init_recompute, mock_init_ascend, mock_check_ascend,
mock_soc_version):
self, mock_init_recompute, mock_init_ascend, mock_soc_version):
mock_init_ascend.return_value = TestNPUPlatform.mock_vllm_ascend_config(
)
vllm_config = TestNPUPlatform.mock_vllm_config()
@@ -471,7 +459,6 @@ class TestNPUPlatform(TestBase):
"vllm_ascend.xlite.xlite_worker.XliteWorker",
)
@patch("vllm_ascend.ascend_config.check_ascend_config")
@patch("vllm_ascend.ascend_config.init_ascend_config")
@patch('vllm_ascend.utils.get_ascend_device_type',
return_value=AscendDeviceType._310P)
@@ -479,8 +466,7 @@ class TestNPUPlatform(TestBase):
"vllm_ascend.core.recompute_schedule_config.RecomputeSchedulerConfig.initialize_from_config"
)
def test_check_and_update_config_310p_no_custom_ops(
self, mock_init_recompute, mock_soc_version, mock_init_ascend,
mock_check_ascend):
self, mock_init_recompute, mock_soc_version, mock_init_ascend):
mock_init_ascend.return_value = TestNPUPlatform.mock_vllm_ascend_config(
)
vllm_config = TestNPUPlatform.mock_vllm_config()

View File

@@ -289,12 +289,3 @@ def get_ascend_config():
"Ascend config is not initialized. Please call init_ascend_config first."
)
return _ASCEND_CONFIG
def check_ascend_config(vllm_config, enforce_eager):
ascend_config = get_ascend_config()
if ascend_config.ascend_compilation_config.enable_quantization_fusion:
logger.info(
"Quantization fusion enabled! op fusion on quantization are expected. "
)

View File

@@ -26,7 +26,7 @@ from vllm.platforms import Platform, PlatformEnum
# todo: please remove it when solve cuda hard code in vllm
os.environ["VLLM_DISABLE_SHARED_EXPERTS_STREAM"] = "1"
from vllm_ascend.ascend_config import check_ascend_config, init_ascend_config
from vllm_ascend.ascend_config import init_ascend_config
from vllm_ascend.utils import refresh_block_size
# isort: off
@@ -181,7 +181,6 @@ class NPUPlatform(Platform):
else:
enforce_eager = getattr(model_config, "enforce_eager", False)
check_ascend_config(vllm_config, enforce_eager)
from vllm.config.compilation import CUDAGraphMode
if enforce_eager:
logger.info("Compilation disabled, using eager mode by default")

View File

@@ -607,7 +607,6 @@ class MtpProposer(Proposer):
attn_mask=self.runner.attn_mask,
spec_attn_mask=self.runner.spec_attn_mask,
attn_state=self.runner.attn_state,
graph_pad_size=self.runner.graph_pad_size,
decode_token_per_req=self.runner.decode_token_per_req,
)
return spec_common_attn_metadata, token_indices
@@ -762,8 +761,7 @@ class MtpProposer(Proposer):
) and aclgraph_runtime_mode == CUDAGraphMode.FULL:
graph_pad_size = num_input_tokens
else:
# Currently, runner.graph_pad_size will always be -1.
graph_pad_size = self.runner.graph_pad_size
graph_pad_size = -1
# If use fullgraph and disable_padded_drafter_batch=True, We need to
# update the graph_pad_size in common_attn_metadata, to tell the
@@ -1135,7 +1133,6 @@ class MtpProposer(Proposer):
attn_mask=self.runner.attn_mask,
spec_attn_mask=self.runner.spec_attn_mask,
attn_state=self.runner.attn_state,
graph_pad_size=self.runner.graph_pad_size,
decode_token_per_req=self.runner.decode_token_per_req,
num_computed_tokens_cpu=common_attn_metadata.
num_computed_tokens_cpu,

View File

@@ -35,7 +35,6 @@ import numpy as np
import numpy.typing as npt
import regex as re
import torch
import torch._dynamo.cache_size
import torch.distributed as dist
import torch.nn as nn
from tqdm import tqdm # type: ignore
@@ -384,8 +383,6 @@ class NPUModelRunner(LoRAModelRunnerMixin, ECConnectorModelRunnerMixin):
self.is_kv_producer = vllm_config.kv_transfer_config.is_kv_producer
self.is_kv_consumer = vllm_config.kv_transfer_config.is_kv_consumer
self._may_pad_kv_consumer_num_seq()
# Persistent batch.
self.input_ids = torch.zeros(self.max_num_tokens,
dtype=torch.int32,
@@ -656,12 +653,6 @@ class NPUModelRunner(LoRAModelRunnerMixin, ECConnectorModelRunnerMixin):
return get_spec_decode_method(self.speculative_config.method,
self.vllm_config, self.device, self)
def _may_pad_kv_consumer_num_seq(self):
# For Full Graph + MTP in a PD (Prefill/Decode) disaggregation scenario,
# we may want to pad self.max_num_seqs in kv_consumer nodes to avoid
# exceeding a sequence length limit (16 tokens) in npu_fused_infer_attention_score operation
pass
def _init_mc2_tokens_capacity(self):
# NOTE: To be clear, we need to make sure that during graph capture, the number of
# tokens is less than or equal to mc2_tokens_capacity. According to _set_cudagraph_sizes,
@@ -1661,7 +1652,6 @@ class NPUModelRunner(LoRAModelRunnerMixin, ECConnectorModelRunnerMixin):
self.with_prefill = with_prefill
self.num_tokens_across_dp = num_tokens_across_dp
self._update_graph_pad_size(with_prefill, maybe_padded_num_tokens)
attn_metadata: dict[str, Any] = {}
# Record the index of requests that should not be sampled,
@@ -1750,10 +1740,10 @@ class NPUModelRunner(LoRAModelRunnerMixin, ECConnectorModelRunnerMixin):
# then the embedding layer is not included in the ACL graph.
input_ids = self.input_ids[:num_input_tokens]
inputs_embeds = None
positions = self.positions[:num_input_tokens]
input_ids, positions = self._update_input_ids_and_positions(
input_ids, positions, num_input_tokens, with_prefill,
maybe_padded_num_tokens)
if self.uses_mrope:
positions = self.mrope_positions[:, :num_input_tokens]
if get_pp_group().is_first_rank:
intermediate_tensors = None
@@ -1943,7 +1933,6 @@ class NPUModelRunner(LoRAModelRunnerMixin, ECConnectorModelRunnerMixin):
attn_state=self.attn_state,
is_only_prefill=bool(np.all(num_valid_tokens != 1)),
max_query_len=max_num_scheduled_tokens,
graph_pad_size=self.graph_pad_size,
decode_token_per_req=self.decode_token_per_req,
cos=self.cos,
sin=self.sin,
@@ -2058,8 +2047,7 @@ class NPUModelRunner(LoRAModelRunnerMixin, ECConnectorModelRunnerMixin):
device=self.device)
return model_kwargs
def _generate_process_reqs_hidden_states(self, attn_metadata, with_prefill,
maybe_padded_num_tokens,
def _generate_process_reqs_hidden_states(self, maybe_padded_num_tokens,
input_ids, positions,
intermediate_tensors,
inputs_embeds):
@@ -2141,16 +2129,6 @@ class NPUModelRunner(LoRAModelRunnerMixin, ECConnectorModelRunnerMixin):
attn_state = AscendAttentionState.PrefillCacheHit
return attn_state
def _update_graph_pad_size(self, with_prefill, graph_pad_size):
self.graph_pad_size = -1
def _update_input_ids_and_positions(self, input_ids, positions,
num_input_tokens, with_prefill,
maybe_padded_num_tokens):
if self.uses_mrope:
positions = self.mrope_positions[:, :num_input_tokens]
return input_ids, positions
def _calc_spec_decode_metadata(
self,
num_draft_tokens: np.ndarray,
@@ -2529,8 +2507,8 @@ class NPUModelRunner(LoRAModelRunnerMixin, ECConnectorModelRunnerMixin):
self.maybe_setup_kv_connector(scheduler_output)
hidden_states = self._generate_process_reqs_hidden_states(
attn_metadata, self.with_prefill, maybe_padded_num_tokens,
input_ids, positions, intermediate_tensors, inputs_embeds)
maybe_padded_num_tokens, input_ids, positions,
intermediate_tensors, inputs_embeds)
self.maybe_wait_for_kv_save()
finished_sending, finished_recving = self.get_finished_kv_transfer(
@@ -3023,9 +3001,9 @@ class NPUModelRunner(LoRAModelRunnerMixin, ECConnectorModelRunnerMixin):
return attn_metadata
def _generate_dummy_run_hidden_states(self, with_prefill, input_ids,
positions, attn_metadata, num_tokens,
intermediate_tensors, inputs_embeds):
def _generate_dummy_run_hidden_states(self, input_ids, positions,
num_tokens, intermediate_tensors,
inputs_embeds):
hidden_states = self.model(input_ids=input_ids,
positions=positions,
intermediate_tensors=intermediate_tensors,
@@ -3246,8 +3224,8 @@ class NPUModelRunner(LoRAModelRunnerMixin, ECConnectorModelRunnerMixin):
model_instance=self.model,
weight_prefetch_method=self.weight_prefetch_method):
hidden_states = self._generate_dummy_run_hidden_states(
with_prefill, input_ids, positions, attn_metadata,
num_tokens_padded, intermediate_tensors, inputs_embeds)
input_ids, positions, num_tokens_padded,
intermediate_tensors, inputs_embeds)
dummy_compute_logits(hidden_states)
if self.drafter: