Clean up useless torchair logic (#4856)

This PR cleans up useless torchair logic in the model runner. The moge doc is
only for torchair, so it is removed as well.

- vLLM version: v0.12.0
- vLLM main: ad32e3e19c

Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
Co-authored-by: Mengqing Cao <cmq0113@163.com>
wangxiyuan committed 2025-12-11 11:21:13 +08:00 (via GitHub)
parent c12eb22cbe
commit bb76f7962c
7 changed files with 22 additions and 307 deletions


@@ -289,12 +289,3 @@ def get_ascend_config():
             "Ascend config is not initialized. Please call init_ascend_config first."
         )
     return _ASCEND_CONFIG
-
-
-def check_ascend_config(vllm_config, enforce_eager):
-    ascend_config = get_ascend_config()
-
-    if ascend_config.ascend_compilation_config.enable_quantization_fusion:
-        logger.info(
-            "Quantization fusion enabled! op fusion on quantization are expected. "
-        )
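With check_ascend_config removed, the config module keeps only the init/get pair. Below is a minimal sketch of the surviving accessor pattern; the singleton name and raise site follow the hunk above, while the exception type and the actual AscendConfig construction are assumptions:

_ASCEND_CONFIG = None

def init_ascend_config(vllm_config):
    # Sketch only: the real code builds an AscendConfig from vllm_config;
    # a bare object stands in here.
    global _ASCEND_CONFIG
    _ASCEND_CONFIG = object()
    return _ASCEND_CONFIG

def get_ascend_config():
    if _ASCEND_CONFIG is None:
        raise RuntimeError(  # exception type assumed
            "Ascend config is not initialized. Please call init_ascend_config first.")
    return _ASCEND_CONFIG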


@@ -26,7 +26,7 @@ from vllm.platforms import Platform, PlatformEnum
 
 # todo: please remove it when solve cuda hard code in vllm
 os.environ["VLLM_DISABLE_SHARED_EXPERTS_STREAM"] = "1"
-from vllm_ascend.ascend_config import check_ascend_config, init_ascend_config
+from vllm_ascend.ascend_config import init_ascend_config
 from vllm_ascend.utils import refresh_block_size
 
 # isort: off
@@ -181,7 +181,6 @@ class NPUPlatform(Platform):
         else:
             enforce_eager = getattr(model_config, "enforce_eager", False)
-        check_ascend_config(vllm_config, enforce_eager)
 
         from vllm.config.compilation import CUDAGraphMode
         if enforce_eager:
             logger.info("Compilation disabled, using eager mode by default")


@@ -607,7 +607,6 @@ class MtpProposer(Proposer):
             attn_mask=self.runner.attn_mask,
             spec_attn_mask=self.runner.spec_attn_mask,
             attn_state=self.runner.attn_state,
-            graph_pad_size=self.runner.graph_pad_size,
             decode_token_per_req=self.runner.decode_token_per_req,
         )
         return spec_common_attn_metadata, token_indices
@@ -762,8 +761,7 @@ class MtpProposer(Proposer):
         ) and aclgraph_runtime_mode == CUDAGraphMode.FULL:
             graph_pad_size = num_input_tokens
         else:
-            # Currently, runner.graph_pad_size will always be -1.
-            graph_pad_size = self.runner.graph_pad_size
+            graph_pad_size = -1
 
         # If use fullgraph and disable_padded_drafter_batch=True, We need to
         # update the graph_pad_size in common_attn_metadata, to tell the
@@ -1135,7 +1133,6 @@ class MtpProposer(Proposer):
             attn_mask=self.runner.attn_mask,
             spec_attn_mask=self.runner.spec_attn_mask,
             attn_state=self.runner.attn_state,
-            graph_pad_size=self.runner.graph_pad_size,
             decode_token_per_req=self.runner.decode_token_per_req,
             num_computed_tokens_cpu=common_attn_metadata.
             num_computed_tokens_cpu,
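The two metadata hunks drop the graph_pad_size field that was plumbed through from the runner, and the branch above replaces runner.graph_pad_size (which was always -1) with the literal. A self-contained sketch of the decision that remains, using a stand-in enum so it runs without vLLM installed:

from enum import Enum

class CUDAGraphMode(Enum):
    # Stand-in for vllm.config.compilation.CUDAGraphMode.
    NONE = 0
    FULL = 1

def resolve_graph_pad_size(runtime_mode: CUDAGraphMode,
                           num_input_tokens: int) -> int:
    # Full-graph capture pads the drafter batch to the captured size;
    # -1 means "no graph padding" now that runner.graph_pad_size is gone.
    if runtime_mode == CUDAGraphMode.FULL:
        return num_input_tokens
    return -1

assert resolve_graph_pad_size(CUDAGraphMode.FULL, 512) == 512
assert resolve_graph_pad_size(CUDAGraphMode.NONE, 512) == -1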


@@ -35,7 +35,6 @@ import numpy as np
 import numpy.typing as npt
 import regex as re
 import torch
-import torch._dynamo.cache_size
 import torch.distributed as dist
 import torch.nn as nn
 from tqdm import tqdm  # type: ignore
@@ -384,8 +383,6 @@ class NPUModelRunner(LoRAModelRunnerMixin, ECConnectorModelRunnerMixin):
             self.is_kv_producer = vllm_config.kv_transfer_config.is_kv_producer
             self.is_kv_consumer = vllm_config.kv_transfer_config.is_kv_consumer
-            self._may_pad_kv_consumer_num_seq()
-
         # Persistent batch.
         self.input_ids = torch.zeros(self.max_num_tokens,
                                      dtype=torch.int32,
@@ -656,12 +653,6 @@ class NPUModelRunner(LoRAModelRunnerMixin, ECConnectorModelRunnerMixin):
         return get_spec_decode_method(self.speculative_config.method,
                                       self.vllm_config, self.device, self)
 
-    def _may_pad_kv_consumer_num_seq(self):
-        # For Full Graph + MTP in a PD (Prefill/Decode) disaggregation scenario,
-        # we may want to pad self.max_num_seqs in kv_consumer nodes to avoid
-        # exceeding a sequence length limit (16 tokens) in npu_fused_infer_attention_score operation
-        pass
-
     def _init_mc2_tokens_capacity(self):
         # NOTE: To be clear, we need to make sure that during graph capture, the number of
         # tokens is less than or equal to mc2_tokens_capacity. According to _set_cudagraph_sizes,
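The NOTE kept above states the invariant that survives this cleanup: token counts seen during graph capture must not exceed mc2_tokens_capacity. A hypothetical checker that encodes just that statement (the function and parameter names are illustrative, not from the repo):

def check_mc2_capacity(capture_sizes: list[int],
                       mc2_tokens_capacity: int) -> None:
    # During graph capture, every batch's token count must fit within the
    # MC2 buffer, per the NOTE in _init_mc2_tokens_capacity.
    oversized = [n for n in capture_sizes if n > mc2_tokens_capacity]
    if oversized:
        raise ValueError(f"capture sizes {oversized} exceed "
                         f"mc2_tokens_capacity ({mc2_tokens_capacity})")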
@@ -1661,7 +1652,6 @@ class NPUModelRunner(LoRAModelRunnerMixin, ECConnectorModelRunnerMixin):
         self.with_prefill = with_prefill
         self.num_tokens_across_dp = num_tokens_across_dp
-        self._update_graph_pad_size(with_prefill, maybe_padded_num_tokens)
 
         attn_metadata: dict[str, Any] = {}
 
         # Record the index of requests that should not be sampled,
@@ -1750,10 +1740,10 @@ class NPUModelRunner(LoRAModelRunnerMixin, ECConnectorModelRunnerMixin):
         # then the embedding layer is not included in the ACL graph.
         input_ids = self.input_ids[:num_input_tokens]
         inputs_embeds = None
         positions = self.positions[:num_input_tokens]
-        input_ids, positions = self._update_input_ids_and_positions(
-            input_ids, positions, num_input_tokens, with_prefill,
-            maybe_padded_num_tokens)
+
+        if self.uses_mrope:
+            positions = self.mrope_positions[:, :num_input_tokens]
 
         if get_pp_group().is_first_rank:
             intermediate_tensors = None
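The _update_input_ids_and_positions indirection is inlined down to the two-line M-RoPE branch. A runnable sketch of the slicing behaviour, assuming the runner preallocates positions as a (max_num_tokens,) buffer and mrope_positions as (3, max_num_tokens), one row per rotary section as in vLLM's M-RoPE layout:

import torch

max_num_tokens, num_input_tokens = 8, 5
positions_buf = torch.arange(max_num_tokens)
mrope_positions_buf = torch.arange(3 * max_num_tokens).view(3, max_num_tokens)

uses_mrope = True
positions = positions_buf[:num_input_tokens]
if uses_mrope:
    # M-RoPE positions carry one row per rotary section, so the
    # token-count slice happens on the last dim instead.
    positions = mrope_positions_buf[:, :num_input_tokens]

print(positions.shape)  # torch.Size([3, 5])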
@@ -1943,7 +1933,6 @@ class NPUModelRunner(LoRAModelRunnerMixin, ECConnectorModelRunnerMixin):
             attn_state=self.attn_state,
             is_only_prefill=bool(np.all(num_valid_tokens != 1)),
             max_query_len=max_num_scheduled_tokens,
-            graph_pad_size=self.graph_pad_size,
             decode_token_per_req=self.decode_token_per_req,
             cos=self.cos,
             sin=self.sin,
@@ -2058,8 +2047,7 @@ class NPUModelRunner(LoRAModelRunnerMixin, ECConnectorModelRunnerMixin):
                                          device=self.device)
         return model_kwargs
 
-    def _generate_process_reqs_hidden_states(self, attn_metadata, with_prefill,
-                                             maybe_padded_num_tokens,
+    def _generate_process_reqs_hidden_states(self, maybe_padded_num_tokens,
                                              input_ids, positions,
                                              intermediate_tensors,
                                              inputs_embeds):
@@ -2141,16 +2129,6 @@ class NPUModelRunner(LoRAModelRunnerMixin, ECConnectorModelRunnerMixin):
             attn_state = AscendAttentionState.PrefillCacheHit
         return attn_state
 
-    def _update_graph_pad_size(self, with_prefill, graph_pad_size):
-        self.graph_pad_size = -1
-
-    def _update_input_ids_and_positions(self, input_ids, positions,
-                                        num_input_tokens, with_prefill,
-                                        maybe_padded_num_tokens):
-        if self.uses_mrope:
-            positions = self.mrope_positions[:, :num_input_tokens]
-        return input_ids, positions
-
     def _calc_spec_decode_metadata(
         self,
         num_draft_tokens: np.ndarray,
@@ -2529,8 +2507,8 @@ class NPUModelRunner(LoRAModelRunnerMixin, ECConnectorModelRunnerMixin):
             self.maybe_setup_kv_connector(scheduler_output)
 
             hidden_states = self._generate_process_reqs_hidden_states(
-                attn_metadata, self.with_prefill, maybe_padded_num_tokens,
-                input_ids, positions, intermediate_tensors, inputs_embeds)
+                maybe_padded_num_tokens, input_ids, positions,
+                intermediate_tensors, inputs_embeds)
 
             self.maybe_wait_for_kv_save()
             finished_sending, finished_recving = self.get_finished_kv_transfer(
@@ -3023,9 +3001,9 @@ class NPUModelRunner(LoRAModelRunnerMixin, ECConnectorModelRunnerMixin):
         return attn_metadata
 
-    def _generate_dummy_run_hidden_states(self, with_prefill, input_ids,
-                                          positions, attn_metadata, num_tokens,
-                                          intermediate_tensors, inputs_embeds):
+    def _generate_dummy_run_hidden_states(self, input_ids, positions,
+                                          num_tokens, intermediate_tensors,
+                                          inputs_embeds):
         hidden_states = self.model(input_ids=input_ids,
                                    positions=positions,
                                    intermediate_tensors=intermediate_tensors,
@@ -3246,8 +3224,8 @@ class NPUModelRunner(LoRAModelRunnerMixin, ECConnectorModelRunnerMixin):
                 model_instance=self.model,
                 weight_prefetch_method=self.weight_prefetch_method):
             hidden_states = self._generate_dummy_run_hidden_states(
-                with_prefill, input_ids, positions, attn_metadata,
-                num_tokens_padded, intermediate_tensors, inputs_embeds)
+                input_ids, positions, num_tokens_padded,
+                intermediate_tensors, inputs_embeds)
             dummy_compute_logits(hidden_states)
 
             if self.drafter:
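The last three hunks apply one pattern: parameters the helpers never consumed (attn_metadata, with_prefill) disappear from both the definitions and their call sites. A reduced sketch of the pairing that remains, where _RunnerSketch and the injected model are stand-ins rather than repo classes:

class _RunnerSketch:
    def __init__(self, model):
        self.model = model

    def _generate_dummy_run_hidden_states(self, input_ids, positions,
                                          num_tokens, intermediate_tensors,
                                          inputs_embeds):
        # Only what the forward pass actually consumes is threaded through;
        # num_tokens stays for the callers shown in the hunks above.
        return self.model(input_ids=input_ids,
                          positions=positions,
                          intermediate_tensors=intermediate_tensors,
                          inputs_embeds=inputs_embeds)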