[Lint]Style: Convert vllm-ascend/ to ruff format(Batch #10) (#6173)

### What this PR does / why we need it?
**Scope of Changes** (a brief before/after sketch of the conventions applied follows the table):
| File Path |
| :--- |
| `vllm_ascend/ops/layer_shard_linear.py` |
| `vllm_ascend/ops/linear.py` |
| `vllm_ascend/ops/linear_op.py` |
| `vllm_ascend/worker/worker.py` |
| `vllm_ascend/patch/worker/patch_bert.py` |
| `vllm_ascend/patch/worker/patch_deepseek.py` |
| `vllm_ascend/patch/worker/patch_distributed.py` |
| `vllm_ascend/patch/worker/patch_module.py` |
| `vllm_ascend/patch/worker/patch_multimodal_merge.py` |
| `vllm_ascend/patch/worker/patch_qwen3_next.py` |
| `vllm_ascend/patch/worker/patch_qwen3_next_mtp.py` |
| `vllm_ascend/patch/worker/patch_rejection_sampler.py` |
| `vllm_ascend/patch/worker/patch_rope.py` |
| `vllm_ascend/patch/worker/patch_triton.py` |
| `vllm_ascend/patch/worker/patch_unquantized_gemm.py` |
| `vllm_ascend/patch/worker/patch_v2_egale.py` |
| `vllm_ascend/worker/npu_input_batch.py` |
| `vllm_ascend/worker/v2/aclgraph_utils.py` |
| `vllm_ascend/worker/v2/attn_utils.py` |
| `vllm_ascend/worker/v2/model_runner.py` |
| `vllm_ascend/worker/v2/sample/gumbel.py` |
| `vllm_ascend/worker/v2/sample/penalties.py` |
| `vllm_ascend/worker/v2/sample/sampler.py` |
| `vllm_ascend/worker/v2/spec_decode/__init__.py` |
| `vllm_ascend/worker/v2/spec_decode/eagle.py` |
| `vllm_ascend/worker/v2/states.py` |
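
The batch is mechanical formatting and import cleanup; no hunk changes runtime behavior. The recurring rewrites in the diffs below are: backslash-continued imports collapsed onto one line (or parenthesized with a trailing comma), single quotes replaced with double quotes, parentheses dropped around multi-item `with` statements, and spaces added around the slice colon when a bound is an expression. A minimal before/after sketch of those conventions, using invented names rather than code from this repository:

```python
# Hypothetical sketch (invented names, not vllm-ascend code) of the conventions
# ruff format enforces in this batch, mirroring the hunks below.
from os import path as os_path  # before: a backslash-continued import split over two lines

num_reqs = 4
buf = list(range(16))

method = "mtp"  # before: 'mtp' (single quotes)
tail = buf[1 : num_reqs + 1]  # before: buf[1:num_reqs + 1], no spaces around the slice colon

# before: with (open(__file__) as lhs, open(__file__) as rhs):
with open(__file__) as lhs, open(__file__) as rhs:
    print(os_path.basename(lhs.name), method, tail, len(rhs.readline()))
```
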
### Does this PR introduce _any_ user-facing change?

No. This is a formatting-only change; runtime behavior is unaffected.
### How was this patch tested?

- vLLM version: v0.14.0
- vLLM main: d68209402d

Signed-off-by: MrZ20 <2609716663@qq.com>
Signed-off-by: SILONG ZENG <2609716663@qq.com>
Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
Co-authored-by: wangxiyuan <wangxiyuan1007@gmail.com>
Authored by SILONG ZENG on 2026-02-06 15:35:06 +08:00, committed by GitHub
parent 65b7f716e6
commit 19b5d44ea8
33 changed files with 938 additions and 1243 deletions

View File

@@ -22,19 +22,16 @@ from typing import Any
import torch
import torch.nn as nn
from vllm.config import VllmConfig
from vllm.v1.attention.backend import AttentionMetadataBuilder
from vllm.v1.kv_cache_interface import KVCacheConfig
from vllm.v1.worker.gpu.block_table import BlockTables
from vllm.v1.worker.gpu.cudagraph_utils import CudaGraphManager
from vllm.v1.worker.gpu.cudagraph_utils import \
prepare_inputs_to_capture as prepare_inputs_to_capture_gpu
from vllm.v1.worker.gpu.cudagraph_utils import prepare_inputs_to_capture as prepare_inputs_to_capture_gpu
from vllm.v1.worker.gpu.input_batch import InputBuffers
from vllm.v1.attention.backend import AttentionMetadataBuilder
from vllm_ascend.worker.v2.utils import torch_cuda_wrapper
class AclGraphManager(CudaGraphManager):
"""ACL Graph Manager for Ascend NPUs."""
@@ -51,7 +48,7 @@ class AclGraphManager(CudaGraphManager):
attn_metadata_builders: list[AttentionMetadataBuilder],
kv_cache_config: KVCacheConfig,
) -> None:
with (torch_cuda_wrapper(), prepare_capture_inputs_wrapper()):
with torch_cuda_wrapper(), prepare_capture_inputs_wrapper():
super().capture_graph(
num_tokens,
model,

View File

@@ -18,19 +18,17 @@
#
from collections.abc import Sequence
from typing import Any, Tuple
from typing import Any
import numpy as np
import torch
from vllm.config import VllmConfig
from vllm.v1.kv_cache_interface import EncoderOnlyAttentionSpec, KVCacheConfig
from vllm.v1.attention.backend import AttentionMetadataBuilder
from vllm.v1.kv_cache_interface import EncoderOnlyAttentionSpec, KVCacheConfig
from vllm_ascend.attention.attention_mask import AttentionMaskBuilder
from vllm_ascend.attention.attention_v1 import AscendAttentionState
from vllm_ascend.attention.utils import (AscendCommonAttentionMetadata,
AscendPrefillContextParallelMetadata)
from vllm_ascend.attention.utils import AscendCommonAttentionMetadata, AscendPrefillContextParallelMetadata
_ATTENTION_MASK_BUILDER = None
@@ -59,8 +57,7 @@ def build_attn_metadata(
attn_state: Any | None = None,
graph_pad_size: int = -1,
num_input_tokens: int = 0,
prefill_context_parallel_metadata: AscendPrefillContextParallelMetadata
| None = None,
prefill_context_parallel_metadata: AscendPrefillContextParallelMetadata | None = None,
) -> dict[str, Any]:
"""Build attention metadata for Ascend NPUs."""
# TODO(Ronald1995): optimize AscendCommonAttentionMetadata.
@@ -92,7 +89,8 @@ def build_attn_metadata(
graph_pad_size=graph_pad_size,
num_input_tokens=num_input_tokens,
prefill_context_parallel_metadata=prefill_context_parallel_metadata,
max_seq_len=max_seq_len)
max_seq_len=max_seq_len,
)
attn_metadata_builder = attn_metadata_builders[i]
metadata = attn_metadata_builder.build(
@@ -114,8 +112,8 @@ def build_attn_state(
"""Build attention state for npu's attention backend."""
if vllm_config.model_config.runner_type == "pooling":
if isinstance(
vllm_config.kv_cache_config.kv_cache_groups[0].kv_cache_spec,
EncoderOnlyAttentionSpec,
vllm_config.kv_cache_config.kv_cache_groups[0].kv_cache_spec,
EncoderOnlyAttentionSpec,
):
attn_state = AscendAttentionState.PrefillNoCache
else:
@@ -126,16 +124,14 @@ def build_attn_state(
# but only one token is not hit in cache.
elif np.all(num_scheduled_tokens == 1):
attn_state = AscendAttentionState.DecodeOnly
if (vllm_config.speculative_config
and vllm_config.speculative_config.method == 'mtp'):
if vllm_config.speculative_config and vllm_config.speculative_config.method == "mtp":
# SpecDecoding now supports seq_len=1 and seq_len=2
# In Prefilling Decoding Disaggregation scenario, SpecDecoding
# need to supports seq_len=1
attn_state = AscendAttentionState.SpecDecoding
# Speculative decoding.
elif np.all(num_valid_tokens == 1):
if (vllm_config.speculative_config
and vllm_config.speculative_config.method == 'mtp'):
if vllm_config.speculative_config and vllm_config.speculative_config.method == "mtp":
attn_state = AscendAttentionState.SpecDecoding
else:
attn_state = AscendAttentionState.ChunkedPrefill

View File

@@ -22,15 +22,16 @@ import torch
from vllm.config import VllmConfig
from vllm.logger import init_logger
from vllm.v1.core.sched.output import SchedulerOutput
from vllm.v1.worker.gpu.input_batch import (InputBatch,
combine_sampled_and_draft_tokens,
prepare_pos_seq_lens,
prepare_prefill_inputs)
from vllm.v1.worker.gpu.input_batch import (
InputBatch,
combine_sampled_and_draft_tokens,
prepare_pos_seq_lens,
prepare_prefill_inputs,
)
from vllm.v1.worker.gpu.model_runner import GPUModelRunner
from vllm_ascend.worker.v2.aclgraph_utils import AclGraphManager
from vllm_ascend.worker.v2.attn_utils import (build_attn_metadata,
build_attn_state)
from vllm_ascend.worker.v2.attn_utils import build_attn_metadata, build_attn_state
from vllm_ascend.worker.v2.input_batch import AscendInputBuffers
from vllm_ascend.worker.v2.sample.sampler import AscendSampler
from vllm_ascend.worker.v2.spec_decode import init_speculator
@@ -45,7 +46,7 @@ class NPUModelRunner(GPUModelRunner):
"""Model runner for Ascend NPUs."""
def __init__(self, vllm_config: VllmConfig, device: torch.device):
with (torch_cuda_wrapper(), uva_wrapper()):
with torch_cuda_wrapper(), uva_wrapper():
super().__init__(vllm_config, device)
# because we will override these attribute, delete these attribute to
@@ -94,7 +95,8 @@ class NPUModelRunner(GPUModelRunner):
# we need to adjust triton operators in sampler,
# so reinitialize sampler here.
self.sampler: AscendSampler = AscendSampler(
logprobs_mode=self.model_config.logprobs_mode, )
logprobs_mode=self.model_config.logprobs_mode,
)
# we need to copy num_computed_tokens back to cpu to help
# update actual seq_lens_cpu. gpu attention backend doesn't need these
@@ -131,16 +133,12 @@ class NPUModelRunner(GPUModelRunner):
self._update_seq_lens_cpu(scheduler_output, req_ids)
num_scheduled_tokens = np.array(
[scheduler_output.num_scheduled_tokens[i] for i in req_ids],
dtype=np.int32)
num_scheduled_tokens = np.array([scheduler_output.num_scheduled_tokens[i] for i in req_ids], dtype=np.int32)
num_valid_tokens = num_scheduled_tokens
if scheduler_output.scheduled_spec_decode_tokens:
num_valid_tokens = np.array(
[
num_tokens - len(
scheduler_output.scheduled_spec_decode_tokens.get(
i, []))
num_tokens - len(scheduler_output.scheduled_spec_decode_tokens.get(i, []))
for num_tokens, i in zip(num_scheduled_tokens, req_ids)
],
dtype=np.int32,
@@ -153,9 +151,7 @@ class NPUModelRunner(GPUModelRunner):
num_valid_tokens,
)
idx_mapping_list = [
self.req_states.req_id_to_index[req_id] for req_id in req_ids
]
idx_mapping_list = [self.req_states.req_id_to_index[req_id] for req_id in req_ids]
idx_mapping = self.input_buffers.idx_mapping
idx_mapping.np[:num_reqs] = idx_mapping_list
idx_mapping_np = idx_mapping.np[:num_reqs]
@@ -167,16 +163,11 @@ class NPUModelRunner(GPUModelRunner):
# No draft token scheduled (common case).
total_num_draft_tokens = 0
total_num_logits = num_reqs
cu_num_logits = torch.arange(num_reqs + 1,
device=self.device,
dtype=torch.int32)
cu_num_logits = torch.arange(num_reqs + 1, device=self.device, dtype=torch.int32)
else:
draft_tokens = scheduler_output.scheduled_spec_decode_tokens
num_draft_tokens = np.array(
[
len(draft_tokens[req_id]) if req_id in draft_tokens else 0
for req_id in req_ids
],
[len(draft_tokens[req_id]) if req_id in draft_tokens else 0 for req_id in req_ids],
dtype=np.int32,
)
total_num_draft_tokens = int(num_draft_tokens.sum())
@@ -184,10 +175,9 @@ class NPUModelRunner(GPUModelRunner):
np.cumsum(
num_draft_tokens + 1,
out=self.input_buffers.cu_num_logits.np[1:num_reqs + 1],
out=self.input_buffers.cu_num_logits.np[1 : num_reqs + 1],
)
cu_num_logits = self.input_buffers.cu_num_logits.copy_to_gpu(
num_reqs + 1)
cu_num_logits = self.input_buffers.cu_num_logits.copy_to_gpu(num_reqs + 1)
# Block tables: num_kv_cache_groups x [num_reqs, max_num_blocks]
block_tables = self.block_tables.gather_block_tables(idx_mapping_npu)
@@ -195,20 +185,15 @@ class NPUModelRunner(GPUModelRunner):
# Get query_start_loc.
np.cumsum(
num_scheduled_tokens,
out=self.input_buffers.query_start_loc.np[1:num_reqs + 1],
out=self.input_buffers.query_start_loc.np[1 : num_reqs + 1],
)
# Pad for full CUDA graph mode.
# Some attention backends like FA3 require query_start_loc to be non-decreasing.
self.input_buffers.query_start_loc.np[num_reqs + 1:] = num_tokens
self.input_buffers.query_start_loc.np[num_reqs + 1 :] = num_tokens
self.input_buffers.query_start_loc.copy_to_gpu()
query_start_loc_gpu = self.input_buffers.query_start_loc.gpu[:
num_reqs +
1]
query_start_loc_cpu = self.input_buffers.query_start_loc.cpu[:
num_reqs +
1]
query_start_loc_np = self.input_buffers.query_start_loc.np[:num_reqs +
1]
query_start_loc_gpu = self.input_buffers.query_start_loc.gpu[: num_reqs + 1]
query_start_loc_cpu = self.input_buffers.query_start_loc.cpu[: num_reqs + 1]
query_start_loc_np = self.input_buffers.query_start_loc.np[: num_reqs + 1]
# Get prefill tokens.
prepare_prefill_inputs(
@@ -249,7 +234,8 @@ class NPUModelRunner(GPUModelRunner):
# Compute slot mappings: [num_kv_cache_groups, num_tokens]
slot_mappings = self.block_tables.compute_slot_mappings(
query_start_loc_gpu, self.input_buffers.positions[:num_tokens])
query_start_loc_gpu, self.input_buffers.positions[:num_tokens]
)
# Layer name -> attention metadata.
# TODO(Ronald1995): try to add a new method `build_attn_metadata` in
@@ -263,8 +249,7 @@ class NPUModelRunner(GPUModelRunner):
query_start_loc_cpu=query_start_loc_cpu,
seq_lens=self.input_buffers.seq_lens,
seq_lens_np=self.input_buffers.seq_lens_np,
num_computed_tokens_cpu=self.req_states.
num_computed_tokens_cpu[idx_mapping_cpu],
num_computed_tokens_cpu=self.req_states.num_computed_tokens_cpu[idx_mapping_cpu],
block_tables=block_tables,
slot_mappings=slot_mappings,
kv_cache_config=self.kv_cache_config,
@@ -335,16 +320,13 @@ class NPUModelRunner(GPUModelRunner):
req_index = self.req_states.req_id_to_index[req_id]
# num_computed_tokens_cpu has reverted by num_rejected_tokens already.
# in super postprocess method.
self.req_states.num_computed_tokens_cpu[
req_index] = self.num_computed_tokens_cpu[req_index]
self.req_states.num_computed_tokens_cpu[req_index] = self.num_computed_tokens_cpu[req_index]
# update seq_lens_cpu
for i, req_id in enumerate(req_ids):
req_index = self.req_states.req_id_to_index[req_id]
num_computed_tokens = self.req_states.num_computed_tokens_cpu[
req_index]
self.input_buffers.seq_lens_cpu[
i] = num_computed_tokens + num_scheduled_tokens[req_id]
num_computed_tokens = self.req_states.num_computed_tokens_cpu[req_index]
self.input_buffers.seq_lens_cpu[i] = num_computed_tokens + num_scheduled_tokens[req_id]
def eplb_warmup(self):
# TODO(Ronald1995): just define the method in case calling error in

View File

@@ -76,8 +76,7 @@ def _gumbel_sample_kernel(
idx = tl.argmax(logits, axis=0)
token_id = block_idx * BLOCK_SIZE + idx
value = tl.max(logits, axis=0)
tl.store(local_argmax_ptr + req_idx * local_argmax_stride + block_idx,
token_id)
tl.store(local_argmax_ptr + req_idx * local_argmax_stride + block_idx, token_id)
tl.store(local_max_ptr + req_idx * local_max_stride + block_idx, value)

View File

@@ -68,8 +68,7 @@ def _penalties_and_temperature_kernel(
if use_penalty:
req_state_idx = tl.load(idx_mapping_ptr + batch_idx)
output_bin_counts = tl.load(
output_bin_counts_ptr + req_state_idx * output_bin_counts_stride +
block,
output_bin_counts_ptr + req_state_idx * output_bin_counts_stride + block,
mask=mask,
)
# to use vector core, if use > 0 will use scalar to slow down performance
@@ -77,11 +76,9 @@ def _penalties_and_temperature_kernel(
# Apply repetition penalties.
if use_rep_penalty:
packed_block = block_idx * BLOCK_SIZE // 32 + tl.arange(
0, BLOCK_SIZE // 32)
packed_block = block_idx * BLOCK_SIZE // 32 + tl.arange(0, BLOCK_SIZE // 32)
packed_mask = tl.load(
prompt_bin_mask_ptr + req_state_idx * prompt_bin_mask_stride +
packed_block,
prompt_bin_mask_ptr + req_state_idx * prompt_bin_mask_stride + packed_block,
mask=packed_block < tl.cdiv(vocab_size, 32),
)
# the compiler itself does not optimize right-shift operations, so we change the same func
@@ -97,8 +94,7 @@ def _penalties_and_temperature_kernel(
prompt_bin_mask = prompt_bin_mask.reshape(BLOCK_SIZE)
# If token appears in prompt or output, apply, otherwise use 1.0 for no-op.
scale = tl.where(prompt_bin_mask | output_bin_mask, rep_penalty,
1.0)
scale = tl.where(prompt_bin_mask | output_bin_mask, rep_penalty, 1.0)
# If logits are positive, divide by penalty, otherwise multiply by penalty.
logits *= tl.where(logits > 0, 1.0 / scale, scale)

View File

@@ -16,18 +16,16 @@
#
import torch
from vllm.v1.sample.ops.topk_topp_sampler import apply_top_k_top_p
from vllm.v1.sample.metadata import SamplingMetadata
from vllm.v1.sample.ops.topk_topp_sampler import apply_top_k_top_p
from vllm.v1.worker.gpu.sample.min_p import apply_min_p
from vllm.v1.worker.gpu.sample.sampler import Sampler
from vllm_ascend.worker.v2.sample.gumbel import gumbel_sample
from vllm_ascend.worker.v2.sample.penalties import \
apply_penalties_and_temperature
from vllm_ascend.worker.v2.sample.penalties import apply_penalties_and_temperature
class AscendSampler(Sampler):
def sample(
self,
logits: torch.Tensor,
@@ -45,8 +43,7 @@ class AscendSampler(Sampler):
if sampling_metadata.min_p is not None:
apply_min_p(logits, sampling_metadata.min_p)
# Apply top_k and/or top_p. This might return a new tensor.
logits = apply_top_k_top_p(logits, sampling_metadata.top_k,
sampling_metadata.top_p)
logits = apply_top_k_top_p(logits, sampling_metadata.top_k, sampling_metadata.top_p)
sampled = gumbel_sample(
logits,

View File

@@ -30,9 +30,7 @@ def init_speculator(
speculative_config = vllm_config.speculative_config
assert speculative_config is not None
if speculative_config.use_eagle():
from vllm_ascend.worker.v2.spec_decode.eagle import \
AscendEagleSpeculator
from vllm_ascend.worker.v2.spec_decode.eagle import AscendEagleSpeculator
return AscendEagleSpeculator(vllm_config, device)
raise NotImplementedError(
f"{speculative_config.method} is not supported yet.")
raise NotImplementedError(f"{speculative_config.method} is not supported yet.")

View File

@@ -30,7 +30,6 @@ from vllm_ascend.worker.v2.attn_utils import build_attn_metadata
class AscendEagleSpeculator(EagleSpeculator):
def __init__(self, vllm_config: VllmConfig, device: torch.device):
"""Override GPU EagleSpeculator.__init__ for Ascend NPUs.
attnention metadata building in Ascend backend needs more information,

View File

@@ -63,8 +63,8 @@ class AscendRequestState(RequestState):
# NOTE(Ronald1995): Ascend NPUs do not support UVA yet,
# so we use CpuGpuBuffer to allocate prefill_token_ids buffer.
self.prefill_token_ids: CpuGpuBuffer = self._make_buffer( # type: ignore
(self.max_num_reqs, self.max_model_len),
dtype=torch.int32)
(self.max_num_reqs, self.max_model_len), dtype=torch.int32
)
def add_request(
self,
@@ -75,7 +75,6 @@ class AscendRequestState(RequestState):
sampling_params,
lora_request,
):
super().add_request(
req_id,
prompt_len,
@@ -93,7 +92,6 @@ def uva_wrapper():
"""Context manager to disable UVA for Ascend NPUs."""
class UvaBufferWrapper:
def __init__(self, *args, **kwargs):
pass