### What this PR does / why we need it?
**Scope of Changes**:
| File Path |
| :--- |
| `vllm_ascend/ops/layer_shard_linear.py` |
| `vllm_ascend/ops/linear.py` |
| `vllm_ascend/ops/linear_op.py` |
| `vllm_ascend/worker/worker.py` |
| `vllm_ascend/patch/worker/patch_bert.py` |
| `vllm_ascend/patch/worker/patch_deepseek.py` |
| `vllm_ascend/patch/worker/patch_distributed.py` |
| `vllm_ascend/patch/worker/patch_module.py` |
| `vllm_ascend/patch/worker/patch_multimodal_merge.py` |
| `vllm_ascend/patch/worker/patch_qwen3_next.py` |
| `vllm_ascend/patch/worker/patch_qwen3_next_mtp.py` |
| `vllm_ascend/patch/worker/patch_rejection_sampler.py` |
| `vllm_ascend/patch/worker/patch_rope.py` |
| `vllm_ascend/patch/worker/patch_triton.py` |
| `vllm_ascend/patch/worker/patch_unquantized_gemm.py` |
| `vllm_ascend/patch/worker/patch_v2_egale.py` |
| `vllm_ascend/worker/npu_input_batch.py` |
| `vllm_ascend/worker/v2/aclgraph_utils.py` |
| `vllm_ascend/worker/v2/attn_utils.py` |
| `vllm_ascend/worker/v2/model_runner.py` |
| `vllm_ascend/worker/v2/sample/gumbel.py` |
| `vllm_ascend/worker/v2/sample/penalties.py` |
| `vllm_ascend/worker/v2/sample/sampler.py` |
| `vllm_ascend/worker/v2/spec_decode/__init__.py` |
| `vllm_ascend/worker/v2/spec_decode/eagle.py` |
| `vllm_ascend/worker/v2/states.py` |
### Does this PR introduce _any_ user-facing change?
### How was this patch tested?
- vLLM version: v0.14.0
- vLLM main: d68209402d
Signed-off-by: MrZ20 <2609716663@qq.com>
Signed-off-by: SILONG ZENG <2609716663@qq.com>
Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
Co-authored-by: wangxiyuan <wangxiyuan1007@gmail.com>
`vllm_ascend/worker/v2/attn_utils.py`:

```diff
@@ -18,19 +18,17 @@
 #
 
 from collections.abc import Sequence
-from typing import Any, Tuple
+from typing import Any
 
 import numpy as np
 import torch
 from vllm.config import VllmConfig
-from vllm.v1.kv_cache_interface import EncoderOnlyAttentionSpec, KVCacheConfig
 from vllm.v1.attention.backend import AttentionMetadataBuilder
+from vllm.v1.kv_cache_interface import EncoderOnlyAttentionSpec, KVCacheConfig
 
 from vllm_ascend.attention.attention_mask import AttentionMaskBuilder
 from vllm_ascend.attention.attention_v1 import AscendAttentionState
-from vllm_ascend.attention.utils import (AscendCommonAttentionMetadata,
-                                         AscendPrefillContextParallelMetadata)
+from vllm_ascend.attention.utils import AscendCommonAttentionMetadata, AscendPrefillContextParallelMetadata
 
 _ATTENTION_MASK_BUILDER = None
 
@@ -59,8 +57,7 @@ def build_attn_metadata(
     attn_state: Any | None = None,
     graph_pad_size: int = -1,
     num_input_tokens: int = 0,
-    prefill_context_parallel_metadata: AscendPrefillContextParallelMetadata
-    | None = None,
+    prefill_context_parallel_metadata: AscendPrefillContextParallelMetadata | None = None,
 ) -> dict[str, Any]:
     """Build attention metadata for Ascend NPUs."""
     # TODO(Ronald1995): optimize AscendCommonAttentionMetadata.
@@ -92,7 +89,8 @@ def build_attn_metadata(
         graph_pad_size=graph_pad_size,
         num_input_tokens=num_input_tokens,
         prefill_context_parallel_metadata=prefill_context_parallel_metadata,
-        max_seq_len=max_seq_len)
+        max_seq_len=max_seq_len,
+    )
 
     attn_metadata_builder = attn_metadata_builders[i]
     metadata = attn_metadata_builder.build(
```
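The last hunk above appears to sit inside a loop that pairs metadata builder `i` with KV-cache group `i`. As a reading aid, here is a minimal, self-contained sketch of that per-group pattern; the stub builder class, the layer-name keying, and every name below are hypothetical illustrations, not the actual vllm or vllm-ascend APIs:

```python
from dataclasses import dataclass
from typing import Any


@dataclass
class StubMetadataBuilder:
    """Hypothetical stand-in for a vllm AttentionMetadataBuilder."""

    backend_name: str

    def build(self, common_metadata: dict[str, Any]) -> dict[str, Any]:
        # Real builders derive backend-specific attention metadata from the
        # common metadata; here we just tag it with the backend name.
        return {"backend": self.backend_name, **common_metadata}


def build_attn_metadata_sketch(
    builders: list[StubMetadataBuilder],
    layer_names_per_group: list[list[str]],
    common_metadata: dict[str, Any],
) -> dict[str, Any]:
    # Mirror of the loop shape in the hunk: builder i serves KV-cache
    # group i; keying the result by layer name is assumed here, inferred
    # only from the dict[str, Any] return annotation in the signature hunk.
    attn_metadata: dict[str, Any] = {}
    for i, layer_names in enumerate(layer_names_per_group):
        metadata = builders[i].build(common_metadata)
        for name in layer_names:
            attn_metadata[name] = metadata
    return attn_metadata


print(build_attn_metadata_sketch(
    [StubMetadataBuilder("ascend")],
    [["model.layers.0.self_attn.attn"]],
    {"max_seq_len": 128},
))
```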
```diff
@@ -114,8 +112,8 @@ def build_attn_state(
     """Build attention state for npu's attention backend."""
     if vllm_config.model_config.runner_type == "pooling":
         if isinstance(
-                vllm_config.kv_cache_config.kv_cache_groups[0].kv_cache_spec,
-                EncoderOnlyAttentionSpec,
+            vllm_config.kv_cache_config.kv_cache_groups[0].kv_cache_spec,
+            EncoderOnlyAttentionSpec,
         ):
             attn_state = AscendAttentionState.PrefillNoCache
         else:
@@ -126,16 +124,14 @@ def build_attn_state(
     # but only one token is not hit in cache.
     elif np.all(num_scheduled_tokens == 1):
         attn_state = AscendAttentionState.DecodeOnly
-        if (vllm_config.speculative_config
-                and vllm_config.speculative_config.method == 'mtp'):
+        if vllm_config.speculative_config and vllm_config.speculative_config.method == "mtp":
             # SpecDecoding now supports seq_len=1 and seq_len=2
             # In Prefilling Decoding Disaggregation scenario, SpecDecoding
             # need to supports seq_len=1
             attn_state = AscendAttentionState.SpecDecoding
     # Speculative decoding.
     elif np.all(num_valid_tokens == 1):
-        if (vllm_config.speculative_config
-                and vllm_config.speculative_config.method == 'mtp'):
+        if vllm_config.speculative_config and vllm_config.speculative_config.method == "mtp":
             attn_state = AscendAttentionState.SpecDecoding
     else:
         attn_state = AscendAttentionState.ChunkedPrefill
```
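Taken together, the `build_attn_state` hunks form a small dispatch over attention states. The sketch below restates that control flow as self-contained code for easier reading; the flat helper signature, the stand-in enum, and the treatment of branches whose bodies fall outside the visible hunks are assumptions for illustration, not the module's actual API:

```python
from enum import Enum, auto

import numpy as np


class AscendAttentionState(Enum):
    # Stand-in for vllm_ascend.attention.attention_v1.AscendAttentionState,
    # reduced to the members that appear in this diff.
    PrefillNoCache = auto()
    DecodeOnly = auto()
    SpecDecoding = auto()
    ChunkedPrefill = auto()


def select_attn_state(
    pooling_encoder_only: bool,
    spec_method: str | None,
    num_scheduled_tokens: np.ndarray,
    num_valid_tokens: np.ndarray,
) -> AscendAttentionState:
    # Hypothetical flat signature; the real function reads these values
    # from a VllmConfig and its kv_cache_config.
    is_mtp = spec_method == "mtp"
    if pooling_encoder_only:
        # Encoder-only pooling models never resume from KV cache.
        return AscendAttentionState.PrefillNoCache
    if np.all(num_scheduled_tokens == 1):
        # Pure decode: exactly one token scheduled per request.
        state = AscendAttentionState.DecodeOnly
        if is_mtp:
            # SpecDecoding supports seq_len=1 and seq_len=2; with
            # prefill/decode disaggregation it must also handle seq_len=1.
            state = AscendAttentionState.SpecDecoding
        return state
    if np.all(num_valid_tokens == 1) and is_mtp:
        # Speculative decoding.
        return AscendAttentionState.SpecDecoding
    # Fallback shown at the end of the last hunk; the non-MTP path under
    # num_valid_tokens == 1 is not visible in this diff.
    return AscendAttentionState.ChunkedPrefill


decode_lens = np.array([1, 1, 1])
print(select_attn_state(False, "mtp", decode_lens, decode_lens))
# -> AscendAttentionState.SpecDecoding
```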