[Lint] Style: Convert vllm-ascend/ to ruff format (Batch #8) (#6129)

### What this PR does / why we need it?
**Scope of Changes**:
| File Path |
| :--- |
| `vllm_ascend/ops/__init__.py` |
| `vllm_ascend/ops/activation.py` |
| `vllm_ascend/ops/flashcomm2_oshard_manager.py` |
| `vllm_ascend/ops/layernorm.py` |
| `vllm_ascend/ops/mla.py` |
| `vllm_ascend/ops/mm_encoder_attention.py` |
| `vllm_ascend/ops/register_custom_ops.py` |
| `vllm_ascend/ops/vocab_parallel_embedding.py` |
| `vllm_ascend/ops/weight_prefetch.py` |
| `vllm_ascend/spec_decode/__init__.py` |
| `vllm_ascend/spec_decode/eagle_proposer.py` |
| `vllm_ascend/spec_decode/interface.py` |
| `vllm_ascend/spec_decode/mtp_proposer.py` |
| `vllm_ascend/spec_decode/ngram_proposer.py` |
| `vllm_ascend/spec_decode/suffix_proposer.py` |
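
All changes in this batch are mechanical formatter output. A minimal sketch of how such a batch can be verified locally (assuming `ruff` is installed and this runs from the repository root; the path list is abbreviated to the two affected directories):

```python
# Verify the batch: "ruff format --check" exits non-zero if any file
# under these paths would still be reformatted.
import subprocess

paths = ["vllm_ascend/ops", "vllm_ascend/spec_decode"]
result = subprocess.run(["ruff", "format", "--check", *paths])
raise SystemExit(result.returncode)
```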

### Does this PR introduce _any_ user-facing change?

### How was this patch tested?

- vLLM version: v0.13.0
- vLLM main: d68209402d

Signed-off-by: MrZ20 <2609716663@qq.com>
Signed-off-by: SILONG ZENG <2609716663@qq.com>
Commit 4fb3d5e1b2 (parent 99aedaff63), authored by SILONG ZENG on 2026-02-06 15:25:08 +08:00, committed by GitHub.
17 changed files with 948 additions and 1147 deletions.

#### Ruff config (exclude list)

```diff
@@ -52,18 +52,6 @@ line-length = 120
 exclude = [
     "tests/**",
-    # (8)
-    "vllm_ascend/ops/__init__.py",
-    "vllm_ascend/ops/activation.py",
-    "vllm_ascend/ops/flashcomm2_oshard_manager.py",
-    "vllm_ascend/ops/layernorm.py",
-    "vllm_ascend/ops/mla.py",
-    "vllm_ascend/ops/mm_encoder_attention.py",
-    "vllm_ascend/ops/register_custom_ops.py",
-    "vllm_ascend/ops/vocab_parallel_embedding.py",
-    "vllm_ascend/ops/weight_prefetch.py",
-    "vllm_ascend/spec_decode/**",
     # (10)
     "vllm_ascend/ops/*linear*.py",
     "vllm_ascend/worker/worker.py",
```

#### `vllm_ascend/ops/__init__.py`

```diff
@@ -27,8 +27,7 @@ if HAS_TRITON:
 import vllm_ascend.ops.vocab_parallel_embedding  # noqa
 from vllm_ascend.ops.activation import AscendQuickGELU, AscendSiluAndMul
-from vllm_ascend.ops.rotary_embedding import (
-    AscendDeepseekScalingRotaryEmbedding, AscendRotaryEmbedding)
+from vllm_ascend.ops.rotary_embedding import AscendDeepseekScalingRotaryEmbedding, AscendRotaryEmbedding


 class dummyFusionOp:
@@ -40,23 +39,13 @@ class dummyFusionOp:

 def register_dummy_fusion_op() -> None:
     torch.ops._C_ascend.rms_norm = dummyFusionOp(name="rms_norm")
-    torch.ops._C_ascend.fused_add_rms_norm = dummyFusionOp(
-        name="fused_add_rms_norm")
-    torch.ops._C_ascend.static_scaled_fp8_quant = dummyFusionOp(
-        name="static_scaled_fp8_quant")
-    torch.ops._C_ascend.dynamic_scaled_fp8_quant = dummyFusionOp(
-        name="dynamic_scaled_fp8_quant")
-    torch.ops._C_ascend.dynamic_per_token_scaled_fp8_quant = dummyFusionOp(
-        name="dynamic_per_token_scaled_fp8_quant")
-    torch.ops._C_ascend.rms_norm_static_fp8_quant = dummyFusionOp(
-        name="rms_norm_static_fp8_quant")
-    torch.ops._C_ascend.fused_add_rms_norm_static_fp8_quant = dummyFusionOp(
-        name="fused_add_rms_norm_static_fp8_quant")
-    torch.ops._C_ascend.rms_norm_dynamic_per_token_quant = dummyFusionOp(
-        name="rms_norm_dynamic_per_token_quant")
+    torch.ops._C_ascend.fused_add_rms_norm = dummyFusionOp(name="fused_add_rms_norm")
+    torch.ops._C_ascend.static_scaled_fp8_quant = dummyFusionOp(name="static_scaled_fp8_quant")
+    torch.ops._C_ascend.dynamic_scaled_fp8_quant = dummyFusionOp(name="dynamic_scaled_fp8_quant")
+    torch.ops._C_ascend.dynamic_per_token_scaled_fp8_quant = dummyFusionOp(name="dynamic_per_token_scaled_fp8_quant")
+    torch.ops._C_ascend.rms_norm_static_fp8_quant = dummyFusionOp(name="rms_norm_static_fp8_quant")
+    torch.ops._C_ascend.fused_add_rms_norm_static_fp8_quant = dummyFusionOp(name="fused_add_rms_norm_static_fp8_quant")
+    torch.ops._C_ascend.rms_norm_dynamic_per_token_quant = dummyFusionOp(name="rms_norm_dynamic_per_token_quant")


-__all__ = [
-    "AscendQuickGELU", "AscendSiluAndMul", "AscendRotaryEmbedding",
-    "AscendDeepseekScalingRotaryEmbedding"
-]
+__all__ = ["AscendQuickGELU", "AscendSiluAndMul", "AscendRotaryEmbedding", "AscendDeepseekScalingRotaryEmbedding"]
```
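
The `dummyFusionOp` assignments above bind named placeholders onto `torch.ops._C_ascend`. The class body is not part of this diff, so the following is only an illustrative sketch of the placeholder pattern, not vllm-ascend's actual implementation:

```python
# Hypothetical stand-in: lookups of torch.ops._C_ascend.<name> succeed even
# when the fused kernel is not compiled in; calling it fails loudly.
class DummyFusionOp:
    def __init__(self, name: str) -> None:
        self.name = name

    def __call__(self, *args, **kwargs):
        raise NotImplementedError(f"fused op '{self.name}' is unavailable")
```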

#### `vllm_ascend/ops/activation.py`

```diff
@@ -17,10 +17,11 @@
 import torch
 from vllm.model_executor.layers.activation import QuickGELU, SiluAndMul
+
 from vllm_ascend.utils import get_weight_prefetch_method


 class AscendQuickGELU(QuickGELU):
     def forward_oot(self, x: torch.tensor) -> torch.Tensor:
         import torch_npu
@@ -29,7 +30,6 @@ class AscendQuickGELU(QuickGELU):

 class AscendSiluAndMul(SiluAndMul):
-
     def forward_oot(self, x: torch.Tensor) -> torch.Tensor:
         import torch_npu
```

#### `vllm_ascend/ops/flashcomm2_oshard_manager.py`

```diff
@@ -1,11 +1,14 @@
-from typing import Any, Dict, Optional
+from typing import Any

 from vllm.model_executor.models.utils import extract_layer_index

 from vllm_ascend.distributed.parallel_state import get_shard_weight_group
-from vllm_ascend.ops.layer_shard_linear import (
-    is_hidden_layer, post_process_after_loading_for_shard_weight_series,
-    reach_layer_for_shard_weight_series, register_layer_to_shard_weight_series)
+from vllm_ascend.ops.layer_shard_linear import (
+    is_hidden_layer,
+    post_process_after_loading_for_shard_weight_series,
+    reach_layer_for_shard_weight_series,
+    register_layer_to_shard_weight_series,
+)
 from vllm_ascend.utils import flashcomm2_enable, o_shard_enable
@@ -26,7 +29,7 @@ class Flashcomm2OShardManager:
     """

     def __init__(self):
-        self._shard_layers: Dict[int, Any] = {}
+        self._shard_layers: dict[int, Any] = {}

     def flashcomm2_oshard_enable(self):
         return flashcomm2_enable() and o_shard_enable()
@@ -52,12 +55,10 @@
         self._shard_layers[layer_idx] = layer
         register_layer_to_shard_weight_series(
-            series_name="o_proj",
-            group=get_shard_weight_group(),
-            layer=layer,
-            prefetch_step=prefetch_step)
+            series_name="o_proj", group=get_shard_weight_group(), layer=layer, prefetch_step=prefetch_step
+        )

-    def get_layer(self, layer_idx: int) -> Optional[Any]:
+    def get_layer(self, layer_idx: int) -> Any | None:
         """Safely retrieves a registered layer by its index.

         Args:
```
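
Besides reflowing long calls, the batch modernizes annotations: `Dict`/`Optional` become builtin generics (PEP 585) and `| None` unions (PEP 604). The formatter alone does not rewrite annotations, so these presumably come from ruff's pyupgrade (UP) autofixes. A self-contained illustration of the equivalence on Python 3.10+:

```python
from typing import Any, Dict, Optional

# Old spellings, as the code used before this PR:
layers_legacy: Dict[int, Any] = {}

def get_layer_legacy(idx: int) -> Optional[Any]:
    return layers_legacy.get(idx)

# New spellings, as rewritten here; equivalent at runtime on Python >= 3.10:
layers_modern: dict[int, Any] = {}

def get_layer_modern(idx: int) -> Any | None:
    return layers_modern.get(idx)
```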

#### `vllm_ascend/ops/layernorm.py`

```diff
@@ -15,56 +15,53 @@
 # This file is a part of the vllm-ascend project.
 #
-from typing import Optional, Tuple, Union

 import torch
 from torch import nn
 from vllm.config import get_current_vllm_config
 from vllm.model_executor.layers.layernorm import GemmaRMSNorm, RMSNorm, RMSNormGated

 from vllm_ascend.ops.triton.layernorm_gated import layer_norm_fwd_npu
-from vllm_ascend.utils import enable_custom_op
-from vllm_ascend.utils import get_weight_prefetch_method
+from vllm_ascend.utils import enable_custom_op, get_weight_prefetch_method


 class AscendRMSNorm(RMSNorm):
     def __init__(
         self,
         hidden_size: int,
         eps: float = 1e-6,
-        var_hidden_size: Optional[int] = None,
+        var_hidden_size: int | None = None,
         has_weight: bool = True,
-        dtype: Optional[torch.dtype] = None,
+        dtype: torch.dtype | None = None,
     ) -> None:
         super().__init__(hidden_size, eps, var_hidden_size, has_weight, dtype)
         vllm_config = get_current_vllm_config()
         self.bias = None
         # quantization with anti_method m4 will generate none-zero norm bias
-        if vllm_config.quant_config is not None and \
-            any("norm.bias" in name for name in vllm_config.quant_config.quant_description.keys()):
-            self.bias = torch.nn.Parameter(torch.zeros(hidden_size),
-                                           requires_grad=False)
+        if vllm_config.quant_config is not None and any(
+            "norm.bias" in name for name in vllm_config.quant_config.quant_description
+        ):
+            self.bias = torch.nn.Parameter(torch.zeros(hidden_size), requires_grad=False)

     def forward_oot(
         self,
         x: torch.Tensor,
-        residual: Optional[torch.Tensor] = None,
-    ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
+        residual: torch.Tensor | None = None,
+    ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
         import torch_npu

         if residual is not None:
             if enable_custom_op():
                 x, _, residual = torch.ops._C_ascend.npu_add_rms_norm_bias(
-                    x, residual, self.weight, self.bias, self.variance_epsilon)
+                    x, residual, self.weight, self.bias, self.variance_epsilon
+                )
             else:
-                x, _, residual = torch_npu.npu_add_rms_norm(
-                    x, residual, self.weight, self.variance_epsilon)
+                x, _, residual = torch_npu.npu_add_rms_norm(x, residual, self.weight, self.variance_epsilon)
             if self.bias is not None:
                 x.add_(self.bias)
             return x, residual

-        x, residual = torch_npu.npu_rms_norm(x, self.weight,
-                                             self.variance_epsilon)
+        x, residual = torch_npu.npu_rms_norm(x, self.weight, self.variance_epsilon)
         if self.bias is not None:
             x.add_(self.bias)
@@ -75,42 +72,30 @@ class AscendRMSNorm(RMSNorm):
 class AscendGemmaRMSNorm(GemmaRMSNorm):
     def forward_oot(
         self,
         x: torch.Tensor,
-        residual: Optional[torch.Tensor] = None,
-    ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
+        residual: torch.Tensor | None = None,
+    ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
         import torch_npu
-        from vllm_ascend.utils import AscendDeviceType, get_ascend_device_type

         if residual is not None:
             if enable_custom_op():
                 x, _, residual = torch.ops._C_ascend.npu_add_rms_norm_bias(
-                    x, residual, 1.0 + self.weight, None,
-                    self.variance_epsilon)
+                    x, residual, 1.0 + self.weight, None, self.variance_epsilon
+                )
             else:
-                x, _, residual = torch_npu.npu_add_rms_norm(
-                    x, residual, 1.0 + self.weight, self.variance_epsilon)
+                x, _, residual = torch_npu.npu_add_rms_norm(x, residual, 1.0 + self.weight, self.variance_epsilon)
             return x, residual

-        x, _ = torch_npu.npu_rms_norm(x, 1.0 + self.weight,
-                                      self.variance_epsilon)
+        x, _ = torch_npu.npu_rms_norm(x, 1.0 + self.weight, self.variance_epsilon)
         return x


 class LayerNormFn(torch.autograd.Function):
     @staticmethod
-    def forward(ctx,
-                x,
-                weight,
-                bias,
-                z=None,
-                eps=1e-6,
-                group_size=None,
-                norm_before_gate=True,
-                is_rms_norm=False):
-        """If z is not None, we do norm(x) * silu(z) if norm_before_gate, else norm(x * silu(z))
-        """
+    def forward(ctx, x, weight, bias, z=None, eps=1e-6, group_size=None, norm_before_gate=True, is_rms_norm=False):
+        """If z is not None, we do norm(x) * silu(z) if norm_before_gate, else norm(x * silu(z))"""

         x_shape_og = x.shape
         # reshape input data into 2D tensor
@@ -143,16 +128,16 @@ class LayerNormFn(torch.autograd.Function):
         ctx.is_rms_norm = is_rms_norm
         return y.reshape(x_shape_og)


 class AscendRMSNormGated(RMSNormGated):
     def __init__(
         self,
         hidden_size,
         eps: float = 1e-5,
-        group_size: Optional[int] = None,
+        group_size: int | None = None,
         norm_before_gate: bool = False,
-        device: Optional[torch.device] = None,
-        dtype: Optional[torch.dtype] = None,
+        device: torch.device | None = None,
+        dtype: torch.dtype | None = None,
     ):
         """If group_size is not None, we do GroupNorm with each group having group_size elements.
         group_size=None is equivalent to group_size=hidden_size (i.e. there's only 1 group).
@@ -170,7 +155,5 @@ class AscendRMSNormGated(RMSNormGated):
         torch.nn.init.ones_(self.weight)

     def forward_oot(self, x, z=None):
-        """If z is not None, we do norm(x) * silu(z) if norm_before_gate, else norm(x * silu(z))
-        """
-        return LayerNormFn.apply(x, self.weight, self.bias, z, self.eps, self.group_size,
-                                 self.norm_before_gate, True)
+        """If z is not None, we do norm(x) * silu(z) if norm_before_gate, else norm(x * silu(z))"""
+        return LayerNormFn.apply(x, self.weight, self.bias, z, self.eps, self.group_size, self.norm_before_gate, True)
```
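
The reformatted calls keep the fused kernels' semantics unchanged. As a reading aid, a plain-PyTorch sketch of what `npu_add_rms_norm` computes (reference only, not the NPU kernel; the kernel's middle return value, the reciprocal standard deviation, is elided):

```python
import torch

def add_rms_norm_reference(x, residual, weight, eps=1e-6):
    # Fused residual-add followed by RMS normalization; returns the
    # normalized activations and the updated residual, matching how the
    # call sites unpack `x, _, residual`.
    residual = x + residual
    variance = residual.pow(2).mean(-1, keepdim=True)
    normed = residual * torch.rsqrt(variance + eps) * weight
    return normed, residual
```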

#### `vllm_ascend/ops/mla.py`

```diff
@@ -19,15 +19,13 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from typing import Optional

 import torch
 from torch import nn
 from vllm.config import CacheConfig, get_current_vllm_config
 from vllm.distributed import get_tensor_model_parallel_world_size
 from vllm.forward_context import ForwardContext, get_forward_context
-from vllm.model_executor.layers.mla import (MLAModules,
-                                            MultiHeadLatentAttentionWrapper)
+from vllm.model_executor.layers.mla import MLAModules, MultiHeadLatentAttentionWrapper
 from vllm.model_executor.layers.quantization import QuantizationConfig
 from vllm.utils.torch_utils import direct_register_custom_op
 from vllm.v1.attention.backend import AttentionMetadata  # type: ignore
@@ -36,20 +34,20 @@ from vllm_ascend.ascend_config import get_ascend_config
 from vllm_ascend.utils import vllm_version_is

 if vllm_version_is("v0.15.0"):
     from vllm.attention.layer import MLAAttention  # type: ignore
 else:
     from vllm.model_executor.layers.attention import MLAAttention


 class IndexerWrapper(nn.Module):
-    '''
+    """
     A wrapper of Indexer for Deepseek v3.2.
     This wrapper is currently used to solve the fp8 hard code issue of vllm's deepseek_v2.py.
     It wraps the original Indexer, inherits its module weights
     (including wq_b, wk, weights_proj, k_norm)
     while deletes the unused topk_indices_buffer and k_cache to save memory.
     TODO: Will be removed once original Indexer supports different quantization methods.
-    '''
+    """

     def __init__(self, vllm_indexer: nn.Module) -> None:
         super().__init__()
@@ -71,7 +69,6 @@ class IndexerWrapper(nn.Module):
 class AscendMultiHeadLatentAttention(MultiHeadLatentAttentionWrapper):
-
     def __init__(
         self,
         hidden_size: int,
@@ -80,11 +77,11 @@ class AscendMultiHeadLatentAttention(MultiHeadLatentAttentionWrapper):
         qk_nope_head_dim: int,
         qk_rope_head_dim: int,
         v_head_dim: int,
-        q_lora_rank: Optional[int],
+        q_lora_rank: int | None,
         kv_lora_rank: int,
         mla_modules: MLAModules,
-        cache_config: Optional[CacheConfig] = None,
-        quant_config: Optional[QuantizationConfig] = None,
+        cache_config: CacheConfig | None = None,
+        quant_config: QuantizationConfig | None = None,
         prefix: str = "",
     ) -> None:
         nn.Module.__init__(self)
@@ -97,8 +94,7 @@ class AscendMultiHeadLatentAttention(MultiHeadLatentAttentionWrapper):
         self.v_head_dim = v_head_dim
         self.prefix = prefix
         hf_config = get_current_vllm_config().model_config.hf_text_config
-        self.enable_shared_expert_dp = get_ascend_config(
-        ).enable_shared_expert_dp
+        self.enable_shared_expert_dp = get_ascend_config().enable_shared_expert_dp
         self.tp_size = get_tensor_model_parallel_world_size()
         self.layers = hf_config.num_hidden_layers
         if mla_modules.indexer is not None:
@@ -134,6 +130,7 @@ class AscendMultiHeadLatentAttention(MultiHeadLatentAttentionWrapper):
         def wrapped_process_weights(act_dtype: torch.dtype):
             from vllm_ascend.attention.sfa_v1 import AscendSFAImpl
+
             if not isinstance(self.mla_attn.impl, AscendSFAImpl):
                 original_process_weights(act_dtype)
             self.mla_attn.impl.process_weights_after_loading(act_dtype)
@@ -146,19 +143,17 @@ class AscendMultiHeadLatentAttention(MultiHeadLatentAttentionWrapper):
         compilation_config.static_forward_context[prefix] = self

     def forward(
         self,
         positions: torch.Tensor,
         hidden_states: torch.Tensor,
-        kv_cache: Optional[torch.Tensor] = None,
-        attn_metadata: Optional[AttentionMetadata] = None) -> torch.Tensor:
+        kv_cache: torch.Tensor | None = None,
+        attn_metadata: AttentionMetadata | None = None,
+    ) -> torch.Tensor:
         need_gather_q_kv = get_forward_context().sp_enabled
         output_shape = hidden_states.shape
         # FIXME: This does not seem right, should make sure the buffer is fixed
-        output = torch.empty(output_shape,
-                             dtype=hidden_states.dtype,
-                             device=hidden_states.device)
-        torch.ops.vllm.mla_forward(hidden_states, need_gather_q_kv, output,
-                                   self.prefix)
+        output = torch.empty(output_shape, dtype=hidden_states.dtype, device=hidden_states.device)
+        torch.ops.vllm.mla_forward(hidden_states, need_gather_q_kv, output, self.prefix)
         output = output.view(-1, output_shape[-1])
         return output
@@ -176,9 +171,9 @@ def mla_forward(
     else:
         attn_metadata = forward_context.attn_metadata
     kv_cache = self.mla_attn.kv_cache[forward_context.virtual_engine]
-    self.mla_attn.impl.forward(self.mla_attn.layer_name, hidden_states,
-                               kv_cache, attn_metadata, need_gather_q_kv,
-                               output)
+    self.mla_attn.impl.forward(
+        self.mla_attn.layer_name, hidden_states, kv_cache, attn_metadata, need_gather_q_kv, output
+    )
     return
```
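
One detail worth isolating from the reflowed `forward`: the wrapper preallocates `output` and passes it into the custom op, which writes the result in place, so graph capture sees a stable buffer instead of an allocation inside the op. A toy stand-in for that out-parameter pattern (not the real `mla_forward` op):

```python
import torch

def toy_mla_forward(hidden_states: torch.Tensor, output: torch.Tensor) -> None:
    # The real op computes attention into `output`; copy_ is a stand-in
    # so this sketch runs anywhere.
    output.copy_(hidden_states)

hidden_states = torch.randn(2, 3, 16)
output = torch.empty(hidden_states.shape, dtype=hidden_states.dtype, device=hidden_states.device)
toy_mla_forward(hidden_states, output)
output = output.view(-1, hidden_states.shape[-1])  # flatten, as forward() does
```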

#### `vllm_ascend/ops/mm_encoder_attention.py`

```diff
@@ -19,18 +19,15 @@ import einops
 import torch
 import torch.nn.functional as F
 import torch_npu
-from vllm.config import MultiModalConfig
 from vllm.model_executor.layers.attention.mm_encoder_attention import MMEncoderAttention  # type: ignore

 import vllm_ascend.envs as envs_ascend

 MIN_PAD_SIZE = 64  # min_size to pad weight
 MAX_PAD_SIZE = 128  # max_size to pad weight


 class AscendMMEncoderAttention(MMEncoderAttention):
-
     def __init__(
         self,
         num_heads: int,
@@ -82,13 +79,12 @@ class AscendMMEncoderAttention(MMEncoderAttention):
         return query, key, value

     def forward_oot(
         self,
         query: torch.Tensor,
         key: torch.Tensor,
         value: torch.Tensor,
         cu_seqlens: torch.Tensor | None = None,
-        max_seqlen: torch.Tensor
-        | None = None,  # Only used for Flash Attention
+        max_seqlen: torch.Tensor | None = None,  # Only used for Flash Attention
     ):
         bsz, q_len = query.size()[:2]
         kv_len = key.size(1)
@@ -97,9 +93,7 @@ class AscendMMEncoderAttention(MMEncoderAttention):
         # q, k, v: [b, s, head, head_dim] -> [b * s, head, head_dim]
         q, k, v = self.reshape_qkv_to_3d(query, key, value, bsz, q_len, kv_len)

-        enable_pad = (envs_ascend.USE_OPTIMIZED_MODEL
-                      and self.head_size > MIN_PAD_SIZE
-                      and self.head_size < MAX_PAD_SIZE)
+        enable_pad = envs_ascend.USE_OPTIMIZED_MODEL and self.head_size > MIN_PAD_SIZE and self.head_size < MAX_PAD_SIZE

         if enable_pad:
             origin_shape = q.shape[-1]
@@ -114,10 +108,7 @@ class AscendMMEncoderAttention(MMEncoderAttention):
         context_layer = torch.empty_like(q)

         if cu_seqlens is None:
-            cu_seqlens = torch.arange(0, (bsz + 1) * q_len,
-                                      step=q_len,
-                                      dtype=torch.int32,
-                                      device=query.device)
+            cu_seqlens = torch.arange(0, (bsz + 1) * q_len, step=q_len, dtype=torch.int32, device=query.device)

         cu_seqlens = torch.diff(cu_seqlens).to("cpu")
@@ -137,11 +128,7 @@ class AscendMMEncoderAttention(MMEncoderAttention):
             context_layer = context_layer[..., :origin_shape]

         if is_reshaped:
-            context_layer = einops.rearrange(context_layer,
-                                             "(b s) h d -> b s h d",
-                                             b=bsz).contiguous()
+            context_layer = einops.rearrange(context_layer, "(b s) h d -> b s h d", b=bsz).contiguous()
         else:
-            context_layer = einops.rearrange(context_layer,
-                                             "(b s) h d -> b s (h d)",
-                                             b=bsz).contiguous()
+            context_layer = einops.rearrange(context_layer, "(b s) h d -> b s (h d)", b=bsz).contiguous()

         return context_layer
```
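
A quick worked example of the `cu_seqlens` handling above, with toy values: cumulative offsets are built with `torch.arange`, then `torch.diff` turns them back into the per-sequence lengths the NPU attention op consumes.

```python
import torch

bsz, q_len = 2, 3  # assumed toy values
cu_seqlens = torch.arange(0, (bsz + 1) * q_len, step=q_len, dtype=torch.int32)
print(cu_seqlens)              # tensor([0, 3, 6], dtype=torch.int32)
print(torch.diff(cu_seqlens))  # tensor([3, 3], dtype=torch.int32)
```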

#### `vllm_ascend/ops/register_custom_ops.py`

```diff
@@ -1,24 +1,25 @@
 import torch
 import torch.nn.functional as F
 import torch_npu
-from vllm.distributed import (get_dp_group, get_ep_group,
-                              get_tensor_model_parallel_rank,
-                              get_tensor_model_parallel_world_size,
-                              tensor_model_parallel_all_gather,
-                              tensor_model_parallel_all_reduce,
-                              tensor_model_parallel_reduce_scatter)
+from vllm.distributed import (
+    get_dp_group,
+    get_ep_group,
+    get_tensor_model_parallel_rank,
+    get_tensor_model_parallel_world_size,
+    tensor_model_parallel_all_gather,
+    tensor_model_parallel_all_reduce,
+    tensor_model_parallel_reduce_scatter,
+)
 from vllm.forward_context import get_forward_context
 from vllm.utils.torch_utils import direct_register_custom_op

 import vllm_ascend.envs as envs_ascend
 from vllm_ascend.ascend_forward_context import MoECommType
+from vllm_ascend.ops.triton.rope import rope_forward_triton
 from vllm_ascend.ops.weight_prefetch import maybe_npu_prefetch
 from vllm_ascend.utils import npu_stream_switch, prefetch_stream
-from typing import Optional, Tuple
-from vllm_ascend.ops.triton.rope import rope_forward_triton


-def _maybe_chunk_residual_impl(x: torch.Tensor,
-                               residual: torch.Tensor) -> torch.Tensor:
+def _maybe_chunk_residual_impl(x: torch.Tensor, residual: torch.Tensor) -> torch.Tensor:
     try:
         forward_context = get_forward_context()
     except AssertionError:
@@ -26,8 +27,7 @@ def _maybe_chunk_residual_impl(x: torch.Tensor,
     if x.size(0) != residual.size(0):
         sp_enabled = forward_context.sp_enabled
-        assert sp_enabled is True, ("Currently, this situation only occurs "
-                                    "when sp is enabled")
+        assert sp_enabled is True, "Currently, this situation only occurs when sp is enabled"
         pad_size = forward_context.pad_size
         if pad_size > 0:
             residual = F.pad(residual, (0, 0, 0, pad_size))
@@ -38,10 +38,7 @@ def _maybe_chunk_residual_impl(x: torch.Tensor,
     return residual


-def _maybe_all_gather_and_maybe_unpad_impl(
-        x: torch.Tensor,
-        label: bool,
-        is_ep_comm: bool = False) -> torch.Tensor:
+def _maybe_all_gather_and_maybe_unpad_impl(x: torch.Tensor, label: bool, is_ep_comm: bool = False) -> torch.Tensor:
     try:
         forward_context = get_forward_context()
     except AssertionError:
@@ -59,24 +56,20 @@ def _maybe_all_gather_and_maybe_unpad_impl(
             x = get_ep_group().all_gather(x, 0)
             # unpad
             num_tokens_across_dp_cpu = dp_metadata.num_tokens_across_dp_cpu
-            result = torch.empty(
-                (num_tokens_across_dp_cpu.sum(), *x.shape[1:]),
-                device=x.device,
-                dtype=x.dtype)
+            result = torch.empty((num_tokens_across_dp_cpu.sum(), *x.shape[1:]), device=x.device, dtype=x.dtype)
             dp_size = get_dp_group().world_size
             x = x.view(dp_size, forward_context.padded_length, *x.shape[1:])
             offset = 0
             for idx in range(dp_size):
                 num_tokens_dp = num_tokens_across_dp_cpu[idx]
-                result[offset:offset + num_tokens_dp] = x[idx, :num_tokens_dp]
+                result[offset : offset + num_tokens_dp] = x[idx, :num_tokens_dp]
                 offset += num_tokens_dp
             x = result
     return x


-def _maybe_pad_and_reduce_impl(x: torch.Tensor,
-                               is_ep_comm: bool = False) -> torch.Tensor:
+def _maybe_pad_and_reduce_impl(x: torch.Tensor, is_ep_comm: bool = False) -> torch.Tensor:
     try:
         forward_context = get_forward_context()
     except AssertionError:
@@ -94,63 +87,44 @@ def _maybe_pad_and_reduce_impl(x: torch.Tensor,
     else:
         # padding
         dp_size = get_dp_group().world_size
-        num_tokens_across_dp_cpu = \
-            get_forward_context().dp_metadata.num_tokens_across_dp_cpu
-        padded_x = torch.empty(
-            (dp_size, forward_context.padded_length, *x.shape[1:]),
-            device=x.device,
-            dtype=x.dtype)
+        num_tokens_across_dp_cpu = get_forward_context().dp_metadata.num_tokens_across_dp_cpu
+        padded_x = torch.empty((dp_size, forward_context.padded_length, *x.shape[1:]), device=x.device, dtype=x.dtype)
         offset = 0
         for idx in range(dp_size):
             num_tokens_dp = num_tokens_across_dp_cpu[idx]
-            padded_x[idx, :num_tokens_dp] = x[offset:offset + num_tokens_dp]
+            padded_x[idx, :num_tokens_dp] = x[offset : offset + num_tokens_dp]
             offset += num_tokens_dp
-        return get_ep_group().reduce_scatter(padded_x.view(-1, *x.shape[1:]),
-                                             0)
+        return get_ep_group().reduce_scatter(padded_x.view(-1, *x.shape[1:]), 0)


-def _maybe_all_gather_and_maybe_unpad_fake(
-        x: torch.Tensor,
-        label: bool,
-        is_ep_comm: bool = False) -> torch.Tensor:
+def _maybe_all_gather_and_maybe_unpad_fake(x: torch.Tensor, label: bool, is_ep_comm: bool = False) -> torch.Tensor:
     if get_forward_context().sp_enabled and label:
         return torch.empty(
-            (x.shape[0] * get_tensor_model_parallel_world_size(),
-             *x.shape[1:]),
-            device=x.device,
-            dtype=x.dtype)
+            (x.shape[0] * get_tensor_model_parallel_world_size(), *x.shape[1:]), device=x.device, dtype=x.dtype
+        )
     return x


-def _maybe_pad_and_reduce_fake(x: torch.Tensor,
-                               is_ep_comm: bool = False) -> torch.Tensor:
+def _maybe_pad_and_reduce_fake(x: torch.Tensor, is_ep_comm: bool = False) -> torch.Tensor:
     if get_forward_context().sp_enabled:
         return torch.empty(
-            (x.shape[0] // get_tensor_model_parallel_world_size(),
-             *x.shape[1:]),
-            device=x.device,
-            dtype=x.dtype)
+            (x.shape[0] // get_tensor_model_parallel_world_size(), *x.shape[1:]), device=x.device, dtype=x.dtype
+        )
     return x


-def _prefetch_preprocess_impl(weight: torch.Tensor, start_flag: torch.Tensor,
-                              max_weight_size: int) -> None:
+def _prefetch_preprocess_impl(weight: torch.Tensor, start_flag: torch.Tensor, max_weight_size: int) -> None:
     calculation_stream = torch_npu.npu.current_stream()
     weight_prefetch_stream = prefetch_stream()
     weight_prefetch_stream.wait_stream(calculation_stream)
     with npu_stream_switch(weight_prefetch_stream):
-        maybe_npu_prefetch(inputs=weight,
-                           dependency=start_flag,
-                           max_size=max_weight_size)
+        maybe_npu_prefetch(inputs=weight, dependency=start_flag, max_size=max_weight_size)


-def _prefetch_preprocess_impl_fake(weight: torch.Tensor,
-                                   start_flag: torch.Tensor,
-                                   max_weight_size: int) -> None:
+def _prefetch_preprocess_impl_fake(weight: torch.Tensor, start_flag: torch.Tensor, max_weight_size: int) -> None:
     return
@@ -164,20 +138,16 @@ def _prefetch_postprocess_impl_fake(stop_flag: torch.Tensor) -> None:
     return


-def _maybe_all_reduce_tensor_model_parallel_impl(
-        final_hidden_states: torch.Tensor) -> torch.Tensor:
+def _maybe_all_reduce_tensor_model_parallel_impl(final_hidden_states: torch.Tensor) -> torch.Tensor:
     forward_context = get_forward_context()
     moe_comm_type = forward_context.moe_comm_type
-    if moe_comm_type in {
-            MoECommType.ALLTOALL, MoECommType.MC2, MoECommType.FUSED_MC2
-    } or forward_context.sp_enabled:
+    if moe_comm_type in {MoECommType.ALLTOALL, MoECommType.MC2, MoECommType.FUSED_MC2} or forward_context.sp_enabled:
         return final_hidden_states
     else:
         return tensor_model_parallel_all_reduce(final_hidden_states)


-def _matmul_and_reduce_impl(input_parallel: torch.Tensor,
-                            layer_name: str) -> torch.Tensor:
+def _matmul_and_reduce_impl(input_parallel: torch.Tensor, layer_name: str) -> torch.Tensor:
     forward_context = get_forward_context()
     self = forward_context.no_compile_layers[layer_name]
     assert self.custom_op is not None
@@ -187,16 +157,15 @@ def _matmul_and_reduce_impl(input_parallel: torch.Tensor,
     return output


-def _matmul_and_reduce_impl_fake(input_parallel: torch.Tensor,
-                                 layer_name: str) -> torch.Tensor:
+def _matmul_and_reduce_impl_fake(input_parallel: torch.Tensor, layer_name: str) -> torch.Tensor:
     forward_context = get_forward_context()
     self = forward_context.no_compile_layers[layer_name]
     num_tokens = input_parallel.size(0)
     if forward_context.sp_enabled:
         num_tokens = num_tokens // self.tp_size
-    output = torch.empty(size=(num_tokens, self.output_size_per_partition),
-                         device=input_parallel.device,
-                         dtype=input_parallel.dtype)
+    output = torch.empty(
+        size=(num_tokens, self.output_size_per_partition), device=input_parallel.device, dtype=input_parallel.dtype
+    )
     return output
@@ -207,77 +176,96 @@ def _matmul_and_reduce_impl_fake(input_parallel: torch.Tensor,
 # pass input_scale and input_scale_reciprocal at the same time to avoid redundant
 # reciprocal calculation in fussion pass. We shall remove this once
 # aclnnAddRmsNormQuantV2 supports div_moe=False.
-def _quantize_impl(in_tensor: torch.Tensor, input_scale: torch.Tensor,
-                   input_scale_reciprocal: torch.Tensor,
-                   input_offset: torch.Tensor) -> torch.Tensor:
-    return torch_npu.npu_quantize(in_tensor, input_scale_reciprocal,
-                                  input_offset, torch.qint8, -1, False)
+def _quantize_impl(
+    in_tensor: torch.Tensor, input_scale: torch.Tensor, input_scale_reciprocal: torch.Tensor, input_offset: torch.Tensor
+) -> torch.Tensor:
+    return torch_npu.npu_quantize(in_tensor, input_scale_reciprocal, input_offset, torch.qint8, -1, False)


-def _quantize_impl_fake(in_tensor: torch.Tensor, input_scale: torch.Tensor,
-                        input_scale_reciprocal: torch.Tensor,
-                        input_offset: torch.Tensor) -> torch.Tensor:
-    return torch_npu.npu_quantize(in_tensor, input_scale_reciprocal,
-                                  input_offset, torch.qint8, -1, False)
+def _quantize_impl_fake(
+    in_tensor: torch.Tensor, input_scale: torch.Tensor, input_scale_reciprocal: torch.Tensor, input_offset: torch.Tensor
+) -> torch.Tensor:
+    return torch_npu.npu_quantize(in_tensor, input_scale_reciprocal, input_offset, torch.qint8, -1, False)


 def _rope_forward_triton_fake(
     q: torch.Tensor,
     k: torch.Tensor,
     cos: torch.Tensor,
     sin: torch.Tensor,
     rope_dim: int = -1,
-    is_neox_style: bool = True
-) -> Tuple[torch.Tensor, torch.Tensor]:
+    is_neox_style: bool = True,
+) -> tuple[torch.Tensor, torch.Tensor]:
     return torch.empty_like(q), torch.empty_like(k)


-direct_register_custom_op(op_name="maybe_chunk_residual",
-                          op_func=_maybe_chunk_residual_impl,
-                          fake_impl=lambda x, residual: x,
-                          mutates_args=[],
-                          dispatch_key="PrivateUse1")
+direct_register_custom_op(
+    op_name="maybe_chunk_residual",
+    op_func=_maybe_chunk_residual_impl,
+    fake_impl=lambda x, residual: x,
+    mutates_args=[],
+    dispatch_key="PrivateUse1",
+)

-direct_register_custom_op(op_name="maybe_all_gather_and_maybe_unpad",
-                          op_func=_maybe_all_gather_and_maybe_unpad_impl,
-                          fake_impl=_maybe_all_gather_and_maybe_unpad_fake,
-                          mutates_args=[],
-                          dispatch_key="PrivateUse1")
+direct_register_custom_op(
+    op_name="maybe_all_gather_and_maybe_unpad",
+    op_func=_maybe_all_gather_and_maybe_unpad_impl,
+    fake_impl=_maybe_all_gather_and_maybe_unpad_fake,
+    mutates_args=[],
+    dispatch_key="PrivateUse1",
+)

-direct_register_custom_op(op_name="maybe_pad_and_reduce",
-                          op_func=_maybe_pad_and_reduce_impl,
-                          fake_impl=_maybe_pad_and_reduce_fake,
-                          mutates_args=[],
-                          dispatch_key="PrivateUse1")
+direct_register_custom_op(
+    op_name="maybe_pad_and_reduce",
+    op_func=_maybe_pad_and_reduce_impl,
+    fake_impl=_maybe_pad_and_reduce_fake,
+    mutates_args=[],
+    dispatch_key="PrivateUse1",
+)

-direct_register_custom_op(op_name="prefetch_preprocess",
-                          op_func=_prefetch_preprocess_impl,
-                          fake_impl=_prefetch_preprocess_impl_fake,
-                          mutates_args=[],
-                          dispatch_key="PrivateUse1")
+direct_register_custom_op(
+    op_name="prefetch_preprocess",
+    op_func=_prefetch_preprocess_impl,
+    fake_impl=_prefetch_preprocess_impl_fake,
+    mutates_args=[],
+    dispatch_key="PrivateUse1",
+)

-direct_register_custom_op(op_name="prefetch_postprocess",
-                          op_func=_prefetch_postprocess_impl,
-                          fake_impl=_prefetch_postprocess_impl_fake,
-                          mutates_args=[],
-                          dispatch_key="PrivateUse1")
+direct_register_custom_op(
+    op_name="prefetch_postprocess",
+    op_func=_prefetch_postprocess_impl,
+    fake_impl=_prefetch_postprocess_impl_fake,
+    mutates_args=[],
+    dispatch_key="PrivateUse1",
+)

-direct_register_custom_op(op_name="maybe_all_reduce_tensor_model_parallel",
-                          op_func=_maybe_all_reduce_tensor_model_parallel_impl,
-                          fake_impl=lambda x: x,
-                          mutates_args=[],
-                          dispatch_key="PrivateUse1")
+direct_register_custom_op(
+    op_name="maybe_all_reduce_tensor_model_parallel",
+    op_func=_maybe_all_reduce_tensor_model_parallel_impl,
+    fake_impl=lambda x: x,
+    mutates_args=[],
+    dispatch_key="PrivateUse1",
+)

-direct_register_custom_op(op_name="matmul_and_reduce",
-                          op_func=_matmul_and_reduce_impl,
-                          fake_impl=_matmul_and_reduce_impl_fake,
-                          mutates_args=[],
-                          dispatch_key="PrivateUse1")
+direct_register_custom_op(
+    op_name="matmul_and_reduce",
+    op_func=_matmul_and_reduce_impl,
+    fake_impl=_matmul_and_reduce_impl_fake,
+    mutates_args=[],
+    dispatch_key="PrivateUse1",
+)

-direct_register_custom_op(op_name="quantize",
-                          op_func=_quantize_impl,
-                          fake_impl=_quantize_impl_fake,
-                          mutates_args=[],
-                          dispatch_key="PrivateUse1")
+direct_register_custom_op(
+    op_name="quantize",
+    op_func=_quantize_impl,
+    fake_impl=_quantize_impl_fake,
+    mutates_args=[],
+    dispatch_key="PrivateUse1",
+)

-direct_register_custom_op(op_name="rope_forward_triton",
-                          op_func=rope_forward_triton,
-                          fake_impl=_rope_forward_triton_fake,
-                          mutates_args=[],
-                          dispatch_key="PrivateUse1")
+direct_register_custom_op(
+    op_name="rope_forward_triton",
+    op_func=rope_forward_triton,
+    fake_impl=_rope_forward_triton_fake,
+    mutates_args=[],
+    dispatch_key="PrivateUse1",
+)
```
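
Every registration above follows one pattern: a device implementation paired with a shape-only "fake" implementation so `torch.compile` can trace the op without executing it. A minimal sketch with illustrative names (only `direct_register_custom_op` and its keyword arguments are taken from the code above; `toy_double` is not a vllm-ascend op):

```python
import torch
from vllm.utils.torch_utils import direct_register_custom_op

def _toy_double_impl(x: torch.Tensor) -> torch.Tensor:
    return x * 2  # the real ops dispatch NPU kernels here

def _toy_double_fake(x: torch.Tensor) -> torch.Tensor:
    return torch.empty_like(x)  # tracing only needs shapes and dtypes

direct_register_custom_op(
    op_name="toy_double",
    op_func=_toy_double_impl,
    fake_impl=_toy_double_fake,
    mutates_args=[],
    dispatch_key="PrivateUse1",  # Ascend NPU dispatch key, as above
)
```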

#### `vllm_ascend/ops/vocab_parallel_embedding.py`

```diff
@@ -15,7 +15,6 @@
 # limitations under the License.
 #
-from typing import Optional, Tuple

 import torch
 from torch import nn
@@ -24,14 +23,20 @@ from vllm.distributed import divide
 from vllm.distributed.parallel_state import get_tp_group
 from vllm.model_executor.layers.logits_processor import LogitsProcessor
 from vllm.model_executor.layers.quantization.base_config import (
-    QuantizationConfig, QuantizeMethodBase, method_has_implemented_embedding)
+    QuantizationConfig,
+    QuantizeMethodBase,
+    method_has_implemented_embedding,
+)
 from vllm.model_executor.layers.vocab_parallel_embedding import (
-    DEFAULT_VOCAB_PADDING_SIZE, ParallelLMHead, UnquantizedEmbeddingMethod,
-    VocabParallelEmbedding, pad_vocab_size)
+    DEFAULT_VOCAB_PADDING_SIZE,
+    ParallelLMHead,
+    UnquantizedEmbeddingMethod,
+    VocabParallelEmbedding,
+    pad_vocab_size,
+)
 from vllm.model_executor.utils import set_weight_attrs

-from vllm_ascend.distributed.parallel_state import (get_embed_tp_group,
-                                                    get_lmhead_tp_group)
+from vllm_ascend.distributed.parallel_state import get_embed_tp_group, get_lmhead_tp_group
 from vllm_ascend.utils import embedding_tp_enable, lmhead_tp_enable
@@ -42,14 +47,16 @@ class AscendVocabParallelEmbedding(VocabParallelEmbedding):
     Added the feature of lmheadTP in pure dp scenario
     """

-    def __init__(self,
-                 num_embeddings: int,
-                 embedding_dim: int,
-                 params_dtype: Optional[torch.dtype] = None,
-                 org_num_embeddings: Optional[int] = None,
-                 padding_size: int = DEFAULT_VOCAB_PADDING_SIZE,
-                 quant_config: Optional[QuantizationConfig] = None,
-                 prefix: str = ""):
+    def __init__(
+        self,
+        num_embeddings: int,
+        embedding_dim: int,
+        params_dtype: torch.dtype | None = None,
+        org_num_embeddings: int | None = None,
+        padding_size: int = DEFAULT_VOCAB_PADDING_SIZE,
+        quant_config: QuantizationConfig | None = None,
+        prefix: str = "",
+    ):
         nn.Module.__init__(self)
         self.forward_type = None
         if lmhead_tp_enable() and "head" in prefix:
@@ -67,18 +74,20 @@ class AscendVocabParallelEmbedding(VocabParallelEmbedding):
         self.padding_size = padding_size
         self.org_vocab_size = org_num_embeddings or num_embeddings
         num_added_embeddings = num_embeddings - self.org_vocab_size
-        self.org_vocab_size_padded = pad_vocab_size(self.org_vocab_size,
-                                                    self.padding_size)
+        self.org_vocab_size_padded = pad_vocab_size(self.org_vocab_size, self.padding_size)
         self.num_embeddings_padded = pad_vocab_size(
-            self.org_vocab_size_padded + num_added_embeddings,
-            self.padding_size)
+            self.org_vocab_size_padded + num_added_embeddings, self.padding_size
+        )
         assert self.org_vocab_size_padded <= self.num_embeddings_padded

-        self.shard_indices = self._get_indices(self.num_embeddings_padded,
-                                               self.org_vocab_size_padded,
-                                               self.num_embeddings,
-                                               self.org_vocab_size,
-                                               self.tp_rank, self.tp_size)
+        self.shard_indices = self._get_indices(
+            self.num_embeddings_padded,
+            self.org_vocab_size_padded,
+            self.num_embeddings,
+            self.org_vocab_size,
+            self.tp_rank,
+            self.tp_size,
+        )
         self.embedding_dim = embedding_dim

         quant_method = None
         if quant_config is not None:
@@ -90,12 +99,12 @@ class AscendVocabParallelEmbedding(VocabParallelEmbedding):
         # method must implement the embedding operation. If we are another
         # layer type like ParallelLMHead, this is not important.
         is_embedding_layer = type(self) is VocabParallelEmbedding
-        quant_method_implements_embedding = method_has_implemented_embedding(
-            type(quant_method))
+        quant_method_implements_embedding = method_has_implemented_embedding(type(quant_method))
         if is_embedding_layer and not quant_method_implements_embedding:
             raise NotImplementedError(
                 f"The class {type(quant_method).__name__} must implement "
-                "the 'embedding' method, see UnquantizedEmbeddingMethod.")
+                "the 'embedding' method, see UnquantizedEmbeddingMethod."
+            )

         self.quant_method: QuantizeMethodBase = quant_method
@@ -104,46 +113,47 @@ class AscendVocabParallelEmbedding(VocabParallelEmbedding):
         self.params_dtype = params_dtype
         # Divide the weight matrix along the vocaburaly dimension.
         self.num_added_embeddings = self.num_embeddings - self.org_vocab_size
-        self.num_embeddings_per_partition = divide(self.num_embeddings_padded,
-                                                   self.tp_size)
-        assert (self.shard_indices.num_elements_padded ==
-                self.num_embeddings_per_partition)
+        self.num_embeddings_per_partition = divide(self.num_embeddings_padded, self.tp_size)
+        assert self.shard_indices.num_elements_padded == self.num_embeddings_per_partition
         self.num_org_embeddings_per_partition = (
-            self.shard_indices.org_vocab_end_index -
-            self.shard_indices.org_vocab_start_index)
+            self.shard_indices.org_vocab_end_index - self.shard_indices.org_vocab_start_index
+        )
         self.num_added_embeddings_per_partition = (
-            self.shard_indices.added_vocab_end_index -
-            self.shard_indices.added_vocab_start_index)
+            self.shard_indices.added_vocab_end_index - self.shard_indices.added_vocab_start_index
+        )

-        self.quant_method.create_weights(self,
-                                         self.embedding_dim,
-                                         [self.num_embeddings_per_partition],
-                                         self.embedding_dim,
-                                         self.num_embeddings_padded,
-                                         params_dtype=params_dtype,
-                                         weight_loader=self.weight_loader)
+        self.quant_method.create_weights(
+            self,
+            self.embedding_dim,
+            [self.num_embeddings_per_partition],
+            self.embedding_dim,
+            self.num_embeddings_padded,
+            params_dtype=params_dtype,
+            weight_loader=self.weight_loader,
+        )

     def _get_masked_input_and_mask(
-            self, input_: torch.Tensor, org_vocab_start_index: int,
-            org_vocab_end_index: int, num_org_vocab_padding: int,
-            added_vocab_start_index: int,
-            added_vocab_end_index: int) -> Tuple[torch.Tensor, torch.Tensor]:
+        self,
+        input_: torch.Tensor,
+        org_vocab_start_index: int,
+        org_vocab_end_index: int,
+        num_org_vocab_padding: int,
+        added_vocab_start_index: int,
+        added_vocab_end_index: int,
+    ) -> tuple[torch.Tensor, torch.Tensor]:
         # torch.compile will fuse all of the pointwise ops below
         # into a single kernel, making it very fast
-        org_vocab_mask = (input_ >= org_vocab_start_index) & (
-            input_ < org_vocab_end_index)
+        org_vocab_mask = (input_ >= org_vocab_start_index) & (input_ < org_vocab_end_index)
         # Adapt: avoid create added_vocab_mask when added_vocab_start_index == added_vocab_end_index.
         if added_vocab_start_index == added_vocab_end_index:
-            valid_offset = (org_vocab_start_index * org_vocab_mask)
+            valid_offset = org_vocab_start_index * org_vocab_mask
             vocab_mask = org_vocab_mask
         else:
-            added_vocab_mask = (input_ >= added_vocab_start_index) & (
-                input_ < added_vocab_end_index)
-            added_offset = added_vocab_start_index - (
-                org_vocab_end_index -
-                org_vocab_start_index) - num_org_vocab_padding
-            valid_offset = (org_vocab_start_index *
-                            org_vocab_mask) + (added_offset * added_vocab_mask)
+            added_vocab_mask = (input_ >= added_vocab_start_index) & (input_ < added_vocab_end_index)
+            added_offset = (
+                added_vocab_start_index - (org_vocab_end_index - org_vocab_start_index) - num_org_vocab_padding
+            )
+            valid_offset = (org_vocab_start_index * org_vocab_mask) + (added_offset * added_vocab_mask)
             vocab_mask = org_vocab_mask | added_vocab_mask
         # Adapt end.
         input_ = vocab_mask * (input_ - valid_offset)
@@ -158,14 +168,15 @@ class AscendVocabParallelEmbedding(VocabParallelEmbedding):
     def _forward_embed_tp(self, input_):
         complete_input = self.comm_group.all_gather(input_, dim=0)
         masked_input, input_mask = self._get_masked_input_and_mask(
-            complete_input, self.shard_indices.org_vocab_start_index,
+            complete_input,
+            self.shard_indices.org_vocab_start_index,
             self.shard_indices.org_vocab_end_index,
             self.shard_indices.num_org_vocab_padding,
             self.shard_indices.added_vocab_start_index,
-            self.shard_indices.added_vocab_end_index)
+            self.shard_indices.added_vocab_end_index,
+        )
         # Get the embeddings.
-        output_parallel = self.quant_method.embedding(self,
-                                                      masked_input.long())
+        output_parallel = self.quant_method.embedding(self, masked_input.long())
         output_parallel.masked_fill_(input_mask.unsqueeze(-1), 0)
         output = self.comm_group.reduce_scatter(output_parallel, dim=0)
         output = output.view(input_.shape[0], -1)
@@ -175,16 +186,17 @@ class AscendVocabParallelEmbedding(VocabParallelEmbedding):
         if self.tp_size > 1:
             # Build the mask.
             masked_input, input_mask = self._get_masked_input_and_mask(
-                input_, self.shard_indices.org_vocab_start_index,
+                input_,
+                self.shard_indices.org_vocab_start_index,
                 self.shard_indices.org_vocab_end_index,
                 self.shard_indices.num_org_vocab_padding,
                 self.shard_indices.added_vocab_start_index,
-                self.shard_indices.added_vocab_end_index)
+                self.shard_indices.added_vocab_end_index,
+            )
         else:
             masked_input = input_
         # Get the embeddings.
-        output_parallel = self.quant_method.embedding(self,
-                                                      masked_input.long())
+        output_parallel = self.quant_method.embedding(self, masked_input.long())
         # Mask the output embedding.
         if self.tp_size > 1:
             output_parallel.masked_fill_(input_mask.unsqueeze(-1), 0)
@@ -197,29 +209,31 @@ class AscendParallelLMHead(ParallelLMHead):
     """
     Register ParallelLMHead as a custom op for Ascend."""

-    def __init__(self,
-                 num_embeddings: int,
-                 embedding_dim: int,
-                 bias: bool = False,
-                 params_dtype: Optional[torch.dtype] = None,
-                 org_num_embeddings: Optional[int] = None,
-                 padding_size: int = DEFAULT_VOCAB_PADDING_SIZE,
-                 quant_config: Optional[QuantizationConfig] = None,
-                 prefix: str = ""):
-        AscendVocabParallelEmbedding.__init__(self, num_embeddings,
-                                              embedding_dim, params_dtype,
-                                              org_num_embeddings, padding_size,
-                                              quant_config, prefix)
+    def __init__(
+        self,
+        num_embeddings: int,
+        embedding_dim: int,
+        bias: bool = False,
+        params_dtype: torch.dtype | None = None,
+        org_num_embeddings: int | None = None,
+        padding_size: int = DEFAULT_VOCAB_PADDING_SIZE,
+        quant_config: QuantizationConfig | None = None,
+        prefix: str = "",
+    ):
+        AscendVocabParallelEmbedding.__init__(
+            self, num_embeddings, embedding_dim, params_dtype, org_num_embeddings, padding_size, quant_config, prefix
+        )
         self.quant_config = quant_config
         if bias:
-            self.bias = Parameter(
-                torch.empty(self.num_embeddings_per_partition,
-                            dtype=params_dtype))
-            set_weight_attrs(self.bias, {
-                "output_dim": 0,
-                "weight_loader": self.weight_loader,
-            })
+            self.bias = Parameter(torch.empty(self.num_embeddings_per_partition, dtype=params_dtype))
+            set_weight_attrs(
+                self.bias,
+                {
+                    "output_dim": 0,
+                    "weight_loader": self.weight_loader,
+                },
+            )
         else:
             self.register_parameter("bias", None)
@@ -234,48 +248,41 @@ class AscendLogitsProcessor(LogitsProcessor):
         self,
         hidden_states: torch.Tensor,
         lm_head: AscendParallelLMHead,
-        embedding_bias: Optional[torch.Tensor] = None,
-    ) -> Optional[torch.Tensor]:
+        embedding_bias: torch.Tensor | None = None,
+    ) -> torch.Tensor | None:
         if lmhead_tp_enable():
-            return self._get_logits_lmheadtp(hidden_states, lm_head,
-                                             embedding_bias)
+            return self._get_logits_lmheadtp(hidden_states, lm_head, embedding_bias)
         else:
-            return self._get_logits_normal(hidden_states, lm_head,
-                                           embedding_bias)
+            return self._get_logits_normal(hidden_states, lm_head, embedding_bias)

     def _get_logits_lmheadtp(
         self,
         hidden_states: torch.Tensor,
         lm_head: AscendParallelLMHead,
-        embedding_bias: Optional[torch.Tensor],
-    ) -> Optional[torch.Tensor]:
+        embedding_bias: torch.Tensor | None,
+    ) -> torch.Tensor | None:
         # Gather hidden states from all devices in tensor parallel group
-        gathered_hidden_states = get_lmhead_tp_group().all_gather(
-            hidden_states, dim=0)
-        local_logits = lm_head.quant_method.apply(lm_head,
-                                                  gathered_hidden_states,
-                                                  bias=embedding_bias)
+        gathered_hidden_states = get_lmhead_tp_group().all_gather(hidden_states, dim=0)
+        local_logits = lm_head.quant_method.apply(lm_head, gathered_hidden_states, bias=embedding_bias)
         # Gather logits for tensor parallel
         logits = get_lmhead_tp_group().all_to_all(local_logits)
         # Remove paddings in vocab (if any)
         if logits is not None:
-            logits = logits[..., :self.org_vocab_size]
+            logits = logits[..., : self.org_vocab_size]
         return logits

     def _get_logits_normal(
         self,
         hidden_states: torch.Tensor,
         lm_head: AscendParallelLMHead,
-        embedding_bias: Optional[torch.Tensor],
-    ) -> Optional[torch.Tensor]:
+        embedding_bias: torch.Tensor | None,
+    ) -> torch.Tensor | None:
-        local_logits = lm_head.quant_method.apply(lm_head,
-                                                  hidden_states,
-                                                  bias=embedding_bias)
+        local_logits = lm_head.quant_method.apply(lm_head, hidden_states, bias=embedding_bias)
         # Gather logits for tensor parallel
         logits = self._gather_logits(local_logits)
         # Remove paddings in vocab (if any)
         if logits is not None:
-            logits = logits[..., :self.org_vocab_size]
+            logits = logits[..., : self.org_vocab_size]
         return logits
```
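
The pointwise masking in `_get_masked_input_and_mask` is easier to see with numbers. A toy run of the no-added-vocab branch, with shard bounds assumed for illustration:

```python
import torch

org_start, org_end = 4, 8                # this shard owns original ids [4, 8)
input_ = torch.tensor([2, 5, 9])
mask = (input_ >= org_start) & (input_ < org_end)
valid_offset = org_start * mask          # tensor([0, 4, 0])
local = mask * (input_ - valid_offset)   # tensor([0, 1, 0]): only id 5 is local
# Rows for the masked-out ids are zeroed afterwards via masked_fill_, so
# mapping them to row 0 is harmless once ranks combine their outputs.
```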

View File

@@ -2,19 +2,18 @@ from dataclasses import dataclass, field
import torch import torch
import torch_npu import torch_npu
from vllm.forward_context import ForwardContext, get_forward_context
from vllm.config import get_current_vllm_config from vllm.config import get_current_vllm_config
from vllm.logger import logger from vllm.forward_context import ForwardContext, get_forward_context
from vllm_ascend.ascend_config import WeightPrefetchConfig from vllm_ascend.ascend_config import WeightPrefetchConfig
from vllm_ascend.ops.linear import (AscendQKVParallelLinear, from vllm_ascend.ops.linear import AscendQKVParallelLinear, AscendRowParallelLinear
AscendRowParallelLinear)
from vllm_ascend.utils import is_moe_model from vllm_ascend.utils import is_moe_model
SUPPORTED_MODULES = ["attn", "mlp", "moe"] SUPPORTED_MODULES = ["attn", "mlp", "moe"]
MOE_PREFETCH_TOKEN_THRESHOLD = 96 MOE_PREFETCH_TOKEN_THRESHOLD = 96
MAX_PREFETCH_WEIGHT_SIZE = 18 * 1024 * 1024 MAX_PREFETCH_WEIGHT_SIZE = 18 * 1024 * 1024
@dataclass @dataclass
class ModuleWeightPrefetchConfig: class ModuleWeightPrefetchConfig:
module_name: str module_name: str
@@ -24,10 +23,7 @@ class ModuleWeightPrefetchConfig:
linear_prefix_map: dict = field(default_factory=dict) linear_prefix_map: dict = field(default_factory=dict)
def __post_init__(self) -> None: def __post_init__(self) -> None:
self.prefetch_ratio = { self.prefetch_ratio = {prefix: ratio for prefix, ratio in self.prefetch_ratio.items() if 0 <= ratio <= 1}
prefix: ratio
for prefix, ratio in self.prefetch_ratio.items() if 0 <= ratio <= 1
}
assert self.module_name in SUPPORTED_MODULES, ( assert self.module_name in SUPPORTED_MODULES, (
f"Invalid module name {self.module_name}, should be one of {SUPPORTED_MODULES}" f"Invalid module name {self.module_name}, should be one of {SUPPORTED_MODULES}"
@@ -41,6 +37,7 @@ class WeightPrefetchMethod:
""" """
Unified weight prefetch method. Unified weight prefetch method.
""" """
is_moe: bool = True is_moe: bool = True
MLP_GATE_UP: str = "gate_up" MLP_GATE_UP: str = "gate_up"
MLP_DOWN: str = "down" MLP_DOWN: str = "down"
@@ -54,60 +51,53 @@ class WeightPrefetchMethod:
self.attn = ModuleWeightPrefetchConfig( self.attn = ModuleWeightPrefetchConfig(
module_name="attn", module_name="attn",
enable=weight_prefetch_config.enabled, enable=weight_prefetch_config.enabled,
prefetch_ratio=weight_prefetch_config.prefetch_ratio.get( prefetch_ratio=weight_prefetch_config.prefetch_ratio.get("attn", {}) or {"qkv": 1.0, "o": 1.0},
"attn", {}) or {'qkv': 1.0, 'o': 1.0},
linear_prefix_map={ linear_prefix_map={
AscendQKVParallelLinear.__name__: "qkv", AscendQKVParallelLinear.__name__: "qkv",
AscendRowParallelLinear.__name__: "o", AscendRowParallelLinear.__name__: "o",
}) },
)
self.moe = ModuleWeightPrefetchConfig( self.moe = ModuleWeightPrefetchConfig(
module_name="moe", module_name="moe",
enable=weight_prefetch_config.enabled and self.is_moe, enable=weight_prefetch_config.enabled and self.is_moe,
prefetch_ratio=weight_prefetch_config.prefetch_ratio.get( prefetch_ratio=weight_prefetch_config.prefetch_ratio.get("moe", {}) or {"gate_up": 0.8},
"moe", {}) or {'gate_up': 0.8}) )
self.mlp = ModuleWeightPrefetchConfig( self.mlp = ModuleWeightPrefetchConfig(
module_name="mlp", module_name="mlp",
enable=weight_prefetch_config.enabled and not self.is_moe, enable=weight_prefetch_config.enabled and not self.is_moe,
prefetch_ratio=weight_prefetch_config.prefetch_ratio.get( prefetch_ratio=weight_prefetch_config.prefetch_ratio.get("mlp", {}) or {"gate_up": 1.0, "down": 1.0},
"mlp", {}) or {'gate_up': 1.0, 'down': 1.0}) )
self.mlp_pre_version_compatibale_config = weight_prefetch_config.mlp_pre_version_compatibale_config self.mlp_pre_version_compatibale_config = weight_prefetch_config.mlp_pre_version_compatibale_config
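
All three module configs read from the same weight_prefetch_config.prefetch_ratio mapping, falling back to the per-module defaults above when a key is missing or empty. A hedged sketch of the shape such a mapping is expected to take (the nesting mirrors the .get(...) calls above; the concrete values are illustrative):

weight_prefetch_ratio = {
    "attn": {"qkv": 1.0, "o": 1.0},        # full qkv_proj / o_proj prefetch
    "moe": {"gate_up": 0.8},               # 80% of the expert w13 weight
    "mlp": {"gate_up": 1.0, "down": 1.0},  # dense MLP projections
}
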
def maybe_prefetch_attn_weight_preprocess( def maybe_prefetch_attn_weight_preprocess(
self, layer_cls_name: str, weight: torch.Tensor, self, layer_cls_name: str, weight: torch.Tensor, start_flag: torch.Tensor
start_flag: torch.Tensor) -> None: ) -> None:
if not self.attn.enable or layer_cls_name not in self.attn.linear_prefix_map: if not self.attn.enable or layer_cls_name not in self.attn.linear_prefix_map:
return return
prefix = self.attn.linear_prefix_map.get(layer_cls_name, "") prefix = self.attn.linear_prefix_map.get(layer_cls_name, "")
weight_size = weight.data.element_size() * weight.data.numel( weight_size = weight.data.element_size() * weight.data.numel() * self.attn.prefetch_ratio.get(prefix, 0)
) * self.attn.prefetch_ratio.get(prefix, 0)
torch.ops.vllm.prefetch_preprocess(weight=weight, torch.ops.vllm.prefetch_preprocess(weight=weight, start_flag=start_flag, max_weight_size=int(weight_size))
start_flag=start_flag,
max_weight_size=int(weight_size))
def maybe_prefetch_attn_weight_postprocess( def maybe_prefetch_attn_weight_postprocess(self, layer_cls_name: str, stop_flag: torch.Tensor) -> None:
self, layer_cls_name: str, stop_flag: torch.Tensor) -> None:
if not self.attn.enable or layer_cls_name not in self.attn.linear_prefix_map: if not self.attn.enable or layer_cls_name not in self.attn.linear_prefix_map:
return return
torch.ops.vllm.prefetch_postprocess(stop_flag) torch.ops.vllm.prefetch_postprocess(stop_flag)
def maybe_prefetch_moe_weight_preprocess(self, hidden_states, prefix): def maybe_prefetch_moe_weight_preprocess(self, hidden_states, prefix):
self.moe.is_active_this_forward = hidden_states.shape[ self.moe.is_active_this_forward = (
0] >= MOE_PREFETCH_TOKEN_THRESHOLD if self.moe.enable else False hidden_states.shape[0] >= MOE_PREFETCH_TOKEN_THRESHOLD if self.moe.enable else False
)
if not self.moe.is_active_this_forward: if not self.moe.is_active_this_forward:
return return
forward_context = get_forward_context() forward_context = get_forward_context()
# layer_idx is subtracted by 1 because layer_idx was incremented by 1 at layernorm. # layer_idx is subtracted by 1 because layer_idx was incremented by 1 at layernorm.
weight = forward_context.model_instance.model.layers[ weight = forward_context.model_instance.model.layers[forward_context.layer_idx - 1].mlp.experts.w13_weight
forward_context.layer_idx - 1].mlp.experts.w13_weight weight_size = weight.data.element_size() * weight.data.numel() * self.moe.prefetch_ratio.get(prefix, 0)
weight_size = weight.data.element_size() * weight.data.numel( torch.ops.vllm.prefetch_preprocess(weight=weight, start_flag=None, max_weight_size=int(weight_size))
) * self.moe.prefetch_ratio.get(prefix, 0)
torch.ops.vllm.prefetch_preprocess(weight=weight,
start_flag=None,
max_weight_size=int(weight_size))
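
The prefetch budget is element_size * numel * ratio bytes; the MLP paths below additionally clamp it to MAX_PREFETCH_WEIGHT_SIZE. A worked sketch with an invented weight shape (meta device, so nothing is actually allocated):

import torch

MAX_PREFETCH_WEIGHT_SIZE = 18 * 1024 * 1024  # 18 MiB, as defined above

weight = torch.empty(4096, 11008, dtype=torch.bfloat16, device="meta")
weight_size = weight.element_size() * weight.numel() * 0.8  # ~72 MiB requested
weight_size = min(int(weight_size), MAX_PREFETCH_WEIGHT_SIZE)
assert weight_size == 18 * 1024 * 1024  # clamped
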
def maybe_prefetch_moe_weight_postprocess(self, stop_flag: torch.Tensor): def maybe_prefetch_moe_weight_postprocess(self, stop_flag: torch.Tensor):
if not self.moe.is_active_this_forward: if not self.moe.is_active_this_forward:
@@ -116,7 +106,9 @@ class WeightPrefetchMethod:
torch.ops.vllm.prefetch_postprocess(stop_flag) torch.ops.vllm.prefetch_postprocess(stop_flag)
# Only eager mode may pass x_dependency=None # Only eager mode may pass x_dependency=None
def maybe_prefetch_mlp_weight_preprocess(self, prefetch_layer_name: str, x_dependency: torch.Tensor | None, curr_layer_prefix: str | None = None): def maybe_prefetch_mlp_weight_preprocess(
self, prefetch_layer_name: str, x_dependency: torch.Tensor | None, curr_layer_prefix: str | None = None
):
if not self.mlp.enable and not self.mlp_pre_version_compatibale_config: if not self.mlp.enable and not self.mlp_pre_version_compatibale_config:
self.mlp.is_active_this_forward = False self.mlp.is_active_this_forward = False
return return
@@ -140,24 +132,26 @@ class WeightPrefetchMethod:
else: else:
raise ValueError(f"Unsupported prefetch weight name: {prefetch_layer_name}") raise ValueError(f"Unsupported prefetch weight name: {prefetch_layer_name}")
def _maybe_prefetch_mlp_gate_up_weight_preprocess(self, x_dependency: torch.Tensor, forward_context: ForwardContext, curr_layer_prefix: str | None): def _maybe_prefetch_mlp_gate_up_weight_preprocess(
self, x_dependency: torch.Tensor, forward_context: ForwardContext, curr_layer_prefix: str | None
):
if not curr_layer_prefix: if not curr_layer_prefix:
raise ValueError("curr_layer_prefix must been specified when prefetching mlp gate_up_proj weight") raise ValueError("curr_layer_prefix must been specified when prefetching mlp gate_up_proj weight")
# start point of gate_up_proj weight prefetch # start point of gate_up_proj weight prefetch
if curr_layer_prefix.split('.')[-2] == "self_attn": if curr_layer_prefix.split(".")[-2] == "self_attn":
model_instance = forward_context.model_instance model_instance = forward_context.model_instance
layer_idx = int(curr_layer_prefix.split('.')[2]) layer_idx = int(curr_layer_prefix.split(".")[2])
weight = model_instance.model.layers[layer_idx].mlp.gate_up_proj.weight weight = model_instance.model.layers[layer_idx].mlp.gate_up_proj.weight
if self.mlp_pre_version_compatibale_config: if self.mlp_pre_version_compatibale_config:
weight_size = self.mlp_pre_version_compatibale_config.get(self.MLP_GATE_UP, 0) weight_size = self.mlp_pre_version_compatibale_config.get(self.MLP_GATE_UP, 0)
else: else:
weight_size = weight.data.element_size() * weight.data.numel() * self.mlp.prefetch_ratio.get(self.MLP_GATE_UP, 0) weight_size = (
weight.data.element_size() * weight.data.numel() * self.mlp.prefetch_ratio.get(self.MLP_GATE_UP, 0)
)
if weight_size > MAX_PREFETCH_WEIGHT_SIZE: if weight_size > MAX_PREFETCH_WEIGHT_SIZE:
weight_size = MAX_PREFETCH_WEIGHT_SIZE weight_size = MAX_PREFETCH_WEIGHT_SIZE
torch.ops.vllm.prefetch_preprocess(weight=weight, torch.ops.vllm.prefetch_preprocess(weight=weight, start_flag=x_dependency, max_weight_size=int(weight_size))
start_flag=x_dependency,
max_weight_size=int(weight_size))
forward_context.prefetch_mlp_gate_up_proj = True forward_context.prefetch_mlp_gate_up_proj = True
def _maybe_prefetch_mlp_down_weight_preprocess(self, x_dependency: torch.Tensor, forward_context: ForwardContext): def _maybe_prefetch_mlp_down_weight_preprocess(self, x_dependency: torch.Tensor, forward_context: ForwardContext):
@@ -167,12 +161,12 @@ class WeightPrefetchMethod:
if self.mlp_pre_version_compatibale_config: if self.mlp_pre_version_compatibale_config:
weight_size = self.mlp_pre_version_compatibale_config.get(self.MLP_DOWN, 0) weight_size = self.mlp_pre_version_compatibale_config.get(self.MLP_DOWN, 0)
else: else:
weight_size = weight.data.element_size() * weight.data.numel() * self.mlp.prefetch_ratio.get(self.MLP_DOWN, 0) weight_size = (
weight.data.element_size() * weight.data.numel() * self.mlp.prefetch_ratio.get(self.MLP_DOWN, 0)
)
if weight_size > MAX_PREFETCH_WEIGHT_SIZE: if weight_size > MAX_PREFETCH_WEIGHT_SIZE:
weight_size = MAX_PREFETCH_WEIGHT_SIZE weight_size = MAX_PREFETCH_WEIGHT_SIZE
torch.ops.vllm.prefetch_preprocess(weight=weight, torch.ops.vllm.prefetch_preprocess(weight=weight, start_flag=x_dependency, max_weight_size=int(weight_size))
start_flag=x_dependency,
max_weight_size=int(weight_size))
forward_context.prefetch_mlp_down_proj = True forward_context.prefetch_mlp_down_proj = True
forward_context.layer_idx += 1 forward_context.layer_idx += 1
@@ -185,19 +179,15 @@ class WeightPrefetchMethod:
except AssertionError: except AssertionError:
return return
if forward_context.prefetch_mlp_gate_up_proj or \ if forward_context.prefetch_mlp_gate_up_proj or forward_context.prefetch_mlp_down_proj:
forward_context.prefetch_mlp_down_proj:
torch.ops.vllm.prefetch_postprocess(stop_flag) torch.ops.vllm.prefetch_postprocess(stop_flag)
forward_context.prefetch_mlp_gate_up_proj = False forward_context.prefetch_mlp_gate_up_proj = False
forward_context.prefetch_mlp_down_proj = False forward_context.prefetch_mlp_down_proj = False
def maybe_npu_prefetch(inputs: torch.Tensor, def maybe_npu_prefetch(
dependency: torch.Tensor, inputs: torch.Tensor, dependency: torch.Tensor, max_size: int = 0, offset: int = 0, *, enabled: bool = True
max_size: int = 0, ) -> None:
offset: int = 0,
*,
enabled: bool = True) -> None:
if not enabled: if not enabled:
return return
input_size = inputs.element_size() * inputs.numel() input_size = inputs.element_size() * inputs.numel()
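
The hunk ends mid-function, but the keyword-only enabled flag already shows the intent: call sites stay unconditional and the flag decides whether anything happens. A hedged stub of that pattern (the prefetch body itself is elided, since it is not shown in this hunk):

import torch

def maybe_npu_prefetch_sketch(inputs: torch.Tensor, dependency: torch.Tensor,
                              max_size: int = 0, offset: int = 0, *,
                              enabled: bool = True) -> None:
    if not enabled:
        return  # feature off: the call is a no-op
    input_size = inputs.element_size() * inputs.numel()
    ...  # hardware prefetch elided; not shown in this hunk

x = torch.empty(8, 1024, dtype=torch.bfloat16, device="meta")
maybe_npu_prefetch_sketch(x, x, enabled=False)  # branch-free call site
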

View File

@@ -30,10 +30,9 @@ def get_spec_decode_method(method, vllm_config, device, runner):
return EagleProposer(vllm_config, device, runner) return EagleProposer(vllm_config, device, runner)
elif method == "mtp": elif method == "mtp":
return MtpProposer(vllm_config, device, runner) return MtpProposer(vllm_config, device, runner)
elif method == 'suffix': elif method == "suffix":
return SuffixDecodingProposer(vllm_config, device, runner) return SuffixDecodingProposer(vllm_config, device, runner)
elif method == "medusa": elif method == "medusa":
return MedusaProposer(vllm_config, device, runner) return MedusaProposer(vllm_config, device, runner)
else: else:
raise ValueError("Unknown speculative decoding method: " raise ValueError(f"Unknown speculative decoding method: {method}")
f"{method}")

File diff suppressed because it is too large

View File

@@ -1,5 +1,4 @@
import enum import enum
from typing import Optional
import torch import torch
from vllm.config import CUDAGraphMode, VllmConfig from vllm.config import CUDAGraphMode, VllmConfig
@@ -18,11 +17,7 @@ class SpecDcodeType(enum.Enum):
class Proposer: class Proposer:
def __init__(self, vllm_config: VllmConfig, device: torch.device = None, runner=None):
def __init__(self,
vllm_config: VllmConfig,
device: torch.device = None,
runner=None):
pass pass
def load_model(self, model): def load_model(self, model):
@@ -30,25 +25,29 @@ class Proposer:
raise NotImplementedError raise NotImplementedError
@torch.inference_mode() @torch.inference_mode()
def dummy_run(self, def dummy_run(
num_tokens: int, self,
with_prefill: bool = False, num_tokens: int,
in_graph_capturing: bool = False, with_prefill: bool = False,
num_reqs: int = 0, in_graph_capturing: bool = False,
num_tokens_across_dp: Optional[torch.Tensor] = None, num_reqs: int = 0,
aclgraph_runtime_mode: CUDAGraphMode = CUDAGraphMode.NONE, num_tokens_across_dp: torch.Tensor | None = None,
batch_descriptor=None): aclgraph_runtime_mode: CUDAGraphMode = CUDAGraphMode.NONE,
batch_descriptor=None,
):
"""Called by dummy_run in modle_runner""" """Called by dummy_run in modle_runner"""
raise NotImplementedError raise NotImplementedError
def generate_token_ids(self, def generate_token_ids(
valid_sampled_token_ids: list[list[int]], self,
sampling_metadata: SamplingMetadata = None, valid_sampled_token_ids: list[list[int]],
scheduler_output: SchedulerOutput = None, sampling_metadata: SamplingMetadata = None,
spec_decode_metadata: SpecDecodeMetadata = None, scheduler_output: SchedulerOutput = None,
positions: torch.Tensor = None, spec_decode_metadata: SpecDecodeMetadata = None,
num_scheduled_tokens: int = 0, positions: torch.Tensor = None,
hidden_states: torch.Tensor = None, num_scheduled_tokens: int = 0,
aux_hidden_states: torch.Tensor = None): hidden_states: torch.Tensor = None,
aux_hidden_states: torch.Tensor = None,
):
"""Called by execute_model in model_runner""" """Called by execute_model in model_runner"""
raise NotImplementedError raise NotImplementedError
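
A hedged sketch of how a concrete proposer satisfies this interface: subclass the Proposer above and override the three hooks (the body is invented; the long keyword signatures are abbreviated with **kwargs):

import torch

class NoopProposer(Proposer):
    def load_model(self, model):
        self.model = model

    @torch.inference_mode()
    def dummy_run(self, num_tokens: int, **kwargs):
        pass  # nothing to warm up

    def generate_token_ids(self, valid_sampled_token_ids, **kwargs):
        return [[] for _ in valid_sampled_token_ids]  # no draft tokens
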

View File

@@ -1,14 +1,9 @@
from typing import Optional
import torch import torch
import torch.nn as nn
from vllm.config import CUDAGraphMode, VllmConfig from vllm.config import CUDAGraphMode, VllmConfig
from vllm.logger import init_logger from vllm.logger import init_logger
from vllm.model_executor.model_loader import get_model
from vllm.model_executor.models.interfaces import is_mixture_of_experts
from vllm.v1.sample.metadata import SamplingMetadata from vllm.v1.sample.metadata import SamplingMetadata
from vllm.v1.spec_decode.metadata import SpecDecodeMetadata
from vllm.v1.spec_decode.medusa import MedusaProposer as VllmMedusaProposer from vllm.v1.spec_decode.medusa import MedusaProposer as VllmMedusaProposer
from vllm.v1.spec_decode.metadata import SpecDecodeMetadata
from vllm_ascend.ascend_forward_context import set_ascend_forward_context from vllm_ascend.ascend_forward_context import set_ascend_forward_context
from vllm_ascend.spec_decode.interface import SpecDcodeType from vllm_ascend.spec_decode.interface import SpecDcodeType
@@ -22,72 +17,70 @@ class MedusaProposer(VllmMedusaProposer):
""" """
def __init__( def __init__(
self, self,
vllm_config: VllmConfig, vllm_config: VllmConfig,
device: torch.device, device: torch.device,
runner, runner,
): ):
# Save config parameters # Save config parameters
self.name = SpecDcodeType.MEDUSA self.name = SpecDcodeType.MEDUSA
self.vllm_config = vllm_config self.vllm_config = vllm_config
self.device = device self.device = device
self.max_num_tokens = vllm_config.scheduler_config.max_num_batched_tokens self.max_num_tokens = vllm_config.scheduler_config.max_num_batched_tokens
self.hidden_size = (vllm_config.speculative_config.draft_model_config. self.hidden_size = vllm_config.speculative_config.draft_model_config.get_hidden_size()
get_hidden_size())
self.dtype = vllm_config.model_config.dtype self.dtype = vllm_config.model_config.dtype
self.runner = runner self.runner = runner
@torch.inference_mode() @torch.inference_mode()
def dummy_run(self, def dummy_run(
num_tokens: int, self,
with_prefill: bool = False, num_tokens: int,
in_graph_capturing: bool = False, with_prefill: bool = False,
num_reqs: int = 0, in_graph_capturing: bool = False,
num_tokens_across_dp: Optional[torch.Tensor] = None, num_reqs: int = 0,
aclgraph_runtime_mode: CUDAGraphMode = CUDAGraphMode.NONE, num_tokens_across_dp: torch.Tensor | None = None,
batch_descriptor=None, aclgraph_runtime_mode: CUDAGraphMode = CUDAGraphMode.NONE,
dummy_compute_logits=lambda hidden_states: None, batch_descriptor=None,
is_profile=False): dummy_compute_logits=lambda hidden_states: None,
is_profile=False,
):
hidden_states = torch.zeros( hidden_states = torch.zeros(
(self.max_num_tokens, self.hidden_size), (self.max_num_tokens, self.hidden_size),
dtype=self.dtype, dtype=self.dtype,
device=self.device, device=self.device,
) )
with set_ascend_forward_context( with set_ascend_forward_context(
None, None,
self.vllm_config, self.vllm_config,
num_tokens=num_tokens, num_tokens=num_tokens,
num_actual_tokens=0, num_actual_tokens=0,
in_profile_run=is_profile, in_profile_run=is_profile,
batch_descriptor=batch_descriptor, batch_descriptor=batch_descriptor,
aclgraph_runtime_mode=aclgraph_runtime_mode, aclgraph_runtime_mode=aclgraph_runtime_mode,
is_draft_model=True): is_draft_model=True,
):
self.model(hidden_states) self.model(hidden_states)
dummy_compute_logits(hidden_states) dummy_compute_logits(hidden_states)
def generate_token_ids(self, valid_sampled_token_ids: list[list[int]], def generate_token_ids(
sampling_metadata: SamplingMetadata, self,
spec_decode_metadata: SpecDecodeMetadata, valid_sampled_token_ids: list[list[int]],
sample_hidden_states: torch.Tensor, sampling_metadata: SamplingMetadata,
*args, spec_decode_metadata: SpecDecodeMetadata,
**kwargs sample_hidden_states: torch.Tensor,
): *args,
**kwargs,
):
if sample_hidden_states.shape[0] == len(valid_sampled_token_ids): if sample_hidden_states.shape[0] == len(valid_sampled_token_ids):
# The input to the target model does not include draft tokens. # The input to the target model does not include draft tokens.
hidden_states = sample_hidden_states hidden_states = sample_hidden_states
else: else:
num_accepted_tokens = torch.tensor( num_accepted_tokens = torch.tensor(
[len(t) for t in valid_sampled_token_ids], [len(t) for t in valid_sampled_token_ids], device=self.device, dtype=torch.long
device=self.device, )
dtype=torch.long) num_draft_tokens = torch.tensor(spec_decode_metadata.num_draft_tokens, device=self.device, dtype=torch.long)
num_draft_tokens = torch.tensor(
spec_decode_metadata.num_draft_tokens,
device=self.device,
dtype=torch.long)
offsets = torch.cumsum(num_draft_tokens + 1, offsets = torch.cumsum(num_draft_tokens + 1, dim=0) - (num_draft_tokens + 1)
dim=0) - (num_draft_tokens + 1)
indices = offsets + num_accepted_tokens - 1 indices = offsets + num_accepted_tokens - 1
hidden_states = sample_hidden_states[indices] hidden_states = sample_hidden_states[indices]
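
The index arithmetic picks, for each request, the hidden state of its last accepted token out of the flattened [target + draft] rows. A worked standalone example (counts invented):

import torch

num_draft_tokens = torch.tensor([2, 3])     # rows per request: draft + 1
num_accepted_tokens = torch.tensor([1, 3])
offsets = torch.cumsum(num_draft_tokens + 1, dim=0) - (num_draft_tokens + 1)
indices = offsets + num_accepted_tokens - 1
assert offsets.tolist() == [0, 3] and indices.tolist() == [0, 5]
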

View File

@@ -1,5 +1,3 @@
from typing import Optional, Union
import torch import torch
import torch.nn as nn import torch.nn as nn
from vllm.config import CUDAGraphMode from vllm.config import CUDAGraphMode
@@ -22,29 +20,33 @@ from vllm_ascend.utils import lmhead_tp_enable, vllm_version_is
class MtpProposer(EagleProposer): class MtpProposer(EagleProposer):
# TODO: Find out why ModelRunner does not need this explicit typing. # TODO: Find out why ModelRunner does not need this explicit typing.
model: Union[nn.Module, ACLGraphWrapper] model: nn.Module | ACLGraphWrapper
@torch.inference_mode() @torch.inference_mode()
def dummy_run(self, def dummy_run(
num_tokens: int, self,
with_prefill: bool = False, num_tokens: int,
in_graph_capturing: bool = False, with_prefill: bool = False,
num_reqs: int = 0, in_graph_capturing: bool = False,
num_tokens_across_dp=None, num_reqs: int = 0,
aclgraph_runtime_mode: CUDAGraphMode = CUDAGraphMode.NONE, num_tokens_across_dp=None,
batch_descriptor=None, aclgraph_runtime_mode: CUDAGraphMode = CUDAGraphMode.NONE,
dummy_compute_logits=lambda hidden_states: None, batch_descriptor=None,
is_profile=False) -> None: dummy_compute_logits=lambda hidden_states: None,
if ( is_profile=False,
self.pcp_size * self.dcp_size == 1 ) -> None:
and not self.speculative_config.disable_padded_drafter_batch if self.pcp_size * self.dcp_size == 1 and not self.speculative_config.disable_padded_drafter_batch:
):
super().dummy_run( super().dummy_run(
num_tokens, with_prefill, in_graph_capturing, num_reqs, num_tokens,
num_tokens_across_dp, aclgraph_runtime_mode, batch_descriptor, with_prefill,
dummy_compute_logits, is_profile in_graph_capturing,
num_reqs,
num_tokens_across_dp,
aclgraph_runtime_mode,
batch_descriptor,
dummy_compute_logits,
is_profile,
) )
return return
( (
@@ -61,14 +63,10 @@ class MtpProposer(EagleProposer):
aclgraph_runtime_mode = CUDAGraphMode.NONE aclgraph_runtime_mode = CUDAGraphMode.NONE
if aclgraph_runtime_mode == CUDAGraphMode.FULL: if aclgraph_runtime_mode == CUDAGraphMode.FULL:
if len(self.runner.attn_groups) > 0: if len(self.runner.attn_groups) > 0:
num_computed_tokens_cpu = ( num_computed_tokens_cpu = self.runner.input_batch.num_computed_tokens_cpu_tensor[:num_reqs]
self.runner.input_batch.
num_computed_tokens_cpu_tensor[:num_reqs])
common_attn_metadata = AscendCommonAttentionMetadata( common_attn_metadata = AscendCommonAttentionMetadata(
query_start_loc=self.runner.query_start_loc.gpu[:num_reqs + query_start_loc=self.runner.query_start_loc.gpu[: num_reqs + 1],
1], query_start_loc_cpu=self.runner.query_start_loc.cpu[: num_reqs + 1],
query_start_loc_cpu=self.runner.query_start_loc.
cpu[:num_reqs + 1],
seq_lens_cpu=self.runner.seq_lens.cpu, seq_lens_cpu=self.runner.seq_lens.cpu,
seq_lens=self.runner.seq_lens.gpu[:num_reqs], seq_lens=self.runner.seq_lens.gpu[:num_reqs],
num_reqs=num_reqs, num_reqs=num_reqs,
@@ -77,27 +75,29 @@ class MtpProposer(EagleProposer):
max_query_len=self.num_speculative_tokens + 1, max_query_len=self.num_speculative_tokens + 1,
num_computed_tokens_cpu=num_computed_tokens_cpu, num_computed_tokens_cpu=num_computed_tokens_cpu,
actual_seq_lengths_q=self.runner.actual_seq_lengths_q, actual_seq_lengths_q=self.runner.actual_seq_lengths_q,
block_table_tensor=self.runner.input_batch.block_table[0]. block_table_tensor=self.runner.input_batch.block_table[0].get_device_tensor(),
get_device_tensor(), slot_mapping=self.runner.input_batch.block_table[0].slot_mapping.gpu,
slot_mapping=self.runner.input_batch.block_table[0].
slot_mapping.gpu,
positions=self.runner.positions.gpu, positions=self.runner.positions.gpu,
attn_state=self.runner.attn_state, attn_state=self.runner.attn_state,
decode_token_per_req=self.runner.decode_token_per_req, decode_token_per_req=self.runner.decode_token_per_req,
max_seq_len=0) max_seq_len=0,
)
if self.pcp_size * self.dcp_size > 1: if self.pcp_size * self.dcp_size > 1:
# update long_seq related params and flatten block_table # update long_seq related params and flatten block_table
common_attn_metadata.prefill_context_parallel_metadata = \ common_attn_metadata.prefill_context_parallel_metadata = self.runner.pcp_manager.long_seq_metadata
self.runner.pcp_manager.long_seq_metadata common_attn_metadata.block_table_tensor = self.runner.input_batch.block_table[
common_attn_metadata.block_table_tensor = \ 0
self.runner.input_batch.block_table[0].get_device_tensor()[ ].get_device_tensor()[: num_reqs * self.decode_threshold]
:num_reqs * self.decode_threshold]
builder = self.runner.attn_groups[0][0].get_metadata_builder() builder = self.runner.attn_groups[0][0].get_metadata_builder()
# `AscendAttentionState.SpecDecoding` is only designed for mla, `AscendAttentionState.ChunkedPrefill` is used in self-attention. # `AscendAttentionState.SpecDecoding` is only designed for mla,
attn_state = AscendAttentionState.SpecDecoding if self.vllm_config.model_config.use_mla else AscendAttentionState.ChunkedPrefill # `AscendAttentionState.ChunkedPrefill` is used in self-attention.
attn_metadata_mtp = builder.build_for_graph_capture( attn_state = (
common_attn_metadata, attn_state) AscendAttentionState.SpecDecoding
if self.vllm_config.model_config.use_mla
else AscendAttentionState.ChunkedPrefill
)
attn_metadata_mtp = builder.build_for_graph_capture(common_attn_metadata, attn_state)
attn_metadata = {} attn_metadata = {}
for layer_name in self.attn_layer_names: for layer_name in self.attn_layer_names:
attn_metadata[layer_name] = attn_metadata_mtp attn_metadata[layer_name] = attn_metadata_mtp
@@ -113,32 +113,34 @@ class MtpProposer(EagleProposer):
if i > 0 and not in_graph_capturing and aclgraph_runtime_mode == CUDAGraphMode.FULL: if i > 0 and not in_graph_capturing and aclgraph_runtime_mode == CUDAGraphMode.FULL:
aclgraph_runtime_mode = CUDAGraphMode.NONE aclgraph_runtime_mode = CUDAGraphMode.NONE
with set_ascend_forward_context( with set_ascend_forward_context(
attn_metadata, attn_metadata,
self.vllm_config, self.vllm_config,
num_tokens=num_tokens, num_tokens=num_tokens,
num_tokens_across_dp=num_tokens_across_dp, num_tokens_across_dp=num_tokens_across_dp,
num_actual_tokens=0, num_actual_tokens=0,
aclgraph_runtime_mode=aclgraph_runtime_mode, aclgraph_runtime_mode=aclgraph_runtime_mode,
batch_descriptor=batch_descriptor, batch_descriptor=batch_descriptor,
is_draft_model=True, is_draft_model=True,
in_profile_run=is_profile): in_profile_run=is_profile,
):
if not vllm_version_is("v0.15.0"): if not vllm_version_is("v0.15.0"):
# Reset MOE layer index for each MTP step iteration # Reset MOE layer index for each MTP step iteration
forward_context = get_forward_context() forward_context = get_forward_context()
if forward_context is not None: if forward_context is not None:
forward_context.moe_layer_index = 0 forward_context.moe_layer_index = 0
previous_hidden_states, positions = self.maybe_pad_and_reduce( previous_hidden_states, positions = self.maybe_pad_and_reduce(previous_hidden_states, positions)
previous_hidden_states, positions) self.model(input_ids=input_ids, positions=positions, hidden_states=previous_hidden_states)
self.model(input_ids=input_ids,
positions=positions,
hidden_states=previous_hidden_states)
forward_context = get_forward_context() forward_context = get_forward_context()
if forward_context.cudagraph_runtime_mode == CUDAGraphMode.FULL and \ if (
not forward_context.capturing and not self.use_sparse: forward_context.cudagraph_runtime_mode == CUDAGraphMode.FULL
and not forward_context.capturing
and not self.use_sparse
):
self._update_full_graph_params(forward_context, num_tokens) self._update_full_graph_params(forward_context, num_tokens)
previous_hidden_states, positions, _ = self.maybe_all_gather_and_unpad( previous_hidden_states, positions, _ = self.maybe_all_gather_and_unpad(
previous_hidden_states, positions) previous_hidden_states, positions
)
dummy_compute_logits(previous_hidden_states) dummy_compute_logits(previous_hidden_states)
if with_prefill: if with_prefill:
break break
@@ -153,11 +155,10 @@ class MtpProposer(EagleProposer):
target_hidden_states: torch.Tensor, target_hidden_states: torch.Tensor,
# [batch_size] # [batch_size]
next_token_ids: torch.Tensor, next_token_ids: torch.Tensor,
last_token_indices: Optional[torch.Tensor], last_token_indices: torch.Tensor | None,
common_attn_metadata: CommonAttentionMetadata, common_attn_metadata: CommonAttentionMetadata,
sampling_metadata: SamplingMetadata, sampling_metadata: SamplingMetadata,
mm_embed_inputs: Optional[tuple[list[torch.Tensor], mm_embed_inputs: tuple[list[torch.Tensor], torch.Tensor] | None = None,
torch.Tensor]] = None,
req_scheduled_tokens=None, req_scheduled_tokens=None,
long_seq_metadata=None, long_seq_metadata=None,
num_prefill_reqs=0, num_prefill_reqs=0,
@@ -165,16 +166,22 @@ class MtpProposer(EagleProposer):
scheduler_output: SchedulerOutput = None, scheduler_output: SchedulerOutput = None,
num_scheduled_tokens: int = 0, num_scheduled_tokens: int = 0,
) -> torch.Tensor: ) -> torch.Tensor:
if ( if self.pcp_size * self.dcp_size == 1 and not self.speculative_config.disable_padded_drafter_batch:
self.pcp_size * self.dcp_size == 1
and not self.speculative_config.disable_padded_drafter_batch
):
draft_token_ids = super()._propose( draft_token_ids = super()._propose(
target_token_ids, target_positions, target_hidden_states, target_token_ids,
next_token_ids, last_token_indices, common_attn_metadata, target_positions,
sampling_metadata, mm_embed_inputs, req_scheduled_tokens, target_hidden_states,
long_seq_metadata, num_prefill_reqs, num_decode_reqs, next_token_ids,
scheduler_output, num_scheduled_tokens last_token_indices,
common_attn_metadata,
sampling_metadata,
mm_embed_inputs,
req_scheduled_tokens,
long_seq_metadata,
num_prefill_reqs,
num_decode_reqs,
scheduler_output,
num_scheduled_tokens,
) )
return draft_token_ids return draft_token_ids
@@ -186,13 +193,12 @@ class MtpProposer(EagleProposer):
if self.method == "eagle3": if self.method == "eagle3":
assert isinstance(self.model, Eagle3LlamaForCausalLM) assert isinstance(self.model, Eagle3LlamaForCausalLM)
target_hidden_states = self.model.combine_hidden_states( target_hidden_states = self.model.combine_hidden_states(target_hidden_states)
target_hidden_states)
assert target_hidden_states.shape[-1] == self.hidden_size assert target_hidden_states.shape[-1] == self.hidden_size
# Shift the input ids by one token. # Shift the input ids by one token.
# E.g., [a1, b1, b2, c1, c2, c3] -> [b1, b2, c1, c2, c3, c3] # E.g., [a1, b1, b2, c1, c2, c3] -> [b1, b2, c1, c2, c3, c3]
self.input_ids[:num_tokens - 1] = target_token_ids[1:] self.input_ids[: num_tokens - 1] = target_token_ids[1:]
# Replace the last token with the next token. # Replace the last token with the next token.
# E.g., [b1, b2, c1, c2, c3, c3] -> [a2, b2, b3, c2, c3, c4] # E.g., [b1, b2, c1, c2, c3, c3] -> [a2, b2, b3, c2, c3, c4]
self.input_ids[last_token_indices] = next_token_ids self.input_ids[last_token_indices] = next_token_ids
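
A standalone illustration of the two-step shift described in the comments, using small invented ids (a1=1, b1..b3=11..13, c1..c4=21..24):

import torch

target_token_ids = torch.tensor([1, 11, 12, 21, 22, 23])  # a1,b1,b2,c1,c2,c3
next_token_ids = torch.tensor([2, 13, 24])                # a2,b3,c4
last_token_indices = torch.tensor([0, 2, 5])
input_ids = target_token_ids.clone()
input_ids[:-1] = target_token_ids[1:]           # shift left by one token
input_ids[last_token_indices] = next_token_ids  # splice in the next tokens
assert input_ids.tolist() == [2, 12, 13, 22, 23, 24]      # a2,b2,b3,c2,c3,c4
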
@@ -213,20 +219,16 @@ class MtpProposer(EagleProposer):
num_tokens_d_padded = num_tokens_d * self.pcp_size num_tokens_d_padded = num_tokens_d * self.pcp_size
input_ids_d = self.input_ids[:num_tokens_d] input_ids_d = self.input_ids[:num_tokens_d]
input_ids_p = self.input_ids[num_tokens_d:num_tokens] input_ids_p = self.input_ids[num_tokens_d:num_tokens]
target_hidden_states_d_padded = \ target_hidden_states_d_padded = target_hidden_states[:num_tokens_d_padded]
target_hidden_states[:num_tokens_d_padded]
if num_tokens_d: if num_tokens_d:
# remove padding (from pcp all-gather) in decode part # remove padding (from pcp all-gather) in decode part
mask_start_loc = torch.cat([ mask_start_loc = torch.cat(
torch.tensor([0], dtype=torch.int32), [torch.tensor([0], dtype=torch.int32), torch.cumsum(query_lens_d * self.pcp_size, dim=0)[:-1]]
torch.cumsum(query_lens_d * self.pcp_size, dim=0)[:-1] )
])
mask_len = query_lens_d mask_len = query_lens_d
mask = [] mask = []
for req_id in range(num_decode_reqs): for req_id in range(num_decode_reqs):
mask += list( mask += list(range(mask_start_loc[req_id], mask_start_loc[req_id] + mask_len[req_id]))
range(mask_start_loc[req_id],
mask_start_loc[req_id] + mask_len[req_id]))
target_hidden_states_d = target_hidden_states_d_padded[mask] target_hidden_states_d = target_hidden_states_d_padded[mask]
else: else:
target_hidden_states_d = target_hidden_states_d_padded target_hidden_states_d = target_hidden_states_d_padded
@@ -234,46 +236,33 @@ class MtpProposer(EagleProposer):
req_scheduled_tokens_p = {} req_scheduled_tokens_p = {}
for i, req_id in enumerate(self.runner.input_batch.req_ids): for i, req_id in enumerate(self.runner.input_batch.req_ids):
if i >= num_decode_reqs: if i >= num_decode_reqs:
req_scheduled_tokens_p[req_id] = \ req_scheduled_tokens_p[req_id] = req_scheduled_tokens[req_id]
req_scheduled_tokens[req_id] (num_tokens_p, input_ids_p, target_hidden_states_p, max_query_len_p, seq_lens_p, cu_num_tokens_p) = (
(num_tokens_p, input_ids_p, target_hidden_states_p, self._split_pcp_input(req_scheduled_tokens_p, input_ids_p, target_hidden_states_p)
max_query_len_p, seq_lens_p, cu_num_tokens_p) = \ )
self._split_pcp_input(
req_scheduled_tokens_p, input_ids_p, target_hidden_states_p)
num_tokens = num_tokens_d + num_tokens_p num_tokens = num_tokens_d + num_tokens_p
target_positions = target_positions[:num_tokens] target_positions = target_positions[:num_tokens]
self.input_ids[:num_tokens].copy_( self.input_ids[:num_tokens].copy_(torch.cat([input_ids_d, input_ids_p], dim=0))
torch.cat([input_ids_d, input_ids_p], dim=0)) target_hidden_states = torch.cat([target_hidden_states_d, target_hidden_states_p], dim=0)
target_hidden_states = torch.cat(
[target_hidden_states_d, target_hidden_states_p], dim=0)
# 2. update sample_indices according to main model # 2. update sample_indices according to main model
if num_decode_reqs: if num_decode_reqs:
last_token_indices[:num_decode_reqs] = \ last_token_indices[:num_decode_reqs] = self.runner.logits_indices[last_token_indices[:num_decode_reqs]]
self.runner.logits_indices[last_token_indices[:num_decode_reqs]]
if num_prefill_reqs: if num_prefill_reqs:
last_token_indices[-num_prefill_reqs:] = \ last_token_indices[-num_prefill_reqs:] = self.runner.logits_indices[-num_prefill_reqs:]
self.runner.logits_indices[-num_prefill_reqs:]
# 3. update attn_metadata params that may be influenced by pcp # 3. update attn_metadata params that may be influenced by pcp
common_attn_metadata.num_actual_tokens = num_tokens common_attn_metadata.num_actual_tokens = num_tokens
common_attn_metadata.max_query_len = max( common_attn_metadata.max_query_len = max(self.decode_threshold, max_query_len_p)
self.decode_threshold, max_query_len_p)
common_attn_metadata.seq_lens[-num_prefill_reqs:] = seq_lens_p common_attn_metadata.seq_lens[-num_prefill_reqs:] = seq_lens_p
common_attn_metadata.seq_lens_cpu[ common_attn_metadata.seq_lens_cpu[-num_prefill_reqs:] = seq_lens_p
-num_prefill_reqs:] = seq_lens_p query_start_loc_p = cu_num_tokens_p[1:] + common_attn_metadata.query_start_loc[num_decode_reqs].item()
query_start_loc_p = cu_num_tokens_p[1:] + \ common_attn_metadata.query_start_loc[-num_prefill_reqs:] = query_start_loc_p
common_attn_metadata.query_start_loc[num_decode_reqs].item() common_attn_metadata.query_start_loc_cpu[-num_prefill_reqs:] = query_start_loc_p
common_attn_metadata.query_start_loc[-num_prefill_reqs:] = \
query_start_loc_p
common_attn_metadata.query_start_loc_cpu[-num_prefill_reqs:] = \
query_start_loc_p
assert self.runner is not None assert self.runner is not None
# Note(qcs): We may need to refactor this check logic. # Note(qcs): We may need to refactor this check logic.
if self.use_cuda_graph and num_scheduled_tokens <= self.runner.cudagraph_batch_sizes[ if self.use_cuda_graph and num_scheduled_tokens <= self.runner.cudagraph_batch_sizes[-1]:
-1]: num_input_tokens = self.runner.cudagraph_dispatcher._bs_to_padded_graph_size[num_scheduled_tokens]
num_input_tokens = self.runner.cudagraph_dispatcher._bs_to_padded_graph_size[
num_scheduled_tokens]
else: else:
# Eager mode, no padding needed # Eager mode, no padding needed
num_input_tokens = num_tokens num_input_tokens = num_tokens
@@ -282,23 +271,23 @@ class MtpProposer(EagleProposer):
self._set_positions(num_tokens, target_positions) self._set_positions(num_tokens, target_positions)
self.hidden_states[:num_tokens] = target_hidden_states self.hidden_states[:num_tokens] = target_hidden_states
# eager/acl piecewise mode need to update num_tokens_across_dp # eager/acl piecewise mode need to update num_tokens_across_dp
(num_input_tokens, num_tokens_across_dp, (num_input_tokens, num_tokens_across_dp, with_prefill) = self.runner._sync_metadata_across_dp(
with_prefill) = self.runner._sync_metadata_across_dp( num_input_tokens, self.runner.with_prefill
num_input_tokens, self.runner.with_prefill) )
# Enable shared_expert_dp and MTP FULL graph may cause accuracy issues. # Enable shared_expert_dp and MTP FULL graph may cause accuracy issues.
if scheduler_output and not self.enable_shared_expert_dp: if scheduler_output and not self.enable_shared_expert_dp:
max_query_len = common_attn_metadata.max_query_len max_query_len = common_attn_metadata.max_query_len
uniform_decode = (max_query_len in list( uniform_decode = (max_query_len in list(range(1, self.num_speculative_tokens + 2))) and (
range(1, self.num_speculative_tokens + scheduler_output.total_num_scheduled_tokens
2))) and (scheduler_output.total_num_scheduled_tokens == self.runner.input_batch.num_reqs * (self.num_speculative_tokens + 1)
== self.runner.input_batch.num_reqs * )
(self.num_speculative_tokens + 1))
else: else:
uniform_decode = False uniform_decode = False
has_lora = len(self.runner.input_batch.lora_id_to_lora_request) > 0 has_lora = len(self.runner.input_batch.lora_id_to_lora_request) > 0
aclgraph_runtime_mode, batch_descriptor = \ aclgraph_runtime_mode, batch_descriptor = self.runner.cudagraph_dispatcher.dispatch(
self.runner.cudagraph_dispatcher.dispatch(num_tokens=num_input_tokens, uniform_decode=uniform_decode, has_lora=has_lora) num_tokens=num_input_tokens, uniform_decode=uniform_decode, has_lora=has_lora
)
if not self.use_cuda_graph: if not self.use_cuda_graph:
# there is synchronization between mtp steps when enabling aclgraph, # there is synchronization between mtp steps when enabling aclgraph,
# disable aclgraph when use async scheduling to avoid the # disable aclgraph when use async scheduling to avoid the
@@ -307,8 +296,10 @@ class MtpProposer(EagleProposer):
# and _propose. # and _propose.
aclgraph_runtime_mode = CUDAGraphMode.NONE aclgraph_runtime_mode = CUDAGraphMode.NONE
if self.vllm_config.compilation_config.cudagraph_mode.has_full_cudagraphs( if (
) and aclgraph_runtime_mode == CUDAGraphMode.FULL: self.vllm_config.compilation_config.cudagraph_mode.has_full_cudagraphs()
and aclgraph_runtime_mode == CUDAGraphMode.FULL
):
graph_pad_size = num_input_tokens graph_pad_size = num_input_tokens
else: else:
graph_pad_size = -1 graph_pad_size = -1
@@ -319,64 +310,58 @@ class MtpProposer(EagleProposer):
common_attn_metadata.graph_pad_size = graph_pad_size common_attn_metadata.graph_pad_size = graph_pad_size
common_attn_metadata.num_input_tokens = num_input_tokens common_attn_metadata.num_input_tokens = num_input_tokens
builder = self.runner.attn_groups[0][0].get_metadata_builder() builder = self.runner.attn_groups[0][0].get_metadata_builder()
attn_metadata_mtp = builder.build(0, common_attn_metadata, attn_metadata_mtp = builder.build(0, common_attn_metadata, self.runner.get_model())
self.runner.get_model())
attn_metadata = {} attn_metadata = {}
for layer_name in self.attn_layer_names: for layer_name in self.attn_layer_names:
attn_metadata[layer_name] = attn_metadata_mtp attn_metadata[layer_name] = attn_metadata_mtp
for step in range(self.num_speculative_tokens): for step in range(self.num_speculative_tokens):
with set_ascend_forward_context( with set_ascend_forward_context(
attn_metadata, attn_metadata,
self.vllm_config, self.vllm_config,
num_tokens=num_input_tokens, num_tokens=num_input_tokens,
num_tokens_across_dp=num_tokens_across_dp, num_tokens_across_dp=num_tokens_across_dp,
aclgraph_runtime_mode=aclgraph_runtime_mode, aclgraph_runtime_mode=aclgraph_runtime_mode,
batch_descriptor=batch_descriptor, batch_descriptor=batch_descriptor,
num_actual_tokens=num_tokens, num_actual_tokens=num_tokens,
is_draft_model=True): is_draft_model=True,
):
if not vllm_version_is("v0.15.0"): if not vllm_version_is("v0.15.0"):
# Reset MOE layer index for each MTP step to match all_moe_layers registration # Reset MOE layer index for each MTP step to match all_moe_layers registration
forward_context = get_forward_context() forward_context = get_forward_context()
if forward_context is not None: if forward_context is not None:
forward_context.moe_layer_index = 0 forward_context.moe_layer_index = 0
with record_function_or_nullcontext('mtp_forward'): with record_function_or_nullcontext("mtp_forward"):
model_kwargs = {} model_kwargs = {}
model_kwargs["attn_metadata"] = attn_metadata model_kwargs["attn_metadata"] = attn_metadata
input_ids = self.input_ids[:num_input_tokens] input_ids = self.input_ids[:num_input_tokens]
positions = self._get_positions(num_input_tokens) positions = self._get_positions(num_input_tokens)
hidden_states = self.hidden_states[:num_input_tokens] hidden_states = self.hidden_states[:num_input_tokens]
hidden_states, positions = self.maybe_pad_and_reduce( hidden_states, positions = self.maybe_pad_and_reduce(hidden_states, positions)
hidden_states, positions)
hidden_states = self.model(input_ids=input_ids, hidden_states = self.model(input_ids=input_ids, positions=positions, hidden_states=hidden_states)
positions=positions, forward_context = get_forward_context()
hidden_states=hidden_states) if forward_context.cudagraph_runtime_mode == CUDAGraphMode.FULL and not self.use_sparse:
forward_context = get_forward_context() self._update_full_graph_params(forward_context, num_input_tokens)
if forward_context.cudagraph_runtime_mode == CUDAGraphMode.FULL and not self.use_sparse:
self._update_full_graph_params(forward_context,
num_input_tokens)
hidden_states, positions, _ = self.maybe_all_gather_and_unpad( hidden_states, positions, _ = self.maybe_all_gather_and_unpad(hidden_states, positions)
hidden_states, positions)
num_indices = last_token_indices.shape[0] num_indices = last_token_indices.shape[0]
if lmhead_tp_enable(): if lmhead_tp_enable():
max_num_reqs_across_dp = self.vllm_config.scheduler_config.max_num_seqs * self.runner.uniform_decode_query_len max_num_reqs_across_dp = (
last_token_indices = nn.functional.pad( self.vllm_config.scheduler_config.max_num_seqs * self.runner.uniform_decode_query_len
last_token_indices, )
(0, max_num_reqs_across_dp - num_indices)) last_token_indices = nn.functional.pad(last_token_indices, (0, max_num_reqs_across_dp - num_indices))
if self.pcp_size > 1 and step == 0: if self.pcp_size > 1 and step == 0:
# remove graph padding before all_gather # remove graph padding before all_gather
hidden_states = hidden_states[:num_tokens] hidden_states = hidden_states[:num_tokens]
hidden_states = get_pcp_group().all_gather(hidden_states, 0) hidden_states = get_pcp_group().all_gather(hidden_states, 0)
hidden_states = torch.index_select( hidden_states = torch.index_select(
hidden_states, 0, self.runner.pcp_manager. hidden_states, 0, self.runner.pcp_manager.pcp_allgather_restore_idx.gpu[: hidden_states.shape[0]]
pcp_allgather_restore_idx.gpu[:hidden_states.shape[0]]) )
sample_hidden_states = hidden_states[last_token_indices] sample_hidden_states = hidden_states[last_token_indices]
logits = self.model.compute_logits(sample_hidden_states) logits = self.model.compute_logits(sample_hidden_states)
@@ -409,7 +394,7 @@ class MtpProposer(EagleProposer):
hidden_states = hidden_states[last_token_indices] hidden_states = hidden_states[last_token_indices]
slot_mapping = attn_metadata_i.slot_mapping[last_token_indices] slot_mapping = attn_metadata_i.slot_mapping[last_token_indices]
attn_metadata_i.slot_mapping.fill_(-1) attn_metadata_i.slot_mapping.fill_(-1)
attn_metadata_i.query_start_loc = self.arange[:batch_size + 1] attn_metadata_i.query_start_loc = self.arange[: batch_size + 1]
last_token_indices = self.arange[:batch_size] last_token_indices = self.arange[:batch_size]
if getattr(attn_metadata_i, "num_decode_tokens", 0): if getattr(attn_metadata_i, "num_decode_tokens", 0):
attn_metadata_i.num_decode_tokens = batch_size attn_metadata_i.num_decode_tokens = batch_size
@@ -420,44 +405,44 @@ class MtpProposer(EagleProposer):
# Instead, we pre-allocate mtp slot_mapping in model_runner # Instead, we pre-allocate mtp slot_mapping in model_runner
# (_generate_pcp_mtp_input), and use updated slot_indices # (_generate_pcp_mtp_input), and use updated slot_indices
# to get corresponding slot_mapping in each step. # to get corresponding slot_mapping in each step.
num_reject_tokens = torch.tensor( num_reject_tokens = (
self.runner.pcp_manager.cu_num_tokens_pcp_full, torch.tensor(self.runner.pcp_manager.cu_num_tokens_pcp_full, dtype=torch.int32).to(self.device)
dtype=torch.int32).to( - ori_last_token_indices
self.device) - ori_last_token_indices - 1 - 1
num_accept_tokens = \ )
query_lens_d.to(self.device) - num_reject_tokens num_accept_tokens = query_lens_d.to(self.device) - num_reject_tokens
ori_seq_len = attn_metadata_i.seq_lens ori_seq_len = attn_metadata_i.seq_lens
mtp_slot_mapping = self.runner.pcp_manager.mtp_slot_pad mtp_slot_mapping = self.runner.pcp_manager.mtp_slot_pad
# slot_mapping index base offset: # slot_mapping index base offset:
# scheduled tokens + pre-allocated mtp tokens + accepted tokens # scheduled tokens + pre-allocated mtp tokens + accepted tokens
slot_idx_base = ( slot_idx_base = (
torch.cat([ torch.cat(
torch.tensor( [
[0], dtype=torch.int32, device=self.device), torch.tensor([0], dtype=torch.int32, device=self.device),
(torch.cumsum(query_lens_d, dim=0)[:-1] * (torch.cumsum(query_lens_d, dim=0)[:-1] * self.pcp_size).to(self.device),
self.pcp_size).to(self.device) ]
]) + )
torch.arange(num_decode_reqs, device=self.device) * + torch.arange(num_decode_reqs, device=self.device)
(self.num_speculative_tokens - 1) * self.pcp_size + * (self.num_speculative_tokens - 1)
(num_accept_tokens - 1) * self.pcp_size) * self.pcp_size
+ (num_accept_tokens - 1) * self.pcp_size
)
slot_indices_list = [] slot_indices_list = []
for req_id in range(num_decode_reqs): for req_id in range(num_decode_reqs):
slot_indices_list.append( slot_indices_list.append(
torch.arange(slot_idx_base[req_id], torch.arange(
slot_idx_base[req_id] + self.pcp_size, slot_idx_base[req_id], slot_idx_base[req_id] + self.pcp_size, device=self.device
device=self.device)) )
)
slot_indices = torch.cat(slot_indices_list, dim=0) slot_indices = torch.cat(slot_indices_list, dim=0)
# fold block_table (restore it to original size before flattened) # fold block_table (restore it to original size before flattened)
block_indices = torch.cat([ block_indices = torch.cat(
torch.tensor([0], dtype=torch.int32), [torch.tensor([0], dtype=torch.int32), torch.cumsum(query_lens_d, dim=0)[:-1]]
torch.cumsum(query_lens_d, dim=0)[:-1] )
]) attn_metadata_i.decode.block_table[:batch_size] = attn_metadata_i.decode.block_table[block_indices]
attn_metadata_i.decode.block_table[:batch_size] = \ attn_metadata_i.decode.block_table = attn_metadata_i.decode.block_table[:batch_size]
attn_metadata_i.decode.block_table[block_indices]
attn_metadata_i.decode.block_table = \
attn_metadata_i.decode.block_table[:batch_size]
input_ids = draft_token_ids_list[-1].int() input_ids = draft_token_ids_list[-1].int()
positions += 1 positions += 1
@@ -465,38 +450,32 @@ class MtpProposer(EagleProposer):
decode_metadata = getattr(attn_metadata_i, "decode", None) decode_metadata = getattr(attn_metadata_i, "decode", None)
prefill_metadata = getattr(attn_metadata_i, "prefill", None) prefill_metadata = getattr(attn_metadata_i, "prefill", None)
# When disable_padded_drafter_batch=False, these params probably should not be updated. if decode_metadata is not None and (
if decode_metadata is not None and (self.speculative_config.disable_padded_drafter_batch or \ if decode_metadata is not None and (
aclgraph_runtime_mode != CUDAGraphMode.FULL): self.speculative_config.disable_padded_drafter_batch or aclgraph_runtime_mode != CUDAGraphMode.FULL
decode_metadata.actual_seq_lengths_q = self.arange_cpu[ ):
1:batch_size + 1].tolist() decode_metadata.actual_seq_lengths_q = self.arange_cpu[1 : batch_size + 1].tolist()
if aclgraph_runtime_mode == CUDAGraphMode.FULL: if aclgraph_runtime_mode == CUDAGraphMode.FULL:
decode_metadata.actual_seq_lengths_q = \ decode_metadata.actual_seq_lengths_q = builder.pad_actual_seq_len_q_mtp_disable_pad(
builder.pad_actual_seq_len_q_mtp_disable_pad( graph_pad_size - batch_size, batch_size, decode_metadata.actual_seq_lengths_q
graph_pad_size - batch_size, )
batch_size, decode_metadata.cos, decode_metadata.sin = get_cos_and_sin_mla(positions[:batch_size])
decode_metadata.actual_seq_lengths_q)
decode_metadata.cos, decode_metadata.sin = get_cos_and_sin_mla(
positions[:batch_size])
# NOTE(woosuk): We should handle the case where the draft model # NOTE(woosuk): We should handle the case where the draft model
# generates tokens beyond the max model length. Since it is complex # generates tokens beyond the max model length. Since it is complex
# to remove such requests from the batch, we keep them in the batch # to remove such requests from the batch, we keep them in the batch
# but adjust the position ids and slot mappings to avoid the # but adjust the position ids and slot mappings to avoid the
# out-of-range access during the model execution. The draft tokens # out-of-range access during the model execution. The draft tokens
# generated with this adjustment should be ignored. # generated with this adjustment should be ignored.
exceeds_max_model_len = positions[: exceeds_max_model_len = positions[:batch_size] >= self.runner.model_config.max_model_len
batch_size] >= self.runner.model_config.max_model_len
# Mask out the position ids that exceed the max model length. # Mask out the position ids that exceed the max model length.
# Otherwise, we may get out-of-range error in RoPE. # Otherwise, we may get out-of-range error in RoPE.
clamped_positions = torch.where(exceeds_max_model_len, 0, clamped_positions = torch.where(exceeds_max_model_len, 0, positions[:batch_size])
positions[:batch_size])
# Increment the sequence lengths. # Increment the sequence lengths.
# This is an out-of-place operation to avoid modifying the original tensor # This is an out-of-place operation to avoid modifying the original tensor
# when enable async_scheduling. # when enable async_scheduling.
attn_metadata_i.seq_lens = attn_metadata_i.seq_lens + 1 attn_metadata_i.seq_lens = attn_metadata_i.seq_lens + 1
# For the requests that exceed the max model length, we set the # For the requests that exceed the max model length, we set the
# sequence length to 1 to minimize their overheads in attention. # sequence length to 1 to minimize their overheads in attention.
exceeds_mask = attn_metadata_i.seq_lens[:batch_size] > \ exceeds_mask = attn_metadata_i.seq_lens[:batch_size] > self.runner.model_config.max_model_len
self.runner.model_config.max_model_len
attn_metadata_i.seq_lens[:batch_size].masked_fill_(exceeds_mask, 1) attn_metadata_i.seq_lens[:batch_size].masked_fill_(exceeds_mask, 1)
# Mask out the slot mappings that exceed the max model length. # Mask out the slot mappings that exceed the max model length.
# Otherwise, the KV cache will be inadvertently updated with the # Otherwise, the KV cache will be inadvertently updated with the
@@ -504,13 +483,14 @@ class MtpProposer(EagleProposer):
slot_mapping += 1 slot_mapping += 1
if self.pcp_size > 1: if self.pcp_size > 1:
exceeds_max_model_len = exceeds_max_model_len.repeat_interleave( exceeds_max_model_len = exceeds_max_model_len.repeat_interleave(
slot_mapping.size(0) // exceeds_max_model_len.size(0)) slot_mapping.size(0) // exceeds_max_model_len.size(0)
)
slot_mapping.masked_fill_(exceeds_max_model_len, PADDING_SLOT_ID) slot_mapping.masked_fill_(exceeds_max_model_len, PADDING_SLOT_ID)
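
A small standalone sketch of the clamp-and-mask trick for requests that run past max_model_len (sentinel and values invented; the real PADDING_SLOT_ID is imported elsewhere):

import torch

PADDING_SLOT_ID = -1  # assumed sentinel value
max_model_len = 8
positions = torch.tensor([6, 7, 8, 9])
exceeds = positions >= max_model_len
clamped_positions = torch.where(exceeds, 0, positions)  # keep RoPE in range
slot_mapping = torch.tensor([10, 11, 12, 13])
slot_mapping.masked_fill_(exceeds, PADDING_SLOT_ID)     # skip those KV writes
assert slot_mapping.tolist() == [10, 11, -1, -1]
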
# copy inputs to buffer for cudagraph # copy inputs to buffer for cudagraph
self.input_ids[:batch_size] = input_ids self.input_ids[:batch_size] = input_ids
self._set_positions(batch_size, clamped_positions) self._set_positions(batch_size, clamped_positions)
self.hidden_states[:hidden_states.shape[0]] = hidden_states self.hidden_states[: hidden_states.shape[0]] = hidden_states
if self.pcp_size * self.dcp_size > 1: if self.pcp_size * self.dcp_size > 1:
# update local seq_len # update local seq_len
num_computed_tokens_of_pcp_dcp = self.runner.pcp_manager._get_cp_local_seq_lens( num_computed_tokens_of_pcp_dcp = self.runner.pcp_manager._get_cp_local_seq_lens(
@@ -519,19 +499,17 @@ class MtpProposer(EagleProposer):
self.dcp_size, self.dcp_size,
self.runner.parallel_config.cp_kv_cache_interleave_size, self.runner.parallel_config.cp_kv_cache_interleave_size,
) )
cp_seq_len = \ cp_seq_len = num_computed_tokens_of_pcp_dcp[:, self.pcp_rank, self.dcp_rank]
num_computed_tokens_of_pcp_dcp[:, self.pcp_rank, self.dcp_rank]
attn_metadata_i.decode.cp_seq_len = cp_seq_len attn_metadata_i.decode.cp_seq_len = cp_seq_len
# update slot_mapping # update slot_mapping
slot_indices += self.pcp_size slot_indices += self.pcp_size
slot_mapping = mtp_slot_mapping[slot_indices] slot_mapping = mtp_slot_mapping[slot_indices]
attn_metadata_i.slot_mapping[:batch_size * attn_metadata_i.slot_mapping[: batch_size * self.pcp_size] = slot_mapping
self.pcp_size] = slot_mapping
else: else:
attn_metadata_i.slot_mapping[:batch_size] = slot_mapping attn_metadata_i.slot_mapping[:batch_size] = slot_mapping
if self.speculative_config.disable_padded_drafter_batch: if self.speculative_config.disable_padded_drafter_batch:
if self.uses_mrope: if self.uses_mrope:
self.mrope_positions[:, batch_size:num_input_tokens] = 0 self.mrope_positions[:, batch_size:num_input_tokens] = 0
else: else:
self.positions[batch_size:num_input_tokens] = 0 self.positions[batch_size:num_input_tokens] = 0
self.input_ids[batch_size:num_input_tokens] = 0 self.input_ids[batch_size:num_input_tokens] = 0
@@ -539,31 +517,24 @@ class MtpProposer(EagleProposer):
if prefill_metadata is not None: if prefill_metadata is not None:
prefill_metadata.seq_lens = attn_metadata_i.seq_lens prefill_metadata.seq_lens = attn_metadata_i.seq_lens
prefill_metadata.seq_lens_list = prefill_metadata.seq_lens.tolist( prefill_metadata.seq_lens_list = prefill_metadata.seq_lens.tolist()
)
prefill_metadata.context_lens = attn_metadata_i.seq_lens prefill_metadata.context_lens = attn_metadata_i.seq_lens
prefill_metadata.input_positions = self._get_positions( prefill_metadata.input_positions = self._get_positions(num_input_tokens)
num_input_tokens)
prefill_metadata.max_seq_lens += 1 prefill_metadata.max_seq_lens += 1
prefill_metadata.max_seq_lens = min( prefill_metadata.max_seq_lens = min(
prefill_metadata.max_seq_lens, prefill_metadata.max_seq_lens, self.runner.model_config.max_model_len
self.runner.model_config.max_model_len) )
if decode_metadata is not None: if decode_metadata is not None:
decode_metadata.seq_lens = attn_metadata_i.seq_lens decode_metadata.seq_lens = attn_metadata_i.seq_lens
decode_metadata.seq_lens_list = decode_metadata.seq_lens.tolist( decode_metadata.seq_lens_list = decode_metadata.seq_lens.tolist()
)
decode_seq_lens_list = decode_metadata.seq_lens_list decode_seq_lens_list = decode_metadata.seq_lens_list
if aclgraph_runtime_mode == CUDAGraphMode.FULL and \ if aclgraph_runtime_mode == CUDAGraphMode.FULL and self.speculative_config.disable_padded_drafter_batch:
self.speculative_config.disable_padded_drafter_batch: decode_metadata.seq_lens_list = decode_seq_lens_list + [0] * (
decode_metadata.seq_lens_list = decode_seq_lens_list + [ graph_pad_size - len(decode_seq_lens_list)
0 )
] * (graph_pad_size - len(decode_seq_lens_list)) decode_metadata.input_positions = self._get_positions(num_input_tokens)
decode_metadata.input_positions = self._get_positions(
num_input_tokens)
decode_metadata.max_seq_lens += 1 decode_metadata.max_seq_lens += 1
decode_metadata.max_seq_lens = min( decode_metadata.max_seq_lens = min(decode_metadata.max_seq_lens, self.runner.model_config.max_model_len)
decode_metadata.max_seq_lens,
self.runner.model_config.max_model_len)
# mtp>1: [batch_size, k] # mtp>1: [batch_size, k]
draft_token_ids = torch.stack(draft_token_ids_list, dim=1) draft_token_ids = torch.stack(draft_token_ids_list, dim=1)
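
The per-step lists become the [batch_size, k] draft matrix via one stack. A tiny sketch with k = 2 speculative steps and batch_size = 3 (values invented):

import torch

draft_token_ids_list = [torch.tensor([5, 6, 7]), torch.tensor([8, 9, 10])]
draft_token_ids = torch.stack(draft_token_ids_list, dim=1)
assert draft_token_ids.shape == (3, 2)
assert draft_token_ids[0].tolist() == [5, 8]  # request 0's k draft tokens
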

View File

@@ -1,13 +1,11 @@
import torch import torch
from vllm.config import CUDAGraphMode from vllm.config import CUDAGraphMode
from vllm.v1.spec_decode.ngram_proposer import \ from vllm.v1.spec_decode.ngram_proposer import NgramProposer as VllmNgramProposer
NgramProposer as VllmNgramProposer
from vllm_ascend.spec_decode.interface import Proposer, SpecDcodeType from vllm_ascend.spec_decode.interface import Proposer, SpecDcodeType
class NgramProposer(VllmNgramProposer, Proposer): class NgramProposer(VllmNgramProposer, Proposer):
def __init__(self, vllm_config, device, runner): def __init__(self, vllm_config, device, runner):
super().__init__(vllm_config) super().__init__(vllm_config)
self.name = SpecDcodeType.NGRAM self.name = SpecDcodeType.NGRAM
@@ -19,27 +17,31 @@ class NgramProposer(VllmNgramProposer, Proposer):
pass pass
@torch.inference_mode() @torch.inference_mode()
def dummy_run(self, def dummy_run(
num_tokens, self,
with_prefill=None, num_tokens,
in_graph_capturing=None, with_prefill=None,
num_reqs=None, in_graph_capturing=None,
num_tokens_across_dp=None, num_reqs=None,
aclgraph_runtime_mode: CUDAGraphMode = CUDAGraphMode.NONE, num_tokens_across_dp=None,
batch_descriptor=None, aclgraph_runtime_mode: CUDAGraphMode = CUDAGraphMode.NONE,
dummy_compute_logits=lambda hidden_states: None, batch_descriptor=None,
is_profile=False): dummy_compute_logits=lambda hidden_states: None,
is_profile=False,
):
pass pass
def generate_token_ids(self, def generate_token_ids(
valid_sampled_token_ids, self,
sampling_metadata=None, valid_sampled_token_ids,
scheduler_output=None, sampling_metadata=None,
spec_decode_metadata=None, scheduler_output=None,
positions=None, spec_decode_metadata=None,
num_scheduled_tokens=None, positions=None,
hidden_states=None, num_scheduled_tokens=None,
aux_hidden_states=None) -> list[list[int]]: hidden_states=None,
aux_hidden_states=None,
) -> list[list[int]]:
valid_ngram_requests = [] valid_ngram_requests = []
for i, sampled_ids in enumerate(valid_sampled_token_ids): for i, sampled_ids in enumerate(valid_sampled_token_ids):
num_sampled_ids = len(sampled_ids) num_sampled_ids = len(sampled_ids)
@@ -57,8 +59,7 @@ class NgramProposer(VllmNgramProposer, Proposer):
start_idx = self.runner.input_batch.num_tokens_no_spec[i] start_idx = self.runner.input_batch.num_tokens_no_spec[i]
end_idx = start_idx + num_sampled_ids end_idx = start_idx + num_sampled_ids
self.runner.input_batch.token_ids_cpu[ self.runner.input_batch.token_ids_cpu[i, start_idx:end_idx] = sampled_ids
i, start_idx:end_idx] = sampled_ids
valid_ngram_requests.append(i) valid_ngram_requests.append(i)
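
A minimal analogue of appending the newly sampled ids into a request's row of the CPU token buffer (shape and contents invented; the real buffer lives on input_batch):

import torch

token_ids_cpu = torch.zeros(2, 8, dtype=torch.int64)
num_tokens_no_spec = [3, 5]
i, sampled_ids = 0, [101, 102]
start_idx = num_tokens_no_spec[i]
end_idx = start_idx + len(sampled_ids)
token_ids_cpu[i, start_idx:end_idx] = torch.tensor(sampled_ids)
assert token_ids_cpu[0].tolist() == [0, 0, 0, 101, 102, 0, 0, 0]
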

View File

@@ -1,13 +1,11 @@
import torch import torch
from vllm.config import CUDAGraphMode from vllm.config import CUDAGraphMode
from vllm.v1.spec_decode.suffix_decoding import \ from vllm.v1.spec_decode.suffix_decoding import SuffixDecodingProposer as VllmSuffixDecodingProposer
SuffixDecodingProposer as VllmSuffixDecodingProposer
from vllm_ascend.spec_decode.interface import Proposer, SpecDcodeType from vllm_ascend.spec_decode.interface import Proposer, SpecDcodeType
class SuffixDecodingProposer(VllmSuffixDecodingProposer, Proposer): class SuffixDecodingProposer(VllmSuffixDecodingProposer, Proposer):
def __init__(self, vllm_config, device, runner): def __init__(self, vllm_config, device, runner):
super().__init__(vllm_config) super().__init__(vllm_config)
self.name = SpecDcodeType.SUFFIX self.name = SpecDcodeType.SUFFIX
@@ -19,27 +17,30 @@ class SuffixDecodingProposer(VllmSuffixDecodingProposer, Proposer):
pass pass
@torch.inference_mode() @torch.inference_mode()
def dummy_run(self, def dummy_run(
num_tokens, self,
with_prefill=None, num_tokens,
in_graph_capturing=None, with_prefill=None,
num_reqs=None, in_graph_capturing=None,
num_tokens_across_dp=None, num_reqs=None,
aclgraph_runtime_mode: CUDAGraphMode = CUDAGraphMode.NONE, num_tokens_across_dp=None,
batch_descriptor=None, aclgraph_runtime_mode: CUDAGraphMode = CUDAGraphMode.NONE,
dummy_compute_logits=lambda hidden_states: None, batch_descriptor=None,
is_profile=False): dummy_compute_logits=lambda hidden_states: None,
is_profile=False,
):
pass pass
def generate_token_ids(self, def generate_token_ids(
valid_sampled_token_ids, self,
sampling_metadata=None, valid_sampled_token_ids,
scheduler_output=None, sampling_metadata=None,
spec_decode_metadata=None, scheduler_output=None,
positions=None, spec_decode_metadata=None,
num_scheduled_tokens=None, positions=None,
hidden_states=None, num_scheduled_tokens=None,
aux_hidden_states=None) -> list[list[int]]: hidden_states=None,
draft_token_ids = self.propose(self.runner.input_batch, aux_hidden_states=None,
valid_sampled_token_ids) ) -> list[list[int]]:
draft_token_ids = self.propose(self.runner.input_batch, valid_sampled_token_ids)
return draft_token_ids return draft_token_ids