support deepseek quant & mix-parallel with graphmode (#585)

### What this PR does / why we need it?
1. Support DeepSeek with W8A8 quantization.
2. Support DeepSeek with mixed parallelism (multi-DP, EP+TP).
3. Support DeepSeek with graph mode (see the usage sketch below).
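
All three features are driven through vLLM's `additional_config`. A minimal, hedged usage sketch: the two option keys are the ones consumed in this diff, while the `LLM` entrypoint, model name, and parallel sizes are illustrative assumptions, not part of this patch.

```python
from vllm import LLM

llm = LLM(
    model="deepseek-ai/DeepSeek-V2",  # illustrative model choice
    tensor_parallel_size=8,           # illustrative parallel size
    additional_config={
        # read by NPUModelRunnerBase and the patched CacheEngine below
        "enable_graph_mode": True,
        # read by NPUWorker when initializing Ascend model parallelism
        "expert_tensor_parallel_size": 1,
    },
)
```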
---------

Signed-off-by: wen-jie666 <wenjie39@huawei.com>
Signed-off-by: Yizhou Liu <liuyizhou5@h-partners.com>
Signed-off-by: libaokui <libaokui@huawei.com>
Signed-off-by: linfeng-yuan <1102311262@qq.com>
Co-authored-by: wen-jie666 <wenjie39@huawei.com>
Author: zzzzwwjj
Date: 2025-04-23 16:23:25 +08:00
Committed by: GitHub
Parent: e74331a1ed
Commit: 5c6d05a59e
13 changed files with 520 additions and 221 deletions


@@ -17,53 +17,66 @@
 # limitations under the License.
 #
-from typing import List, Tuple
+from typing import Any, List

 import torch
+from vllm.config import get_current_vllm_config
 from vllm.utils import is_pin_memory_available
 from vllm.worker.cache_engine import CacheEngine

-from vllm_ascend.utils import VLLM_ENABLE_GRAPH_MODE


 def allocate_kv_cache(
     self,
     num_blocks: int,
     device: str,
-) -> List[Tuple]:
+) -> List[Any]:
     """Allocates KV cache on the specified device."""
     kv_cache_shape = self.attn_backend.get_kv_cache_shape(
         num_blocks, self.block_size, self.num_kv_heads, self.head_size)
     pin_memory = is_pin_memory_available() if device == "cpu" else False
-    kv_cache: List[Tuple] = []
+    kv_cache: List[Any] = []
-    # Align entries so they are 256 byte aligned for better performance
-    # Primarily targets MLA as this typically only ends up having entries
-    # be 128 byte aligned.
-    alloc_shape = kv_cache_shape
+    additional_config = get_current_vllm_config().additional_config
+    if additional_config and additional_config.get("enable_graph_mode", False):
+        # Align entries so they are 256 byte aligned for better performance
+        # Primarily targets MLA as this typically only ends up having entries
+        # be 128 byte aligned.
+        alloc_shape = kv_cache_shape
-    for _ in range(self.num_attention_layers):
-        # null block in CpuGpuBlockAllocator requires at least that
-        # block to be zeroed-out.
-        # We zero-out everything for simplicity.
-        layer_kv_cache_nope = torch.zeros(
-            alloc_shape[:-1] +
-            (self.model_config.hf_text_config.kv_lora_rank, ),
-            dtype=self.dtype,
-            pin_memory=pin_memory,
-            device=device)
-        layer_kv_cache_pe = torch.zeros(
-            alloc_shape[:-1] +
-            (self.model_config.hf_text_config.qk_rope_head_dim, ),
-            dtype=self.dtype,
-            pin_memory=pin_memory,
-            device=device)
+        for _ in range(self.num_attention_layers):
+            # null block in CpuGpuBlockAllocator requires at least that
+            # block to be zeroed-out.
+            # We zero-out everything for simplicity.
+            layer_kv_cache_nope = torch.zeros(
+                alloc_shape[:-1] +
+                (self.model_config.hf_text_config.kv_lora_rank, ),
+                dtype=self.dtype,
+                pin_memory=pin_memory,
+                device=device)
+            layer_kv_cache_pe = torch.zeros(
+                alloc_shape[:-1] +
+                (self.model_config.hf_text_config.qk_rope_head_dim, ),
+                dtype=self.dtype,
+                pin_memory=pin_memory,
+                device=device)
-        # view back to (TOTAL_PAGES, PAGE_SIZE, entry_shape...) for cases
-        # when entry_shape is higher than 1D
-        kv_cache.append((layer_kv_cache_nope, layer_kv_cache_pe))
+            # view back to (TOTAL_PAGES, PAGE_SIZE, entry_shape...) for cases
+            # when entry_shape is higher than 1D
+            kv_cache.append((layer_kv_cache_nope, layer_kv_cache_pe))
+    else:
+        for _ in range(self.num_attention_layers):
+            # null block in CpuGpuBlockAllocator requires at least that
+            # block to be zeroed-out.
+            # We zero-out everything for simplicity.
+            layer_kv_cache = torch.zeros(kv_cache_shape,
+                                         dtype=self.dtype,
+                                         pin_memory=pin_memory,
+                                         device=device)
+            # view back to (TOTAL_PAGES, PAGE_SIZE, entry_shape...) for cases
+            # when entry_shape is higher than 1D
+            kv_cache.append(layer_kv_cache)
     return kv_cache

-if VLLM_ENABLE_GRAPH_MODE == '1':
-    CacheEngine._allocate_kv_cache = allocate_kv_cache
+CacheEngine._allocate_kv_cache = allocate_kv_cache
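
For context on the tuple layout above: with graph mode enabled, each layer's MLA cache is split into a latent ("nope") tensor sized by `kv_lora_rank` and a rotary ("pe") tensor sized by `qk_rope_head_dim`. A small shape sketch follows; the concrete numbers are assumptions in the style of DeepSeek-V2 configs, not values read from this repo (at runtime they come from `hf_text_config`).

```python
# Hedged shape math for the split MLA cache; all numbers are assumptions.
num_blocks, block_size, num_kv_heads, head_size = 1024, 128, 1, 576
kv_cache_shape = (num_blocks, block_size, num_kv_heads, head_size)

kv_lora_rank = 512      # latent ("nope") width per token (assumed)
qk_rope_head_dim = 64   # rotary ("pe") width per token (assumed)

nope_shape = kv_cache_shape[:-1] + (kv_lora_rank, )    # (1024, 128, 1, 512)
pe_shape = kv_cache_shape[:-1] + (qk_rope_head_dim, )  # (1024, 128, 1, 64)
```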


@@ -32,7 +32,7 @@ import torch_npu
 import vllm.envs as envs
 from vllm.attention import AttentionMetadata, get_attn_backend
 from vllm.attention.backends.utils import CommonAttentionState
-from vllm.config import CompilationLevel, VllmConfig
+from vllm.config import VllmConfig
 from vllm.core.scheduler import SchedulerOutputs
 from vllm.distributed import get_pp_group
 from vllm.forward_context import set_forward_context
@@ -56,7 +56,7 @@ from vllm.prompt_adapter.request import PromptAdapterRequest
 from vllm.sampling_params import SamplingParams
 from vllm.sequence import IntermediateTensors, SequenceGroupMetadata
 from vllm.utils import (DeviceMemoryProfiler, PyObjectCache, flatten_2d_lists,
-                        is_pin_memory_available, supports_dynamo)
+                        is_pin_memory_available)
 from vllm.worker.model_runner_base import (
     ModelRunnerBase, ModelRunnerInputBase, ModelRunnerInputBuilderBase,
     _add_attn_metadata_broadcastable_dict,
@@ -546,8 +546,7 @@ class ModelInputForNPUBuilder(ModelRunnerInputBuilderBase[ModelInputForNPU]):
         }
         # Add graph_pad_size here
-        if self.runner.vllm_config.compilation_config.level ==\
-            CompilationLevel.DYNAMO_AS_IS and supports_dynamo():
+        if self.runner.enable_graph_mode:
             graph_pad_size = self.runner.scheduler_config.max_num_seqs - len(
                 seq_lens)
         else:
@@ -609,8 +608,7 @@ class ModelInputForNPUBuilder(ModelRunnerInputBuilderBase[ModelInputForNPU]):
         ]
         multi_modal_kwargs = MultiModalKwargs.batch(multi_modal_kwargs_list)
-        if self.runner.vllm_config.compilation_config.level ==\
-            CompilationLevel.DYNAMO_AS_IS and supports_dynamo():
+        if self.runner.enable_graph_mode:
             torch._dynamo.mark_static(input_tokens_tensor)
             torch._dynamo.mark_static(input_positions_tensor)
             torch._dynamo.mark_static(attn_metadata.block_tables)
@@ -871,6 +869,12 @@ class NPUModelRunnerBase(ModelRunnerBase[TModelInputForNPU]):
         self.max_batchsize_to_capture = \
             self.vllm_config.compilation_config.max_capture_size
+        self.enable_graph_mode = False
+        additional_config = vllm_config.additional_config
+        if additional_config:
+            self.enable_graph_mode = additional_config.get(
+                "enable_graph_mode", False)
+
         self.has_inner_state = model_config.has_inner_state
         self.in_profile_run = False
@@ -971,8 +975,7 @@ class NPUModelRunnerBase(ModelRunnerBase[TModelInputForNPU]):
             self.model = self.lora_manager.create_lora_manager(self.model)

         # adapter torch compile with npu_backend
-        if self.vllm_config.compilation_config.level ==\
-            CompilationLevel.DYNAMO_AS_IS and supports_dynamo():
+        if self.enable_graph_mode:
             import torchair  # type: ignore
             from torchair import patch_for_hcom  # type: ignore
@@ -1279,15 +1282,12 @@ class NPUModelRunner(NPUModelRunnerBase[ModelInputForNPUWithSamplingMetadata]):
         self.attn_state.begin_forward(model_input)

         assert model_input.attn_metadata is not None
-        if self.vllm_config.compilation_config.level ==\
-            CompilationLevel.DYNAMO_AS_IS and supports_dynamo():
+        # TODO(zzzzwwjj): Do we need to do it every time?
+        if self.enable_graph_mode:
             torch._dynamo.mark_static(model_input.input_tokens)
             torch._dynamo.mark_static(model_input.input_positions)
             torch._dynamo.mark_static(model_input.attn_metadata.block_tables)
             torch._dynamo.mark_static(model_input.attn_metadata.slot_mapping)
-            torch._dynamo.mark_static(
-                model_input.attn_metadata.query_start_loc)
-            torch._dynamo.mark_static(model_input.attn_metadata.seq_start_loc)
             for kv in kv_caches:
                 if isinstance(kv, tuple):
                     torch._dynamo.mark_static(kv[0])
@@ -1298,7 +1298,7 @@ class NPUModelRunner(NPUModelRunnerBase[ModelInputForNPUWithSamplingMetadata]):
         virtual_engine = model_input.virtual_engine
         prefill_meta = model_input.attn_metadata.prefill_metadata
         previous_hidden_states = kwargs.get("previous_hidden_states")
-        if prefill_meta is None and self.vllm_config.compilation_config.level > 0:
+        if prefill_meta is None and self.enable_graph_mode:
             model_executable = self.compile_model
             # Note: graph_batch_size value not same as GPU
             graph_batch_size = model_input.input_tokens.shape[  # type: ignore
@@ -1341,9 +1341,8 @@ class NPUModelRunner(NPUModelRunnerBase[ModelInputForNPUWithSamplingMetadata]):
             "request_ids_to_seq_ids": model_input.request_ids_to_seq_ids,
         } if self.has_inner_state else {}
-        if self.vllm_config.compilation_config.level ==\
-            CompilationLevel.DYNAMO_AS_IS and supports_dynamo():
-            model_kwargs = {"inputs_embeds": None}
+        if self.enable_graph_mode:
+            model_kwargs: Dict[str, Any] = {"inputs_embeds": None}
         else:
             model_kwargs = {}
         if previous_hidden_states is not None:
@@ -1360,6 +1359,9 @@ class NPUModelRunner(NPUModelRunnerBase[ModelInputForNPUWithSamplingMetadata]):
                 self.vllm_config, virtual_engine):
             if model_input.attn_metadata is not None:
                 model_input.attn_metadata.input_positions = model_input.input_positions
+            if self.enable_graph_mode:
+                model_kwargs["kv_caches"] = kv_caches
+                model_kwargs["attn_metadata"] = model_input.attn_metadata
             hidden_or_intermediate_states = model_executable(
                 input_ids=model_input.input_tokens,
                 positions=model_input.input_positions,
@@ -1430,8 +1432,7 @@ class NPUModelRunner(NPUModelRunnerBase[ModelInputForNPUWithSamplingMetadata]):
             hidden_states = hidden_or_intermediate_states.index_select(
                 0, indices)
             output.prefill_hidden_states = hidden_or_intermediate_states
-        elif self.vllm_config.compilation_config.level == \
-            CompilationLevel.DYNAMO_AS_IS and supports_dynamo():
+        elif self.enable_graph_mode:
            hidden_states = hidden_or_intermediate_states[:len(indices)]
         else:
             hidden_states = hidden_or_intermediate_states
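
The repeated `torch._dynamo.mark_static` calls above are what keep the torchair graph from re-tracing: marking a tensor static tells Dynamo to treat its shape as fixed rather than re-specializing per batch. A minimal standalone sketch of the API, independent of this repo:

```python
import torch

x = torch.zeros(8, 128)
# Tell Dynamo to treat x's dimensions as constant, so a compiled graph
# that consumes this tensor is not re-traced on later calls.
torch._dynamo.mark_static(x)
```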


@@ -24,7 +24,7 @@ import torch
 import torch.distributed
 from torch import nn
 from vllm import envs
-from vllm.config import VllmConfig
+from vllm.config import VllmConfig, set_current_vllm_config
 from vllm.distributed import (ensure_model_parallel_initialized,
                               init_distributed_environment,
                               set_custom_all_reduce)
@@ -300,7 +300,8 @@ class NPUWorker(LocalOrDistributedWorkerBase):
             from contextlib import nullcontext
             context = nullcontext()  # type: ignore
         with context:
-            self._init_cache_engine()
+            with set_current_vllm_config(self.vllm_config):
+                self._init_cache_engine()
         self._warm_up_model()

     def _init_cache_engine(self):
@@ -511,10 +512,9 @@ class NPUWorker(LocalOrDistributedWorkerBase):
                                     parallel_config.tensor_parallel_size,
                                     parallel_config.pipeline_parallel_size)
         expert_tensor_parallel_size = 1
-        if additional_config is not None and hasattr(
-                additional_config, "expert_tensor_parallel_size"):
-            expert_tensor_parallel_size = getattr(
-                additional_config, "expert_tensor_parallel_size")
+        if additional_config:
+            expert_tensor_parallel_size = additional_config.get(
+                "expert_tensor_parallel_size", 1)
         init_ascend_model_parallel(parallel_config.tensor_parallel_size,
                                    parallel_config.pipeline_parallel_size,
                                    expert_tensor_parallel_size)
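
The `set_current_vllm_config` wrapper ties this worker change back to the cache-engine patch earlier in the commit: the patched `allocate_kv_cache` reads `additional_config` via `get_current_vllm_config()`, which only resolves inside this context manager. A hedged sketch of the pattern, using only names from `vllm.config`:

```python
from vllm.config import get_current_vllm_config, set_current_vllm_config

# Inside the context, code called transitively (e.g. CacheEngine.__init__,
# which invokes the patched _allocate_kv_cache) sees vllm_config as current.
with set_current_vllm_config(vllm_config):
    assert get_current_vllm_config() is vllm_config
```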