This reverts commit d886b81971, which breaks the PD (prefill/decode disaggregation) function.

- vLLM version: v0.13.0
- vLLM main: bde38c11df

Signed-off-by: zhaomingyu <zhaomingyu13@h-partners.com>
@@ -130,27 +130,6 @@ class EagleProposer(VllmEagleProposer):
         self.use_sparse = hasattr(vllm_config.model_config.hf_text_config,
                                   "index_topk")
-        # NOTE:
-        # `draft_tensor_parallel_size` does not take effect for Eagle:
-        # the draft model uses the same TP size as the target model in practice.
-        # so we applied this patch to set tp=1 of draft model separately.
-        # Due to verification of `_verify_and_get_draft_tp` in vllm,
-        # the value of `draft_tensor_parallel_size` here will either be 1 separately
-        # or the same as target model.
-        # TODO(zhaomingyu13): If we want to adapt to the case where draft model tp
-        # is not 1 and differs from target model, this part should be rewritten.
-        if (vllm_config.parallel_config.tensor_parallel_size
-                != self.speculative_config.draft_tensor_parallel_size):
-            tp_group = init_model_parallel_group(
-                [[get_world_group().rank]],
-                get_world_group().rank,
-                torch.distributed.get_backend(get_world_group().device_group),
-                use_message_queue_broadcaster=True,
-                group_name="tp",
-            )
-            self.tp_group_context = patch_tensor_parallel_group(tp_group)
-        else:
-            self.tp_group_context = nullcontext()
 
         # TODO: Remove it when the bug of fx-graph is solved
         self.maybe_eager_context: ContextManager[Any] = nullcontext()
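For context on what this first hunk removes: the reverted patch chose, at construction time, between a context manager that swaps in a tp=1 group and a plain nullcontext(). Below is a minimal, self-contained sketch of that select-a-context pattern; patch_tp_size and _CURRENT_TP_SIZE are illustrative stand-ins for vLLM's patch_tensor_parallel_group machinery, not the real API.

from contextlib import contextmanager, nullcontext

# Toy stand-in for the process-group state that vLLM's
# patch_tensor_parallel_group() swaps out for the drafter.
_CURRENT_TP_SIZE = 8

@contextmanager
def patch_tp_size(tp_size: int):
    """Temporarily override the visible TP size; restore it on exit."""
    global _CURRENT_TP_SIZE
    old = _CURRENT_TP_SIZE
    _CURRENT_TP_SIZE = tp_size
    try:
        yield
    finally:
        _CURRENT_TP_SIZE = old

def make_drafter_tp_context(target_tp: int, draft_tp: int):
    # Mirrors the reverted branch: patch only when the draft model's TP
    # differs from the target model's; otherwise hand back a no-op context.
    return patch_tp_size(draft_tp) if draft_tp != target_tp else nullcontext()

with make_drafter_tp_context(target_tp=8, draft_tp=1):
    print(_CURRENT_TP_SIZE)  # 1 while the drafter would be loading
print(_CURRENT_TP_SIZE)      # back to 8 afterwards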
@@ -165,10 +165,6 @@ def graph_capture(device: torch.device):
     yield graph_capture_context
 
 
-def get_tp_context(drafter):
-    return getattr(drafter, "tp_group_context", nullcontext())
-
-
 class ExecuteModelState(NamedTuple):
     """Ephemeral cached state transferred between execute_model() and
     sample_tokens(), after execute_model() returns None."""
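A side note on the removed get_tp_context helper: it leans on getattr's default argument so call sites never have to special-case drafters created without the patched group. A short sketch of that fallback behavior (the _PlainDrafter class is hypothetical):

from contextlib import nullcontext

class _PlainDrafter:
    pass  # hypothetical drafter built without a tp_group_context attribute

def get_tp_context(drafter):
    # Same shape as the removed helper: fall back to a no-op context
    # when the drafter carries no patched TP group.
    return getattr(drafter, "tp_group_context", nullcontext())

with get_tp_context(_PlainDrafter()):
    pass  # drafter.load_model(...) would run here either way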
@@ -2326,8 +2322,7 @@ class NPUModelRunner(GPUModelRunner):
         model_register(self.model, self.model_config)
         if self.drafter:
             logger.info("Loading drafter model...")
-            with get_tp_context(self.drafter):
-                self.drafter.load_model(self.model)
+            self.drafter.load_model(self.model)
             if self.use_aux_hidden_state_outputs:
                 self.model.set_aux_hidden_state_layers(
                     self.model.get_eagle3_aux_hidden_state_layers())
@@ -2703,15 +2698,11 @@ class NPUModelRunner(GPUModelRunner):
         kernel_block_sizes = []
         for kv_cache_group_id, kv_cache_group in enumerate(
                 kv_cache_config.kv_cache_groups):
-            kv_cache_spec = kv_cache_group.kv_cache_spec
-            if isinstance(kv_cache_spec, UniformTypeKVCacheSpecs):
-                # All layers in the UniformTypeKVCacheSpecs have the same type,
-                # Pick an arbitrary one to dispatch.
-                kv_cache_spec = next(
-                    iter(kv_cache_spec.kv_cache_specs.values()))
-            if isinstance(kv_cache_spec, EncoderOnlyAttentionSpec):
+            if isinstance(kv_cache_group.kv_cache_spec,
+                          EncoderOnlyAttentionSpec):
                 continue
-            elif isinstance(kv_cache_spec, AttentionSpec):
+            elif isinstance(kv_cache_group.kv_cache_spec, AttentionSpec):
                 # This is an attention backend that supports virtual
                 # block splitting. Get the supported block sizes from
                 # the backend.
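The last hunk also removes the UniformTypeKVCacheSpecs unwrapping, whose comment explains the trick: every per-layer spec in such a group shares one type, so any member can represent the group for isinstance() dispatch. A hedged sketch of that idea, with minimal stand-in classes rather than vLLM's real kv_cache_interface types:

from dataclasses import dataclass, field

@dataclass
class AttentionSpec:
    block_size: int = 128

@dataclass
class UniformTypeKVCacheSpecs:
    # Maps layer name -> per-layer spec; all values share one type.
    kv_cache_specs: dict = field(default_factory=dict)

def resolve_spec(spec):
    if isinstance(spec, UniformTypeKVCacheSpecs):
        # All members have the same type, so an arbitrary one suffices
        # for dispatch (the removed next(iter(...)) line).
        spec = next(iter(spec.kv_cache_specs.values()))
    return spec

group = UniformTypeKVCacheSpecs({"layer.0": AttentionSpec(),
                                 "layer.1": AttentionSpec()})
assert isinstance(resolve_spec(group), AttentionSpec)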