Main2main upgrade to vllm 0317 afternoon (#7409)

### What this PR does / why we need it?

1.fix "TypeError: get_attn_backend() remove variable": [Refactor
`check_and_update_config`](https://github.com/vllm-project/vllm/pull/35122)

2. Adapt to [Rename `compile_ranges_split_points` to `compile_ranges_endpoints`](https://github.com/vllm-project/vllm/pull/36027); a version-agnostic reader is sketched below.
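Since only the field name changed, the plugin side can stay version-agnostic by probing for either name. A minimal sketch under that assumption; `compilation_config` and the helper name are illustrative, only the two attribute names come from the upstream rename:

```python
# Hedged sketch: fetch the compile-range boundaries under whichever
# name the installed vLLM exposes. `compilation_config` is a stand-in
# for wherever vLLM keeps this field.
def get_compile_range_endpoints(compilation_config):
    for name in ("compile_ranges_endpoints", "compile_ranges_split_points"):
        if hasattr(compilation_config, name):
            return getattr(compilation_config, name)
    return None
```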

3.fix "RuntimeError: device_allocator not a DeviceAllocator":[Replace
memory related torch.cuda
APIs"](https://github.com/vllm-project/vllm/pull/37031)

4. Adapt to [Support multiple KV groups in OffloadingSpec](https://github.com/vllm-project/vllm/pull/36610), which removed `self.offloaded_block_size`, changed `self.gpu_block_size` from a scalar to a tuple of per-group block sizes, and added `block_size_factor`; a sketch follows.
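A minimal sketch of the new shape, assuming only what the upstream PR describes; the class and method here are illustrative, not vLLM's actual `OffloadingSpec`:

```python
# Hedged sketch: gpu_block_size is now a tuple with one entry per KV
# group, and the removed offloaded_block_size is derived through
# block_size_factor. Only the attribute names come from the upstream
# change; the class itself is illustrative.
class OffloadingBlockSizes:
    def __init__(self, gpu_block_size, block_size_factor: int):
        if isinstance(gpu_block_size, int):
            # Tolerate the pre-change scalar form.
            gpu_block_size = (gpu_block_size,)
        self.gpu_block_size = tuple(gpu_block_size)
        self.block_size_factor = block_size_factor

    def offloaded_block_size(self, group: int) -> int:
        # Derived rather than stored, since the attribute was removed.
        return self.gpu_block_size[group] * self.block_size_factor
```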

5. Adapt to [Consolidate SupportsEagle](https://github.com/vllm-project/vllm/pull/36063), which renamed `get_eagle3_aux_hidden_state_layers()` to `get_eagle3_default_aux_hidden_state_layers()`; this PR adds a `supports_eagle3()` guard before calling it (see the second hunk below).
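All five adaptations follow the same pattern: branch on the installed vLLM version with `vllm_ascend.utils.vllm_version_is` and keep the released-0.17.0 path intact, as the two hunks below show. A sketch of that pattern; the helper body is an assumed simplification of the real utility:

```python
# Hedged sketch of the gating pattern used across the changed files.
# vllm_version_is is the real vllm_ascend.utils helper; this body is an
# assumed simplification (plain equality against vllm.__version__).
import vllm

def vllm_version_is(target: str) -> bool:
    return vllm.__version__ == target

if vllm_version_is("0.17.0"):
    ...  # released-0.17.0 API, e.g. get_attn_backend(..., block_size, ...)
else:
    ...  # vLLM main API, block_size argument removed
```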

### Does this PR introduce _any_ user-facing change?
No.
### How was this patch tested?
E2E tests.


- vLLM version: v0.17.0
- vLLM main: 8a680463fa

---------

Signed-off-by: leo-pony <nengjunma@outlook.com>
Co-authored-by: Claude Code <noreply@anthropic.com>

@@ -292,16 +292,27 @@ class NPUModelRunner(GPUModelRunner):
         if self.use_sparse_c8_indexer:
             self.c8_k_cache_dtype = torch.int8
             self.c8_k_scale_cache_dtype = torch.float16
-        self.attn_backend = get_attn_backend(
-            0,
-            self.dtype,
-            None,
-            self.block_size,
-            use_mla=self.model_config.use_mla,
-            use_sparse=self.use_sparse,
-            use_mm_prefix=self.model_config is not None and self.model_config.is_mm_prefix_lm,
-        )
+        from vllm_ascend.utils import vllm_version_is
+        if vllm_version_is("0.17.0"):
+            self.attn_backend = get_attn_backend(
+                0,
+                self.dtype,
+                None,
+                self.block_size,
+                use_mla=self.model_config.use_mla,
+                use_sparse=self.use_sparse,
+                use_mm_prefix=self.model_config is not None and self.model_config.is_mm_prefix_lm,
+            )
+        else:
+            self.attn_backend = get_attn_backend(
+                0,
+                self.dtype,
+                None,
+                use_mla=self.model_config.use_mla,
+                use_sparse=self.use_sparse,
+                use_mm_prefix=self.model_config is not None and self.model_config.is_mm_prefix_lm,
+            )
         try:
             self.dcp_size = get_dcp_group().world_size
@@ -2553,7 +2564,17 @@ class NPUModelRunner(GPUModelRunner):
         with get_tp_context(self.drafter):
             self.drafter.load_model(self.model)
         if self.use_aux_hidden_state_outputs:
-            self.model.set_aux_hidden_state_layers(self.model.get_eagle3_aux_hidden_state_layers())
+            if vllm_version_is("0.17.0"):
+                self.model.set_aux_hidden_state_layers(self.model.get_eagle3_aux_hidden_state_layers())
+            else:
+                from vllm.model_executor.models.interfaces import supports_eagle3
+                if not supports_eagle3(self.model):
+                    raise RuntimeError(
+                        "Model does not support EAGLE3 interface but "
+                        "aux_hidden_state_outputs was requested"
+                    )
+                aux_layers = self.model.get_eagle3_default_aux_hidden_state_layers()
+                self.model.set_aux_hidden_state_layers(aux_layers)
         if self.lora_config:
             self.model = self.load_lora_model(self.model, self.vllm_config, self.device)