Main2main upgrade to vllm 0317 afternoon (#7409)

### What this PR does / why we need it?

1.fix "TypeError: get_attn_backend() remove variable": [Refactor
`check_and_update_config`](https://github.com/vllm-project/vllm/pull/35122)

2. Adapt to [Rename `compile_ranges_split_points` to
`compile_ranges_endpoints`](https://github.com/vllm-project/vllm/pull/36027).

3.fix "RuntimeError: device_allocator not a DeviceAllocator":[Replace
memory related torch.cuda
APIs"](https://github.com/vllm-project/vllm/pull/37031)

4. Adapt to [Support multiple KV groups in OffloadingSpec
](https://github.com/vllm-project/vllm/pull/36610), which removed
`self.offloaded_block_size`, changed `self.gpu_block_size` from a scalar
to a tuple of per-group block sizes, and added `block_size_factor` (see
the sketch after this list).

5. Adapt to [Consolidate
SupportsEagle](https://github.com/vllm-project/vllm/pull/36063), which renamed
`get_eagle3_aux_hidden_state_layers()` to
`get_eagle3_default_aux_hidden_state_layers()` and added a
`supports_eagle3()` guard before calling it (see the sketch after the diff
at the end).
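
To make item 4 concrete, here is a minimal sketch of the new block-size derivation (not this PR's code; attribute names follow upstream [#36610](https://github.com/vllm-project/vllm/pull/36610), and the single-KV-group assumption mirrors the assert in the diff below):

```python
# Sketch, not the PR's code: after upstream #36610, `gpu_block_size` is a
# tuple of per-KV-group block sizes and `offloaded_block_size` is gone, so
# the CPU-side block size must be derived via `block_size_factor`.
def derive_offloaded_block_size(spec) -> int:
    # vllm-ascend currently supports a single KV group only.
    assert len(spec.gpu_block_size) == 1
    gpu_block_size = spec.gpu_block_size[0]
    return gpu_block_size * spec.block_size_factor
```

For example, with `gpu_block_size = (128,)` and `block_size_factor = 2`, each offloaded CPU block holds 256 tokens.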

### Does this PR introduce _any_ user-facing change?

No.

### How was this patch tested?

E2E tests.


- vLLM version: v0.17.0
- vLLM main: 8a680463fa
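
Both code paths coexist because this repo tracks the pinned v0.17.0 release and current vLLM main at the same time; the diff below gates them with vllm-ascend's `vllm_version_is` helper. As a rough illustration of the idea (this is not the helper's actual implementation):

```python
# Illustrative sketch only -- the real helper lives in vllm_ascend.utils
# and may differ in details (e.g. it could honor an override env var).
from importlib.metadata import version


def vllm_version_is(target: str) -> bool:
    # True when the installed vLLM distribution matches `target`,
    # e.g. vllm_version_is("0.17.0").
    return version("vllm") == target
```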

---------

Signed-off-by: leo-pony <nengjunma@outlook.com>
Co-authored-by: Claude Code <noreply@anthropic.com>

@@ -12,6 +12,7 @@ from vllm.v1.kv_offload.spec import OffloadingSpec
 from vllm.v1.kv_offload.worker.worker import OffloadingHandler
 
 from vllm_ascend.kv_offload.cpu_npu import CpuNpuOffloadingHandler
+from vllm_ascend.utils import vllm_version_is
 
 
 class NPUOffloadingSpec(OffloadingSpec):
@@ -31,12 +32,23 @@ class NPUOffloadingSpec(OffloadingSpec):
 
     def get_manager(self) -> OffloadingManager:
         if not self._manager:
-            kv_events_config = self.vllm_config.kv_events_config
-            enable_events = kv_events_config is not None and kv_events_config.enable_kv_cache_events
-            self._manager = LRUOffloadingManager(
-                CPUBackend(block_size=self.offloaded_block_size, num_blocks=self.num_cpu_blocks),
-                enable_events=enable_events,
-            )
+            if vllm_version_is("0.17.0"):
+                kv_events_config = self.vllm_config.kv_events_config
+                enable_events = kv_events_config is not None and kv_events_config.enable_kv_cache_events
+                self._manager = LRUOffloadingManager(
+                    CPUBackend(block_size=self.offloaded_block_size, num_blocks=self.num_cpu_blocks),
+                    enable_events=enable_events,
+                )
+            else:
+                kv_events_config = self.vllm_config.kv_events_config
+                enable_events = kv_events_config is not None and kv_events_config.enable_kv_cache_events
+                assert len(self.gpu_block_size) == 1
+                gpu_block_size = self.gpu_block_size[0]
+                offloaded_block_size = gpu_block_size * self.block_size_factor
+                self._manager = LRUOffloadingManager(
+                    CPUBackend(block_size=offloaded_block_size, num_blocks=self.num_cpu_blocks),
+                    enable_events=enable_events,
+                )
         return self._manager
 
     def get_handlers(
@@ -45,13 +57,24 @@ class NPUOffloadingSpec(OffloadingSpec):
         attn_backends: dict[str, type[AttentionBackend]],
     ) -> Iterator[tuple[type[LoadStoreSpec], type[LoadStoreSpec], OffloadingHandler]]:
         if not self._handler:
-            self._handler = CpuNpuOffloadingHandler(
-                attn_backends=attn_backends,
-                gpu_block_size=self.gpu_block_size,
-                cpu_block_size=self.offloaded_block_size,
-                num_cpu_blocks=self.num_cpu_blocks,
-                gpu_caches=kv_caches,
-            )
+            if vllm_version_is("0.17.0"):
+                self._handler = CpuNpuOffloadingHandler(
+                    attn_backends=attn_backends,
+                    gpu_block_size=self.gpu_block_size,
+                    cpu_block_size=self.offloaded_block_size,
+                    num_cpu_blocks=self.num_cpu_blocks,
+                    gpu_caches=kv_caches,
+                )
+            else:
+                assert len(self.gpu_block_size) == 1
+                gpu_block_size = self.gpu_block_size[0]
+                self._handler = CpuNpuOffloadingHandler(
+                    attn_backends=attn_backends,
+                    gpu_block_size=gpu_block_size,
+                    cpu_block_size=gpu_block_size * self.block_size_factor,
+                    num_cpu_blocks=self.num_cpu_blocks,
+                    gpu_caches=kv_caches,
+                )
 
         assert self._handler is not None
         yield GPULoadStoreSpec, CPULoadStoreSpec, self._handler
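
The EAGLE-3 adaptation from item 5 lands in other files; for completeness, here is a minimal sketch of the guarded call pattern it describes (assuming `supports_eagle3` is importable from `vllm.model_executor.models.interfaces`, as in recent vLLM; `model` is a placeholder for a loaded model instance, not a name from this PR):

```python
from vllm.model_executor.models.interfaces import supports_eagle3

# Sketch of item 5's adaptation: guard the renamed accessor so models
# without EAGLE-3 support no longer crash. `model` is hypothetical here.
if supports_eagle3(model):
    aux_layers = model.get_eagle3_default_aux_hidden_state_layers()
else:
    aux_layers = ()
```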