[BugFix]Fix incorrect get_current_vllm_config (#5121)

### What this PR does / why we need it?
This PR fixes some incorrect calls to `get_current_vllm_config`, which
return an empty `vllm_config` instead of the active one.

### Does this PR introduce _any_ user-facing change?
No.

### How was this patch tested?

- vLLM version: v0.12.0
- vLLM main:
ad32e3e19c

---------

Signed-off-by: Angazenn <supperccell@163.com>
This commit is contained in:
Angazenn
2025-12-18 22:21:36 +08:00
committed by GitHub
parent fd9a47c04d
commit 632eab28b7
6 changed files with 12 additions and 15 deletions

View File

@@ -1,10 +1,9 @@
from dataclasses import dataclass
from functools import lru_cache
from typing import Any, List, Optional
import torch
import torch.nn.functional as F
from vllm.config import get_current_vllm_config
from vllm.config import VllmConfig
from vllm.distributed.kv_transfer import (get_kv_transfer_group,
has_kv_transfer_group,
is_v1_kv_transfer_group)
@@ -14,9 +13,7 @@ from vllm_ascend.utils import (AscendDeviceType, get_ascend_config,
get_ascend_device_type)
@lru_cache
def using_paged_attention(runtime_shape: int) -> bool:
vllm_config = get_current_vllm_config()
def using_paged_attention(runtime_shape: int, vllm_config: VllmConfig) -> bool:
if vllm_config.speculative_config is not None:
return False
if get_ascend_device_type() == AscendDeviceType.A5: