[Main2Main] Upgrade vllm commit to 0113 (#5839)

### What this PR does / why we need it?
Upgrade vllm commit to 0113 (11b6af5280d6d6dfb8953af16e67b25f819b3be9)

- Modify import paths due to the refactors
https://github.com/vllm-project/vllm/pull/31916
https://github.com/vllm-project/vllm/pull/32054

- Fix `TypeError: NPUOffloadingSpec.__init__() takes 2 positional
arguments but 3 were given` due to
https://github.com/vllm-project/vllm/pull/24498

- Skip the async-scheduling tests in
`tests/e2e/multicard/4-cards/long_sequence/test_mtp.py`, which are never
verified
https://github.com/vllm-project/vllm/pull/31998

- Skip some pooling tests, which are caused by
https://github.com/vllm-project/vllm/pull/32148
where vllm is also failed
https://buildkite.com/vllm/ci/builds/46705/steps/canvas?jid=019bb329-3834-4685-862b-1613b8e0f5d4

We will reopen those tests when main2main reachs
https://github.com/vllm-project/vllm/pull/32243

- Skip some cases in
`tests/e2e/multicard/4-cards/long_sequence/test_mtp.py`, which are
broken by
https://github.com/vllm-project/vllm/pull/32118

### Does this PR introduce _any_ user-facing change?

### How was this patch tested?

- vLLM version: v0.13.0
- vLLM main:
2f4e6548ef

Signed-off-by: wjunLu <wjunlu217@gmail.com>
Signed-off-by: hfadzxy <starmoon_zhang@163.com>
Co-authored-by: hfadzxy <starmoon_zhang@163.com>
This commit is contained in:
wjunLu
2026-01-15 09:48:53 +08:00
committed by GitHub
parent e67608041d
commit c11a05c4e1
29 changed files with 229 additions and 54 deletions

View File

@@ -136,14 +136,11 @@ class TestAscendAttentionBackendImpl(TestBase):
self.layer.layer_name = "test_layer"
self.layer._k_scale_float = 1.0
self.layer._v_scale_float = 1.0
self.attention_type = MagicMock()
self.attention_type.DECODER = "decoder"
self.attention_type.ENCODER = "encoder"
self.attn_metadata = MagicMock()
self.attn_metadata.return_value = "1"
self.layer_no_quant = MagicMock(
spec=['layer_name', '_k_scale_float', '_v_scale_float'])
self.layer_no_quant.layer_name = "test_layer"

View File

@@ -380,6 +380,7 @@ class TestAscendDeepseekScalingRotaryEmbedding(TestBase):
class TestAscendMRotaryEmbedding(unittest.TestCase):
def setUp(self):
# Common setup for tests
self.config_patcher = patch('vllm.config.vllm.get_current_vllm_config')
self.mock_get_config = self.config_patcher.start()
mock_config = MagicMock()

View File

@@ -3,14 +3,21 @@ from unittest.mock import MagicMock, patch
import pytest
import torch
from vllm.attention.selector import AttentionSelectorConfig
from vllm.config.compilation import CompilationMode, CUDAGraphMode
from vllm.platforms import PlatformEnum
from tests.ut.base import TestBase
from vllm_ascend.platform import NPUPlatform
from vllm_ascend.utils import (ASCEND_QUANTIZATION_METHOD,
COMPRESSED_TENSORS_METHOD, AscendDeviceType)
COMPRESSED_TENSORS_METHOD, AscendDeviceType,
vllm_version_is)
# isort: off
if vllm_version_is('0.13.0'):
from vllm.attention.selector import AttentionSelectorConfig # type: ignore
else:
from vllm.v1.attention.selector import AttentionSelectorConfig # type: ignore
# isort: on
class TestNPUPlatform(TestBase):
@@ -37,6 +44,9 @@ class TestNPUPlatform(TestBase):
def setUp(self):
self.platform = NPUPlatform()
self.platform.supported_quantization[:] = [
"ascend", "compressed-tensors"
]
def test_class_variables(self):
self.assertEqual(NPUPlatform._enum, PlatformEnum.OOT)