[Main2Main] Upgrade vllm commit to 0105 (#5595)

### What this PR does / why we need it?

Upgrade vllm commit to 0105 (8be6432bdaf6275664d857b1e5e9bf8ed1ce299e)

1. Remove the `maybe_padded_num_tokens` arg in `model_runner_v1.py`, since
https://github.com/vllm-project/vllm/pull/31517 deleted this unused arg

2. Remove the dense model `Qwen/Qwen3-0.6B` from
`tests/e2e/multicard/test_aclgraph_capture_replay.py` and
`tests/e2e/multicard/test_data_parallel.py`, since after
https://github.com/vllm-project/vllm/pull/30739 offline data parallel mode
is no longer supported (or useful) for dense models

3. Adapt `vllm_ascend/worker/worker.py` due to
https://github.com/vllm-project/vllm/pull/31584

4. Adapt how `self.block_size` is accessed, following
https://github.com/vllm-project/vllm/pull/31540 (a sketch follows this list)

5. Modify `test_mla_v1.py` due to
https://github.com/vllm-project/vllm/pull/28454, which refactored
`get_head_size()`
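
For item 5, the test now gates the expected MLA head size on the installed
vLLM version. A minimal sketch of the pattern, with the values taken from the
`test_mla_v1.py` diff below:

```python
from unittest.mock import MagicMock

from vllm_ascend.utils import vllm_version_is

kv_cache_spec = MagicMock()
# On vLLM v0.13.0 the mocked MLA kv-cache spec still reports head_size 128;
# after vllm-project/vllm#28454 refactored get_head_size(), the test
# expects 64 instead.
kv_cache_spec.head_size = 128 if vllm_version_is("0.13.0") else 64
```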
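
For item 4, the removed `proposer.block_size` assertions (see the eagle/MTP
proposer diffs below) suggest the proposers no longer cache a `block_size`
attribute. A hedged, hypothetical sketch of one way such an access can be
adapted — assuming the value is read from the shared
`vllm_config.cache_config` instead; the actual mechanism in
vllm-project/vllm#31540 may differ:

```python
# Hypothetical sketch only, not the actual vLLM change: rather than caching
# self.block_size at construction time, derive it on demand from the config
# object the component already holds.
class ProposerSketch:

    def __init__(self, vllm_config):
        self.vllm_config = vllm_config

    @property
    def block_size(self) -> int:
        # cache_config.block_size is the KV-cache block size in tokens.
        return self.vllm_config.cache_config.block_size
```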

### Does this PR introduce _any_ user-facing change?

### How was this patch tested?

- vLLM version: v0.13.0
- vLLM main: 7157596103

Signed-off-by: wjunLu <wjunlu217@gmail.com>
wjunLu committed via GitHub on 2026-01-06 08:44:29 +08:00
parent c5e2f48510 · commit 3cf059a72b
15 changed files with 61 additions and 38 deletions

```diff
@@ -28,7 +28,8 @@ from vllm.utils.network_utils import get_open_port
 from vllm_ascend.utils import AscendDeviceType, get_ascend_device_type
 MODELS = [
-    "Qwen/Qwen3-0.6B",
+    # Offline data parallel mode will be not supported/useful for dense models
+    # "Qwen/Qwen3-0.6B",
     "vllm-ascend/DeepSeek-V2-Lite-W8A8",
 ]
```

```diff
@@ -27,9 +27,7 @@ from unittest.mock import patch
 import pytest
-MODELS = [
-    "Qwen/Qwen3-0.6B", "Qwen/Qwen3-30B-A3B", "vllm-ascend/Qwen3-30B-A3B-W8A8"
-]
+MODELS = ["Qwen/Qwen3-30B-A3B", "vllm-ascend/Qwen3-30B-A3B-W8A8"]
 @pytest.mark.parametrize("model", MODELS)
```

```diff
@@ -9,7 +9,7 @@ from unittest.mock import patch
 import pytest
-MODELS = ["Qwen/Qwen3-0.6B"]
+MODELS = ["Qwen/Qwen3-30B-A3B"]
 @pytest.mark.parametrize("model", MODELS)
```

```diff
@@ -17,6 +17,7 @@ from vllm_ascend.attention.mla_v1 import (AscendMLABackend,
                                           AscendMLAPrefillMetadata,
                                           ChunkedContextMetadata)
 from vllm_ascend.attention.utils import AscendCommonAttentionMetadata
+from vllm_ascend.utils import vllm_version_is
 class TestAscendMLABackend(TestBase):
@@ -392,7 +393,10 @@ class TestAscendMLAMetadataBuilderBuild(TestBase):
         self.mock_vllm_config.model_config = model_config
         self.kv_cache_spec = MagicMock()
         self.kv_cache_spec.num_layers = 32
-        self.kv_cache_spec.head_size = 128
+        if vllm_version_is('0.13.0'):
+            self.kv_cache_spec.head_size = 128
+        else:
+            self.kv_cache_spec.head_size = 64
         self.kv_cache_spec.num_heads = 32
     @patch("vllm_ascend.attention.mla_v1.get_cos_and_sin_mla")
```

```diff
@@ -18,13 +18,6 @@
 import sys
 from unittest.mock import MagicMock
-from vllm_ascend.utils import adapt_patch  # noqa E402
-from vllm_ascend.utils import register_ascend_customop
-# triton and torch_npu is not available in the environment, so we need to mock them
-sys.modules['torch_npu'].npu.current_device = MagicMock(return_value=0)
-sys.modules['torch_npu._inductor'] = MagicMock()
 triton_runtime = MagicMock()
 triton_runtime.driver.active.utils.get_device_properties.return_value = {
     'num_aic': 8,
@@ -32,6 +25,13 @@ triton_runtime.driver.active.utils.get_device_properties.return_value = {
 }
 sys.modules['triton.runtime'] = triton_runtime
+from vllm_ascend.utils import adapt_patch  # noqa E402
+from vllm_ascend.utils import register_ascend_customop  # noqa E402
+# triton and torch_npu is not available in the environment, so we need to mock them
+sys.modules['torch_npu'].npu.current_device = MagicMock(return_value=0)
+sys.modules['torch_npu._inductor'] = MagicMock()
 adapt_patch()
 adapt_patch(True)
```
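
The reordering above follows the usual rule for `sys.modules`-based stubbing:
the fakes must be installed before importing anything that transitively
imports the real packages, which is why the `vllm_ascend.utils` imports moved
below the triton mocks. A minimal sketch of the pattern:

```python
import sys
from unittest.mock import MagicMock

# Install the stubs first: torch_npu and triton are unavailable in the test
# environment, so anything importing them must see these fakes instead.
sys.modules['torch_npu'] = MagicMock()
sys.modules['triton.runtime'] = MagicMock()

# Only now is it safe to import code that depends on the mocked modules.
from vllm_ascend.utils import adapt_patch  # noqa: E402
```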

```diff
@@ -58,7 +58,6 @@ class TestEagleProposerInitialization(TestBase):
                                  device=self.device,
                                  runner=self.runner)
-        self.assertEqual(proposer.block_size, 16)
         self.assertEqual(proposer.hidden_size, 4096)
         self.assertTrue(proposer.use_cuda_graph)
```

```diff
@@ -86,7 +86,6 @@ class TestMtpProposer:
         assert proposer.dtype == torch.float16
         assert proposer.num_speculative_tokens == 2
         assert proposer.hidden_size == 4096
-        assert proposer.block_size == 16
         # Test with mrope enabled
         assert hasattr(proposer, "positions")
```