[Main2Main] Upgrade vllm commit to 0109 (#5752)

### What this PR does / why we need it?
Upgrade vllm commit to 0109 (bde38c11df0ea066a740efe9b77fff5418be45df)

1. remove `init_cached_hf_modules` due to https://github.com/vllm-project/vllm/pull/31786
2. fix the spec_decode e2e test, which was broken by https://github.com/vllm-project/vllm/pull/29821
3. fix `vllm.v1.attention.backends.utils` due to https://github.com/vllm-project/vllm/pull/31891
4. fix `self.seq_lens - query_lens` so both tensors are on the same device, due to https://github.com/vllm-project/vllm/pull/31773 (see the sketch below this list)
5. skip the model_runner_v2 e2e test due to `'_OpNamespace' '_C' object has no attribute 'get_cuda_view_from_cpu_tensor'`
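
A minimal sketch of the device-alignment issue behind item 4 (tensor names and values are illustrative, not the exact vLLM code):

```python
import torch

# Illustrative tensors: in vLLM these come from attention metadata and can
# end up on different devices (e.g. accelerator vs. CPU) before this fix.
seq_lens = torch.tensor([8, 16, 32])
query_lens = torch.tensor([1, 1, 1]).to(seq_lens.device)  # align devices first

# Subtracting tensors that live on different devices raises a RuntimeError,
# so the difference is only computed once both are on the same device.
num_computed_tokens = seq_lens - query_lens
```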

- vLLM version: v0.13.0
- vLLM main: 2f4e6548ef

Signed-off-by: hfadzxy <starmoon_zhang@163.com>
Author: zhangxinyuehfad
Date: 2026-01-13 19:14:43 +08:00
Committed by: GitHub
Parent: eed9e366a7
Commit: f7b904641e
21 changed files with 203 additions and 38 deletions


```python
@@ -22,6 +22,23 @@ from vllm_ascend.utils import vllm_version_is
class TestAscendMLABackend(TestBase):

    def setUp(self):
        # Mock vLLM config with context-parallel sizes of 1 so enable_cp()
        # evaluates against a known, CP-disabled configuration.
        self.mock_config = MagicMock()
        mock_parallel_config = MagicMock()
        mock_parallel_config.prefill_context_parallel_size = 1
        mock_parallel_config.decode_context_parallel_size = 1
        self.mock_config.parallel_config = mock_parallel_config
        self.utils_patcher = patch(
            'vllm_ascend.attention.utils.get_current_vllm_config',
            return_value=self.mock_config)
        self.utils_patcher.start()
        # enable_cp caches its result, so clear it to pick up the patched config.
        from vllm_ascend.attention.utils import enable_cp
        enable_cp.cache_clear()

    def test_get_name(self):
        self.assertEqual(AscendMLABackend.get_name(), "ASCEND_MLA")
```
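
For context, a self-contained sketch of the pattern the new `setUp()` relies on (illustrative names, not the vllm_ascend implementation): a memoized helper that reads a patched config getter must have its cache cleared, otherwise a result cached before the patch would leak between tests.

```python
from functools import lru_cache
from unittest.mock import MagicMock, patch


def get_current_config():
    """Stand-in for the real config getter that tests patch out."""
    raise RuntimeError("only available inside a running engine")


@lru_cache(maxsize=1)
def enable_cp() -> bool:
    """Memoized helper, analogous in shape to enable_cp() in the diff above."""
    cfg = get_current_config()
    return cfg.parallel_config.prefill_context_parallel_size > 1


mock_cfg = MagicMock()
mock_cfg.parallel_config.prefill_context_parallel_size = 1

with patch(f"{__name__}.get_current_config", return_value=mock_cfg):
    # Without cache_clear(), a value memoized by an earlier caller would be
    # returned even though the config getter has since been patched.
    enable_cp.cache_clear()
    assert enable_cp() is False
```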