[Main2Main] Upgrade vllm commit to 0109 (#5752)
### What this PR does / why we need it?
Upgrade vllm commit to 0109 (bde38c11df0ea066a740efe9b77fff5418be45df)
1. remove `init_cached_hf_modules` due to
https://github.com/vllm-project/vllm/pull/31786
2. fix the spec_decode e2e test broken by
https://github.com/vllm-project/vllm/pull/29821
3. fix `vllm.v1.attention.backends.utils` due to
https://github.com/vllm-project/vllm/pull/31891
4. fix `self.seq_lens - query_lens` so both tensors are on the same device, due to
https://github.com/vllm-project/vllm/pull/31773 (see the sketch below the list)
5. skip the model_runner_v2 e2e test due to `'_OpNamespace' '_C' object has
no attribute 'get_cuda_view_from_cpu_tensor'` (see the skip sketch below the list)
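
A minimal sketch of the device alignment behind item 4; `context_lens` is a hypothetical helper for illustration, not the actual code touched in this commit:

```python
import torch


def context_lens(seq_lens: torch.Tensor,
                 query_lens: torch.Tensor) -> torch.Tensor:
    # Tensors on different devices cannot be subtracted directly, so move
    # query_lens onto seq_lens' device first (a no-op when they already
    # match).
    return seq_lens - query_lens.to(seq_lens.device)


# Runs on CPU as-is; in the attention metadata builder, seq_lens would live
# on the NPU while query_lens may arrive as a CPU tensor.
print(context_lens(torch.tensor([8, 16, 32]), torch.tensor([1, 4, 2])))
```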
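
And a hedged sketch of the skip in item 5, showing how such an e2e test is typically marked; the test name here is illustrative, and the actual test id and decorator placement in the commit may differ:

```python
import pytest


# The reason string quotes the failure observed on the new vLLM commit.
@pytest.mark.skip(reason="'_OpNamespace' '_C' object has no attribute "
                  "'get_cuda_view_from_cpu_tensor'")
def test_model_runner_v2():
    ...
```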
- vLLM version: v0.13.0
- vLLM main: 2f4e6548ef
Signed-off-by: hfadzxy <starmoon_zhang@163.com>
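
The test diffs below all follow the same pattern: patch `get_current_vllm_config` with a mock and clear the `lru_cache` on `enable_cp` (or `enable_dsa_cp`) so one test's cached result cannot leak into another. A minimal self-contained illustration of why the cache must be cleared; the `enable_cp` here is a stand-in, not the real helper:

```python
from functools import lru_cache
from unittest.mock import MagicMock

config = MagicMock()
config.parallel_config.prefill_context_parallel_size = 1


@lru_cache
def enable_cp() -> bool:
    # Stand-in for vllm_ascend.attention.utils.enable_cp: caches whether
    # context parallelism is enabled based on the current vllm config.
    return config.parallel_config.prefill_context_parallel_size > 1


assert enable_cp() is False
config.parallel_config.prefill_context_parallel_size = 2
# Without cache_clear() the stale False would be returned forever.
enable_cp.cache_clear()
assert enable_cp() is True
```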
```diff
@@ -33,6 +33,11 @@ class TestAscendAttentionCPImpl(TestBase):
         self.layer_no_quant.layer_name = "test_layer"
         self.layer_no_quant._k_scale_float = 1.0
         self.layer_no_quant._v_scale_float = 1.0
+        self.mock_vllm_config = MagicMock()
+        self.config_patcher = patch(
+            'vllm_ascend.attention.attention_v1.get_current_vllm_config',
+            return_value=self.mock_vllm_config)
+        self.config_patcher.start()
 
         self.impl = AscendAttentionCPImpl(
             num_heads=8,
```
```diff
@@ -13,6 +13,23 @@ from vllm_ascend.utils import AscendDeviceType
 
 class TestAscendAttentionBackend(TestBase):
 
+    def setUp(self):
+        self.mock_config = MagicMock()
+
+        mock_parallel_config = MagicMock()
+        mock_parallel_config.prefill_context_parallel_size = 1
+        mock_parallel_config.decode_context_parallel_size = 1
+
+        self.mock_config.parallel_config = mock_parallel_config
+
+        self.utils_patcher = patch(
+            'vllm_ascend.attention.utils.get_current_vllm_config',
+            return_value=self.mock_config)
+        self.utils_patcher.start()
+
+        from vllm_ascend.attention.utils import enable_cp
+        enable_cp.cache_clear()
+
     def test_get_name(self):
         self.assertEqual(AscendAttentionBackend.get_name(), "CUSTOM")
 
```
```diff
@@ -102,6 +119,19 @@ class TestAscendAttentionMetadataBuilder(TestBase):
 class TestAscendAttentionBackendImpl(TestBase):
 
     def setUp(self):
+        self.mock_event = MagicMock()
+        self.mock_event.record.return_value = None
+        self.mock_event.wait.return_value = None
+
+        self.mock_stream = MagicMock()
+        self.event_patcher = patch('torch_npu.npu.Event',
+                                   return_value=self.mock_event)
+        self.stream_patcher = patch('torch_npu.npu.current_stream',
+                                    return_value=self.mock_stream)
+
+        self.event_patcher.start()
+        self.stream_patcher.start()
+
         self.layer = MagicMock()
         self.layer.layer_name = "test_layer"
         self.layer._k_scale_float = 1.0
```
```diff
@@ -119,6 +149,11 @@ class TestAscendAttentionBackendImpl(TestBase):
         self.layer_no_quant.layer_name = "test_layer"
         self.layer_no_quant._k_scale_float = 1.0
         self.layer_no_quant._v_scale_float = 1.0
+        self.mock_vllm_config = MagicMock()
+        self.config_patcher = patch(
+            'vllm_ascend.attention.attention_v1.get_current_vllm_config',
+            return_value=self.mock_vllm_config)
+        self.config_patcher.start()
 
         self.impl = AscendAttentionBackendImpl(
             num_heads=8,
```
```diff
@@ -22,6 +22,23 @@ from vllm_ascend.utils import vllm_version_is
 
 class TestAscendMLABackend(TestBase):
 
+    def setUp(self):
+        self.mock_config = MagicMock()
+
+        mock_parallel_config = MagicMock()
+        mock_parallel_config.prefill_context_parallel_size = 1
+        mock_parallel_config.decode_context_parallel_size = 1
+
+        self.mock_config.parallel_config = mock_parallel_config
+
+        self.utils_patcher = patch(
+            'vllm_ascend.attention.utils.get_current_vllm_config',
+            return_value=self.mock_config)
+        self.utils_patcher.start()
+
+        from vllm_ascend.attention.utils import enable_cp
+        enable_cp.cache_clear()
+
     def test_get_name(self):
         self.assertEqual(AscendMLABackend.get_name(), "ASCEND_MLA")
 
```
```diff
@@ -12,6 +12,7 @@ if 'torch_npu._inductor' not in sys.modules:
 from vllm_ascend.attention.sfa_v1 import (AscendSFABackend, AscendSFAImpl,
                                           AscendSFAMetadata,
                                           AscendSFAMetadataBuilder)
+from vllm_ascend.utils import enable_dsa_cp
 
 
 class TestAscendSFABackend(TestBase):
```
```diff
@@ -83,6 +84,27 @@ class TestAscendSFAMetadata(TestBase):
 
 class TestAscendSFAMetadataBuilder(TestBase):
 
+    def setUp(self):
+        self.mock_cfg = MagicMock()
+
+        self.mock_cfg.parallel_config = MagicMock()
+        self.mock_cfg.parallel_config.tensor_parallel_size = 1
+        self.mock_cfg.parallel_config.prefill_context_parallel_size = 1
+        self.mock_cfg.parallel_config.decode_context_parallel_size = 1
+
+        self.mock_cfg.compilation_config = MagicMock()
+        self.mock_cfg.compilation_config.pass_config = MagicMock()
+        self.mock_cfg.compilation_config.pass_config.enable_sp = False
+
+        self.mock_cfg.speculative_config.num_speculative_tokens = 0
+
+        self.patcher = patch("vllm.config.get_current_vllm_config",
+                             return_value=self.mock_cfg)
+        self.patcher.start()
+
+        if hasattr(enable_dsa_cp, "cache_clear"):
+            enable_dsa_cp.cache_clear()
+
     def test_ascend_sfa_metadata_builder_default(self):
         kv_cache_spec = MagicMock()
         layer_names = ["layer1", "layer2"]
```