[Main2Main] Upgrade vllm commit to 0109 (#5752)
### What this PR does / why we need it?
Upgrade the vLLM commit to 0109 (bde38c11df0ea066a740efe9b77fff5418be45df).
1. Remove `init_cached_hf_modules` due to
https://github.com/vllm-project/vllm/pull/31786
2. Fix the spec_decode e2e test, which was broken by
https://github.com/vllm-project/vllm/pull/29821
3. Fix `vllm.v1.attention.backends.utils` due to
https://github.com/vllm-project/vllm/pull/31891
4. Fix `self.seq_lens - query_lens` so that both tensors are on the same device (see the sketch after this list), due to
https://github.com/vllm-project/vllm/pull/31773
5. Skip the model_runner_v2 e2e test due to `'_OpNamespace' '_C' object has no attribute 'get_cuda_view_from_cpu_tensor'`
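A minimal sketch of what item 4 means in practice. The tensor values and the `num_computed_tokens` name are illustrative, and this is not necessarily the exact code touched by this PR:

```python
import torch

# Illustrative stand-ins for the metadata tensors named in item 4; in the real
# builder one of them may arrive on the CPU while the other lives on the NPU.
seq_lens = torch.tensor([16, 32, 8])
query_lens = torch.tensor([4, 4, 4])

# Move query_lens to seq_lens' device before the elementwise subtraction, so
# both operands are guaranteed to be on the same device.
num_computed_tokens = seq_lens - query_lens.to(seq_lens.device)
print(num_computed_tokens)  # tensor([12, 28,  4])
```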
- vLLM version: v0.13.0
- vLLM main: 2f4e6548ef
Signed-off-by: hfadzxy <starmoon_zhang@163.com>
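One recurring change in the test diff below is patching `init_cached_hf_modules` with `create=True`. A minimal, self-contained sketch of why that flag matters once the attribute is removed upstream (the `fake_import_utils` module here is purely illustrative):

```python
from types import ModuleType
from unittest.mock import patch

# Toy module standing in for vllm.utils.import_utils after the upstream
# removal: the attribute we want to patch no longer exists on it.
fake_mod = ModuleType("fake_import_utils")

# Without create=True, patch() refuses to patch a missing attribute.
try:
    with patch.object(fake_mod, "init_cached_hf_modules"):
        pass
except AttributeError as err:
    print("plain patch fails:", err)

# With create=True, the attribute is created only for the duration of the
# patch, so the same test decorators can run against both old and new vLLM.
with patch.object(fake_mod, "init_cached_hf_modules", create=True) as mocked:
    fake_mod.init_cached_hf_modules()
    mocked.assert_called_once()
```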

@@ -33,6 +33,11 @@ class TestAscendAttentionCPImpl(TestBase):
        self.layer_no_quant.layer_name = "test_layer"
        self.layer_no_quant._k_scale_float = 1.0
        self.layer_no_quant._v_scale_float = 1.0
        self.mock_vllm_config = MagicMock()
        self.config_patcher = patch(
            'vllm_ascend.attention.attention_v1.get_current_vllm_config',
            return_value=self.mock_vllm_config)
        self.config_patcher.start()

        self.impl = AscendAttentionCPImpl(
            num_heads=8,

@@ -13,6 +13,23 @@ from vllm_ascend.utils import AscendDeviceType

class TestAscendAttentionBackend(TestBase):

    def setUp(self):
        self.mock_config = MagicMock()

        mock_parallel_config = MagicMock()
        mock_parallel_config.prefill_context_parallel_size = 1
        mock_parallel_config.decode_context_parallel_size = 1

        self.mock_config.parallel_config = mock_parallel_config

        self.utils_patcher = patch(
            'vllm_ascend.attention.utils.get_current_vllm_config',
            return_value=self.mock_config)
        self.utils_patcher.start()

        from vllm_ascend.attention.utils import enable_cp
        enable_cp.cache_clear()

    def test_get_name(self):
        self.assertEqual(AscendAttentionBackend.get_name(), "CUSTOM")

@@ -102,6 +119,19 @@ class TestAscendAttentionMetadataBuilder(TestBase):
class TestAscendAttentionBackendImpl(TestBase):

    def setUp(self):
        self.mock_event = MagicMock()
        self.mock_event.record.return_value = None
        self.mock_event.wait.return_value = None

        self.mock_stream = MagicMock()
        self.event_patcher = patch('torch_npu.npu.Event',
                                   return_value=self.mock_event)
        self.stream_patcher = patch('torch_npu.npu.current_stream',
                                    return_value=self.mock_stream)

        self.event_patcher.start()
        self.stream_patcher.start()

        self.layer = MagicMock()
        self.layer.layer_name = "test_layer"
        self.layer._k_scale_float = 1.0
@@ -119,6 +149,11 @@ class TestAscendAttentionBackendImpl(TestBase):
        self.layer_no_quant.layer_name = "test_layer"
        self.layer_no_quant._k_scale_float = 1.0
        self.layer_no_quant._v_scale_float = 1.0
        self.mock_vllm_config = MagicMock()
        self.config_patcher = patch(
            'vllm_ascend.attention.attention_v1.get_current_vllm_config',
            return_value=self.mock_vllm_config)
        self.config_patcher.start()

        self.impl = AscendAttentionBackendImpl(
            num_heads=8,

@@ -22,6 +22,23 @@ from vllm_ascend.utils import vllm_version_is

class TestAscendMLABackend(TestBase):

    def setUp(self):
        self.mock_config = MagicMock()

        mock_parallel_config = MagicMock()
        mock_parallel_config.prefill_context_parallel_size = 1
        mock_parallel_config.decode_context_parallel_size = 1

        self.mock_config.parallel_config = mock_parallel_config

        self.utils_patcher = patch(
            'vllm_ascend.attention.utils.get_current_vllm_config',
            return_value=self.mock_config)
        self.utils_patcher.start()

        from vllm_ascend.attention.utils import enable_cp
        enable_cp.cache_clear()

    def test_get_name(self):
        self.assertEqual(AscendMLABackend.get_name(), "ASCEND_MLA")

@@ -12,6 +12,7 @@ if 'torch_npu._inductor' not in sys.modules:
from vllm_ascend.attention.sfa_v1 import (AscendSFABackend, AscendSFAImpl,
                                           AscendSFAMetadata,
                                           AscendSFAMetadataBuilder)
from vllm_ascend.utils import enable_dsa_cp


class TestAscendSFABackend(TestBase):
@@ -83,6 +84,27 @@ class TestAscendSFAMetadata(TestBase):

class TestAscendSFAMetadataBuilder(TestBase):

    def setUp(self):
        self.mock_cfg = MagicMock()

        self.mock_cfg.parallel_config = MagicMock()
        self.mock_cfg.parallel_config.tensor_parallel_size = 1
        self.mock_cfg.parallel_config.prefill_context_parallel_size = 1
        self.mock_cfg.parallel_config.decode_context_parallel_size = 1

        self.mock_cfg.compilation_config = MagicMock()
        self.mock_cfg.compilation_config.pass_config = MagicMock()
        self.mock_cfg.compilation_config.pass_config.enable_sp = False

        self.mock_cfg.speculative_config.num_speculative_tokens = 0

        self.patcher = patch("vllm.config.get_current_vllm_config",
                             return_value=self.mock_cfg)
        self.patcher.start()

        if hasattr(enable_dsa_cp, "cache_clear"):
            enable_dsa_cp.cache_clear()

    def test_ascend_sfa_metadata_builder_default(self):
        kv_cache_spec = MagicMock()
        layer_names = ["layer1", "layer2"]

@@ -13,10 +13,11 @@
# This file is a part of the vllm-ascend project.
#

from unittest.mock import patch
from unittest.mock import MagicMock, patch

import pytest
import torch
from vllm.config import set_current_vllm_config
from vllm.model_executor.layers.activation import QuickGELU, SiluAndMul

from vllm_ascend.utils import AscendDeviceType
@@ -27,8 +28,20 @@ def dummy_tensor():
    return torch.randn(4, 8, dtype=torch.float16)


@pytest.fixture
def default_vllm_config():
    mock_config = MagicMock()

    mock_config.compilation_config.dispatch_forward_backend = "eager"

    mock_config.compilation_config.custom_ops = ["all"]

    with set_current_vllm_config(mock_config):
        yield mock_config


@patch("torch_npu.npu_fast_gelu", side_effect=lambda x: x + 1)
def test_QuickGELU_forward(mock_gelu, dummy_tensor):
def test_QuickGELU_forward(mock_gelu, dummy_tensor, default_vllm_config):
    layer = QuickGELU()
    out = layer.forward(dummy_tensor)

@@ -45,7 +58,7 @@ def test_QuickGELU_forward(mock_gelu, dummy_tensor):
       side_effect=lambda x: None)
def test_SiluAndMul_forward(mock_maybe_prefetch_mlp_down_proj,
                            mock_maybe_wait_prefetch_done, mock_swiglu,
                            is_310p, dummy_tensor):
                            is_310p, dummy_tensor, default_vllm_config):

    with patch("vllm_ascend.utils.get_ascend_device_type",
               return_value=AscendDeviceType._310P

@@ -1,7 +1,8 @@
from unittest.mock import patch
from unittest.mock import MagicMock, patch

import pytest
import torch
from vllm.config import set_current_vllm_config
from vllm.model_executor.layers.layernorm import RMSNorm

from vllm_ascend.utils import AscendDeviceType
@@ -20,13 +21,22 @@ def mock_add_rms_norm(x, residual, weight, eps):
    return 2 * x, None, 2 * residual


@pytest.fixture(autouse=True)
def default_vllm_config():
    mock_config = MagicMock()
    mock_config.compilation_config.custom_ops = ["all"]

    with set_current_vllm_config(mock_config):
        yield mock_config


@pytest.mark.parametrize("is_310p", [True, False])
@pytest.mark.parametrize("residual",
                         [None, torch.randn(4, 8, dtype=torch.float32)])
@patch("torch_npu.npu_rms_norm", side_effect=mock_rms_norm)
@patch("torch_npu.npu_add_rms_norm", side_effect=mock_add_rms_norm)
def test_RMSNorm_forward(mock_add_rmsnorm, mock_rmsnorm, is_310p, residual,
                         dummy_tensor):
                         dummy_tensor, default_vllm_config):

    with patch("vllm_ascend.utils.get_ascend_device_type",
               return_value=AscendDeviceType._310P

@@ -78,6 +78,12 @@ class TestAscendRotaryEmbedding(unittest.TestCase):

    def setUp(self):
        # Common setup for tests
        self.config_patcher = patch('vllm.config.vllm.get_current_vllm_config')
        self.mock_get_config = self.config_patcher.start()
        mock_config = MagicMock()
        mock_config.compilation_config.custom_ops = ["all"]

        self.mock_get_config.return_value = mock_config
        self.positions = torch.tensor([1, 2, 3])
        self.query = torch.randn(3, 1, 32, dtype=torch.float16)
        self.key = torch.randn(3, 1, 32, dtype=torch.float16)
@@ -242,6 +248,12 @@ class TestAscendDeepseekScalingRotaryEmbedding(TestBase):

    def setUp(self):
        # Common setup for tests
        self.config_patcher = patch('vllm.config.vllm.get_current_vllm_config')
        self.mock_get_config = self.config_patcher.start()
        mock_config = MagicMock()
        mock_config.compilation_config.custom_ops = ["all"]

        self.mock_get_config.return_value = mock_config
        self.positions = torch.tensor([1, 2, 3])
        self.query = torch.randn(3, 1, 32, dtype=torch.float16)
        self.key = torch.randn(3, 1, 32, dtype=torch.float16)
@@ -368,7 +380,11 @@ class TestAscendDeepseekScalingRotaryEmbedding(TestBase):
class TestAscendMRotaryEmbedding(unittest.TestCase):

    def setUp(self):
        # Common setup for tests
        self.config_patcher = patch('vllm.config.vllm.get_current_vllm_config')
        self.mock_get_config = self.config_patcher.start()
        mock_config = MagicMock()
        mock_config.compilation_config.custom_ops = ["all"]
        self.mock_get_config.return_value = mock_config
        self.number_tokens = 3
        self.num_head = 8
        self.num_kvhead = 8

@@ -29,6 +29,23 @@ from vllm_ascend.ops.fused_moe.token_dispatcher import ( # isort: skip
class TestTokenDispatcherWithMC2(TestBase):

    def setUp(self):
        self.config_patcher = patch(
            'vllm_ascend.ops.fused_moe.token_dispatcher.get_current_vllm_config'
        )
        self.mock_get_config = self.config_patcher.start()

        mock_config = MagicMock()

        mock_config.scheduler_config.max_num_seqs = 256
        mock_config.scheduler_config.decode_max_num_seqs = 256

        mock_config.compilation_config.custom_ops = ["all"]

        mock_config.speculative_config = None

        mock_config.parallel_config.tensor_parallel_size = 1

        self.mock_get_config.return_value = mock_config
        self.mc2_group = MagicMock()
        self.mc2_group.device_group.return_value._get_backend.return_value.get_hccl_comm_name.return_value = "hccl_123"
        self.mc2_group.rank_in_group = 0

@@ -208,6 +208,15 @@ class TestCustomVocabParallelEmbedding(unittest.TestCase):
class TestAscendLogitsProcessor(unittest.TestCase):

    def setUp(self):
        self.mock_vllm_config = MagicMock()
        self.mock_vllm_config.compilation_config.custom_ops = ["all"]

        from vllm.config.vllm import set_current_vllm_config
        set_current_vllm_config(self.mock_vllm_config)

        self.config_patch = patch("vllm.config.vllm.get_current_vllm_config",
                                  return_value=self.mock_vllm_config)
        self.config_patch.start()
        self.vocab_size = 50
        self.num_embeddings = 50
        self.embedding_dim = 10

@@ -5,6 +5,7 @@ import torch
from vllm.config import CacheConfig, ModelConfig, ParallelConfig, VllmConfig

from tests.ut.base import TestBase
from vllm_ascend.utils import vllm_version_is

init_cached_hf_modules_path = "vllm.utils.import_utils.init_cached_hf_modules"

@@ -52,7 +53,7 @@ class TestNPUWorker(TestBase):
    @patch("vllm_ascend.worker.worker.get_ascend_config")
    @patch("vllm_ascend.worker.worker.init_ascend_config")
    @patch("vllm_ascend.worker.worker.check_ascend_device_type")
    @patch(init_cached_hf_modules_path)
    @patch(init_cached_hf_modules_path, create=True)
    @patch("vllm_ascend.worker.worker.NPUWorker._init_profiler")
    def test_init_npu_worker_normal_case(
        self,
@@ -106,7 +107,7 @@ class TestNPUWorker(TestBase):
    @patch("vllm_ascend.worker.worker.get_ascend_config")
    @patch("vllm_ascend.worker.worker.init_ascend_config")
    @patch("vllm_ascend.worker.worker.check_ascend_device_type")
    @patch(init_cached_hf_modules_path)
    @patch(init_cached_hf_modules_path, create=True)
    @patch("vllm_ascend.worker.worker.NPUWorker._init_profiler")
    def test_init_npu_worker_with_trust_remote_code(
        self,
@@ -140,7 +141,10 @@ class TestNPUWorker(TestBase):
        )

        # Verify init_cached_hf_modules is called (trust_remote_code=True)
        mock_init_cached_hf_modules.assert_called_once()
        if vllm_version_is('0.13.0'):
            mock_init_cached_hf_modules.assert_called_once()
        else:
            mock_init_cached_hf_modules.assert_not_called()

    @patch("vllm_ascend.utils.adapt_patch")
    @patch("vllm_ascend.ops")
@@ -149,7 +153,7 @@ class TestNPUWorker(TestBase):
    @patch("vllm_ascend.worker.worker.get_ascend_config")
    @patch("vllm_ascend.worker.worker.init_ascend_config")
    @patch("vllm_ascend.worker.worker.check_ascend_device_type")
    @patch(init_cached_hf_modules_path)
    @patch(init_cached_hf_modules_path, create=True)
    @patch("vllm_ascend.worker.worker.NPUWorker._init_profiler")
    def test_init_npu_worker_with_custom_cache_dtype(
        self,
@@ -813,10 +817,11 @@ class TestNPUWorker(TestBase):
            mock_scheduler_output, None)
        self.assertEqual(result, mock_model_output)

    @patch("vllm_ascend.worker.worker.enable_sp", return_value=False)
    @patch("vllm_ascend.worker.worker.get_pp_group")
    @patch("vllm_ascend.worker.worker.get_tp_group")
    def test_execute_model_middle_rank(self, mock_get_tp_group,
                                       mock_get_pp_group):
                                       mock_get_pp_group, mock_enable_sp):
        """Test execute_model method - middle rank case"""
        from vllm.sequence import IntermediateTensors

@@ -1113,12 +1118,14 @@ class TestNPUWorker(TestBase):
        worker.model_runner.initialize_kv_cache.assert_called_once_with(
            mock_kv_cache_config)

    @patch("vllm_ascend.worker.worker.enable_sp", return_value=False)
    @patch("vllm_ascend.worker.worker.get_pp_group")
    @patch("vllm_ascend.worker.worker.get_tp_group")
    @patch("vllm_ascend.worker.worker.EMPTY_MODEL_RUNNER_OUTPUT")
    def test_execute_model_kv_connector_not_finished(self, mock_empty_output,
                                                     mock_get_tp_group,
                                                     mock_get_pp_group):
                                                     mock_get_pp_group,
                                                     mock_enable_sp):
        """Test execute_model method - kv_connector_output not finished sending/recving case"""
        from vllm.sequence import IntermediateTensors