[main2main] upgrade vllm main 0202 (#6560)
### What this PR does / why we need it?
1. Fix `TypeError: FusedMoEParallelConfig.__init__() missing 1 required positional argument: 'is_sequence_parallel'` due to https://github.com/vllm-project/vllm/pull/32567
2. Fix `TypeError: '>' not supported between instances of 'MagicMock' and 'int'` due to https://github.com/vllm-project/vllm/pull/33035
3. Fix `TypeError: Can't instantiate abstract class AscendMLAImpl with abstract methods forward_mha, forward_mqa` and `AttributeError: 'bool' object has no attribute 'process_weights_after_loading'` due to https://github.com/vllm-project/vllm/pull/33284
4. Fix `'AscendSharedFusedMoE' object has no attribute '_routed_input_transform'` due to https://github.com/vllm-project/vllm/pull/32790
5. Fix `NPUModelRunner._dummy_run() got an unexpected keyword argument 'num_active_loras'` due to https://github.com/vllm-project/vllm/pull/32005
6. Fix the problem caused by `'tuple' object has no attribute 'job_id'` due to https://github.com/vllm-project/vllm/pull/27492
7. Fix the problem that `all_moe_layers` does not match `vllm.moe_forward` / `vllm.moe_forward_shared` due to https://github.com/vllm-project/vllm/pull/33184
8. Add a patch to fix "got multiple values for keyword argument 'add_special_tokens'" due to https://github.com/vllm-project/vllm/pull/32863

### Does this PR introduce _any_ user-facing change?

### How was this patch tested?
- vLLM version: v0.15.0
- vLLM main: https://github.com/vllm-project/vllm/commit/v0.15.0

---------

Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
Signed-off-by: Meihan-chen <jcccx.cmh@gmail.com>
Signed-off-by: hfadzxy <starmoon_zhang@163.com>
Co-authored-by: wangxiyuan <wangxiyuan1007@gmail.com>
Co-authored-by: hfadzxy <starmoon_zhang@163.com>
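Most of these breakages are handled with the same compatibility pattern: branch on the installed vLLM version via `vllm_version_is`. Below is a minimal sketch of that pattern, mirroring the `FusedMoEParallelConfig` fix in the updated ascend-config unit test; the positional arguments are the test's placeholder values, not meaningful parallel settings.

```python
from vllm.model_executor.layers.fused_moe.config import FusedMoEParallelConfig

from vllm_ascend.utils import vllm_version_is

if vllm_version_is("0.15.0"):
    # vLLM v0.15.0: the constructor does not accept is_sequence_parallel yet.
    moe_parallel_config = FusedMoEParallelConfig(
        2, 0, 1, 2, 1, 1, 1, 1, True, "hccl", enable_eplb=True)
else:
    # vLLM main (after vllm-project/vllm#32567): is_sequence_parallel is required.
    moe_parallel_config = FusedMoEParallelConfig(
        2, 0, 1, 2, 1, 1, 1, 1, True, "hccl",
        is_sequence_parallel=False, enable_eplb=True)
```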
@@ -922,4 +922,7 @@ PROMPT_CONFIGS = {
@pytest.fixture(params=PROMPT_CONFIGS.keys())
def vl_config(request):
    return PROMPT_CONFIGS[request.param]
    config = PROMPT_CONFIGS[request.param]
    if "skip" in config:
        pytest.skip(config["skip"])
    return config
@@ -9,6 +9,7 @@ from vllm.model_executor.layers.fused_moe.config import FusedMoEConfig, FusedMoE
from vllm_ascend.ascend_config import init_ascend_config
from vllm_ascend.eplb.core.eplb_utils import init_eplb_config
from vllm_ascend.utils import vllm_version_is
# isort: on
@@ -21,7 +22,13 @@ class TestAscendConfig(unittest.TestCase):
            "eplb_config": {"dynamic_eplb": True, "num_redundant_experts": 2},
        }
        from vllm.model_executor.layers.fused_moe.config import RoutingMethodType
        moe_parallel_config = FusedMoEParallelConfig(2, 0, 1, 2, 1, 1, 1, 1, True, "hccl", enable_eplb=True)
        if vllm_version_is("0.15.0"):
            moe_parallel_config = FusedMoEParallelConfig(
                2, 0, 1, 2, 1, 1, 1, 1, True, "hccl", enable_eplb=True)
        else:
            moe_parallel_config = FusedMoEParallelConfig(
                2, 0, 1, 2, 1, 1, 1, 1, True, "hccl",
                is_sequence_parallel=False, enable_eplb=True)
        moe_config = FusedMoEConfig(
            num_experts=8,
            experts_per_token=8,
@@ -82,8 +82,13 @@ class TestAscendMultiHeadLatentAttention(TestBase):
    @patch("vllm_ascend.ops.mla.get_tensor_model_parallel_world_size")
    def test_initialization(self, mock_tp_size, mock_ascend_config,
                            mock_get_vllm_config):
        # Create a proper mock for MLAAttention that has the required attributes
        mock_mla_attn = MagicMock()
        mock_mla_attn.process_weights_after_loading = MagicMock()
        mock_mla_attn.impl = MagicMock()
        mock_mla_attn.impl.process_weights_after_loading = MagicMock()

        with patch("vllm_ascend.ops.mla.MLAAttention", return_value=True):
        with patch("vllm_ascend.ops.mla.MLAAttention", return_value=mock_mla_attn):
            mock_tp_size.return_value = 2
            mock_ascend_config.return_value.enable_shared_expert_dp = True
            mock_vllm_config = MagicMock(spec=VllmConfig)
@@ -126,7 +131,14 @@ class TestAscendMultiHeadLatentAttention(TestBase):
            num_hidden_layers=32, first_k_dense_replace=False)
        mock_get_vllm_config.return_value = mock_vllm_config
        mock_vllm_config.compilation_config = CompilationConfig()
        with patch("vllm_ascend.ops.mla.MLAAttention", return_value=True):

        # Create a proper mock for MLAAttention that has the required attributes
        mock_mla_attn = MagicMock()
        mock_mla_attn.process_weights_after_loading = MagicMock()
        mock_mla_attn.impl = MagicMock()
        mock_mla_attn.impl.process_weights_after_loading = MagicMock()

        with patch("vllm_ascend.ops.mla.MLAAttention", return_value=mock_mla_attn):
            attn = AscendMultiHeadLatentAttention(
                hidden_size=self.hidden_size,
                num_heads=self.num_heads,
@@ -1,6 +1,5 @@
from unittest.mock import MagicMock, patch

from vllm.attention.layer import Attention
from vllm.model_executor.layers.fused_moe import FusedMoE
from vllm.model_executor.layers.fused_moe.config import FusedMoEConfig
from vllm.model_executor.layers.linear import LinearBase
@@ -8,7 +7,12 @@ from vllm.model_executor.layers.linear import LinearBase
from tests.ut.base import TestBase
from vllm_ascend.ops.linear import AscendUnquantizedLinearMethod
from vllm_ascend.quantization.modelslim_config import AscendModelSlimConfig
from vllm_ascend.utils import ASCEND_QUANTIZATION_METHOD
from vllm_ascend.utils import ASCEND_QUANTIZATION_METHOD, vllm_version_is

if vllm_version_is("v0.15.0"):
    from vllm.attention.layer import Attention  # type: ignore
else:
    from vllm.model_executor.layers.attention import Attention


class TestAscendModelSlimConfig(TestBase):
@@ -28,12 +28,15 @@ class TestEagleProposerInitialization(TestBase):
        self.vllm_config.model_config.dtype = torch.float16
        self.vllm_config.model_config.max_model_len = 2048
        self.vllm_config.model_config.uses_mrope = False
        self.vllm_config.model_config.uses_xdrope_dim = 0
        self.vllm_config.parallel_config.tensor_parallel_size = 1
        self.vllm_config.parallel_config.data_parallel_rank = 0
        self.vllm_config.speculative_config.draft_tensor_parallel_size = 1
        self.vllm_config.speculative_config.num_speculative_tokens = 2
        self.vllm_config.speculative_config.speculative_token_tree = str([
            (i + 1) * (0, ) for i in range(2)
        ])
        self.vllm_config.speculative_config.draft_model_config.uses_xdrope_dim = 0
        self.vllm_config.additional_config = None

        self.mock_cpugpubuffer = patch(
@@ -141,12 +144,15 @@ class TestEagleProposerLoadModel(TestBase):
        self.vllm_config.model_config.dtype = torch.float16
        self.vllm_config.model_config.max_model_len = 2048
        self.vllm_config.model_config.uses_mrope = False
        self.vllm_config.model_config.uses_xdrope_dim = 0
        self.vllm_config.parallel_config.tensor_parallel_size = 1
        self.vllm_config.parallel_config.data_parallel_rank = 0
        self.vllm_config.speculative_config.draft_tensor_parallel_size = 1
        self.vllm_config.speculative_config.num_speculative_tokens = 2
        self.vllm_config.speculative_config.speculative_token_tree = str([
            (i + 1) * (0, ) for i in range(2)
        ])
        self.vllm_config.speculative_config.draft_model_config.uses_xdrope_dim = 0
        self.vllm_config.additional_config = None
        init_ascend_config(self.vllm_config)
@@ -285,12 +291,15 @@ class TestEagleProposerDummyRun(TestBase):
        self.vllm_config.model_config.dtype = torch.float16
        self.vllm_config.model_config.max_model_len = 2048
        self.vllm_config.model_config.uses_mrope = False
        self.vllm_config.model_config.uses_xdrope_dim = 0
        self.vllm_config.model_config.use_mla = False
        self.vllm_config.parallel_config.tensor_parallel_size = 1
        self.vllm_config.parallel_config.data_parallel_rank = 0
        self.vllm_config.speculative_config.draft_tensor_parallel_size = 1
        self.vllm_config.speculative_config.speculative_token_tree = str([
            (i + 1) * (0, ) for i in range(4)
        ])
        self.vllm_config.speculative_config.draft_model_config.uses_xdrope_dim = 0
        self.vllm_config.additional_config = None
        init_ascend_config(self.vllm_config)
@@ -404,12 +413,15 @@ class TestEagleProposerHelperMethods(TestBase):
        self.vllm_config.model_config.dtype = torch.float16
        self.vllm_config.model_config.max_model_len = 2048
        self.vllm_config.model_config.uses_mrope = False
        self.vllm_config.model_config.uses_xdrope_dim = 0
        self.vllm_config.parallel_config.tensor_parallel_size = 1
        self.vllm_config.parallel_config.data_parallel_rank = 0
        self.vllm_config.speculative_config.draft_tensor_parallel_size = 1
        self.vllm_config.speculative_config.num_speculative_tokens = 2
        self.vllm_config.speculative_config.speculative_token_tree = str([
            (i + 1) * (0, ) for i in range(2)
        ])
        self.vllm_config.speculative_config.draft_model_config.uses_xdrope_dim = 0
        self.vllm_config.additional_config = None
        init_ascend_config(self.vllm_config)
@@ -34,6 +34,7 @@ class TestMtpProposer:
        config.speculative_config.draft_model_config = MagicMock()
        config.speculative_config.draft_model_config.get_hidden_size.return_value = 4096
        config.speculative_config.draft_model_config.uses_mrope = False
        config.speculative_config.draft_model_config.uses_xdrope_dim = 0
        config.speculative_config.speculative_token_tree = str([
            (i + 1) * (0, ) for i in range(2)
        ])
@@ -42,9 +43,11 @@ class TestMtpProposer:
        config.model_config.dtype = torch.float16
        config.model_config.max_model_len = 2048
        config.model_config.uses_mrope = False
        config.model_config.uses_xdrope_dim = 0
        config.model_config.hf_text_config = None
        config.model_config.hf_config = None
        config.parallel_config.tensor_parallel_size = 1
        config.parallel_config.data_parallel_rank = 0
        config.speculative_config.draft_tensor_parallel_size = 1

        config.load_config = None