[CI] Follow vLLM FusedMoEParallelConfig interface change and clean up unused config (#1625)

This commit
78fe77534b
from vllm reverted the change for FusedMoEParallelConfig

This PR does the same to fix the CI error

Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
This commit is contained in:
wangxiyuan
2025-07-04 17:54:33 +08:00
committed by GitHub
parent 4e910186de
commit 343955c7ac
4 changed files with 13 additions and 26 deletions

View File

@@ -5,8 +5,8 @@ from unittest import mock
from transformers import PretrainedConfig
from vllm.config import ModelConfig, VllmConfig
from vllm_ascend.ascend_config import (check_ascend_config,
check_torchair_supported,
from vllm_ascend.ascend_config import (_check_torchair_supported,
check_ascend_config,
clear_ascend_config, get_ascend_config,
init_ascend_config)
@@ -248,5 +248,5 @@ class TestAscendConfig(unittest.TestCase):
test_cases = [('deepseek_v3', True), ('PanguProMoE', True),
('qwen', False), ('llama', False)]
for model_type, expected_output in test_cases:
self.assertEqual(check_torchair_supported(model_type),
self.assertEqual(_check_torchair_supported(model_type),
expected_output)