[CI] Follow vLLM FusedMoEParallelConfig interface change and clean up unused config (#1625)

Commit 78fe77534b from vLLM reverted the change to FusedMoEParallelConfig.

This PR does the same to fix the CI error.
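The PR itself simply tracks vLLM's revert, but as a hedged illustration of the general problem: a plugin that has to run against more than one vLLM version can filter keyword arguments against the installed signature instead of hard-coding one interface. This is a hypothetical sketch, not part of this PR or of vLLM; the wrapper name and behavior are illustrative only.

import inspect

def call_with_supported_kwargs(fn, *args, **kwargs):
    # Hypothetical compatibility shim (not part of this PR): keep only the
    # keyword arguments that the installed version of `fn` accepts, so one
    # call site survives an interface change such as the
    # FusedMoEParallelConfig revert referenced above.
    params = inspect.signature(fn).parameters
    supported = {k: v for k, v in kwargs.items() if k in params}
    return fn(*args, **supported)

For example, wrapping FusedMoEParallelConfig.make this way would silently drop a keyword that only newer vLLM accepts when running on an older version; whether silent dropping is acceptable depends on the argument, so pinning to one interface, as this PR does, is the simpler fix.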

Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
Author: wangxiyuan, 2025-07-04 17:54:33 +08:00 (committed by GitHub)
Commit: 343955c7ac (parent: 4e910186de)
4 changed files with 13 additions and 26 deletions


@@ -27,7 +27,6 @@
 from vllm.attention.backends.utils import PAD_SLOT_ID, CommonAttentionState
 from vllm.v1.core.sched.output import SchedulerOutput
 from vllm.v1.worker.gpu_input_batch import InputBatch
-from vllm_ascend.ascend_config import get_ascend_config
 from vllm_ascend.attention.attention_v1 import AscendAttentionState
 from vllm_ascend.utils import (ACL_FORMAT_FRACTAL_NZ, aligned_16, is_310p,
                                nd_to_nz_2d)
@@ -160,8 +159,6 @@ class AscendAttentionTorchairMetadataBuilder:
     def __init__(self, runner):
         self.runner = runner
-        self.torchair_graph_enabled = get_ascend_config(
-        ).torchair_graph_config.enabled

     def reorder_batch(self, input_batch: "InputBatch",
                       scheduler_output: "SchedulerOutput") -> bool:
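
For reference, the builder's __init__ after this cleanup, reconstructed from the hunk above (the removed attribute was unused, which is why it could be dropped along with its import):

class AscendAttentionTorchairMetadataBuilder:

    def __init__(self, runner):
        # Only the runner reference remains; the unused
        # torchair_graph_enabled attribute and its get_ascend_config
        # import were removed.
        self.runner = runner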