[Patch] format patch module to make it clearer (#601)

Format the patch module to make it clearer.
Add the patch doc description; every new patch must follow this guide.
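
The required doc header can be read off the diffs below; each patch module now documents every patch in roughly this shape (the angle-bracket placeholders are illustrative, not part of the commit):

# What's Patched and how it works:
# ** File: <path of the patch file> **
# 1. `<patched function, method, or class>`
# Why:
# <why the upstream behavior needs changing>
# How:
# <how the patch changes it>
# Related PR (if none, explain why): <upstream PR link, or a reason such as: refused by vllm / vllm doesn't support it / preparing to submit>
# Future Plan:
# <when and how the patch can be removed>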

Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
wangxiyuan
2025-04-22 14:13:00 +08:00
committed by GitHub
parent ad845bfe82
commit 538a69c145
7 changed files with 136 additions and 141 deletions

View File

@@ -14,17 +14,5 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
# What's Patched and how it works:
# ** File: platform/patch_0_8_4/patch_config.py**
# 1. `vllm.config.ModelConfig.__init__()`
# Why:
# Sleep mode is hard-coded to support the CUDA platform only.
# How:
# Use a new method to check whether sleep mode is available.
# Related PR (if none, explain why: e.g. 1. refused by vllm; 2. vllm doesn't support it; 3. preparing to submit):
# https://github.com/vllm-project/vllm/pull/16562
# Future Plan:
# This patch is only used for 0.8.4 and can't be reverted; just keep it as is.
import vllm_ascend.patch.platform.patch_0_8_4.patch_config # noqa
import vllm_ascend.patch.platform.patch_0_8_4.patch_distributed # noqa
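
For illustration, a minimal sketch of the pattern patch_config.py is described as using: intercept `vllm.config.ModelConfig.__init__` so the sleep-mode check goes through the current platform instead of a hard-coded CUDA test. The hook name `is_sleep_mode_available` is an assumption for this sketch (the diff does not show the real one), and the real patch may replace `__init__` outright rather than wrap it.

from vllm.config import ModelConfig
from vllm.platforms import current_platform

_original_init = ModelConfig.__init__

def _patched_model_config_init(self, *args, **kwargs):
    # Run the original constructor first, then re-check sleep mode with a
    # platform-aware hook instead of assuming only CUDA supports it.
    _original_init(self, *args, **kwargs)
    if getattr(self, "enable_sleep_mode", False):
        # Hypothetical hook name; not confirmed by this diff.
        checker = getattr(current_platform, "is_sleep_mode_available", None)
        if checker is not None and not checker():
            raise ValueError("Sleep mode is not supported on this platform.")

ModelConfig.__init__ = _patched_model_config_init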

View File

@@ -13,4 +13,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import vllm_ascend.patch.platform.patch_common.patch_distributed # noqa

View File

@@ -27,40 +27,6 @@ from torch.distributed.distributed_c10d import (Backend, PrefixStore,
from torch.distributed.rendezvous import rendezvous
from vllm.config import ParallelConfig
# What's Patched and how it works:
# ** File: platform/patch_0_8_4/patch_distributed.py**
# 1. `vllm.distributed.parallel_state.destroy_model_parallel()`
# Why:
# vllm does not support an outside platform maintaining its own `CoordinatorGroup`. vllm-ascend maintains EP and ETP
# inside this repo and needs a common interface to destroy them. This patch adds an interface for destroying the
# platform-owned `CoordinatorGroup`s, making sure they are all properly destroyed.
# How:
# Call the platform method `destroy_platform_model_parallel` to destroy all the platform-owned `CoordinatorGroup`s.
# Related PR (if none, explain why): no related PR; we want to add this ability to vllm.
# Future Plan:
# Remove this patch once vllm merges it.
# 2. `vllm.distributed.stateless_init_torch_distributed_process_group()`
# Why:
# The stateless process group can only be initialized from the gloo and nccl backends. vllm-ascend
# needs to initialize its own stateless process group for communication, so we add a platform-related
# call to `stateless_init_torch_distributed_process_group` to enable other platforms that provide their own
# stateless process group initialization method.
# How:
# Call the platform method `platform_has_backend_register` to check whether a stateless process group
# initialization method exists, and call the platform method `platform_register_backend` to initialize it.
# Related PR (if none, explain why): no related PR; we want to add this ability to vllm.
# Future Plan:
# Remove this patch once vllm merges it.
# 3. `ParallelConfig.get_next_dp_init_port`
# Why:
# We want to get the dp port from an env variable so that multi-node inference can be properly initialized and run.
# How:
# Get the dp port from an env variable to enable multi-node dp inference.
# Related PR (if none, explain why): no related PR; we want to add this ability to vllm.
# Future Plan:
# This is a workaround in vllm-ascend to enable multi-node dp inference; it may be removed once vllm has a better
# plan for the multi-node dp inference implementation.
def ascend_destroy_model_parallel():
"""Set the groups to none and destroy them."""

View File

@@ -14,4 +14,3 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import vllm_ascend.patch.platform.patch_main.patch_distributed # noqa F401

View File

@@ -1,32 +0,0 @@
#
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from vllm/model_executor/models/qwen2_vl.py
# This file is a part of the vllm-ascend project.
import vllm
import vllm.distributed
from vllm.config import ParallelConfig
from vllm_ascend.patch.platform.patch_0_8_4.patch_distributed import (
ascend_destroy_model_parallel,
ascend_stateless_init_torch_distributed_process_group,
parallel_config_get_dp_port)
# For details of these patches, please refer to vllm_ascend/patch/platform/patch_0_8_4/patch_distributed.py
vllm.distributed.parallel_state.destroy_model_parallel = ascend_destroy_model_parallel
vllm.distributed.stateless_init_torch_distributed_process_group = ascend_stateless_init_torch_distributed_process_group
ParallelConfig.get_next_dp_init_port = parallel_config_get_dp_port