It comes from 0.9.1dev
[0.9.1][Feature]Moe alltoallv communication optimization for unquantized
RL training scene & alltoallv support dpo (#1547)
- vLLM version: v0.10.0
- vLLM main:
97608dc276
---------
Signed-off-by: weijinqian_v1 <weijinqian@huawei.com>
Signed-off-by: whx-sjtu <2952154980@qq.com>
Signed-off-by: curryliu <120010041@link.cuhk.edu.cn>
Signed-off-by: wangli <wangli858794774@gmail.com>
Signed-off-by: ChenTaoyu-SJTU <ctynb@qq.com>
Signed-off-by: taoxudonghaha <justsheldon@163.com>
Signed-off-by: shen-shanshan <467638484@qq.com>
Signed-off-by: Shanshan Shen <87969357+shen-shanshan@users.noreply.github.com>
Signed-off-by: leo-pony <nengjunma@outlook.com>
Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
Signed-off-by: MengqingCao <cmq0113@163.com>
Co-authored-by: weijinqian_v1 <weijinqian@huawei.com>
Co-authored-by: whx <56632993+whx-sjtu@users.noreply.github.com>
Co-authored-by: curryliu <99582471+Irving11-BKN@users.noreply.github.com>
Co-authored-by: Li Wang <wangli858794774@gmail.com>
Co-authored-by: TaoYu Chen <ctynb@qq.com>
Co-authored-by: taoxudonghaha <justsheldon@163.com>
Co-authored-by: Shanshan Shen <467638484@qq.com>
Co-authored-by: leo-pony <nengjunma@outlook.com>
Co-authored-by: wangxiyuan <wangxiyuan1007@gmail.com>
Co-authored-by: Mengqing Cao <cmq0113@163.com>
63 lines
2.3 KiB
Python
63 lines
2.3 KiB
Python
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
# Copyright 2023 The vLLM team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
import torch
|
|
import torch.distributed
|
|
import torch.distributed as dist
|
|
import torch_npu
|
|
|
|
# Secondary NPU stream used to overlap all-to-all communication with compute.
# Lazily created on first use inside async_all_to_all() (see the `event`
# branch there); remains None until an event-synchronized call is made.
COMM_STREAM = None
|
|
|
|
|
|
def async_all_to_all(input_,
                     output_split_sizes,
                     input_split_sizes,
                     group,
                     event=None):
    """Launch an asynchronous all-to-all(-v) exchange of ``input_``.

    Args:
        input_: Tensor to exchange across ranks. Rows (dim 0) are split
            according to ``input_split_sizes``.
        output_split_sizes: Per-rank row counts to receive, or ``None`` for
            an equal split (plain all-to-all with output shaped like input).
        input_split_sizes: Per-rank row counts to send (``None`` for equal
            split, mirroring ``dist.all_to_all_single`` semantics).
        group: Process group to communicate over.
        event: Optional event. When truthy, the collective is issued on a
            dedicated communication stream (lazily created module-level
            ``COMM_STREAM``) which first waits on ``event``, allowing the
            exchange to overlap with work on the default stream.

    Returns:
        Tuple ``(input_, a2a_out, handle)`` where ``a2a_out`` is the output
        tensor and ``handle`` is the async work handle returned by
        ``dist.all_to_all_single(..., async_op=True)`` (call ``.wait()``
        before reading ``a2a_out``).
    """
    if output_split_sizes is None:
        # Equal split (all2all): output has the same shape as the input.
        a2a_out = torch.empty_like(input_)
    else:
        # Unequal split (all2all-v): dim 0 is the total number of rows
        # received from all ranks; remaining dims mirror the input.
        a2a_out = input_.new_empty(
            size=[sum(output_split_sizes)] + list(input_.size()[1:]),
            dtype=input_.dtype,
            device=torch.npu.current_device(),
        )

    def _launch_all_to_all():
        # Single point of launch for both branches — the argument list is
        # identical whether or not a dedicated stream is used.
        return dist.all_to_all_single(
            a2a_out,
            input_.contiguous(),
            output_split_sizes=output_split_sizes,
            input_split_sizes=input_split_sizes,
            group=group,
            async_op=True)

    if event:
        # Multi-stream path: make the (lazily created) communication stream
        # wait on `event`, then issue the collective on that stream so it
        # overlaps with compute on the default stream.
        global COMM_STREAM
        if COMM_STREAM is None:
            COMM_STREAM = torch_npu.npu.Stream(
                device=torch.npu.current_device())
        with torch_npu.npu.stream(COMM_STREAM):
            event.wait()
            handle = _launch_all_to_all()
    else:
        handle = _launch_all_to_all()
    return input_, a2a_out, handle
|