[Feature] Integrate quick allreduce and select the best allreduce implementation (#6619)

Signed-off-by: Haoyang Li <Haoyang.Li@amd.com>
Co-authored-by: ilmarkov <imarkov@redhat.com>
This commit was authored by li haoyang on 2025-07-25 11:48:42 +08:00 and committed via GitHub.
parent f4674df646
commit 28d4d47280
14 changed files with 2031 additions and 109 deletions

View File

@@ -54,6 +54,25 @@ TORCH_LIBRARY_EXPAND(sgl_kernel, m) {
// Register get_meta_buffer_ipc_handle with a CPU-dispatched implementation.
m.def("get_meta_buffer_ipc_handle", &get_meta_buffer_ipc_handle);
m.impl("get_meta_buffer_ipc_handle", torch::kCPU, &get_meta_buffer_ipc_handle);
// quick allreduce
// NOTE(review): the "qr_*" ops below are only registered on ROCm builds;
// on CUDA-only builds these ops will not exist in the sgl_kernel namespace.
#ifdef USE_ROCM
// Main collective: schema takes an opaque context handle `fa`, input/output
// tensors, a quantization level, and a bf16->half cast flag; returns nothing
// (the result is written into `out`). Dispatched on the CUDA/HIP key.
// NOTE(review): semantics of `quant_level` values are not visible here —
// confirm against the qr_all_reduce kernel implementation.
m.def(
    "qr_all_reduce(int fa, Tensor inp, Tensor out, int quant_level, bool "
    "cast_bf2half) -> ()");
m.impl("qr_all_reduce", torch::kCUDA, &qr_all_reduce);
// Lifecycle helpers registered without an explicit schema string: PyTorch
// infers the schema from the C++ function signatures.
m.def("init_custom_qr", &init_custom_qr);
m.def("qr_destroy", &qr_destroy);
// IPC handle exchange: qr_get_handle produces this rank's handle;
// qr_open_handles takes the peers' handles. The `(b!)` annotation marks the
// handles tensor list as mutated in place; the op itself runs on CPU.
m.def("qr_get_handle", &qr_get_handle);
m.def("qr_open_handles(int _fa, Tensor[](b!) handles) -> ()");
m.impl("qr_open_handles", torch::kCPU, &qr_open_handles);
// Max input size in bytes
m.def("qr_max_size", &qr_max_size);
#endif
/*
* From csrc/moe
*/