add dispatch_gmm_combine kernel (#3532)
### What this PR does / why we need it?
This PR introduces the Ascend implementation of the `dispatch_ffn_combine` kernel and wires it into the vLLM-Ascend runtime, together with follow-up fixes to ensure the kernel builds and runs correctly in CI.

- Add the full host and device implementation of the `dispatch_ffn_combine` kernel under `csrc/dispatch_ffn_combine`, including tiling logic, MoE routing helpers, and kernel utilities for quantized FFN dispatch.
- Integrate the new kernel with the PyTorch binding (`csrc/torch_binding.cpp`, `csrc/torch_binding_meta.cpp`) and the Ascend runtime (`vllm_ascend/ascend_forward_context.py`, `vllm_ascend/worker/model_runner_v1.py`).
- Extend fused MoE communication and token dispatch support in `vllm_ascend/ops/fused_moe`, adding the methods and utilities needed by the new dispatch path.
- Update the quantization logic in `vllm_ascend/quantization/w8a8_dynamic.py` to support the new FFN dispatch flow.
- Fix kernel build issues by adjusting `csrc/build_aclnn.sh`, the CMake configuration, and include/namespace usage in the new kernel files.
- Add an end-to-end nightly test, `tests/e2e/nightly/ops/test_dispatch_ffn_combine.py`, and helper utilities in `vllm_ascend/utils.py` to validate the new kernel.

### Does this PR introduce _any_ user-facing change?

### How was this patch tested?
- vLLM version: v0.12.0
- vLLM main: https://github.com/vllm-project/vllm/commit/v0.12.0

---------

Signed-off-by: mojave2 <chenchen145@huawei.com>
Co-authored-by: wangxiyuan <wangxiyuan1007@gmail.com>
```diff
@@ -911,6 +911,9 @@ def get_hccl_config_for_pg_options(group_name: str) -> Optional[dict]:
         "dp": {
             "hccl_buffer_size": calculate_dp_buffer_size()
         },
+        "ep": {
+            "hccl_buffer_size": calculate_ep_buffer_size()
+        },
     }
     return hccl_config_map.get(group_name, get_default_buffer_config())
```
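The first hunk extends the per-process-group HCCL config map with an `"ep"` entry, falling back to a default for any other group. A minimal standalone sketch of the lookup behavior (the sizes and the default value here are illustrative placeholders, not the real vLLM-Ascend values):

```python
from typing import Optional

# Illustrative stand-in for get_default_buffer_config(); size is a placeholder MB value.
_DEFAULT_BUFFER_CONFIG = {"hccl_buffer_size": 200}

def get_buffer_config(group_name: str) -> Optional[dict]:
    hccl_config_map = {
        "dp": {"hccl_buffer_size": 256},  # would call calculate_dp_buffer_size()
        "ep": {"hccl_buffer_size": 896},  # would call calculate_ep_buffer_size()
    }
    # Any other process group falls back to the default buffer config.
    return hccl_config_map.get(group_name, _DEFAULT_BUFFER_CONFIG)

assert get_buffer_config("ep") == {"hccl_buffer_size": 896}
assert get_buffer_config("tp") == _DEFAULT_BUFFER_CONFIG  # unknown group -> default
```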
```diff
@@ -932,6 +935,30 @@ def calculate_dp_buffer_size() -> int:
     return max(dp_buffer_size, _MIN_DP_BUFFER_SIZE)
 
 
+def calculate_ep_buffer_size() -> int:
+    """
+    formula of ep buffer size:
+    batch_size * hidden_size * topk * 4
+    """
+    ep_buffer_size = _DEFAULT_BUFFER_SIZE
+    try:
+        from vllm.config import get_current_vllm_config
+        vllm_config = get_current_vllm_config()
+        hf_config = vllm_config.model_config.hf_config
+
+        hidden_size = hf_config.hidden_size
+        topk = getattr(hf_config, "num_experts_per_token", 1)
+        batch_size = vllm_config.scheduler_config.max_num_batched_tokens
+        int8_size = torch.iinfo(torch.int8).bits // 8
+        bf16_size = torch.finfo(torch.bfloat16).bits // 8
+        ep_buffer_size = math.ceil(
+            (batch_size * hidden_size * topk *
+             (int8_size * 2 + bf16_size)) / (1024 * 1024))
+    except Exception:
+        pass
+    return max(ep_buffer_size, _DEFAULT_BUFFER_SIZE)
+
+
 # Currently, when in A2, setting the environment variables HCCL_INTRA_PCIE_ENABLE=1
 # and HCCL_INTRA_ROCE_ENABLE=0 can reduce cross-machine communication traffic and
 # significantly improve communication performance of MC2 ops dispatch/combine.
```
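The trailing context comment describes an A2-specific tuning knob. A hedged sketch of applying it: HCCL reads these variables at initialization, so they must be set before any process groups are created in the process (exporting them in the launch shell is equivalent):

```python
import os

# Per the comment above: on A2 hardware, these HCCL settings can reduce
# cross-machine traffic and speed up MC2 dispatch/combine ops. Set them
# before HCCL / process-group initialization.
os.environ["HCCL_INTRA_PCIE_ENABLE"] = "1"
os.environ["HCCL_INTRA_ROCE_ENABLE"] = "0"
```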