### What this PR does / why we need it?
**Scope of Changes**:
| File Path |
| :--- |
|`vllm_ascend/ops/layer_shard_linear.py`|
|`vllm_ascend/ops/linear.py`|
|`vllm_ascend/ops/linear_op.py`|
|`vllm_ascend/worker/worker.py`|
|`vllm_ascend/patch/worker/patch_bert.py`|
|`vllm_ascend/patch/worker/patch_deepseek.py`|
|`vllm_ascend/patch/worker/patch_distributed.py`|
|`vllm_ascend/patch/worker/patch_module.py`|
|`vllm_ascend/patch/worker/patch_multimodal_merge.py`|
|`vllm_ascend/patch/worker/patch_qwen3_next.py`|
|`vllm_ascend/patch/worker/patch_qwen3_next_mtp.py`|
|`vllm_ascend/patch/worker/patch_rejection_sampler.py`|
|`vllm_ascend/patch/worker/patch_rope.py`|
|`vllm_ascend/patch/worker/patch_triton.py`|
|`vllm_ascend/patch/worker/patch_unquantized_gemm.py`|
|`vllm_ascend/patch/worker/patch_v2_egale.py`|
|`vllm_ascend/worker/npu_input_batch.py`|
|`vllm_ascend/worker/v2/aclgraph_utils.py`|
|`vllm_ascend/worker/v2/attn_utils.py`|
|`vllm_ascend/worker/v2/model_runner.py`|
|`vllm_ascend/worker/v2/sample/gumbel.py`|
|`vllm_ascend/worker/v2/sample/penalties.py`|
|`vllm_ascend/worker/v2/sample/sampler.py`|
|`vllm_ascend/worker/v2/spec_decode/__init__.py`|
|`vllm_ascend/worker/v2/spec_decode/eagle.py`|
|`vllm_ascend/worker/v2/states.py`|
### Does this PR introduce _any_ user-facing change?
### How was this patch tested?
- vLLM version: v0.14.0
- vLLM main: d68209402d
Signed-off-by: MrZ20 <2609716663@qq.com>
Signed-off-by: SILONG ZENG <2609716663@qq.com>
Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
Co-authored-by: wangxiyuan <wangxiyuan1007@gmail.com>
```diff
@@ -1,5 +1,3 @@
-from typing import Optional
-
 import torch
 import torch.distributed as dist
 from vllm.distributed.parallel_state import GroupCoordinator, get_dp_group
@@ -8,7 +6,9 @@ from vllm.forward_context import get_forward_context
 from vllm_ascend.distributed.parallel_state import get_fc3_quant_x_group
 
 
-def fc3_all_gather_and_maybe_unpad_impl(x: torch.Tensor, ) -> torch.Tensor:
+def fc3_all_gather_and_maybe_unpad_impl(
+    x: torch.Tensor,
+) -> torch.Tensor:
     try:
         forward_context = get_forward_context()
     except AssertionError:
@@ -22,34 +22,26 @@ def fc3_all_gather_and_maybe_unpad_impl(x: torch.Tensor, ) -> torch.Tensor:
     else:
         # unpad
         num_tokens_across_dp_cpu = dp_metadata.num_tokens_across_dp_cpu
-        result = torch.empty((num_tokens_across_dp_cpu.sum(), *x.shape[1:]),
-                             device=x.device,
-                             dtype=x.dtype)
+        result = torch.empty((num_tokens_across_dp_cpu.sum(), *x.shape[1:]), device=x.device, dtype=x.dtype)
         dp_size = get_dp_group().world_size
         x = x.view(dp_size, forward_context.padded_length, *x.shape[1:])
         offset = 0
         for idx in range(dp_size):
             num_tokens_dp = num_tokens_across_dp_cpu[idx]
-            result[offset:offset + num_tokens_dp] = x[idx, :num_tokens_dp]
+            result[offset : offset + num_tokens_dp] = x[idx, :num_tokens_dp]
             offset += num_tokens_dp
         x = result
 
     return x
 
 
-def all_gather_async(input: torch.Tensor,
-                     group: GroupCoordinator,
-                     output: Optional[torch.Tensor] = None,
-                     async_op: bool = True):
+def all_gather_async(
+    input: torch.Tensor, group: GroupCoordinator, output: torch.Tensor | None = None, async_op: bool = True
+):
     if group.world_size == 1:
         return input, None
     if output is None:
         input_size = input.size()
-        output_size = (input_size[0] * group.world_size, ) + input_size[1:]
-        output = torch.empty(output_size,
-                             dtype=input.dtype,
-                             device=input.device)
-    return output, dist.all_gather_into_tensor(output,
-                                               input,
-                                               group=group.device_group,
-                                               async_op=async_op)
+        output_size = (input_size[0] * group.world_size,) + input_size[1:]
+        output = torch.empty(output_size, dtype=input.dtype, device=input.device)
+    return output, dist.all_gather_into_tensor(output, input, group=group.device_group, async_op=async_op)
```
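For readers skimming the hunk above, here is a small self-contained sketch of the unpad step that `fc3_all_gather_and_maybe_unpad_impl` performs after the gather: the gathered tensor holds `padded_length` rows per DP rank, and only the first `num_tokens` rows of each rank are copied into the result. The shapes and counts below are made up for illustration; only the slicing pattern mirrors the diff.

```python
import torch

# Hypothetical per-rank token counts and padding: 3 DP ranks, each padded to 4 rows.
num_tokens_across_dp = torch.tensor([2, 4, 1])  # real token counts per DP rank
padded_length = 4
hidden = 8
dp_size = num_tokens_across_dp.numel()

# Stand-in for the all-gather output: dp_size * padded_length rows, flattened.
gathered = torch.randn(dp_size * padded_length, hidden)

# Unpad: view as (dp_size, padded_length, hidden) and keep only the real rows per rank.
result = torch.empty((int(num_tokens_across_dp.sum()), hidden), dtype=gathered.dtype)
x = gathered.view(dp_size, padded_length, hidden)
offset = 0
for idx in range(dp_size):
    n = int(num_tokens_across_dp[idx])
    result[offset : offset + n] = x[idx, :n]
    offset += n

assert result.shape[0] == int(num_tokens_across_dp.sum())  # 7 rows in this example
```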
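Similarly, a minimal usage sketch of the `(output, handle)` pair that `all_gather_async` returns: start the collective without blocking, run work that does not depend on the gathered buffer, and wait on the handle only when the result is needed. The sketch uses plain `torch.distributed` (gloo backend, launched with `torchrun`) instead of vLLM's `GroupCoordinator`, so it illustrates the overlap pattern rather than the helper itself.

```python
# Run with e.g.: torchrun --nproc_per_node=2 overlap_example.py
import torch
import torch.distributed as dist


def main() -> None:
    dist.init_process_group(backend="gloo")
    rank, world_size = dist.get_rank(), dist.get_world_size()

    local = torch.full((4, 8), float(rank))
    chunks = [torch.empty_like(local) for _ in range(world_size)]

    # Start the all-gather without blocking and keep the work handle,
    # mirroring the (output, handle) pair returned by all_gather_async.
    handle = dist.all_gather(chunks, local, async_op=True)

    # Compute that does not depend on the gathered result can run here.
    independent = (local * 2.0).sum()

    handle.wait()                        # block only once the gathered data is needed
    gathered = torch.cat(chunks, dim=0)  # shape (world_size * 4, 8)
    print(rank, gathered.shape, independent.item())

    dist.destroy_process_group()


if __name__ == "__main__":
    main()
```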