### What this PR does / why we need it?
Breaking upstream changes:
- https://github.com/vllm-project/vllm/pull/34102
The `disable_full` param was replaced with the `valid_modes`/`invalid_modes` API.
- https://github.com/vllm-project/vllm/pull/35503
A float `compilation_time` must now be returned.
- https://github.com/vllm-project/vllm/pull/35564
A new `sequence_lengths` param was added.
- https://github.com/vllm-project/vllm/pull/33807
A check was added (`if runner_backend != "auto"`).
- https://github.com/vllm-project/vllm/pull/34861
`BaseDeviceCommunicator` now accesses PyTorch's internal `pg_map` to check process group state.
- https://github.com/vllm-project/vllm/pull/35274
**Important change:**
- https://github.com/vllm-project/vllm/pull/28672
`matcher_utils` accesses `torch.ops._C.*` directly at import time. On Ascend, some of these ops are not registered, so the lookup raises `AttributeError` and e2e initialization fails.
https://github.com/vllm-project/vllm-ascend/actions/runs/22607260487/job/65502047131#step:10:2323
https://github.com/vllm-project/vllm/blob/main/vllm/compilation/passes/fusion/matcher_utils.py#L29
This PR adds temporary compatibility placeholders (`rms_norm`, `fused_add_rms_norm`, `rotate_embedding`, static/dynamic fp8 quant, `silu_and_mul`) in `vllm_ascend/patch/platform/patch_fusion_matcher_compat_ops.py` so that the import phase no longer crashes. A proper upstream fix will be pursued later.
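For illustration, a minimal sketch of the placeholder idea follows. It assumes the placeholders only need to make `torch.ops._C.<name>` resolve at import time; the op schemas, the `register_placeholder_ops` helper, and the dispatch key used here are hypothetical and will differ from the actual patch file.

```python
# Illustrative sketch only; the real patch lives in
# vllm_ascend/patch/platform/patch_fusion_matcher_compat_ops.py and may differ.
import torch
from torch.library import Library

# Hypothetical schemas: they only have to exist so that
# `torch.ops._C.<name>` no longer raises AttributeError at import time.
_PLACEHOLDER_SCHEMAS = {
    "rms_norm":
    "rms_norm(Tensor input, Tensor weight, float epsilon) -> Tensor",
    "silu_and_mul":
    "silu_and_mul(Tensor input) -> Tensor",
}

# FRAGMENT extends the existing `_C` op namespace instead of owning it.
_compat_lib = Library("_C", "FRAGMENT")


def register_placeholder_ops() -> None:
    for name, schema in _PLACEHOLDER_SCHEMAS.items():
        if hasattr(torch.ops._C, name):
            continue  # a real kernel is already registered; leave it alone
        _compat_lib.define(schema)
        # The body is never expected to run on the Ascend path; it exists
        # only so pattern-matcher imports can resolve the op.
        _compat_lib.impl(name, lambda *args, **kwargs: args[0].clone(),
                         "CompositeExplicitAutograd")
```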
### Does this PR introduce _any_ user-facing change?
### How was this patch tested?
- vLLM version: v0.16.0
- vLLM main: 15d76f74e2
---------
Signed-off-by: MrZ20 <2609716663@qq.com>
Signed-off-by: gcanlin <canlinguosdu@gmail.com>
Co-authored-by: Meihan-chen <jcccx.cmh@gmail.com>
Co-authored-by: Claude Code <noreply@anthropic.com>
Co-authored-by: gcanlin <canlinguosdu@gmail.com>
import unittest
from unittest.mock import MagicMock, patch

import torch
import torch.distributed as dist

from vllm_ascend.distributed.device_communicators.npu_communicator import \
    NPUCommunicator


class TestNPUCommunicator(unittest.TestCase):

    @patch("vllm.config.get_current_vllm_config", return_value=None)
    @patch("torch.npu.current_device", return_value=MagicMock())
    @patch("torch.npu.set_device", return_value=MagicMock())
    @patch("torch.distributed.get_process_group_ranks",
           return_value={
               0: 0,
               1: 1
           })
    @patch("torch.distributed.get_group_rank", return_value={0: 0, 1: 1})
    @patch("torch.distributed.is_initialized", return_value=True)
    @patch("torch.distributed.get_rank", return_value=1)
    @patch("torch.distributed.is_initialized", return_value=True)
    @patch("torch.distributed.get_backend", return_value="hccl")
    @patch("torch.distributed.get_rank", return_value=1)
    @patch("torch.distributed.get_world_size", return_value=2)
    @patch("torch.distributed.get_process_group_ranks", return_value=[0, 1])
    @patch("torch.npu.device")
    def test_all_to_all_with_sizes(self, *_):

        # Stub the collective so no real process group is needed: it simply
        # writes fixed tensors into the output list.
        def patched_all_to_all(output_tensor_list,
                               input_tensor_list,
                               group=None,
                               async_op=False):
            output_tensor_list[:] = [
                torch.tensor([10, 20]),
                torch.tensor([50, 60])
            ]

        torch.distributed.all_to_all = patched_all_to_all

        scatter_sizes = [2, 2]
        gather_sizes = [2, 2]
        input_ = torch.tensor([10, 20, 30, 40])

        # The base communicator now inspects PyTorch's internal pg_map, so
        # the WORLD group must be present while the communicator is built.
        with patch.dict(dist.distributed_c10d._world.pg_map,
                        {dist.group.WORLD: MagicMock()},
                        clear=False):
            comm = NPUCommunicator(cpu_group=dist.group.WORLD)

            output = comm.all_to_all(input_,
                                     scatter_sizes=scatter_sizes,
                                     gather_sizes=gather_sizes)

        assert output.tolist() == [10, 20, 50, 60]

    @patch("vllm.config.get_current_vllm_config", return_value=None)
    @patch("torch.npu.current_device", return_value=MagicMock())
    @patch("torch.npu.set_device", return_value=MagicMock())
    @patch("torch.distributed.get_process_group_ranks",
           return_value={
               0: 0,
               1: 1
           })
    @patch("torch.distributed.get_group_rank", return_value={0: 0, 1: 1})
    @patch("torch.distributed.is_initialized", return_value=True)
    @patch("torch.distributed.get_rank", return_value=1)
    @patch("torch.distributed.is_initialized", return_value=True)
    @patch("torch.distributed.get_backend", return_value="hccl")
    @patch("torch.distributed.get_rank", return_value=1)
    @patch("torch.distributed.get_world_size", return_value=2)
    @patch("torch.distributed.get_process_group_ranks", return_value=[0, 1])
    @patch("torch.npu.device")
    def test_all_to_all_without_sizes(self, *_):

        def patched_all_to_all(output_tensor_list,
                               input_tensor_list,
                               group=None,
                               async_op=False):
            output_tensor_list[:] = [
                torch.tensor([[10, 20]]),
                torch.tensor([[50, 60]])
            ]

        torch.distributed.all_to_all = patched_all_to_all

        input_ = torch.tensor([[10, 20], [30, 40]])

        with patch.dict(dist.distributed_c10d._world.pg_map,
                        {dist.group.WORLD: MagicMock()},
                        clear=False):
            comm = NPUCommunicator(cpu_group=dist.group.WORLD)
            output = comm.all_to_all(input_, scatter_dim=0, gather_dim=0)

        assert output.tolist() == [[10, 20], [50, 60]]