Update torch-npu version to 2.7.1 (#3896)

### What this PR does / why we need it?
Upgrade torch-npu to the official release version 2.7.1


- vLLM version: v0.11.0
- vLLM main: 83f478bb19

---------

Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
This commit is contained in:
wangxiyuan
2025-10-31 17:16:31 +08:00
committed by GitHub
parent 5f6d1b3323
commit fcc9a0eaeb
15 changed files with 83 additions and 168 deletions

View File

@@ -23,9 +23,9 @@ class TestAscendW8A8FusedMoEMethod(TestBase):
@patch("torch_npu.npu_swiglu")
@patch("torch_npu.npu_dynamic_quant")
@patch("torch_npu.npu_moe_finalize_routing")
@patch("torch_npu.npu_moe_init_routing")
@patch("torch_npu.npu_moe_init_routing_quant")
def test_torchair_fused_experts_with_all2all(
self, mock_moe_init_routing, mock_moe_finalize_routing,
self, mock_npu_moe_init_routing_quant, mock_moe_finalize_routing,
mock_dynamic_quant, mock_swiglu, mock_grouped_matmul,
mock_moe_re_routing, mock_all_to_all_single):
@@ -38,11 +38,10 @@ class TestAscendW8A8FusedMoEMethod(TestBase):
placeholder_ones = torch.ones(self.num_tokens, dtype=torch.int32)
mock_all_to_all_single.side_effect = lambda output, input, *args, **kwargs: output.copy_(
input)
mock_moe_init_routing.return_value = (
placeholder_int8,
placeholder_ones,
placeholder_ones,
)
mock_npu_moe_init_routing_quant.return_value = (
placeholder_int8, placeholder_ones, placeholder_ones,
torch.bincount(placeholder_ones, minlength=len(expert_map)),
torch.randn(self.num_tokens))
mock_moe_re_routing.return_value = (placeholder_int8, self.placeholder,
torch.randint(0,
100,