From 2fda60464c287fe456b4a2f27e63996edc65dd40 Mon Sep 17 00:00:00 2001
From: Pr0Wh1teGivee <132029610+Pr0Wh1teGivee@users.noreply.github.com>
Date: Wed, 25 Jun 2025 20:59:06 +0800
Subject: [PATCH] [Perf] Use fused ops npu_top_k_top_p (#1308)

### What this PR does / why we need it?
Use the fused op `torch_npu.npu_top_k_top_p(logits, p, k)` when `p` and `k`
are not None; otherwise fall back to the original implementation. The
replacement takes place automatically when
`VLLM_ASCEND_ENABLE_TOPK_OPTIMIZE=1`.

This patch uses `npu_top_k_top_p`, which requires
torch_npu >= 2.5.1.post1.dev20250619.

### Does this PR introduce _any_ user-facing change?
No

### How was this patch tested?
Tested with DeepSeek R1; UT passed.

Signed-off-by: Pr0Wh1teGivee
---
 .../worker/patch_common/test_patch_sampler.py | 28 +++++++++++++++++++
 .../worker/patch_common/patch_sampler.py      |  7 ++++-
 2 files changed, 34 insertions(+), 1 deletion(-)
 create mode 100644 tests/ut/worker/patch_common/test_patch_sampler.py

diff --git a/tests/ut/worker/patch_common/test_patch_sampler.py b/tests/ut/worker/patch_common/test_patch_sampler.py
new file mode 100644
index 0000000..a062a97
--- /dev/null
+++ b/tests/ut/worker/patch_common/test_patch_sampler.py
@@ -0,0 +1,28 @@
+import importlib
+import os
+import unittest
+from unittest import mock
+
+import torch
+from vllm.v1.sample.ops import topk_topp_sampler
+
+
+class TestTopKTopPSamplerOptimize(unittest.TestCase):
+
+    @mock.patch.dict(os.environ, {"VLLM_ASCEND_ENABLE_TOPK_OPTIMIZE": "1"})
+    @mock.patch("torch_npu.npu_top_k_top_p")
+    def test_npu_topk_topp_called_when_optimized(self, mock_npu_op):
+        import vllm_ascend.patch.worker.patch_common.patch_sampler
+        importlib.reload(vllm_ascend.patch.worker.patch_common.patch_sampler)
+
+        mock_npu_op.return_value = torch.randn(1, 3)
+        sampler = topk_topp_sampler.TopKTopPSampler()
+
+        logits = torch.tensor([[1.0, 2.0, 3.0]])
+        k = torch.tensor([2])
+        p = torch.tensor([0.9])
+        generators = {0: torch.Generator()}
+        generators[0].manual_seed(42)
+
+        sampler.forward_native(logits, generators, k, p)
+        mock_npu_op.assert_called_once_with(logits, p, k)
diff --git a/vllm_ascend/patch/worker/patch_common/patch_sampler.py b/vllm_ascend/patch/worker/patch_common/patch_sampler.py
index a6fbfbc..69fcd69 100644
--- a/vllm_ascend/patch/worker/patch_common/patch_sampler.py
+++ b/vllm_ascend/patch/worker/patch_common/patch_sampler.py
@@ -19,6 +19,7 @@
 from typing import Optional
 
 import torch
+import torch_npu
 from vllm.v1.sample.ops.topk_topp_sampler import TopKTopPSampler, random_sample
 from vllm.v1.sample.sampler import Sampler
 
@@ -48,9 +49,13 @@ def apply_min_p(
 
 
 def _apply_top_k_top_p(
     logits: torch.Tensor,
-    p: torch.Tensor,
     k: torch.Tensor,
+    p: torch.Tensor,
 ) -> torch.Tensor:
+    if p is not None and k is not None:
+        # npu_top_k_top_p's parameter order is (logits, p, k), not (logits, k, p).
+        return torch_npu.npu_top_k_top_p(logits, p, k)
+
     probs = logits.softmax(dim=-1)
     probs_sort, _ = probs.sort(dim=-1, descending=False)
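
For readers unfamiliar with how `VLLM_ASCEND_ENABLE_TOPK_OPTIMIZE` activates the fused path: the gating code sits outside the hunks above, so the sketch below is an assumption about the wiring, not the patch's verbatim code (the name `_topk_topp_forward_native` and the exact environment check are illustrative). It shows the monkey-patch pattern the unit test exercises: with the flag set, `TopKTopPSampler.forward_native` routes through the rewritten `_apply_top_k_top_p`, which dispatches to `torch_npu.npu_top_k_top_p`.

```python
# Sketch only: assumed wiring for the VLLM_ASCEND_ENABLE_TOPK_OPTIMIZE gate.
# The real gating logic in patch_sampler.py is not shown in the hunks above.
import os

import torch
from vllm.v1.sample.ops.topk_topp_sampler import (TopKTopPSampler,
                                                  random_sample)

from vllm_ascend.patch.worker.patch_common.patch_sampler import \
    _apply_top_k_top_p  # the function this patch rewrites


def _topk_topp_forward_native(self, logits, generators, k, p):
    # Same flow as vLLM's native sampler, except _apply_top_k_top_p now
    # dispatches to the fused torch_npu.npu_top_k_top_p when both p and k
    # are provided.
    logits = _apply_top_k_top_p(logits, k, p)
    probs = logits.softmax(dim=-1, dtype=torch.float32)
    return random_sample(probs, generators)


if os.environ.get("VLLM_ASCEND_ENABLE_TOPK_OPTIMIZE", "0") == "1":
    TopKTopPSampler.forward_native = _topk_topp_forward_native
```

The unit test above asserts exactly this routing: with the env var set and `torch_npu.npu_top_k_top_p` mocked, `forward_native(logits, generators, k, p)` must invoke the fused op exactly once, with the `(logits, p, k)` argument order the comment in the second hunk calls out.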