Pre-issued exponential distribution operator.
Result:
A single inference saves 200–300 microseconds.
Before:
<img width="2257" height="1058" alt="2"
src="https://github.com/user-attachments/assets/c1da19e2-a439-42cb-9d7c-c0218e61fd4c"
/>
After:
<img width="2211" height="342" alt="image"
src="https://github.com/user-attachments/assets/03c84292-c802-4755-949c-4266a9a72fc0"
/>
- vLLM version: v0.12.0
- vLLM main:
ad32e3e19c
---------
Signed-off-by: weijinqian_v1 <weijinqian@huawei.com>
Co-authored-by: weijinqian_v1 <weijinqian@huawei.com>
36 lines
1.2 KiB
Python
36 lines
1.2 KiB
Python
from unittest import mock
|
|
|
|
import torch
|
|
|
|
from tests.ut.base import TestBase
|
|
from vllm_ascend.sample.sampler import AscendSampler, AscendTopKTopPSampler
|
|
|
|
|
|
class TestAscendSampler(TestBase):
    """Unit tests for constructing :class:`AscendSampler`."""

    def test_init_with_raw_logprobs(self):
        """Building with ``logprobs_mode="raw_logprobs"`` stores the mode and
        wires in an Ascend-specific top-k/top-p sampler."""
        sampler = AscendSampler(logprobs_mode="raw_logprobs")

        # The requested mode must be recorded verbatim on the instance.
        self.assertEqual(sampler.logprobs_mode, "raw_logprobs")
        # The constructor is expected to attach the NPU-optimized
        # top-k/top-p sampler as an attribute.
        self.assertTrue(hasattr(sampler, 'topk_topp_sampler'))
        self.assertIsInstance(sampler.topk_topp_sampler, AscendTopKTopPSampler)
|
class TestAscendTopKTopPSampler(TestBase):
    """Unit tests for the NPU-accelerated top-k/top-p sampling path."""

    # Decorators apply bottom-up, so the innermost patch
    # (torch_npu.npu_top_k_top_p) arrives as the first mock argument.
    @mock.patch("vllm_ascend.sample.sampler.random_sample")
    @mock.patch("torch_npu.npu_top_k_top_p")
    def test_npu_topk_topp_called_when_optimized(self, mock_npu_op,
                                                 mock_random_sample):
        """``forward_native`` must delegate the top-k/top-p filtering to the
        fused ``torch_npu.npu_top_k_top_p`` operator exactly once, passing
        the logits followed by p then k."""
        # Stub out the NPU op and the downstream sampling step so the test
        # exercises only the dispatch logic.
        mock_npu_op.return_value = torch.randn(1, 3)
        mock_random_sample.return_value = torch.randn(3)
        sampler = AscendTopKTopPSampler()

        logits = torch.tensor([[1.0, 2.0, 3.0]])
        k = torch.tensor([2])
        p = torch.tensor([0.9])
        seeded_generator = torch.Generator()
        seeded_generator.manual_seed(42)
        generators = {0: seeded_generator}

        sampler.forward_native(logits, generators, k, p)
        # NOTE(review): argument order here is (logits, p, k) — presumably
        # matching the fused op's signature; verify against torch_npu docs.
        mock_npu_op.assert_called_once_with(logits, p, k)