import os
from unittest.mock import MagicMock, patch

import torch

from tests.ut.base import TestBase
from vllm_ascend.quantization.w8a8 import (AscendW8A8LinearMethod,
                                           quant_per_tensor)
from vllm_ascend.utils import AscendDeviceType


class TestQuantPerTensor(TestBase):
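    """Tests for the quant_per_tensor helper."""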

    @patch("torch_npu.npu_quantize")
    def test_quant_per_tensor(self, mock_npu_quantize):
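        # torch_npu.npu_quantize only exists on NPU builds, so the op is
        # mocked out and the test checks just the arguments that
        # quant_per_tensor forwards to it.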
        in_tensor = torch.randn(32, 128)
        input_scale = torch.tensor(0.1)
        input_offset = torch.tensor(0)

        expected_output = torch.randint(-128,
                                        127, (32, 128),
                                        dtype=torch.int8)
        mock_npu_quantize.return_value = expected_output

        output = quant_per_tensor(in_tensor, input_scale, input_offset)

        mock_npu_quantize.assert_called_once_with(
            in_tensor,
            input_scale,
            input_offset,
            torch.qint8,
            -1,
            False,
        )

        self.assertTrue(torch.equal(output, expected_output))


class TestAscendW8A8LinearMethod(TestBase):
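    """Tests for AscendW8A8LinearMethod parameter creation and apply."""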

    def setUp(self):
        self.method = AscendW8A8LinearMethod()

    def test_get_weight(self):
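        # get_weight(10, 20) should produce an int8 weight laid out as
        # (output_size, input_size).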
        weight = self.method.get_weight(10, 20)
        self.assertEqual(weight['weight'].dtype, torch.int8)
        self.assertEqual(weight['weight'].shape, (20, 10))

    def test_get_pertensor_param(self):
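        # Per-tensor quantization uses a single scale/offset pair: the scale
        # in the activation dtype, the offset as int8.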
        params = self.method.get_pertensor_param(torch.bfloat16)
        self.assertEqual(params['input_scale'].dtype, torch.bfloat16)
        self.assertEqual(params['input_offset'].dtype, torch.int8)
        self.assertEqual(params['input_scale'].shape, (1, ))
        self.assertEqual(params['input_offset'].shape, (1, ))

    def test_get_perchannel_param(self):
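        # Per-channel parameters are sized by the channel dimension (10 here):
        # 1-D quant_bias/deq_scale plus (10, 1) weight scale/offset.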
        params = self.method.get_perchannel_param(10, torch.bfloat16)

        self.assertEqual(params['quant_bias'].dtype, torch.int32)
        self.assertEqual(params['deq_scale'].dtype, torch.float32)
        self.assertEqual(params['weight_scale'].dtype, torch.bfloat16)
        self.assertEqual(params['weight_offset'].dtype, torch.bfloat16)
        self.assertEqual(params['quant_bias'].shape, (10, ))
        self.assertEqual(params['deq_scale'].shape, (10, ))
        self.assertEqual(params['weight_scale'].shape, (10, 1))
        self.assertEqual(params['weight_offset'].shape, (10, 1))

    @patch("vllm_ascend.quantization.w8a8.get_weight_prefetch_method")
    @patch("torch.ops.vllm.quantize")
    @patch("torch_npu.npu_quant_matmul")
    def test_apply_with_x_not_int8(self, mock_npu_quant_matmul, mock_quantize,
                                   mock_get_weight_prefetch_method):
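        # A non-int8 input should be quantized first (torch.ops.vllm.quantize
        # is mocked to return int8 data) before npu_quant_matmul runs; weight
        # prefetching is stubbed out as well.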
        layer = MagicMock()
        layer.aclnn_input_scale = 0.1
        layer.aclnn_input_offset = 0.2
        layer.weight = torch.randn(128, 256)
        layer.deq_scale = 0.3

        mock_get_weight_prefetch_method.return_value = MagicMock()

        x = torch.randn(32, 128)
        bias = torch.randn(256)
        mock_quantize.return_value = torch.randint(-128,
                                                   127,
                                                   x.shape,
                                                   dtype=torch.int8)

        expected_y_output = torch.randn(32, 256)
        mock_npu_quant_matmul.return_value = expected_y_output

        output = self.method.apply(layer, x, bias)

        expected_y_output += bias
        self.assertTrue(torch.equal(output, expected_y_output))

    @patch("torch_npu.npu_quant_matmul")
    def test_apply_with_x_is_int8(self, mock_npu_quant_matmul):
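        # An input that is already int8 should skip the quantize step and be
        # fed to npu_quant_matmul directly.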
        layer = MagicMock()
        layer.aclnn_input_scale = 0.1
        layer.aclnn_input_offset = 0.2
        layer.weight = torch.randn(128, 256)
        layer.deq_scale = 0.3

        x = torch.randint(-128, 127, (32, 128), dtype=torch.int8)
        bias = torch.randn(256)

        expected_y_output = torch.randn(32, 256)
        mock_npu_quant_matmul.return_value = expected_y_output

        output = self.method.apply(layer, x, bias)
        expected_y_output += bias
        self.assertTrue(torch.equal(output, expected_y_output))

    @patch('vllm_ascend.utils.get_ascend_device_type',
           return_value=AscendDeviceType._310P)
    @patch("torch_npu.npu_quant_matmul")
    def test_apply_with_x_is_310p(self, mock_npu_quant_matmul,
                                  mock_soc_version):
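        # get_ascend_device_type is patched to report 310P so that apply
        # exercises the 310P-specific branch.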
        layer = MagicMock()
        layer.aclnn_input_scale = 0.1
        layer.aclnn_input_offset = 0.2
        layer.weight = torch.randn(128, 256)
        layer.deq_scale = 0.3

        x = torch.randint(-128, 127, (32, 128), dtype=torch.int8)
        bias = torch.randn(256)

        expected_y_output = torch.randn(32, 256)
        mock_npu_quant_matmul.return_value = expected_y_output

        output = self.method.apply(layer, x, bias)
        expected_y_output += bias
        self.assertTrue(torch.equal(output, expected_y_output))

    @patch.dict(os.environ, {"VLLM_ASCEND_ENABLE_NZ": "0"})
    @patch('torch_npu.npu_format_cast')
    def test_process_weights_after_loading_with_nz0(self,
                                                    mock_npu_format_cast):
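        # With VLLM_ASCEND_ENABLE_NZ=0 the weight should stay in ND format,
        # so npu_format_cast must never be called (asserted at the end).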
        layer = MagicMock()

        layer.weight.data = torch.randint(-127,
                                          128, (128, 256),
                                          dtype=torch.int8)
        layer.input_scale.data = torch.tensor([0.1])
        layer.input_offset.data = torch.tensor([0])
        layer.deq_scale = torch.tensor([0.5])
        layer.weight_scale.data = torch.randn(128, 1)
        layer.weight_offset.data = torch.randn(128, 1)

        # Return an instance rather than the MagicMock class itself so the
        # stubbed cast result behaves like a real object.
        mock_npu_format_cast.return_value = MagicMock()
        self.method.process_weights_after_loading(layer)

        expected_offset = torch.tensor([0]).repeat(256).to(torch.int8)
        self.assertTrue(
            torch.equal(layer.aclnn_input_offset.data, expected_offset))
        self.assertFalse(layer.aclnn_input_offset.requires_grad)

        self.assertFalse(layer.deq_scale.requires_grad)

        self.assertEqual(layer.weight_scale.data.shape, (128, ))
        self.assertEqual(layer.weight_offset.data.shape, (128, ))
        mock_npu_format_cast.assert_not_called()

    @patch.dict(os.environ, {"VLLM_ASCEND_ENABLE_NZ": "1"})
    @patch('torch_npu.npu_format_cast')
    def test_process_weights_after_loading_with_nz1(self,
                                                    mock_npu_format_cast):
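        # With VLLM_ASCEND_ENABLE_NZ=1 the int8 weight should be cast to the
        # NZ layout exactly once via npu_format_cast.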
        layer = MagicMock()

        layer.weight.data = torch.randint(-127,
                                          128, (128, 256),
                                          dtype=torch.int8)
        layer.input_scale.data = torch.tensor([0.1])
        layer.input_offset.data = torch.tensor([0])
        layer.deq_scale = torch.tensor([0.5])
        layer.weight_scale.data = torch.randn(128, 1)
        layer.weight_offset.data = torch.randn(128, 1)

        mock_npu_format_cast.return_value = MagicMock()
        self.method.process_weights_after_loading(layer)

        expected_offset = torch.tensor([0]).repeat(256).to(torch.int8)
        self.assertTrue(
            torch.equal(layer.aclnn_input_offset.data, expected_offset))
        self.assertFalse(layer.aclnn_input_offset.requires_grad)

        self.assertFalse(layer.deq_scale.requires_grad)

        self.assertEqual(layer.weight_scale.data.shape, (128, ))
        self.assertEqual(layer.weight_offset.data.shape, (128, ))
        mock_npu_format_cast.assert_called_once()

    @patch.dict(os.environ, {"VLLM_ASCEND_ENABLE_NZ": "2"})
    @patch('torch_npu.npu_format_cast')
    def test_process_weights_after_loading_with_nz2(self,
                                                    mock_npu_format_cast):
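        # VLLM_ASCEND_ENABLE_NZ=2 is also treated as NZ-enabled here: the
        # format cast is still expected exactly once.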
        layer = MagicMock()

        layer.weight.data = torch.randint(-127,
                                          128, (128, 256),
                                          dtype=torch.int8)
        layer.input_scale.data = torch.tensor([0.1])
        layer.input_offset.data = torch.tensor([0])
        layer.deq_scale = torch.tensor([0.5])
        layer.weight_scale.data = torch.randn(128, 1)
        layer.weight_offset.data = torch.randn(128, 1)

        mock_npu_format_cast.return_value = MagicMock()
        self.method.process_weights_after_loading(layer)

        expected_offset = torch.tensor([0]).repeat(256).to(torch.int8)
        self.assertTrue(
            torch.equal(layer.aclnn_input_offset.data, expected_offset))
        self.assertFalse(layer.aclnn_input_offset.requires_grad)

        self.assertFalse(layer.deq_scale.requires_grad)

        self.assertEqual(layer.weight_scale.data.shape, (128, ))
        self.assertEqual(layer.weight_offset.data.shape, (128, ))
        mock_npu_format_cast.assert_called_once()