[refactor] refactor weight trans nz and transpose (#4878)
### What this PR does / why we need it?
`VLLM_ASCEND_ENABLE_NZ` now has three options:
- 0: disable NZ;
- 1: enable NZ only for quantized weights;
- 2: enable NZ whenever possible.

The default is `VLLM_ASCEND_ENABLE_NZ=1`. All cases are shown in the table below:
| | W4A4 | W4A8 | W8A8 | fp16/bf16 | fp32 |
|---|---|---|---|---|---|
| trans NZ | NZ not supported | NZ by default | NZ by default | NZ only when `VLLM_ASCEND_ENABLE_NZ=2` | NZ not supported |
| transpose | non-transposed only | transposed only | transposed only | linear: non-transposed only<br>gmm: transposed only | same as fp16/bf16 |
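For reference, the decision can be thought of as a small three-way lookup. The sketch below is a hypothetical illustration only, not the actual implementation: the helper name `should_cast_to_nz` and its signature are assumptions (the real switch is read through `vllm_ascend.utils.is_enable_nz`, which the tests in this PR exercise):

```python
import os

def should_cast_to_nz(quantized: bool, format_supports_nz: bool = True) -> bool:
    """Hypothetical sketch of the three-way VLLM_ASCEND_ENABLE_NZ switch."""
    level = int(os.environ.get("VLLM_ASCEND_ENABLE_NZ", "1"))  # default is 1
    if not format_supports_nz or level == 0:  # e.g. W4A4/fp32, or NZ disabled
        return False
    if level == 1:  # quant-only mode (the default)
        return quantized
    return True  # level == 2: enable NZ as long as possible
```

For example, a W8A8 weight would be cast under levels 1 and 2, while an fp16/bf16 weight would be cast only under level 2, matching the table above.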
Some exceptional cases:
1. The MLAPO op needs to do additional processing on the weights, including the NZ conversion. When the MLAPO op is used, some weights are therefore forcibly converted to NZ.
2. The MLA/SFA weight `W_UV` is consumed by the op `torch.ops._C_ascend.batch_matmul_transpose`, which currently does not support NZ.
### Does this PR introduce _any_ user-facing change?
fp16/bf16 weights are no longer converted to NZ by default.
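To restore the previous behavior, launching with `VLLM_ASCEND_ENABLE_NZ=2` should re-enable the NZ conversion for fp16/bf16 weights, per the table above.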
### How was this patch tested?
- vLLM version: v0.12.0
- vLLM main: ad32e3e19c
Signed-off-by: zzzzwwjj <1183291235@qq.com>
@@ -4,7 +4,8 @@ from unittest.mock import MagicMock, patch
 import torch
 from vllm.config import CacheConfig, ModelConfig, SchedulerConfig, VllmConfig
 from vllm.distributed.parallel_state import GroupCoordinator
-from vllm.model_executor.layers.linear import LinearBase
+from vllm.model_executor.layers.linear import (LinearBase,
+                                               UnquantizedLinearMethod)

 from tests.ut.base import TestBase
 from vllm_ascend.ascend_config import init_ascend_config
@@ -972,16 +973,13 @@ class TestAscendMLAImpl(TestBase):
     def test_process_weights_after_loading(self, mock_format_cast):
         layer = MagicMock(spec=LinearBase)
         layer.input_size_per_partition = 10
-        quant_method = MagicMock()
-        apply = MagicMock()
-        quant_method.apply = apply
+        quant_method = MagicMock(spec=UnquantizedLinearMethod)
         layer.quant_method = quant_method
         shape_0 = self.impl.num_heads * (self.impl.qk_nope_head_dim +
                                          self.impl.v_head_dim)
         shape_1 = self.impl.kv_lora_rank
         layer.weight = torch.randn(shape_0, shape_1)
         self.impl.kv_b_proj = layer
-        apply.return_value = layer.weight.T
         mock_format_cast.return_value = layer.weight
         self.impl.process_weights_after_loading(torch.bfloat16)
@@ -1,3 +1,4 @@
+import os
 import unittest
 from unittest import mock
 from unittest.mock import MagicMock, patch
@@ -61,22 +62,24 @@ class TestAscendUnquantizedLinearMethod(TestBase):
         mock_dtype = mock.PropertyMock(return_value=torch.float16)
         type(self.layer.weight.data).dtype = mock_dtype

-    @mock.patch("vllm_ascend.ops.linear.is_enable_nz")
+    @patch.dict(os.environ, {"VLLM_ASCEND_ENABLE_NZ": "0"})
     @mock.patch("torch_npu.npu_format_cast")
-    def test_process_weights_after_loading_enable_nz(self, mock_format_cast,
-                                                     mock_is_nz):
-        mock_is_nz.return_value = 1
-        self.method.process_weights_after_loading(self.layer)
-        mock_format_cast.assert_called_once()
-
-    @mock.patch("vllm_ascend.ops.linear.is_enable_nz")
-    @mock.patch("torch_npu.npu_format_cast")
-    def test_process_weights_after_loading_disable_nz(self, mock_format_cast,
-                                                      mock_is_nz):
-        mock_is_nz.return_value = 0
+    def test_process_weights_after_loading_with_nz0(self, mock_format_cast):
         self.method.process_weights_after_loading(self.layer)
         mock_format_cast.assert_not_called()

+    @patch.dict(os.environ, {"VLLM_ASCEND_ENABLE_NZ": "1"})
+    @mock.patch("torch_npu.npu_format_cast")
+    def test_process_weights_after_loading_with_nz1(self, mock_format_cast):
+        self.method.process_weights_after_loading(self.layer)
+        mock_format_cast.assert_not_called()
+
+    @patch.dict(os.environ, {"VLLM_ASCEND_ENABLE_NZ": "2"})
+    @mock.patch("torch_npu.npu_format_cast")
+    def test_process_weights_after_loading_with_nz2(self, mock_format_cast):
+        self.method.process_weights_after_loading(self.layer)
+        mock_format_cast.assert_called_once()
+

 class TestAscendRowParallelLinear(BaseLinearTest):
@@ -199,7 +199,6 @@ class TestW4A4FlatQuantDynamic(unittest.TestCase):
                                     (self.output_size, self.input_size // 8),
                                     dtype=torch.int32)
         mock_pack_weights.return_value = mock_packed
-        self.method.transpose_weight = False
         self.method.process_weights_after_loading(layer)
         mock_pack_weights.assert_called_once()
         self.assertFalse(hasattr(layer, 'weight'))
@@ -212,35 +211,6 @@ class TestW4A4FlatQuantDynamic(unittest.TestCase):
         self.assertEqual(layer.left_trans.shape, (24, 24))
         self.assertTrue(layer.left_trans.is_contiguous())

-    @patch('vllm_ascend.quantization.w4a4_flatquant_dynamic.pack_int4_weights')
-    def test_process_weights_after_loading_with_transpose(
-            self, mock_pack_weights):
-        """Tests weight processing after loading, with transpose."""
-        layer = nn.Module()
-        layer.weight = torch.randint(-8,
-                                     7, (self.output_size, self.input_size),
-                                     dtype=torch.int8)
-        layer.weight_scale = torch.randn(self.output_size,
-                                         1,
-                                         dtype=torch.bfloat16)
-        layer.weight_offset = torch.randn(self.output_size,
-                                          1,
-                                          dtype=torch.bfloat16)
-        layer.left_trans = torch.randn(24, 24)
-        layer.right_trans = torch.randn(32, 32)
-        layer.clip_ratio = torch.tensor([0.9])
-        mock_packed = torch.randint(0,
-                                    100,
-                                    (self.output_size, self.input_size // 8),
-                                    dtype=torch.int32)
-        mock_pack_weights.return_value = mock_packed
-        self.method.transpose_weight = True
-        self.method.process_weights_after_loading(layer)
-        self.assertTrue(hasattr(layer, 'weight_packed'))
-        self.assertEqual(layer.weight_packed.shape,
-                         (self.input_size // 8, self.output_size))
-        self.assertTrue(layer.weight_packed.is_contiguous())
-

 if __name__ == '__main__':
     unittest.main(argv=['first-arg-is-ignored'], exit=False)
@@ -62,7 +62,8 @@ class TestAscendW4A8DynamicLinearMethod(TestBase):

     @patch('torch_npu.npu_convert_weight_to_int4pack')
     @patch('torch.Tensor.npu')
-    def test_process_weights_after_loading(self, mock_npu,
+    @patch("torch_npu.npu_format_cast")
+    def test_process_weights_after_loading(self, mock_format_cast, mock_npu,
                                            mock_npu_convert_weight):
         mock_npu.side_effect = lambda: torch.zeros(
             (1, 32), dtype=torch.float32)
@@ -85,6 +86,8 @@ class TestAscendW4A8DynamicLinearMethod(TestBase):
         layer.weight_offset_second = torch.nn.Parameter(torch.empty_like(
             layer.weight_scale_second.data),
                                                         requires_grad=False)
+        mock_format_cast.return_value = layer.weight.data.transpose(
+            0, 1).contiguous()
         self.method.process_weights_after_loading(layer)
         self.assertTrue(hasattr(layer, "weight_scale_bias"))
         self.assertEqual(layer.weight_scale_bias.data.shape, (32, ))
@@ -110,6 +113,8 @@ class TestAscendW4A8DynamicLinearMethod(TestBase):
         new_layer.scale_bias = torch.nn.Parameter(torch.zeros(
             (32, 1), dtype=torch.float32),
                                                   requires_grad=False)
+        mock_format_cast.return_value = new_layer.weight.data.transpose(
+            0, 1).contiguous()
         self.method.process_weights_after_loading(new_layer)
         self.assertEqual(new_layer.scale_bias.data.shape, (32, ))
         self.assertTrue(hasattr(new_layer, "weight_scale_second"))
@@ -1,3 +1,4 @@
+import os
 from unittest.mock import MagicMock, patch

 import torch
@@ -132,20 +133,21 @@ class TestAscendW8A8LinearMethod(TestBase):
         expected_y_output += bias
         self.assertTrue(torch.equal(output, expected_y_output))

-    @patch("vllm_ascend.quantization.w8a8.is_enable_nz")
+    @patch.dict(os.environ, {"VLLM_ASCEND_ENABLE_NZ": "0"})
     @patch('torch_npu.npu_format_cast')
-    def test_process_weights_after_loading_not_nz(self, mock_npu_format_cast,
-                                                  mock_is_nz):
+    def test_process_weights_after_loading_with_nz0(self,
+                                                    mock_npu_format_cast):
         layer = MagicMock()

-        layer.weight.data = torch.randn(128, 256)
+        layer.weight.data = torch.randint(-127,
+                                          128, (128, 256),
+                                          dtype=torch.int8)
         layer.input_scale.data = torch.tensor([0.1])
         layer.input_offset.data = torch.tensor([0])
         layer.deq_scale = torch.tensor([0.5])
         layer.weight_scale.data = torch.randn(128, 1)
         layer.weight_offset.data = torch.randn(128, 1)

-        mock_is_nz.return_value = 0
         mock_npu_format_cast.return_value = MagicMock
         self.method.process_weights_after_loading(layer)
@@ -160,20 +162,50 @@ class TestAscendW8A8LinearMethod(TestBase):
         self.assertEqual(layer.weight_offset.data.shape, (128, ))
         mock_npu_format_cast.assert_not_called()

-    @patch("vllm_ascend.quantization.w8a8.is_enable_nz")
+    @patch.dict(os.environ, {"VLLM_ASCEND_ENABLE_NZ": "1"})
     @patch('torch_npu.npu_format_cast')
-    def test_process_weights_after_loading_nz(self, mock_npu_format_cast,
-                                              mock_is_nz):
+    def test_process_weights_after_loading_with_nz1(self,
+                                                    mock_npu_format_cast):
         layer = MagicMock()

-        layer.weight.data = torch.randn(128, 256)
+        layer.weight.data = torch.randint(-127,
+                                          128, (128, 256),
+                                          dtype=torch.int8)
         layer.input_scale.data = torch.tensor([0.1])
         layer.input_offset.data = torch.tensor([0])
         layer.deq_scale = torch.tensor([0.5])
         layer.weight_scale.data = torch.randn(128, 1)
         layer.weight_offset.data = torch.randn(128, 1)

-        mock_is_nz.return_value = 1
         mock_npu_format_cast.return_value = MagicMock
         self.method.process_weights_after_loading(layer)

         expected_offset = torch.tensor([0]).repeat(256).to(torch.int8)
         self.assertTrue(
             torch.equal(layer.aclnn_input_offset.data, expected_offset))
         self.assertFalse(layer.aclnn_input_offset.requires_grad)

         self.assertFalse(layer.deq_scale.requires_grad)

         self.assertEqual(layer.weight_scale.data.shape, (128, ))
         self.assertEqual(layer.weight_offset.data.shape, (128, ))
         mock_npu_format_cast.assert_called_once()

+    @patch.dict(os.environ, {"VLLM_ASCEND_ENABLE_NZ": "2"})
+    @patch('torch_npu.npu_format_cast')
+    def test_process_weights_after_loading_with_nz2(self,
+                                                    mock_npu_format_cast):
+        layer = MagicMock()
+
+        layer.weight.data = torch.randint(-127,
+                                          128, (128, 256),
+                                          dtype=torch.int8)
+        layer.input_scale.data = torch.tensor([0.1])
+        layer.input_offset.data = torch.tensor([0])
+        layer.deq_scale = torch.tensor([0.5])
+        layer.weight_scale.data = torch.randn(128, 1)
+        layer.weight_offset.data = torch.randn(128, 1)
+
+        mock_npu_format_cast.return_value = MagicMock
+        self.method.process_weights_after_loading(layer)
+
@@ -35,14 +35,6 @@ class TestUtils(TestBase):
         from vllm_ascend import platform
         importlib.reload(platform)

-    def test_is_enable_nz(self):
-        with mock.patch("vllm_ascend.utils.envs_ascend.VLLM_ASCEND_ENABLE_NZ",
-                        1):
-            self.assertTrue(utils.is_enable_nz())
-        with mock.patch("vllm_ascend.utils.envs_ascend.VLLM_ASCEND_ENABLE_NZ",
-                        0):
-            self.assertFalse(utils.is_enable_nz())
-
     def test_nd_to_nz_2d(self):
         # can be divided by 16
         input_tensor = torch.randn(32, 64)