[refactor] refactor weight trans nz and transpose (#4878)
### What this PR does / why we need it?
`VLLM_ASCEND_ENABLE_NZ` now has three options:

- `0`: disable NZ;
- `1`: enable NZ only in the quantized case;
- `2`: enable NZ whenever possible.

`VLLM_ASCEND_ENABLE_NZ=1` is the default; a sketch of this gating logic follows the table below.
All cases are shown in the table below:
| | W4A4 | W4A8 | W8A8 | fp16/bf16 | fp32 |
|---|---|---|---|---|---|
| trans NZ | NZ not supported | trans NZ by default | trans NZ by default | trans NZ when `VLLM_ASCEND_ENABLE_NZ=2` | NZ not supported |
| transpose | only the non-transposed case is supported | only the transposed case is supported | only the transposed case is supported | linear: only the non-transposed case<br>gmm: only the transposed case | same as fp16/bf16 |
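For illustration, a minimal sketch of how the three-valued switch can gate weight conversion. The helper names `is_enable_nz` and `maybe_trans_nz` mirror the diff below, but their exact signatures, the `quantized` parameter, and the numeric ACL format IDs are assumptions:

```python
import os

import torch
import torch_npu  # Ascend NPU extension for PyTorch

# ACL format IDs consumed by torch_npu.npu_format_cast
# (the numeric values here are assumptions for illustration).
ACL_FORMAT_FRACTAL_ND = 2
ACL_FORMAT_FRACTAL_NZ = 29


def is_enable_nz(quantized: bool = False) -> bool:
    """Map VLLM_ASCEND_ENABLE_NZ (0/1/2) to a per-weight decision."""
    level = int(os.getenv("VLLM_ASCEND_ENABLE_NZ", "1"))
    if level == 0:       # 0: NZ disabled everywhere
        return False
    if level == 1:       # 1: NZ only for quantized weights
        return quantized
    return True          # 2: NZ whenever the weight supports it


def maybe_trans_nz(tensor: torch.Tensor,
                   quantized: bool = False) -> torch.Tensor:
    """Cast a weight to FRACTAL_NZ when the switch allows it, else keep it as is."""
    if is_enable_nz(quantized):
        return torch_npu.npu_format_cast(tensor, ACL_FORMAT_FRACTAL_NZ)
    return tensor
```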
Some exceptional cases:
1. The MLAPO op needs to perform additional processing on the weights, including trans NZ. When the MLAPO op is used, some weights are forcibly transformed to NZ.
2. The MLA/SFA weight `W_UV` is consumed by the op `torch.ops._C_ascend.batch_matmul_transpose`, which currently cannot support NZ; a sketch of keeping such a weight in ND follows below.
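As a counterpart to the gating sketch above, a weight consumed by an NZ-incompatible op has to stay in (or be cast back to) ND format. A hedged sketch; `ensure_nd` is a hypothetical helper and the format ID is assumed:

```python
import torch
import torch_npu

ACL_FORMAT_FRACTAL_ND = 2  # assumed ACL format ID for ND


def ensure_nd(tensor: torch.Tensor) -> torch.Tensor:
    """Keep a weight in ND format for consumers without NZ support,
    e.g. torch.ops._C_ascend.batch_matmul_transpose reading W_UV."""
    # Casting a tensor that is already in ND is effectively a no-op.
    return torch_npu.npu_format_cast(tensor, ACL_FORMAT_FRACTAL_ND)
```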
### Does this PR introduce _any_ user-facing change?
Yes: fp16/bf16 weights are no longer transformed to NZ by default.
### How was this patch tested?
- vLLM version: v0.12.0
- vLLM main: ad32e3e19c
Signed-off-by: zzzzwwjj <1183291235@qq.com>
```diff
@@ -114,10 +114,10 @@ from vllm_ascend.spec_decode import get_spec_decode_method
 from vllm_ascend.spec_decode.eagle_proposer import EagleProposer
 from vllm_ascend.spec_decode.interface import SpecDcodeType
 from vllm_ascend.spec_decode.mtp_proposer import MtpProposer
-from vllm_ascend.utils import (ACL_FORMAT_FRACTAL_ND, ACL_FORMAT_FRACTAL_NZ,
-                               AscendDeviceType, ProfileExecuteDuration,
-                               enable_sp, get_ascend_device_type, is_enable_nz,
-                               is_moe_model, lmhead_tp_enable, vllm_version_is)
+from vllm_ascend.utils import (AscendDeviceType, ProfileExecuteDuration,
+                               enable_sp, get_ascend_device_type, is_moe_model,
+                               lmhead_tp_enable, maybe_trans_nz,
+                               vllm_version_is)
 from vllm_ascend.worker.npu_input_batch import NPUInputBatch

 from vllm_ascend.ascend_forward_context import (  # isort: skip
@@ -137,9 +137,6 @@ torch.npu.config.allow_internal_format = True

 if get_ascend_device_type() == AscendDeviceType._310P:
     torch_npu.npu.set_compile_mode(jit_compile=False)
-    ACL_FORMAT = ACL_FORMAT_FRACTAL_NZ
-else:
-    ACL_FORMAT = ACL_FORMAT_FRACTAL_ND


 @dataclass
@@ -2225,16 +2222,6 @@ class NPUModelRunner(GPUModelRunner):
             self.model = get_model(vllm_config=self.vllm_config)
             if self.dynamic_eplb:
                 model_register(self.model, self.model_config)
-            if get_ascend_device_type() == AscendDeviceType._310P:
-                from vllm.model_executor.layers.linear import (
-                    MergedColumnParallelLinear, QKVParallelLinear,
-                    RowParallelLinear)
-                for module in self.model.modules():
-                    if isinstance(module,
-                                  (MergedColumnParallelLinear,
-                                   QKVParallelLinear, RowParallelLinear)):
-                        module.weight.data = self._convert_torch_format(
-                            module.weight.data)
             if self.drafter:
                 logger.info("Loading drafter model...")
                 self.drafter.load_model(self.model)
```
```diff
@@ -2255,13 +2242,6 @@ class NPUModelRunner(GPUModelRunner):
                 self.vllm_config,
                 runtime_mode=CUDAGraphMode.FULL)

-    def _convert_torch_format(self, tensor):
-        if ACL_FORMAT == ACL_FORMAT_FRACTAL_NZ \
-                and not is_enable_nz():
-            return tensor
-        tensor = torch_npu.npu_format_cast(tensor, ACL_FORMAT)
-        return tensor
-
     def initialize_kv_cache(self, kv_cache_config: KVCacheConfig) -> None:
         """
         Initialize KV cache based on `kv_cache_config`.
```
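For readers without the surrounding module: the removed helper depended on the module-level `ACL_FORMAT` selected at import time (see the second hunk above). A self-contained restatement of its behavior for comparison; the format IDs are assumed, and the pre-refactor `is_enable_nz` is approximated as a boolean env check:

```python
import os

import torch
import torch_npu

# Assumed ACL format IDs (see the sketch near the top of this page).
ACL_FORMAT_FRACTAL_ND = 2
ACL_FORMAT_FRACTAL_NZ = 29
# Before this PR: FRACTAL_NZ on 310P, FRACTAL_ND elsewhere.
ACL_FORMAT = ACL_FORMAT_FRACTAL_NZ


def is_enable_nz() -> bool:
    # Approximation of the old boolean switch semantics.
    return os.getenv("VLLM_ASCEND_ENABLE_NZ", "1") != "0"


def _convert_torch_format(tensor: torch.Tensor) -> torch.Tensor:
    """Removed behavior: skip the cast when NZ is globally off on an
    NZ-default device, otherwise cast to the import-time format."""
    if ACL_FORMAT == ACL_FORMAT_FRACTAL_NZ and not is_enable_nz():
        return tensor
    return torch_npu.npu_format_cast(tensor, ACL_FORMAT)
```

Callers now use the utils-level `maybe_trans_nz` instead, as the next hunk shows.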
```diff
@@ -2534,9 +2514,10 @@ class NPUModelRunner(GPUModelRunner):
                 self.model_config.hf_text_config.qk_rope_head_dim
             ]
             k_cache = raw_k_tensor.view(dtype).view(k_shape)
-            k_cache = self._convert_torch_format(k_cache)
             v_cache = raw_v_tensor.view(dtype).view(v_shape)
-            v_cache = self._convert_torch_format(v_cache)
+            if get_ascend_device_type() == AscendDeviceType._310P:
+                k_cache = maybe_trans_nz(k_cache)
+                v_cache = maybe_trans_nz(v_cache)
             if self.use_sparse and raw_dsa_k_tensor is not None:
                 dsa_k_cache_shape = (num_blocks,
                                      kv_cache_spec.block_size, 1, 128)
```
```diff
@@ -55,7 +55,7 @@ from vllm_ascend.distributed.parallel_state import init_ascend_model_parallel
 from vllm_ascend.ops.triton.triton_utils import init_device_properties_triton
 from vllm_ascend.platform import NPUPlatform
 from vllm_ascend.utils import (AscendDeviceType, check_ascend_device_type,
-                               enable_sp, get_ascend_device_type, is_enable_nz,
+                               enable_sp, get_ascend_device_type,
                                register_ascend_customop)
 from vllm_ascend.worker.model_runner_v1 import NPUModelRunner

@@ -160,7 +160,7 @@ class NPUWorker(WorkerBase):
                     used_bytes / GiB_bytes)

     def wake_up(self, tags: Optional[list[str]] = None) -> None:
-        if is_enable_nz():
+        if envs_ascend.VLLM_ASCEND_ENABLE_NZ:
             raise ValueError(
                 "FRACTAL_NZ mode is enabled. This may cause model parameter precision issues "
                 "in the RL scenarios. Please set VLLM_ASCEND_ENABLE_NZ=0.")
```
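Practical consequence for RL scenarios: NZ must be disabled before the engine is created, for example (assuming the standard environment-variable mechanism):

```python
import os

# Set before vLLM-Ascend is imported and the worker is constructed;
# otherwise wake_up() raises the ValueError shown in the hunk above.
os.environ["VLLM_ASCEND_ENABLE_NZ"] = "0"
```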