### What this PR does / why we need it?
**Scope of Changes**:
| File Path |
| :--- |
| `vllm_ascend/quantization/compressed_tensors/compressed_tensors.py` |
| `vllm_ascend/quantization/quant_config.py` |
| `vllm_ascend/quantization/utils.py` |
| `vllm_ascend/quantization/w4a16.py` |
| `vllm_ascend/quantization/w4a4_flatquant_dynamic.py` |
| `vllm_ascend/quantization/w4a8_dynamic.py` |
| `vllm_ascend/quantization/w8a16.py` |
| `vllm_ascend/quantization/w8a8.py` |
| `vllm_ascend/quantization/w8a8_dynamic.py` |
| `vllm_ascend/quantization/w8a8_pdmix.py` |
| `vllm_ascend/quantization/w8a8mxfp8.py` |
| `vllm_ascend/sample/rejection_sampler.py` |
| `vllm_ascend/sample/sampler.py` |
| `vllm_ascend/worker/block_table.py` |
### Does this PR introduce _any_ user-facing change?
### How was this patch tested?
- vLLM version: v0.13.0
- vLLM main: 2c24bc6996
Signed-off-by: MrZ20 <2609716663@qq.com>
This commit is contained in:
@@ -15,7 +15,7 @@
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from typing import Any, Dict, Optional
|
||||
from typing import Any
|
||||
|
||||
import torch
|
||||
import torch_npu
|
||||
@@ -29,7 +29,7 @@ from .registry import register_scheme
|
||||
@register_scheme("W8A16", "linear")
|
||||
class AscendW8A16LinearMethod(AscendLinearScheme):
|
||||
"""Linear method for Ascend W8A16.
|
||||
|
||||
|
||||
This scheme uses 8-bit quantized weights with 16-bit activations.
|
||||
"""
|
||||
|
||||
@@ -41,39 +41,34 @@ class AscendW8A16LinearMethod(AscendLinearScheme):
|
||||
input_size: int,
|
||||
output_size: int,
|
||||
params_dtype: torch.dtype = torch.bfloat16,
|
||||
) -> Dict[str, Any]:
|
||||
params_dict = {
|
||||
"weight": torch.empty(output_size, input_size, dtype=torch.int8)
|
||||
}
|
||||
) -> dict[str, Any]:
|
||||
params_dict = {"weight": torch.empty(output_size, input_size, dtype=torch.int8)}
|
||||
return params_dict
|
||||
|
||||
def get_perchannel_param(
|
||||
self,
|
||||
output_size: int,
|
||||
params_dtype: torch.dtype,
|
||||
) -> Dict[str, Any]:
|
||||
) -> dict[str, Any]:
|
||||
params_dict = {}
|
||||
params_dict["weight_scale"] = torch.empty(output_size,
|
||||
1,
|
||||
dtype=params_dtype)
|
||||
params_dict["weight_offset"] = torch.empty(output_size,
|
||||
1,
|
||||
dtype=params_dtype)
|
||||
params_dict["weight_scale"] = torch.empty(output_size, 1, dtype=params_dtype)
|
||||
params_dict["weight_offset"] = torch.empty(output_size, 1, dtype=params_dtype)
|
||||
return params_dict
|
||||
|
||||
def apply(
|
||||
self,
|
||||
layer: torch.nn.Module,
|
||||
x: torch.Tensor,
|
||||
bias: Optional[torch.Tensor] = None,
|
||||
tp_rank: Optional[int] = 0,
|
||||
bias: torch.Tensor | None = None,
|
||||
tp_rank: int | None = 0,
|
||||
) -> torch.Tensor:
|
||||
output = torch_npu.npu_weight_quant_batchmatmul(
|
||||
x=x,
|
||||
weight=layer.weight,
|
||||
antiquant_scale=layer.weight_scale,
|
||||
antiquant_offset=layer.weight_offset,
|
||||
bias=bias)
|
||||
bias=bias,
|
||||
)
|
||||
return output
|
||||
|
||||
def process_weights_after_loading(self, layer):
|
||||
|
||||
Reference in New Issue
Block a user