### What this PR does / why we need it?
This pull request enhances 310P device support by enabling W8A8S
quantization and adding support for saving models with W8A8SC state
outputs. It provides an example script for saving sharded and compressed
model states, implements the core W8A8S quantization method, and adds
metadata generation in the 310P worker to record the quantization type
of each saved parameter. These changes improve efficiency and
compatibility for quantized models on 310P hardware.
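For illustration, the metadata written to `parameters_type_map.json`
(see the loader code below) maps each saved tensor to a quantization
type. A hypothetical W8A8S example, shown as the Python dict that gets
dumped to JSON; the parameter names are illustrative, not from the PR:
```python
# Hypothetical parameters_type_map.json contents for a W8A8S checkpoint.
quant_description = {
    "model_quant_type": "W8A8S",
    "version": "1.0.0",
    # int8/int32/int64 weights and biases are tagged with the model quant type
    "model.layers.0.self_attn.qkv_proj.weight": "W8A8S",
    # floating-point tensors (e.g. norms) are tagged FLOAT
    "model.layers.0.input_layernorm.weight": "FLOAT",
}
```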
### Does this PR introduce _any_ user-facing change?
No
### How was this patch tested?
W8A8S accuracy test and W8A8SC state saving.
<img width="886" height="184" alt="image"
src="https://github.com/user-attachments/assets/e9bcac54-1f69-4d3a-a5b8-221a147ef99d"
/>
- vLLM version: v0.16.0
- vLLM main: 15d76f74e2
---------
Signed-off-by: pu-zhe <zpuaa@outlook.com>
```python
#
# Copyright (c) 2026 Huawei Technologies Co., Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is a part of the vllm-ascend project.
#

import json
import os
from pathlib import Path

import torch
from vllm.config.load import LoadConfig
from vllm.model_executor.model_loader import ShardedStateLoader


class ShardedStateLoader310(ShardedStateLoader):

    def __init__(self, load_config: LoadConfig):
        super().__init__(load_config)

    @staticmethod
    def save_model(
        model: torch.nn.Module,
        path: str,
        pattern: str | None = None,
        max_size: int | None = None,
    ) -> None:
        """Save this rank's (deduplicated) state dict as a single safetensors shard."""
        from safetensors.torch import save_file

        from vllm.distributed import get_tensor_model_parallel_rank

        rank = get_tensor_model_parallel_rank()
        # The 310P variant always writes a single part per rank.
        part_idx = 0
        state_dict = ShardedStateLoader._filter_subtensors(model.state_dict())

        filename = ShardedStateLoader.DEFAULT_PATTERN.format(rank=rank, part=part_idx)
        save_file(
            state_dict,
            os.path.join(path, filename),
        )

    @staticmethod
    def generate_quant_description(model: torch.nn.Module, path: str):
        """Generate a mapping of parameter names to their corresponding quantization types."""
        quant_description = {}
        quantize_type = model.quant_config.quant_description.get("model_quant_type", "FLOAT")
        quant_description["model_quant_type"] = quantize_type
        quant_description["version"] = "1.0.0"
        state_dict = ShardedStateLoader._filter_subtensors(model.state_dict())
        for name, tensor in state_dict.items():
            # Integer-typed weights/biases were produced by quantization;
            # everything else is recorded as FLOAT.
            if name.endswith(".weight") or name.endswith(".bias"):
                if tensor.dtype in [torch.int8, torch.int32, torch.int64]:
                    quant_description[name] = quantize_type
                else:
                    quant_description[name] = "FLOAT"
            else:
                quant_description[name] = "FLOAT"

        json_path = Path(path) / "parameters_type_map.json"
        with json_path.open("w", encoding="utf-8") as f:
            json.dump(quant_description, f, indent=2)
```
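For context, a minimal sketch of how the two hooks above might be
driven. The import path, `model` object, and helper name are
placeholders (the actual PR invokes these from the 310P worker), and a
tensor-parallel group must already be initialized for `save_model`:
```python
import os

import torch

# Placeholder import path; use wherever ShardedStateLoader310 lives in the PR.
from sharded_state_loader_310 import ShardedStateLoader310


def save_w8a8sc_states(model: torch.nn.Module, save_dir: str) -> None:
    """Write this rank's safetensors shard plus the quant-type metadata file."""
    os.makedirs(save_dir, exist_ok=True)
    # Writes one shard per tensor-parallel rank, using the default filename
    # pattern inherited from ShardedStateLoader.
    ShardedStateLoader310.save_model(model, save_dir)
    # Writes parameters_type_map.json describing each saved tensor's
    # quantization type (requires model.quant_config to be populated).
    ShardedStateLoader310.generate_quant_description(model, save_dir)
```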