Revert "[Feature][Quant] Auto-detect quantization format from model f… (#6873)
This reverts commit 3953dcf784 to keep the basic functions available.
---------
Signed-off-by: wangli <wangli858794774@gmail.com>
@@ -178,11 +178,6 @@ class NPUPlatform(Platform):
|
||||
|
||||
@classmethod
|
||||
def check_and_update_config(cls, vllm_config: VllmConfig) -> None:
|
||||
from vllm_ascend.quantization.utils import maybe_auto_detect_quantization
|
||||
|
||||
if vllm_config.model_config is not None:
|
||||
maybe_auto_detect_quantization(vllm_config)
|
||||
|
||||
# initialize ascend config from vllm additional_config
|
||||
cls._fix_incompatible_config(vllm_config)
|
||||
ascend_config = init_ascend_config(vllm_config)
|
||||
|
||||
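With the hook removed, nothing on the platform side infers the quantization method at startup; it again comes solely from the user-supplied engine argument. A minimal post-revert usage sketch, assuming a local ModelSlim checkpoint (the path is a placeholder):

    from vllm import LLM

    # After this revert a ModelSlim-quantized checkpoint must name the
    # method explicitly; check_and_update_config() no longer fills it in.
    llm = LLM(model="/path/to/modelslim-model", quantization="ascend")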
@@ -21,9 +21,6 @@ This module provides the AscendModelSlimConfig class for parsing quantization
 configs generated by the ModelSlim tool, along with model-specific mappings.
 """
 
-import glob
-import json
-import os
 from collections.abc import Mapping
 from types import MappingProxyType
 from typing import Any, Optional
@@ -42,9 +39,6 @@ from vllm_ascend.utils import ASCEND_QUANTIZATION_METHOD
 
 from .methods import get_scheme_class
 
-# The config filename that ModelSlim generates after quantizing a model.
-MODELSLIM_CONFIG_FILENAME = "quant_model_description.json"
-
 logger = init_logger(__name__)
 
 # key: model_type
@@ -424,9 +418,9 @@ class AscendModelSlimConfig(QuantizationConfig):
     quantized using the ModelSlim tool.
     """
 
-    def __init__(self, quant_config: dict[str, Any] | None = None):
+    def __init__(self, quant_config: dict[str, Any]):
         super().__init__()
-        self.quant_description = quant_config if quant_config is not None else {}
+        self.quant_description = quant_config
         # TODO(whx): remove this adaptation after adding "shared_head"
         # to prefix of DeepSeekShareHead in vLLM.
         extra_quant_dict = {}
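The signature flip above restores the requirement that the parsed config dict is supplied at construction time. A compressed timeline of the two construction paths; the description key shown is hypothetical:

    # Pre-revert: the object could start empty and be populated later by
    # maybe_update_config(), hence the Optional default.
    cfg = AscendModelSlimConfig()
    cfg.maybe_update_config("/path/to/model")  # reads quant_model_description.json

    # Post-revert: vllm always routes the parsed JSON through from_config(),
    # so the positional dict is required again.
    cfg = AscendModelSlimConfig({"model_quant_type": "W8A8"})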
@@ -456,12 +450,7 @@ class AscendModelSlimConfig(QuantizationConfig):
 
     @classmethod
     def get_config_filenames(cls) -> list[str]:
-        # Return empty list so that vllm's get_quant_config() skips the
-        # file-based lookup (which raises an unfriendly "Cannot find the
-        # config file for ascend" error when the model is not quantized).
-        # Instead, the config file is loaded in maybe_update_config(),
-        # which can provide a user-friendly error message.
-        return []
+        return ["quant_model_description.json"]
 
     @classmethod
     def from_config(cls, config: dict[str, Any]) -> "AscendModelSlimConfig":
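For orientation, a simplified paraphrase of how vllm's stock lookup consumes get_config_filenames() (a sketch, not vllm's exact code): with the pre-revert `return []`, the loop below never touches the model directory, which is why loading was deferred to maybe_update_config().

    import json
    import os

    def load_quant_config_sketch(model_dir: str, quant_cls):
        # vllm checks the model directory for each declared config filename
        # and feeds the first match to from_config(); an empty filename list
        # skips the directory scan entirely.
        for name in quant_cls.get_config_filenames():
            path = os.path.join(model_dir, name)
            if os.path.isfile(path):
                with open(path) as f:
                    return quant_cls.from_config(json.load(f))
        raise ValueError(f"Cannot find the config file in {model_dir}")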
@@ -573,98 +562,5 @@ class AscendModelSlimConfig(QuantizationConfig):
         assert is_skipped is not None
         return is_skipped
 
-    def maybe_update_config(self, model_name: str) -> None:
-        """Load the ModelSlim quantization config from model directory.
-
-        This method is called by vllm after get_quant_config() returns
-        successfully. Since we return an empty list from get_config_filenames()
-        to bypass vllm's built-in file lookup, we do the actual config loading
-        here and provide user-friendly error messages when the config is missing.
-
-        Args:
-            model_name: Path to the model directory or model name.
-        """
-        # If quant_description is already populated (e.g. from from_config()),
-        # there is nothing to do.
-        if self.quant_description:
-            return
-
-        # Try to find and load the ModelSlim config file
-        if os.path.isdir(model_name):
-            config_path = os.path.join(model_name, MODELSLIM_CONFIG_FILENAME)
-            if os.path.isfile(config_path):
-                with open(config_path) as f:
-                    self.quant_description = json.load(f)
-                self._apply_extra_quant_adaptations()
-                return
-
-            # Check if there are any json files at all to help diagnose
-            json_files = glob.glob(os.path.join(model_name, "*.json"))
-            json_names = [os.path.basename(f) for f in json_files]
-        else:
-            json_names = []
-
-        # Config file not found - raise a friendly error message
-        raise ValueError(
-            "\n"
-            + "=" * 80
-            + "\n"
-            + "ERROR: ModelSlim Quantization Config Not Found\n"
-            + "=" * 80
-            + "\n"
-            + "\n"
-            + f"You have enabled '--quantization {ASCEND_QUANTIZATION_METHOD}' "
-            + "(ModelSlim quantization),\n"
-            + f"but the model at '{model_name}' does not contain the required\n"
-            + f"quantization config file ('{MODELSLIM_CONFIG_FILENAME}').\n"
-            + "\n"
-            + "This usually means the model weights are NOT quantized by "
-            + "ModelSlim.\n"
-            + "\n"
-            + "Please choose one of the following solutions:\n"
-            + "\n"
-            + "  Solution 1: Remove the quantization option "
-            + "(for float/unquantized models)\n"
-            + "  "
-            + "-" * 58
-            + "\n"
-            + f"  Remove '--quantization {ASCEND_QUANTIZATION_METHOD}' from "
-            + "your command if you want to\n"
-            + "  run the model with the original (float) weights.\n"
-            + "\n"
-            + "  Example:\n"
-            + f"    vllm serve {model_name}\n"
-            + "\n"
-            + "  Solution 2: Quantize your model weights with ModelSlim first\n"
-            + "  "
-            + "-" * 58
-            + "\n"
-            + "  Use the ModelSlim tool to quantize your model weights "
-            + "before deployment.\n"
-            + "  After quantization, the model directory should contain "
-            + f"'{MODELSLIM_CONFIG_FILENAME}'.\n"
-            + "  For more information, please refer to:\n"
-            + "    https://gitee.com/ascend/msit/tree/master/msmodelslim\n"
-            + "\n"
-            + (f"  (Found JSON files in model directory: {json_names})\n" if json_names else "")
-            + "=" * 80
-        )
-
-    def _apply_extra_quant_adaptations(self) -> None:
-        """Apply extra adaptations to the quant_description dict.
-
-        This handles known key transformations such as shared_head and
-        weight_packed mappings.
-        """
-        extra_quant_dict = {}
-        for k in self.quant_description:
-            if "shared_head" in k:
-                new_k = k.replace(".shared_head.", ".")
-                extra_quant_dict[new_k] = self.quant_description[k]
-            if "weight_packed" in k:
-                new_k = k.replace("weight_packed", "weight")
-                extra_quant_dict[new_k] = self.quant_description[k]
-        self.quant_description.update(extra_quant_dict)
-
     def get_scaled_act_names(self) -> list[str]:
         return []
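To make the removed key adaptation concrete, a small illustration with made-up layer names and values:

    desc = {
        "model.layers.0.shared_head.head.weight": "W8A8",
        "model.layers.0.mlp.weight_packed": "W8A8",
    }
    # _apply_extra_quant_adaptations() added aliases alongside the originals:
    #   "model.layers.0.head.weight"  (".shared_head." collapsed to ".")
    #   "model.layers.0.mlp.weight"   ("weight_packed" -> "weight")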
@@ -1,147 +0,0 @@
|
||||
#
|
||||
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# This file is a part of the vllm-ascend project.
|
||||
#
|
||||
|
||||
import json
|
||||
import os
|
||||
|
||||
from vllm.logger import init_logger
|
||||
|
||||
from vllm_ascend.quantization.modelslim_config import MODELSLIM_CONFIG_FILENAME
|
||||
from vllm_ascend.utils import ASCEND_QUANTIZATION_METHOD, COMPRESSED_TENSORS_METHOD
|
||||
|
||||
logger = init_logger(__name__)
|
||||
|
||||
|
||||
def detect_quantization_method(model_path: str) -> str | None:
|
||||
"""Auto-detect the quantization method from model directory files.
|
||||
|
||||
This function performs a lightweight check (JSON files and file existence
|
||||
only — no .safetensors or .bin inspection) to determine which quantization
|
||||
method was used to produce the weights in *model_path*.
|
||||
|
||||
Detection priority:
|
||||
1. **ModelSlim (Ascend)** – ``quant_model_description.json`` exists
|
||||
in the model directory.
|
||||
2. **LLM-Compressor (compressed-tensors)** – ``config.json`` contains
|
||||
a ``quantization_config`` section with
|
||||
``"quant_method": "compressed-tensors"``.
|
||||
3. **None** – neither condition is met; the caller should fall back to
|
||||
the default (float) behaviour.
|
||||
|
||||
Args:
|
||||
model_path: Path to the local model directory.
|
||||
|
||||
Returns:
|
||||
``"ascend"`` for ModelSlim models,
|
||||
``"compressed-tensors"`` for LLM-Compressor models,
|
||||
or ``None`` if no quantization signature is found.
|
||||
"""
|
||||
if not os.path.isdir(model_path):
|
||||
return None
|
||||
|
||||
# Case 1: ModelSlim — look for quant_model_description.json
|
||||
modelslim_config_path = os.path.join(model_path, MODELSLIM_CONFIG_FILENAME)
|
||||
if os.path.isfile(modelslim_config_path):
|
||||
return ASCEND_QUANTIZATION_METHOD
|
||||
|
||||
# Case 2: LLM-Compressor — look for compressed-tensors in config.json
|
||||
config_json_path = os.path.join(model_path, "config.json")
|
||||
if os.path.isfile(config_json_path):
|
||||
try:
|
||||
with open(config_json_path) as f:
|
||||
config = json.load(f)
|
||||
quant_cfg = config.get("quantization_config")
|
||||
if isinstance(quant_cfg, dict):
|
||||
quant_method = quant_cfg.get("quant_method", "")
|
||||
if quant_method == COMPRESSED_TENSORS_METHOD:
|
||||
return COMPRESSED_TENSORS_METHOD
|
||||
except (json.JSONDecodeError, OSError):
|
||||
# Malformed or unreadable config.json — skip silently.
|
||||
pass
|
||||
|
||||
# Case 3: No quantization signature found.
|
||||
return None
|
||||
|
||||
|
||||
def maybe_auto_detect_quantization(vllm_config) -> None:
|
||||
"""Auto-detect and apply the quantization method on *vllm_config*.
|
||||
|
||||
This should be called during engine initialisation (from
|
||||
``NPUPlatform.check_and_update_config``) **after** ``VllmConfig`` has been
|
||||
created but **before** heavy weights are loaded.
|
||||
|
||||
Because ``check_and_update_config`` runs *after*
|
||||
``VllmConfig.__post_init__`` has already evaluated
|
||||
``_get_quantization_config`` (which returned ``None`` when
|
||||
``model_config.quantization`` was not set), we must:
|
||||
|
||||
1. Set ``model_config.quantization`` to the detected value.
|
||||
2. Recreate ``vllm_config.quant_config`` so that the quantization
|
||||
pipeline (``get_quant_config`` → ``QuantizationConfig`` →
|
||||
``get_quant_method`` for every layer) is properly initialised.
|
||||
|
||||
Rules:
|
||||
* If the user explicitly set ``--quantization``, that value is
|
||||
respected. A warning is emitted when the detected method differs.
|
||||
* If no ``--quantization`` was given, the detected method (if any) is
|
||||
applied automatically.
|
||||
|
||||
Args:
|
||||
vllm_config: A ``vllm.config.VllmConfig`` instance (mutable).
|
||||
"""
|
||||
model_config = vllm_config.model_config
|
||||
model_path = model_config.model
|
||||
user_quant = model_config.quantization
|
||||
detected = detect_quantization_method(model_path)
|
||||
|
||||
if detected is None:
|
||||
# No quantization signature found — nothing to do.
|
||||
return
|
||||
|
||||
if user_quant is not None:
|
||||
# User explicitly specified a quantization method.
|
||||
if user_quant != detected:
|
||||
logger.warning(
|
||||
"Auto-detected quantization method '%s' from model "
|
||||
"files at '%s', but user explicitly specified "
|
||||
"'--quantization %s'. Respecting the user-specified "
|
||||
"value. If you encounter errors during model loading, "
|
||||
"consider using '--quantization %s' instead.",
|
||||
detected,
|
||||
model_path,
|
||||
user_quant,
|
||||
detected,
|
||||
)
|
||||
return
|
||||
|
||||
# No user-specified quantization — apply auto-detected value.
|
||||
model_config.quantization = detected
|
||||
logger.info(
|
||||
"Auto-detected quantization method '%s' from model files "
|
||||
"at '%s'. To override, pass '--quantization <method>' explicitly.",
|
||||
detected,
|
||||
model_path,
|
||||
)
|
||||
|
||||
# Recreate quant_config on VllmConfig. The original __post_init__
|
||||
# already ran _get_quantization_config(), but at that point
|
||||
# model_config.quantization was None so it returned None. Now that
|
||||
# we've set it, we need to build the actual QuantizationConfig so the
|
||||
# downstream model-loading code can use it.
|
||||
from vllm.config import VllmConfig as _VllmConfig
|
||||
|
||||
vllm_config.quant_config = _VllmConfig._get_quantization_config(model_config, vllm_config.load_config)
|
||||
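As a quick illustration of the detection order the deleted helper implemented (paths are placeholders, and the module itself no longer exists after this commit):

    from vllm_ascend.quantization.utils import detect_quantization_method

    # directory containing quant_model_description.json -> "ascend"
    # config.json declaring quant_method "compressed-tensors" -> "compressed-tensors"
    # anything else, including a non-local HF repo id -> None
    print(detect_quantization_method("/path/to/local/model"))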