[Feature][Quant] Reapply auto-detect quantization format and support remote model ID (#7111)
### What this PR does / why we need it?
Reapply the auto-detect quantization format feature (originally in
#6645, reverted in #6873) and extend it to support remote model
identifiers (e.g., `org/model-name`).
Changes:
- Reapply auto-detection of quantization method from model files
(`quant_model_description.json` for ModelSlim, `config.json` for
compressed-tensors)
- Add `get_model_file()` utility to handle file retrieval from both
local paths and remote repos (HuggingFace Hub / ModelScope); see the
sketch after this list
- Update `detect_quantization_method()` to accept remote repo IDs with
optional `revision` parameter
- Update `maybe_update_config()` to work with remote model identifiers
- Add platform-level `auto_detect_quantization` support
- Add unit tests and e2e tests for both local and remote model ID
scenarios
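
A rough sketch of how the detection described above could fit together (illustration only, not the code in this PR; the field checked for compressed-tensors and the returned method names are assumptions, while the `get_model_file` call shape matches the diff below):

```python
# Illustrative sketch only -- not the implementation added by this PR.
import json
from typing import Optional

from vllm_ascend.quantization.utils import get_model_file  # utility added by this PR


def detect_quantization_method(model: str,
                               revision: Optional[str] = None) -> Optional[str]:
    """Map the files shipped with a local dir or remote repo id to a quant format."""
    # ModelSlim checkpoints ship a quant_model_description.json.
    if get_model_file(model, "quant_model_description.json", revision=revision):
        return "ascend"
    # compressed-tensors checkpoints record their method inside config.json
    # (assumption: under quantization_config.quant_method).
    config_path = get_model_file(model, "config.json", revision=revision)
    if config_path is not None:
        with open(config_path) as f:
            quant_cfg = json.load(f).get("quantization_config") or {}
        if quant_cfg.get("quant_method") == "compressed-tensors":
            return "compressed-tensors"
    # Nothing recognizable: leave --quantization unset.
    return None
```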
Closes #6836
### Does this PR introduce _any_ user-facing change?
Yes. When `--quantization` is not explicitly specified, vllm-ascend will
now automatically detect the quantization format from the model files
for both local directories and remote model IDs.
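
For example, a ModelSlim-quantized checkpoint can now be loaded without naming the format (hypothetical model id; offline API shown, the `vllm serve` CLI behaves the same way):

```python
from vllm import LLM

# Previously this required quantization="ascend"; the format is now read from
# the checkpoint's quant_model_description.json (or config.json) automatically.
llm = LLM(model="org/model-name")
```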
- vLLM version: v0.16.0
- vLLM main: 4034c3d32e
---------
Signed-off-by: SlightwindSec <slightwindsec@gmail.com>
@@ -21,6 +21,9 @@ This module provides the AscendModelSlimConfig class for parsing quantization
 configs generated by the ModelSlim tool, along with model-specific mappings.
 """
 
+import glob
+import json
+import os
 from collections.abc import Mapping
 from types import MappingProxyType
 from typing import Any, Optional
@@ -39,6 +42,9 @@ from vllm_ascend.utils import ASCEND_QUANTIZATION_METHOD
 
 from .methods import get_scheme_class
 
+# The config filename that ModelSlim generates after quantizing a model.
+MODELSLIM_CONFIG_FILENAME = "quant_model_description.json"
+
 logger = init_logger(__name__)
 
 # key: model_type
@@ -397,9 +403,9 @@ class AscendModelSlimConfig(QuantizationConfig):
     quantized using the ModelSlim tool.
     """
 
-    def __init__(self, quant_config: dict[str, Any]):
+    def __init__(self, quant_config: dict[str, Any] | None = None):
         super().__init__()
-        self.quant_description = quant_config
+        self.quant_description = quant_config if quant_config is not None else {}
         # TODO(whx): remove this adaptation after adding "shared_head"
         # to prefix of DeepSeekShareHead in vLLM.
         extra_quant_dict = {}
@@ -433,7 +439,12 @@ class AscendModelSlimConfig(QuantizationConfig):
 
     @classmethod
     def get_config_filenames(cls) -> list[str]:
-        return ["quant_model_description.json"]
+        # Return empty list so that vllm's get_quant_config() skips the
+        # file-based lookup (which raises an unfriendly "Cannot find the
+        # config file for ascend" error when the model is not quantized).
+        # Instead, the config file is loaded in maybe_update_config(),
+        # which can provide a user-friendly error message.
+        return []
 
     @classmethod
     def from_config(cls, config: dict[str, Any]) -> "AscendModelSlimConfig":
@@ -604,5 +615,108 @@ class AscendModelSlimConfig(QuantizationConfig):
         assert is_skipped is not None
         return is_skipped
 
+    def maybe_update_config(self, model_name: str, revision: str | None = None) -> None:
+        """Load the ModelSlim quantization config from model directory.
+
+        This method is called by vllm after get_quant_config() returns
+        successfully. Since we return an empty list from get_config_filenames()
+        to bypass vllm's built-in file lookup, we do the actual config loading
+        here and provide user-friendly error messages when the config is missing.
+
+        Works with both local directories (``/path/to/model``) and remote
+        repository identifiers (``org/model-name``). For remote repos the
+        lookup goes through the HuggingFace / ModelScope cache via
+        ``get_model_file`` to fetch the config if not already cached.
+
+        Args:
+            model_name: Path to the model directory or HuggingFace /
+                ModelScope repo id.
+            revision: Optional revision (branch, tag, or commit hash) for
+                remote repos.
+        """
+        from vllm_ascend.quantization.utils import get_model_file
+
+        # If quant_description is already populated (e.g. from from_config()),
+        # there is nothing to do.
+        if self.quant_description:
+            return
+
+        # Try to get the config file (local or remote)
+        config_path = get_model_file(model_name, MODELSLIM_CONFIG_FILENAME, revision=revision)
+
+        if config_path is not None:
+            with open(config_path) as f:
+                self.quant_description = json.load(f)
+            self._apply_extra_quant_adaptations()
+            return
+
+        # Collect diagnostic info for the error message
+        json_names: list[str] = []
+        if os.path.isdir(model_name):
+            json_files = glob.glob(os.path.join(model_name, "*.json"))
+            json_names = [os.path.basename(f) for f in json_files]
+
+        # Config file not found - raise a friendly error message
+        raise ValueError(
+            "\n"
+            + "=" * 80
+            + "\n"
+            + "ERROR: ModelSlim Quantization Config Not Found\n"
+            + "=" * 80
+            + "\n"
+            + "\n"
+            + f"You have enabled '--quantization {ASCEND_QUANTIZATION_METHOD}' "
+            + "(ModelSlim quantization),\n"
+            + f"but the model '{model_name}' does not contain the required\n"
+            + f"quantization config file ('{MODELSLIM_CONFIG_FILENAME}').\n"
+            + "\n"
+            + "This usually means the model weights are NOT quantized by "
+            + "ModelSlim.\n"
+            + "\n"
+            + "Please choose one of the following solutions:\n"
+            + "\n"
+            + " Solution 1: Remove the quantization option "
+            + "(for float/unquantized models)\n"
+            + " "
+            + "-" * 58
+            + "\n"
+            + f" Remove '--quantization {ASCEND_QUANTIZATION_METHOD}' from "
+            + "your command if you want to\n"
+            + " run the model with the original (float) weights.\n"
+            + "\n"
+            + " Example:\n"
+            + f" vllm serve {model_name}\n"
+            + "\n"
+            + " Solution 2: Quantize your model weights with ModelSlim first\n"
+            + " "
+            + "-" * 58
+            + "\n"
+            + " Use the ModelSlim tool to quantize your model weights "
+            + "before deployment.\n"
+            + " After quantization, the model directory should contain "
+            + f"'{MODELSLIM_CONFIG_FILENAME}'.\n"
+            + " For more information, please refer to:\n"
+            + " https://gitee.com/ascend/msit/tree/master/msmodelslim\n"
+            + "\n"
+            + (f" (Found JSON files in model directory: {json_names})\n" if json_names else "")
+            + "=" * 80
+        )
+
+    def _apply_extra_quant_adaptations(self) -> None:
+        """Apply extra adaptations to the quant_description dict.
+
+        This handles known key transformations such as shared_head and
+        weight_packed mappings.
+        """
+        extra_quant_dict = {}
+        for k in self.quant_description:
+            if "shared_head" in k:
+                new_k = k.replace(".shared_head.", ".")
+                extra_quant_dict[new_k] = self.quant_description[k]
+            if "weight_packed" in k:
+                new_k = k.replace("weight_packed", "weight")
+                extra_quant_dict[new_k] = self.quant_description[k]
+        self.quant_description.update(extra_quant_dict)
+
     def get_scaled_act_names(self) -> list[str]:
         return []
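
For context, the `get_model_file` helper imported by `maybe_update_config` above is not part of this hunk. A minimal sketch of the behaviour its docstring implies, assuming a HuggingFace Hub fallback only (the real utility in `vllm_ascend/quantization/utils.py` also covers ModelScope and may differ in detail):

```python
import os
from typing import Optional

from huggingface_hub import hf_hub_download


def get_model_file(model: str, filename: str,
                   revision: Optional[str] = None) -> Optional[str]:
    """Return a local path to `filename` for a local directory or remote repo id."""
    if os.path.isdir(model):
        # Local checkout: just look next to the weights.
        candidate = os.path.join(model, filename)
        return candidate if os.path.isfile(candidate) else None
    try:
        # Remote repo id: fetch into (or reuse from) the hub cache.
        return hf_hub_download(model, filename, revision=revision)
    except Exception:
        # Missing file or unreachable repo: let the caller report it.
        return None
```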