[Feature][Quant] Reapply auto-detect quantization format and support remote model ID (#7111)
### What this PR does / why we need it?
Reapply the auto-detect quantization format feature (originally in
#6645, reverted in #6873) and extend it to support remote model
identifiers (e.g., `org/model-name`).
Changes:
- Reapply auto-detection of quantization method from model files
(`quant_model_description.json` for ModelSlim, `config.json` for
compressed-tensors)
- Add `get_model_file()` utility to handle file retrieval from both
local paths and remote repos (HuggingFace Hub / ModelScope)
- Update `detect_quantization_method()` to accept remote repo IDs with
optional `revision` parameter
- Update `maybe_update_config()` to work with remote model identifiers
- Add platform-level `auto_detect_quantization` support
- Add unit tests and e2e tests for both local and remote model ID
scenarios
Closes #6836
### Does this PR introduce _any_ user-facing change?
Yes. When `--quantization` is not explicitly specified, vllm-ascend will
now automatically detect the quantization format from the model files
for both local directories and remote model IDs.
- vLLM version: v0.16.0
- vLLM main:
4034c3d32e
---------
Signed-off-by: SlightwindSec <slightwindsec@gmail.com>
This commit is contained in:
@@ -1,3 +1,6 @@
|
||||
import json
|
||||
import os
|
||||
import tempfile
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
from vllm.model_executor.layers.fused_moe import FusedMoE
|
||||
@@ -7,6 +10,7 @@ from vllm.model_executor.layers.linear import LinearBase
|
||||
from tests.ut.base import TestBase
|
||||
from vllm_ascend.ops.linear import AscendUnquantizedLinearMethod
|
||||
from vllm_ascend.quantization.modelslim_config import (
|
||||
MODELSLIM_CONFIG_FILENAME,
|
||||
AscendModelSlimConfig,
|
||||
)
|
||||
from vllm_ascend.utils import ASCEND_QUANTIZATION_METHOD
|
||||
@@ -53,7 +57,7 @@ class TestAscendModelSlimConfig(TestBase):
|
||||
|
||||
def test_get_config_filenames(self):
    """get_config_filenames advertises only the ModelSlim description file.

    FIX: removed the stale duplicate assertion ``assertEqual(filenames, [])``
    (leftover removed-line from the diff of the pre-auto-detect behavior);
    it directly contradicted the assertion above and would always fail.
    """
    filenames = AscendModelSlimConfig.get_config_filenames()
    self.assertEqual(filenames, ["quant_model_description.json"])
|
||||
|
||||
def test_from_config(self):
|
||||
config = AscendModelSlimConfig.from_config(self.sample_config)
|
||||
@@ -161,5 +165,90 @@ class TestAscendModelSlimConfig(TestBase):
|
||||
with self.assertRaises(ValueError):
|
||||
config.is_layer_skipped_ascend("fused_layer", fused_mapping)
|
||||
|
||||
def test_init_with_none_config(self):
    """Constructing with an explicit ``None`` yields an empty description."""
    cfg = AscendModelSlimConfig(None)
    self.assertEqual({}, cfg.quant_description)
|
||||
|
||||
def test_init_with_default_config(self):
    """Constructing with no argument also yields an empty description."""
    cfg = AscendModelSlimConfig()
    self.assertEqual({}, cfg.quant_description)
|
||||
|
||||
def test_maybe_update_config_already_populated(self):
    """maybe_update_config is a no-op once quant_description is populated."""
    # Precondition: the fixture config already carries a description.
    description = self.ascend_config.quant_description
    self.assertTrue(len(description) > 0)

    self.ascend_config.maybe_update_config("/some/model/path")

    # Nothing should have been reloaded or overwritten by the call.
    self.assertEqual(self.ascend_config.quant_description,
                     self.sample_config)
|
||||
|
||||
def test_maybe_update_config_loads_from_file(self):
    """An empty config loads quant_model_description.json from the model dir."""
    cfg = AscendModelSlimConfig()
    self.assertEqual(cfg.quant_description, {})

    expected = {"layer1.weight": "INT8", "layer2.weight": "FLOAT"}
    with tempfile.TemporaryDirectory() as model_dir:
        description_path = os.path.join(model_dir, MODELSLIM_CONFIG_FILENAME)
        with open(description_path, "w") as fp:
            json.dump(expected, fp)

        cfg.maybe_update_config(model_dir)

        # The on-disk description must now back the config object.
        self.assertEqual(cfg.quant_description, expected)
|
||||
|
||||
def test_maybe_update_config_raises_when_file_missing(self):
    """A model dir without the ModelSlim json must raise ValueError."""
    cfg = AscendModelSlimConfig()

    with tempfile.TemporaryDirectory() as empty_dir:
        with self.assertRaises(ValueError) as caught:
            cfg.maybe_update_config(empty_dir)

        message = str(caught.exception)
        # The error should name both the failure and the expected filename.
        self.assertIn("ModelSlim Quantization Config Not Found", message)
        self.assertIn(MODELSLIM_CONFIG_FILENAME, message)
|
||||
|
||||
def test_maybe_update_config_raises_with_json_files_listed(self):
    """The ValueError message lists json files found in the model dir."""
    cfg = AscendModelSlimConfig()

    with tempfile.TemporaryDirectory() as model_dir:
        # An unrelated json file that is NOT the ModelSlim description.
        stray_path = os.path.join(model_dir, "config.json")
        with open(stray_path, "w") as fp:
            json.dump({"dummy": True}, fp)

        with self.assertRaises(ValueError) as caught:
            cfg.maybe_update_config(model_dir)

        # The stray file should be surfaced in the error text.
        self.assertIn("config.json", str(caught.exception))
|
||||
|
||||
def test_maybe_update_config_non_directory_raises(self):
    """A path that is not a directory raises the not-found ValueError."""
    cfg = AscendModelSlimConfig()

    with self.assertRaises(ValueError) as caught:
        cfg.maybe_update_config("not_a_real_directory_path")

    self.assertIn("ModelSlim Quantization Config Not Found",
                  str(caught.exception))
|
||||
|
||||
def test_apply_extra_quant_adaptations_shared_head(self):
    """A shared_head entry is mirrored onto the plain layer weight key."""
    cfg = AscendModelSlimConfig()
    cfg.quant_description = {"model.layers.0.shared_head.weight": "INT8"}

    cfg._apply_extra_quant_adaptations()

    # The adaptation adds an alias key with the same quant type.
    self.assertIn("model.layers.0.weight", cfg.quant_description)
    self.assertEqual("INT8", cfg.quant_description["model.layers.0.weight"])
|
||||
|
||||
def test_apply_extra_quant_adaptations_weight_packed(self):
    """A weight_packed entry is mirrored onto the plain layer weight key."""
    cfg = AscendModelSlimConfig()
    cfg.quant_description = {"model.layers.0.weight_packed": "INT8"}

    cfg._apply_extra_quant_adaptations()

    # The adaptation adds an alias key with the same quant type.
    self.assertIn("model.layers.0.weight", cfg.quant_description)
    self.assertEqual("INT8", cfg.quant_description["model.layers.0.weight"])
|
||||
|
||||
def test_get_scaled_act_names(self):
    """ModelSlim configs define no scaled activation names."""
    names = self.ascend_config.get_scaled_act_names()
    self.assertEqual(names, [])
|
||||
|
||||
Reference in New Issue
Block a user