[Model] Add Qwen3-Omni quantization Ascend NPU adaptation and optimization (#6828)
### What this PR does / why we need it?
This pull request adapts quantization support for Qwen3-Omni. Through
patch-based modifications, it delivers operator-level optimizations and
improvements to the AUT (Auto-Quantization Tuning) component.
### Does this PR introduce _any_ user-facing change?
### How was this patch tested?
- vLLM version: v0.15.0
- vLLM main: 83b47f67b1
---------
Signed-off-by: tanhaoan333 <tanhaoan@huawei.com>
This commit is contained in:
@@ -64,6 +64,13 @@ QUANT_MODEL_PREFIX_MAPPINGS: dict[str, dict[str, str]] = {
|
||||
"mm_projector.linear_1": "mm_projector.proj.0",
|
||||
"mm_projector.linear_2": "mm_projector.proj.2",
|
||||
},
|
||||
"qwen3_omni_moe_thinker": {
|
||||
"thinker.lm_head.": "language_model.lm_head.",
|
||||
"thinker.model.": "language_model.model.",
|
||||
"thinker.": "",
|
||||
"lm_head.": "language_model.lm_head.",
|
||||
"model.": "language_model.model.",
|
||||
},
|
||||
}
|
||||
|
||||
# key: model_type
|
||||
@@ -186,6 +193,18 @@ packed_modules_model_mapping: dict[str, dict[str, list[str]]] = {
|
||||
],
|
||||
"experts": ["experts.0.w1", "experts.0.w2", "experts.0.w3"],
|
||||
},
|
||||
"qwen3_omni_moe_text": {
|
||||
"qkv_proj": [
|
||||
"q_proj",
|
||||
"k_proj",
|
||||
"v_proj",
|
||||
],
|
||||
"gate_up_proj": [
|
||||
"gate_proj",
|
||||
"up_proj",
|
||||
],
|
||||
"experts": ["experts.0.gate_proj", "experts.0.up_proj", "experts.0.down_proj"],
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@@ -457,7 +476,10 @@ class AscendModelSlimConfig(QuantizationConfig):
|
||||
"to have the same precision."
|
||||
)
|
||||
else:
|
||||
is_skipped = self.quant_description[prefix + ".weight"] == "FLOAT"
|
||||
is_skipped = any(
|
||||
key.startswith(prefix) and key.endswith(".weight") and value == "FLOAT"
|
||||
for key, value in self.quant_description.items()
|
||||
)
|
||||
|
||||
assert is_skipped is not None
|
||||
return is_skipped
|
||||
|
||||
Reference in New Issue
Block a user