; mypy.ini — mypy static type checking configuration (read by mypy via configparser).
[mypy]
; warn_return_any = True
warn_unused_configs = True

; Suppress all missing import errors from torch_npu for mypy.
[mypy-torch_npu.*]
ignore_missing_imports = True

[mypy-torchair.*]
ignore_missing_imports = True

[mypy-transformers.*]
ignore_missing_imports = True

[mypy-lm_eval.*]
ignore_missing_imports = True

[mypy-compressed_tensors.*]
ignore_missing_imports = True

[mypy-datasets.*]
ignore_missing_imports = True

[mypy-llmcompressor.*]
ignore_missing_imports = True

[mypy-msprobe.*]
ignore_missing_imports = True
; NOTE(review): "allow_untyped_imports" is not a documented mypy option — verify intent.
; mypy's documented per-module options here are ignore_missing_imports and
; follow_untyped_imports; unknown keys produce a warning and are ignored.
allow_untyped_imports = True