Adopt inductor fusion and define quantization fusion pass (#4168)
### What this PR does / why we need it?
The main goal of this PR is to alleviate the high maintenance burden of model duplication during model optimization. Some of our optimized models diverge only slightly from vLLM's modeling code, yet they still require rewriting several parts of the original, which brings a non-negligible maintenance burden to vllm-ascend. To solve this, we propose leveraging `torch.compile` and the Inductor pattern matcher to automatically fuse the patterns we want to merge. For more details, refer to the RFC: https://github.com/vllm-project/vllm-ascend/issues/4239

This PR fuses `AddRMSNorm` with the `Quant` operator, which improves inference speed for models using `w8a8` quantization.
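To illustrate the mechanism (a minimal sketch, not the actual vllm-ascend pass), here is how an Inductor pattern-matcher rewrite can replace an unfused add + RMSNorm + quantize subgraph with a single call. `fused_add_rms_norm_quant` below is a hypothetical stand-in for the real fused NPU kernel, and the real pattern also carries the updated residual:

```python
# Minimal sketch of Inductor pattern-matcher fusion. Assumptions:
# `fused_add_rms_norm_quant` stands in for the real fused NPU kernel;
# the actual vllm-ascend pass lives in its GraphFusionPassManager.
import torch
from torch._inductor.pattern_matcher import (PatternMatcherPass, fwd_only,
                                             register_replacement)

fusion_pass = PatternMatcherPass(pass_name="add_rms_norm_quant_fusion")


def fused_add_rms_norm_quant(x, residual, weight, scale):
    # Stand-in kernel: numerically equivalent to the unfused pattern below.
    h = x + residual
    v = h * torch.rsqrt(h.pow(2).mean(-1, keepdim=True) + 1e-6) * weight
    return torch.clamp(torch.round(v / scale), -128, 127).to(torch.int8)


def pattern(x, residual, weight, scale):
    # The unfused subgraph: residual add, RMSNorm, then static int8 quant.
    h = x + residual
    v = h * torch.rsqrt(h.pow(2).mean(-1, keepdim=True) + 1e-6) * weight
    return torch.clamp(torch.round(v / scale), -128, 127).to(torch.int8)


def replacement(x, residual, weight, scale):
    # Single fused call that replaces the whole matched subgraph.
    return fused_add_rms_norm_quant(x, residual, weight, scale)


# Trace the pattern once with example inputs and register the rewrite.
example_inputs = [
    torch.randn(4, 64),   # x
    torch.randn(4, 64),   # residual
    torch.randn(64),      # weight
    torch.tensor(0.05),   # scale
]
register_replacement(pattern, replacement, example_inputs, fwd_only,
                     fusion_pass)
```

At compile time such a pass is applied to the post-grad FX graph via `fusion_pass.apply(graph)`, so the rewrite happens automatically without touching the modeling code.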
### Does this PR introduce _any_ user-facing change?
Yes. It adds a new `ascend_compilation_config` section to `additional_config`.
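For reference, a sketch of how the new setting could be passed; only the `ascend_compilation_config` key comes from this PR, and the empty dict is a placeholder since its inner fields are not shown here:

```python
from vllm import LLM

# Hypothetical usage sketch: `ascend_compilation_config` is the new key
# added by this PR; its inner options are elided (placeholder empty dict).
llm = LLM(
    model="vllm-ascend/Qwen3-8B-W8A8",
    quantization="ascend",
    additional_config={
        "ascend_compilation_config": {},
    },
)
```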
### How was this patch tested?
```python
from vllm import LLM, SamplingParams


def main():
    prompts = [
        "The president of the United States is Mr.",
    ]
    # Create a sampling params object.
    sampling_params = SamplingParams(max_tokens=100, temperature=0.6,
                                     top_k=40, top_p=0.95)
    # Create an LLM.
    llm = LLM(
        model="/root/.cache/modelscope/hub/models/vllm-ascend/Qwen3-8B-W8A8",
        # enforce_eager=True,
        tensor_parallel_size=1,
        trust_remote_code=True,
        gpu_memory_utilization=0.7,
        quantization="ascend",
    )
    # Generate texts from the prompts.
    outputs = llm.generate(prompts, sampling_params)
    for output in outputs:
        prompt = output.prompt
        generated_text = output.outputs[0].text
        print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")


if __name__ == "__main__":
    main()
```
```text
Prompt: 'The president of the United States is Mr.', Generated text: ' Trump. The president of the United States is Mr. Biden. Which of the following statements is correct? \n\nA. Mr. Trump is Mr. Biden. \nB. Mr. Trump is not Mr. Biden. \nC. The president of the United States is not Mr. Trump. \nD. The president of the United States is not Mr. Biden.\n\nThe question presents a contradiction: it states that "The president of the United States is Mr. Trump" and "The president of'
```
- vLLM version: 86e178f7c4d8c3b0eaf3c8e3f810a83f63b90e24
- vLLM main: 86e178f7c4
---------
Signed-off-by: Icey <1790571317@qq.com>
Signed-off-by: wxsIcey <1790571317@qq.com>
Excerpt of the `NPUPlatform` changes:

```diff
@@ -66,6 +66,32 @@ class NPUPlatform(Platform):
     def is_sleep_mode_available(self) -> bool:
         return True
 
+    @property
+    def pass_key(self) -> str:
+        """
+        Inductor config key for the PassManager custom pass, for example 'post_grad_custom_post_pass'.
+        It is a parameter of inductor_config used to register custom passes.
+        Currently, we only use Inductor's 'pattern matcher' functionality, so we define our own pass_key.
+        """
+        return "graph_fusion_manager"
+
+    @classmethod
+    def get_pass_manager_cls(cls) -> str:
+        """
+        Get the pass manager class for this platform.
+        It will be registered as a custom pass under the current_platform.pass_key.
+        """
+        return "vllm_ascend.compilation.graph_fusion_pass_manager.GraphFusionPassManager"
+
+    @classmethod
+    def get_compile_backend(self) -> str:
+        """
+        Get the custom compile backend. Previously, we used EagerAdaptor by default.
+        To use graph fusion operations, we defined our own backend compiler.
+        """
+        from vllm_ascend.compilation.compiler_interface import AscendCompiler
+        return AscendCompiler.__module__ + "." + AscendCompiler.__name__
+
     @classmethod
     def pre_register_and_update(cls,
                                 parser: Optional[FlexibleArgumentParser] = None
@@ -135,6 +161,13 @@ class NPUPlatform(Platform):
         parallel_config = vllm_config.parallel_config
         cache_config = vllm_config.cache_config
         ascend_scheduler_config = ascend_config.ascend_scheduler_config
+        ascend_compilation_config = ascend_config.ascend_compilation_config
+        if ascend_compilation_config:
+            vllm_config.additional_config.setdefault(
+                "ascend_compilation_config", {}).update(
+                    vars(ascend_compilation_config
+                         ) if not isinstance(ascend_compilation_config, dict)
+                    else ascend_compilation_config)
 
         kv_cache_dtype = vllm_config.additional_config.get(
             "kv_cache_dtype", None)
@@ -214,6 +247,9 @@ class NPUPlatform(Platform):
         if compilation_config.cudagraph_mode == CUDAGraphMode.FULL_AND_PIECEWISE:
             compilation_config.cudagraph_mode = CUDAGraphMode.PIECEWISE
 
+        from vllm_ascend.compilation.compiler_interface import AscendCompiler
+        compilation_config.oot_compiler = AscendCompiler.__module__ + "." + AscendCompiler.__name__
+
         if compilation_config.cudagraph_mode == CUDAGraphMode.NONE:
             compilation_config.mode = CompilationMode.NONE
         elif compilation_config.cudagraph_mode == CUDAGraphMode.PIECEWISE:
```
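The `pass_key` docstring above points at Inductor's stock hook for custom passes. As a point of comparison, a minimal sketch of that standard mechanism (the pass function name here is illustrative, not from this PR):

```python
import torch


def my_post_grad_pass(graph: torch.fx.Graph) -> None:
    # A custom post-grad pass receives the FX graph after Inductor's own
    # post-grad passes; a pattern-matcher pass would call
    # `fusion_pass.apply(graph)` here.
    pass


model = torch.nn.Linear(8, 8)
compiled = torch.compile(
    model,
    # Standard Inductor config hook; this PR instead registers its pass
    # manager under the custom key "graph_fusion_manager".
    options={"post_grad_custom_post_pass": my_post_grad_pass},
)
```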