[kernel] add AscendC op: lightning_indexer and sparse_flash_attention (#4625)
### What this PR does / why we need it?
Provide the high-performance AscendC operators `lightning_indexer` and `sparse_flash_attention` to boost the execution performance of the DeepSeek v3.2 model, and adapt the two operators to the vllm-ascend framework.

### Does this PR introduce _any_ user-facing change?
No (underlying operator optimizations only; no user-facing changes).

### How was this patch tested?
- vLLM version: v0.11.2
- vLLM main: https://github.com/vllm-project/vllm/commit/v0.11.2

Signed-off-by: MingYang119 <songmingyang@huawei.com>
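For quick orientation before the diff: the framework-side change gates the custom-op import on whether the model's Hugging Face config carries an `index_topk` attribute, which marks DeepSeek v3.2-style sparse-attention models. Below is a minimal, self-contained sketch of that check; `SimpleNamespace` and the placeholder value stand in for the real config object and are illustrative assumptions, not the actual DeepSeek config.

```python
# Sketch only: mirrors the use_sparse detection shown in the NPUWorker diff below.
# SimpleNamespace stands in for vllm_config.model_config.hf_config; the value of
# index_topk is a placeholder -- only the attribute's presence matters here.
from types import SimpleNamespace

hf_config = SimpleNamespace(index_topk=2048)   # DeepSeek v3.2-style config (placeholder value)
use_sparse = hasattr(hf_config, "index_topk")  # True -> the worker imports custom_ops
print(f"use_sparse = {use_sparse}")
```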
@@ -93,21 +93,6 @@ class NPUWorker(WorkerBase):

```python
        # init ascend config and soc version
        init_ascend_config(vllm_config)
        check_ascend_device_type()
        use_sparse = False
        if vllm_config.model_config is not None:
            use_sparse = hasattr(vllm_config.model_config.hf_config,
                                 "index_topk")
        if use_sparse:
            # Direct import instead of using try_register_lib to ensure proper
            # error handling when custom_ops is necessary but not available
            # (e.g., in DeepSeek v3.2 deployments)
            # yapf: disable
            import custom_ops  # type: ignore # noqa
            # yapf: enable
            logger.info(
                "custom_ops module loaded successfully. Custom operators like "
                "torch.ops.custom.npu_sparse_flash_attention are now available."
            )

        super().__init__(vllm_config=vllm_config,
                         local_rank=local_rank,
```
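Once `import custom_ops` succeeds, the operators become reachable through the `torch.ops.custom` namespace, as the log message above indicates. The following is a minimal sketch of how a caller could verify registration at runtime; it only checks availability of the op named in the diff and does not call it, since its signature is not part of this excerpt.

```python
# Sketch only: confirm that importing custom_ops registered the AscendC operator
# referenced in the log message above (torch.ops.custom.npu_sparse_flash_attention).
import torch

try:
    import custom_ops  # type: ignore # noqa: F401  (registers the custom operators)
except ImportError:
    print("custom_ops is not installed; the AscendC sparse attention op is unavailable")
else:
    available = hasattr(torch.ops.custom, "npu_sparse_flash_attention")
    print(f"npu_sparse_flash_attention registered: {available}")
```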