### What this PR does / why we need it? We noticed that `patch_main` is never used. Usually a patch applies to all versions, and when it is for a specific version we can use `vllm_version_is` instead. So let's remove the unused sub-folder in the patch module to make this clearer. - vLLM version: v0.11.0rc3 - vLLM main: https://github.com/vllm-project/vllm/commit/v0.11.0 Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
17 lines
1020 B
Python
17 lines
1020 B
Python
"""Monkey-patch vLLM kernel entry points with Ascend NPU implementations.

Importing this module rebinds selected functions/classes inside vLLM's
``model_executor.layers`` sub-packages (FLA ops and Mamba causal-conv1d
ops) to the NPU-specific implementations shipped in ``vllm_ascend.ops``.
It has no public API of its own — the import's side effects are the point.
"""

# Import the vLLM modules whose attributes are rebound below.
import vllm.model_executor.layers.fla.ops.chunk
import vllm.model_executor.layers.fla.ops.fused_recurrent
import vllm.model_executor.layers.fla.ops.layernorm_guard
import vllm.model_executor.layers.mamba.ops.causal_conv1d

# NOTE: "casual_conv1d" (sic) is the actual module name in vllm_ascend.
from vllm_ascend.ops.casual_conv1d import (
    causal_conv1d_fn,
    causal_conv1d_update_npu,
)
from vllm_ascend.ops.fla import LayerNormFn, torch_chunk_gated_delta_rule
from vllm_ascend.ops.sigmoid_gating import (
    fused_recurrent_gated_delta_rule_fwd_kernel,
)

# --- Mamba: causal conv1d kernels -> NPU implementations -------------------
vllm.model_executor.layers.mamba.ops.causal_conv1d.causal_conv1d_fn = causal_conv1d_fn
vllm.model_executor.layers.mamba.ops.causal_conv1d.causal_conv1d_update = causal_conv1d_update_npu

# --- FLA: gated delta rule + layernorm guard -> NPU implementations --------
vllm.model_executor.layers.fla.ops.chunk.chunk_gated_delta_rule = torch_chunk_gated_delta_rule
vllm.model_executor.layers.fla.ops.fused_recurrent.fused_recurrent_gated_delta_rule_fwd_kernel = fused_recurrent_gated_delta_rule_fwd_kernel
vllm.model_executor.layers.fla.ops.layernorm_guard.LayerNormFn = LayerNormFn