ggml vulkan: add hardsigmoid and hardswish operations (#15762)

Author: Shin-myoung-serp
Date: 2025-09-04 03:22:55 +09:00
Committed by: GitHub
Commit: 0014fb4add (parent 661ae31c9c)
4 changed files with 70 additions and 0 deletions
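The commit touches four files, but only the ggml-vulkan.cpp hunks are reproduced in this view; the remaining changes are presumably the two new GLSL compute shaders and their registration in the shader generator. Both operators are cheap piecewise-linear activations. As a minimal scalar sketch of the math the Vulkan kernels must reproduce (helper names are illustrative, not from the patch; the formulas match ggml's CPU definitions):

    #include <algorithm>

    // hardsigmoid(x) = min(1, max(0, (x + 3) / 6))
    // hardswish(x)   = x * hardsigmoid(x)
    static float ref_hardsigmoid(float x) {
        return std::min(1.0f, std::max(0.0f, (x + 3.0f) / 6.0f));
    }
    static float ref_hardswish(float x) {
        return x * ref_hardsigmoid(x);
    }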

ggml/src/ggml-vulkan/ggml-vulkan.cpp

@@ -529,6 +529,8 @@ struct vk_device_struct {
     vk_pipeline pipeline_relu[2];
     vk_pipeline pipeline_tanh[2];
     vk_pipeline pipeline_sigmoid[2];
+    vk_pipeline pipeline_hardsigmoid[2];
+    vk_pipeline pipeline_hardswish[2];
     vk_pipeline pipeline_geglu[2];
     vk_pipeline pipeline_reglu[2];
@@ -3261,6 +3263,8 @@ static void ggml_vk_load_shaders(vk_device& device) {
     CREATE_UNARY(relu)
     CREATE_UNARY(tanh)
     CREATE_UNARY(sigmoid)
+    CREATE_UNARY(hardsigmoid)
+    CREATE_UNARY(hardswish)
 #undef CREATE_UNARY
 #define CREATE_GLU(name) \
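CREATE_UNARY stamps out one pipeline pair per operator via token pasting, so the two added lines are the entire registration step. A rough stand-in for the pattern (hypothetical body; the real macro instead calls ggml_vk_create_pipeline with the SPIR-V generated from the operator's .comp shader):

    #include <cstdio>

    // Hypothetical sketch of the token-pasting pattern only: the real
    // CREATE_UNARY fills pipeline_<name>[0] (F32) and pipeline_<name>[1]
    // (F16) from the generated <name>_f32 / <name>_f16 shader variants.
    #define CREATE_UNARY(name) \
        std::printf("pipeline_%s[0] <- %s_f32 SPIR-V\n", #name, #name); \
        std::printf("pipeline_%s[1] <- %s_f16 SPIR-V\n", #name, #name);

    int main() {
        CREATE_UNARY(hardsigmoid)
        CREATE_UNARY(hardswish)
    }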
@@ -7533,6 +7537,10 @@ static vk_pipeline ggml_vk_op_get_pipeline(ggml_backend_vk_context * ctx, const
         return ctx->device->pipeline_tanh[dst->type == GGML_TYPE_F16];
     case GGML_UNARY_OP_SIGMOID:
         return ctx->device->pipeline_sigmoid[dst->type == GGML_TYPE_F16];
+    case GGML_UNARY_OP_HARDSIGMOID:
+        return ctx->device->pipeline_hardsigmoid[dst->type == GGML_TYPE_F16];
+    case GGML_UNARY_OP_HARDSWISH:
+        return ctx->device->pipeline_hardswish[dst->type == GGML_TYPE_F16];
     default:
         break;
     }
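Each pipeline_*[2] pair keeps the F32 variant at index 0 and the F16 variant at index 1, so the boolean comparison doubles as the array index. An illustration of that convention (demo names, not patch code):

    #include <cassert>

    int main() {
        // Mirrors pipeline_<op>[dst->type == GGML_TYPE_F16]: the bool
        // converts to 0 (F32 pipeline) or 1 (F16 pipeline).
        const char * pipeline_hardswish[2] = { "hardswish_f32", "hardswish_f16" };
        bool is_f16 = true; // stands in for dst->type == GGML_TYPE_F16
        assert(pipeline_hardswish[is_f16] == pipeline_hardswish[1]);
    }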
@@ -10201,6 +10209,8 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgr
         case GGML_UNARY_OP_RELU:
         case GGML_UNARY_OP_TANH:
         case GGML_UNARY_OP_SIGMOID:
+        case GGML_UNARY_OP_HARDSIGMOID:
+        case GGML_UNARY_OP_HARDSWISH:
             break;
         default:
             return false;
@@ -10571,6 +10581,8 @@ static bool ggml_vk_build_graph(ggml_backend_vk_context * ctx, ggml_cgraph * cgr
         case GGML_UNARY_OP_RELU:
         case GGML_UNARY_OP_TANH:
         case GGML_UNARY_OP_SIGMOID:
+        case GGML_UNARY_OP_HARDSIGMOID:
+        case GGML_UNARY_OP_HARDSWISH:
             ggml_vk_unary(ctx, compute_ctx, src0, node, dryrun);
             break;
         default:
@@ -10813,6 +10825,8 @@ static bool ggml_vk_compute_forward(ggml_backend_vk_context * ctx, ggml_cgraph *
         case GGML_UNARY_OP_RELU:
         case GGML_UNARY_OP_TANH:
         case GGML_UNARY_OP_SIGMOID:
+        case GGML_UNARY_OP_HARDSIGMOID:
+        case GGML_UNARY_OP_HARDSWISH:
             buf = tensor->buffer;
             break;
         default:
@@ -11764,6 +11778,8 @@ static bool ggml_backend_vk_device_supports_op(ggml_backend_dev_t dev, const ggm
         case GGML_UNARY_OP_RELU:
         case GGML_UNARY_OP_TANH:
         case GGML_UNARY_OP_SIGMOID:
+        case GGML_UNARY_OP_HARDSIGMOID:
+        case GGML_UNARY_OP_HARDSWISH:
             return ggml_is_contiguous(op->src[0]) &&
                    (op->src[0]->type == GGML_TYPE_F32 || op->src[0]->type == GGML_TYPE_F16) &&
                    (op->type == GGML_TYPE_F32 || op->type == GGML_TYPE_F16) &&
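The support check admits only contiguous inputs with F32/F16 input and output types (the trailing && continues with further conditions not shown here); that is exactly what a plain graph-level use of the ops produces. A minimal sketch with the public ggml API (the 16 MiB arena size is an arbitrary assumption):

    #include "ggml.h"

    int main() {
        struct ggml_init_params params = {
            /* mem_size   */ 16 * 1024 * 1024,
            /* mem_buffer */ nullptr,
            /* no_alloc   */ false,
        };
        struct ggml_context * ctx = ggml_init(params);

        // Contiguous F32 input -> F32 output: the shape the new
        // GGML_UNARY_OP_HARDSWISH case accepts.
        struct ggml_tensor * x = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 8);
        struct ggml_tensor * y = ggml_hardswish(ctx, x);
        (void) y;

        ggml_free(ctx);
    }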
@@ -12580,6 +12596,12 @@ static void ggml_vk_check_results_0(ggml_backend_vk_context * ctx, ggml_cgraph *
         case GGML_UNARY_OP_SIGMOID:
             tensor_clone = ggml_sigmoid(ggml_ctx, src_clone[0]);
             break;
+        case GGML_UNARY_OP_HARDSIGMOID:
+            tensor_clone = ggml_hardsigmoid(ggml_ctx, src_clone[0]);
+            break;
+        case GGML_UNARY_OP_HARDSWISH:
+            tensor_clone = ggml_hardswish(ggml_ctx, src_clone[0]);
+            break;
         default:
             std::cerr << "Missing vk_check_results OP: " << ggml_op_name(tensor->op) << std::endl;
             GGML_ABORT("fatal error");
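ggml_vk_check_results_0 is the debug cross-check: it rebuilds each node with the ordinary ggml graph builders (here ggml_hardsigmoid/ggml_hardswish) to obtain a reference result to compare against the Vulkan output, so the two new cases are all that is needed to cover the ops there. A quick numeric sanity check of the math sketched at the top (illustrative only, not part of the patch):

    #include <algorithm>
    #include <cassert>

    int main() {
        // hardsigmoid saturates at 0 below x = -3 and at 1 above x = +3;
        // hardswish(x) = x * hardsigmoid(x), so hardswish(3) == 3 exactly.
        auto hs = [](float x) { return std::min(1.0f, std::max(0.0f, (x + 3.0f) / 6.0f)); };
        assert(hs(-3.0f) == 0.0f);
        assert(hs(+3.0f) == 1.0f);
        assert(3.0f * hs(3.0f) == 3.0f);
    }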