Sync from upstream llama.cpp repository

2026-01-16 10:43:34 +08:00
parent 3bc369a6f7
commit f4ae4cc7da
2053 changed files with 956010 additions and 1 deletion

View File

@@ -0,0 +1,124 @@
find_library(FOUNDATION_LIBRARY Foundation REQUIRED)
find_library(METAL_FRAMEWORK Metal REQUIRED)
find_library(METALKIT_FRAMEWORK MetalKit REQUIRED)
message(STATUS "Metal framework found")
ggml_add_backend_library(ggml-metal
ggml-metal.cpp
ggml-metal-device.m
ggml-metal-device.cpp
ggml-metal-common.cpp
ggml-metal-context.m
ggml-metal-ops.cpp
)
target_link_libraries(ggml-metal PRIVATE
${FOUNDATION_LIBRARY}
${METAL_FRAMEWORK}
${METALKIT_FRAMEWORK}
)
if (GGML_METAL_NDEBUG)
add_compile_definitions(GGML_METAL_NDEBUG)
endif()
set(METALLIB_COMMON "${CMAKE_CURRENT_SOURCE_DIR}/../ggml-common.h")
if (GGML_METAL_EMBED_LIBRARY)
enable_language(ASM)
add_compile_definitions(GGML_METAL_EMBED_LIBRARY)
set(METALLIB_SOURCE "${CMAKE_CURRENT_SOURCE_DIR}/ggml-metal.metal")
set(METALLIB_IMPL "${CMAKE_CURRENT_SOURCE_DIR}/ggml-metal-impl.h")
file(MAKE_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/autogenerated")
# merge ggml-common.h and ggml-metal.metal into a single file
set(METALLIB_EMBED_ASM "${CMAKE_CURRENT_BINARY_DIR}/autogenerated/ggml-metal-embed.s")
set(METALLIB_SOURCE_EMBED "${CMAKE_CURRENT_BINARY_DIR}/autogenerated/ggml-metal-embed.metal")
set(METALLIB_SOURCE_EMBED_TMP "${CMAKE_CURRENT_BINARY_DIR}/autogenerated/ggml-metal-embed.metal.tmp")
add_custom_command(
OUTPUT "${METALLIB_EMBED_ASM}"
COMMAND echo "Embedding Metal library"
COMMAND sed -e "/__embed_ggml-common.h__/r ${METALLIB_COMMON}" -e "/__embed_ggml-common.h__/d" < "${METALLIB_SOURCE}" > "${METALLIB_SOURCE_EMBED_TMP}"
COMMAND sed -e "/\#include \"ggml-metal-impl.h\"/r ${METALLIB_IMPL}" -e "/\#include \"ggml-metal-impl.h\"/d" < "${METALLIB_SOURCE_EMBED_TMP}" > "${METALLIB_SOURCE_EMBED}"
COMMAND echo ".section __DATA,__ggml_metallib" > "${METALLIB_EMBED_ASM}"
COMMAND echo ".globl _ggml_metallib_start" >> "${METALLIB_EMBED_ASM}"
COMMAND echo "_ggml_metallib_start:" >> "${METALLIB_EMBED_ASM}"
COMMAND echo .incbin "\"${METALLIB_SOURCE_EMBED}\"" >> "${METALLIB_EMBED_ASM}"
COMMAND echo ".globl _ggml_metallib_end" >> "${METALLIB_EMBED_ASM}"
COMMAND echo "_ggml_metallib_end:" >> "${METALLIB_EMBED_ASM}"
DEPENDS ../ggml-common.h ggml-metal.metal ggml-metal-impl.h
COMMENT "Generate assembly for embedded Metal library"
VERBATIM
)
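# for reference, the assembly generated by the echo commands above looks roughly like this
# (the build path is illustrative):
#
#   .section __DATA,__ggml_metallib
#   .globl _ggml_metallib_start
#   _ggml_metallib_start:
#   .incbin "<build>/autogenerated/ggml-metal-embed.metal"
#   .globl _ggml_metallib_end
#   _ggml_metallib_end: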
target_sources(ggml-metal PRIVATE "${METALLIB_EMBED_ASM}")
else()
# copy metal files to bin directory
configure_file(../ggml-common.h ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-common.h COPYONLY)
configure_file(ggml-metal.metal ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.metal COPYONLY)
configure_file(ggml-metal-impl.h ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal-impl.h COPYONLY)
if (GGML_METAL_SHADER_DEBUG)
# custom command to do the following:
# xcrun -sdk macosx metal -fno-fast-math -c ggml-metal.metal -o ggml-metal.air
# xcrun -sdk macosx metallib ggml-metal.air -o default.metallib
#
# note: this is the only way I found to disable fast-math in Metal. it's ugly, but at least it works
# disabling fast math is needed in order to pass tests/test-backend-ops
# note: adding -fno-inline fixes the tests when using MTL_SHADER_VALIDATION=1
# note: unfortunately, we have to call it default.metallib instead of ggml.metallib
# ref: https://github.com/ggerganov/whisper.cpp/issues/1720
# note: adding -g causes segmentation fault during compile
#set(XC_FLAGS -fno-fast-math -fno-inline -g)
set(XC_FLAGS -fno-fast-math -fno-inline)
else()
set(XC_FLAGS -O3)
endif()
# Append macOS metal versioning flags
if (GGML_METAL_MACOSX_VERSION_MIN)
message(STATUS "Adding -mmacosx-version-min=${GGML_METAL_MACOSX_VERSION_MIN} flag to metal compilation")
list (APPEND XC_FLAGS -mmacosx-version-min=${GGML_METAL_MACOSX_VERSION_MIN})
endif()
if (GGML_METAL_STD)
message(STATUS "Adding -std=${GGML_METAL_STD} flag to metal compilation")
list (APPEND XC_FLAGS -std=${GGML_METAL_STD})
endif()
add_custom_command(
OUTPUT ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib
COMMAND xcrun -sdk macosx metal ${XC_FLAGS} -c ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.metal -o - |
xcrun -sdk macosx metallib - -o ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib
COMMAND rm -f ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-common.h
COMMAND rm -f ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.metal
DEPENDS ggml-metal.metal ${METALLIB_COMMON}
COMMENT "Compiling Metal kernels"
)
# FIXME: only add to the ggml-metal target?
add_custom_target(
ggml-metal-lib ALL
DEPENDS ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib
)
endif() # GGML_METAL_EMBED_LIBRARY
if (NOT GGML_METAL_EMBED_LIBRARY)
install(
FILES src/ggml-metal/ggml-metal.metal
PERMISSIONS
OWNER_READ
OWNER_WRITE
GROUP_READ
WORLD_READ
DESTINATION ${CMAKE_INSTALL_BINDIR})
install(
FILES ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib
DESTINATION ${CMAKE_INSTALL_BINDIR}
)
endif()

View File

@@ -0,0 +1,446 @@
#include "ggml-metal-common.h"
#include "ggml-impl.h"
#include "ggml-backend-impl.h"
#include <vector>
// represents a memory range (i.e. an interval from a starting address p0 to an ending address p1 in a given buffer pb)
// the type indicates whether it is a source range (i.e. ops read data from it) or a destination range (i.e. ops write data to it)
struct ggml_mem_range {
uint64_t pb; // buffer id
uint64_t p0; // begin
uint64_t p1; // end
ggml_mem_range_type pt;
};
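// example (illustrative values): a src range { pb = 0x1, p0 = 0, p1 = 256 } and a
// dst range { pb = 0x1, p0 = 128, p1 = 384 } overlap, so the two ops touching them
// cannot be placed in the same concurrent set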
struct ggml_mem_ranges {
std::vector<ggml_mem_range> ranges;
int debug = 0;
};
ggml_mem_ranges_t ggml_mem_ranges_init(int debug) {
auto * res = new ggml_mem_ranges;
res->ranges.reserve(256);
res->debug = debug;
return res;
}
void ggml_mem_ranges_free(ggml_mem_ranges_t mrs) {
delete mrs;
}
void ggml_mem_ranges_reset(ggml_mem_ranges_t mrs) {
mrs->ranges.clear();
}
static bool ggml_mem_ranges_add(ggml_mem_ranges_t mrs, ggml_mem_range mr) {
mrs->ranges.push_back(mr);
return true;
}
static ggml_mem_range ggml_mem_range_from_tensor(const ggml_tensor * tensor, ggml_mem_range_type pt) {
// always use the base tensor
tensor = tensor->view_src ? tensor->view_src : tensor;
GGML_ASSERT(!tensor->view_src);
ggml_mem_range mr;
if (tensor->buffer) {
// when the tensor is allocated, use the actual memory address range in the buffer
//
// take the actual allocated size with ggml_backend_buft_get_alloc_size()
// this can be larger than the tensor size if the buffer type allocates extra memory
// ref: https://github.com/ggml-org/llama.cpp/pull/15966
mr = {
/*.pb =*/ (uint64_t) tensor->buffer,
/*.p0 =*/ (uint64_t) tensor->data,
/*.p1 =*/ (uint64_t) tensor->data + ggml_backend_buft_get_alloc_size(tensor->buffer->buft, tensor),
/*.pt =*/ pt,
};
} else {
// otherwise, the pointer address is used as a unique id of the memory ranges
// that the tensor will be using when it is allocated
mr = {
/*.pb =*/ (uint64_t) tensor,
/*.p0 =*/ 0, //
/*.p1 =*/ 1024, // [0, 1024) is a dummy range, not used
/*.pt =*/ pt,
};
}
return mr;
}
static ggml_mem_range ggml_mem_range_from_tensor_src(const ggml_tensor * tensor) {
return ggml_mem_range_from_tensor(tensor, MEM_RANGE_TYPE_SRC);
}
static ggml_mem_range ggml_mem_range_from_tensor_dst(const ggml_tensor * tensor) {
return ggml_mem_range_from_tensor(tensor, MEM_RANGE_TYPE_DST);
}
static bool ggml_mem_ranges_add_src(ggml_mem_ranges_t mrs, const ggml_tensor * tensor) {
GGML_ASSERT(tensor);
ggml_mem_range mr = ggml_mem_range_from_tensor_src(tensor);
if (mrs->debug > 2) {
GGML_LOG_DEBUG("%s: add src range buf=%lld, [%lld, %lld)\n", __func__, mr.pb, mr.p0, mr.p1);
}
return ggml_mem_ranges_add(mrs, mr);
}
static bool ggml_mem_ranges_add_dst(ggml_mem_ranges_t mrs, const ggml_tensor * tensor) {
GGML_ASSERT(tensor);
ggml_mem_range mr = ggml_mem_range_from_tensor_dst(tensor);
if (mrs->debug > 2) {
GGML_LOG_DEBUG("%s: add dst range buf=%lld, [%lld, %lld)\n", __func__, mr.pb, mr.p0, mr.p1);
}
return ggml_mem_ranges_add(mrs, mr);
}
bool ggml_mem_ranges_add(ggml_mem_ranges_t mrs, const ggml_tensor * tensor) {
for (int i = 0; i < GGML_MAX_SRC; i++) {
if (tensor->src[i]) {
ggml_mem_ranges_add_src(mrs, tensor->src[i]);
}
}
return ggml_mem_ranges_add_dst(mrs, tensor);
}
static bool ggml_mem_ranges_check(ggml_mem_ranges_t mrs, ggml_mem_range mr) {
for (size_t i = 0; i < mrs->ranges.size(); i++) {
const auto & cmp = mrs->ranges[i];
// two memory ranges cannot intersect if they are in different buffers
if (mr.pb != cmp.pb) {
continue;
}
// intersecting source ranges are allowed
if (mr.pt == MEM_RANGE_TYPE_SRC && cmp.pt == MEM_RANGE_TYPE_SRC) {
continue;
}
if (mr.p0 < cmp.p1 && mr.p1 >= cmp.p0) {
if (mrs->debug > 2) {
GGML_LOG_DEBUG("%s: the %s range buf=%lld, [%lld, %lld) overlaps with a previous %s range buf=%lld, [%lld, %lld)\n",
__func__,
mr.pt == MEM_RANGE_TYPE_SRC ? "src" : "dst",
mr.pb, mr.p0, mr.p1,
cmp.pt == MEM_RANGE_TYPE_SRC ? "src" : "dst",
cmp.pb, cmp.p0, cmp.p1);
}
return false;
}
}
return true;
}
static bool ggml_mem_ranges_check_src(ggml_mem_ranges_t mrs, const ggml_tensor * tensor) {
GGML_ASSERT(tensor);
ggml_mem_range mr = ggml_mem_range_from_tensor_src(tensor);
const bool res = ggml_mem_ranges_check(mrs, mr);
return res;
}
static bool ggml_mem_ranges_check_dst(ggml_mem_ranges_t mrs, const ggml_tensor * tensor) {
GGML_ASSERT(tensor);
ggml_mem_range mr = ggml_mem_range_from_tensor_dst(tensor);
const bool res = ggml_mem_ranges_check(mrs, mr);
return res;
}
bool ggml_mem_ranges_check(ggml_mem_ranges_t mrs, const ggml_tensor * tensor) {
for (int i = 0; i < GGML_MAX_SRC; i++) {
if (tensor->src[i]) {
if (!ggml_mem_ranges_check_src(mrs, tensor->src[i])) {
return false;
}
}
}
return ggml_mem_ranges_check_dst(mrs, tensor);
}
struct node_info {
ggml_tensor * node;
std::vector<ggml_tensor *> fused;
ggml_op op() const {
return node->op;
}
const ggml_tensor * dst() const {
return fused.empty() ? node : fused.back();
}
bool is_empty() const {
return ggml_op_is_empty(node->op);
}
void add_fused(ggml_tensor * t) {
fused.push_back(t);
}
};
static std::vector<int> ggml_metal_graph_optimize_reorder(const std::vector<node_info> & nodes) {
// helper to add node src and dst ranges
const auto & h_add = [](ggml_mem_ranges_t mrs, const node_info & node) {
for (int i = 0; i < GGML_MAX_SRC; i++) {
if (node.node->src[i]) {
if (!ggml_mem_ranges_add_src(mrs, node.node->src[i])) {
return false;
}
}
}
// keep track of the sources of the fused nodes as well
for (const auto * fused : node.fused) {
for (int i = 0; i < GGML_MAX_SRC; i++) {
if (fused->src[i]) {
if (!ggml_mem_ranges_add_src(mrs, fused->src[i])) {
return false;
}
}
}
}
return ggml_mem_ranges_add_dst(mrs, node.dst());
};
// helper to check if a node can run concurrently with the existing set of nodes
const auto & h_check = [](ggml_mem_ranges_t mrs, const node_info & node) {
for (int i = 0; i < GGML_MAX_SRC; i++) {
if (node.node->src[i]) {
if (!ggml_mem_ranges_check_src(mrs, node.node->src[i])) {
return false;
}
}
}
for (const auto * fused : node.fused) {
for (int i = 0; i < GGML_MAX_SRC; i++) {
if (fused->src[i]) {
if (!ggml_mem_ranges_check_src(mrs, fused->src[i])) {
return false;
}
}
}
}
return ggml_mem_ranges_check_dst(mrs, node.dst());
};
// perform reorders only across these types of ops
// can be expanded when needed
const auto & h_safe = [](ggml_op op) {
switch (op) {
case GGML_OP_MUL_MAT:
case GGML_OP_MUL_MAT_ID:
case GGML_OP_ROPE:
case GGML_OP_NORM:
case GGML_OP_RMS_NORM:
case GGML_OP_GROUP_NORM:
case GGML_OP_SUM_ROWS:
case GGML_OP_MUL:
case GGML_OP_ADD:
case GGML_OP_DIV:
case GGML_OP_GLU:
case GGML_OP_SCALE:
case GGML_OP_GET_ROWS:
case GGML_OP_CPY:
case GGML_OP_SET_ROWS:
return true;
default:
return ggml_op_is_empty(op);
}
};
const int n = nodes.size();
std::vector<int> res;
res.reserve(n);
std::vector<bool> used(n, false);
// the memory ranges for the set of currently concurrent nodes
ggml_mem_ranges_t mrs0 = ggml_mem_ranges_init(0);
// the memory ranges for the set of nodes that haven't been processed yet, when looking forward for a node to reorder
ggml_mem_ranges_t mrs1 = ggml_mem_ranges_init(0);
for (int i0 = 0; i0 < n; i0++) {
if (used[i0]) {
continue;
}
const auto & node0 = nodes[i0];
// the node is not concurrent with the existing concurrent set, so we have to "put a barrier" (i.e. reset mrs0)
// but before we do that, look forward for some other nodes that can be added to the concurrent set mrs0
//
// note: we can always add empty nodes to the concurrent set as they neither read nor write anything
if (!node0.is_empty() && !h_check(mrs0, node0)) {
// this will hold the set of memory ranges from the nodes that haven't been processed yet
// if a node is not concurrent with this set, we cannot reorder it
ggml_mem_ranges_reset(mrs1);
// initialize it with the current node
h_add(mrs1, node0);
// how many nodes to look forward when searching for a concurrent node
constexpr int N_FORWARD = 8;
for (int i1 = i0 + 1; i1 < i0 + N_FORWARD && i1 < n; i1++) {
if (used[i1]) {
continue;
}
const auto & node1 = nodes[i1];
// disallow reordering of certain ops
if (!h_safe(node1.op())) {
break;
}
const bool is_empty = node1.is_empty();
// to reorder a node and add it to the concurrent set, it has to be:
// + empty or concurrent with all nodes in the existing concurrent set (mrs0)
// + concurrent with all nodes prior to it that haven't been processed yet (mrs1)
if ((is_empty || h_check(mrs0, node1)) && h_check(mrs1, node1)) {
// add the node to the existing concurrent set (i.e. reorder it for early execution)
h_add(mrs0, node1);
res.push_back(i1);
// mark as used, so we skip re-processing it later
used[i1] = true;
} else {
// expand the set of nodes that haven't been processed yet
h_add(mrs1, node1);
}
}
// finalize the concurrent set and begin a new one
ggml_mem_ranges_reset(mrs0);
}
// expand the concurrent set with the current node
{
h_add(mrs0, node0);
res.push_back(i0);
}
}
ggml_mem_ranges_free(mrs0);
ggml_mem_ranges_free(mrs1);
return res;
}
void ggml_graph_optimize(ggml_cgraph * gf) {
constexpr int MAX_FUSE = 16;
const int n = gf->n_nodes;
enum ggml_op ops[MAX_FUSE];
std::vector<node_info> nodes;
nodes.reserve(gf->n_nodes);
// fuse nodes:
// we don't want to make reorders that break fusing, so we first pack all fusable tensors
// and perform the reorder over the fused nodes. after the reorder is done, we unfuse
for (int i = 0; i < n; i++) {
node_info node = {
/*.node =*/ gf->nodes[i],
/*.fused =*/ {},
};
// fuse only ops that start with these operations
// can be expanded when needed
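// e.g. a GGML_OP_RMS_NORM followed by a GGML_OP_MUL is packed into a single node_info
// and treated as one unit during the reorder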
if (node.op() == GGML_OP_ADD ||
node.op() == GGML_OP_NORM ||
node.op() == GGML_OP_RMS_NORM) {
ops[0] = node.op();
int f = i + 1;
while (f < n && f < i + MAX_FUSE) {
// conservatively allow fusing only these ops
// can be expanded when needed
if (gf->nodes[f]->op != GGML_OP_ADD &&
gf->nodes[f]->op != GGML_OP_MUL &&
gf->nodes[f]->op != GGML_OP_NORM &&
gf->nodes[f]->op != GGML_OP_RMS_NORM) {
break;
}
ops[f - i] = gf->nodes[f]->op;
f++;
}
f -= i;
for (; f > 1; f--) {
if (ggml_can_fuse(gf, i, ops, f)) {
break;
}
}
// add the fused tensors into the node info so we can unfuse them later
for (int k = 1; k < f; k++) {
++i;
// the .dst() becomes the last fused tensor
node.add_fused(gf->nodes[i]);
}
}
nodes.push_back(std::move(node));
}
#if 1
// reorder to improve concurrency
const auto order = ggml_metal_graph_optimize_reorder(nodes);
#else
std::vector<int> order(nodes.size());
for (size_t i = 0; i < nodes.size(); i++) {
order[i] = i;
}
#endif
// unfuse
{
int j = 0;
for (const auto i : order) {
const auto & node = nodes[i];
gf->nodes[j++] = node.node;
for (auto * fused : node.fused) {
gf->nodes[j++] = fused;
}
}
}
}
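As an illustrative sketch (assuming the usual graph-building API from ggml.h; the function name and sizes are hypothetical), this is how ggml_graph_optimize() can be exercised on a small graph with two independent matrix multiplications:

#include "ggml.h"
#include "ggml-metal-common.h"

static void optimize_example(void) {
    struct ggml_init_params params = {
        /*.mem_size   =*/ 16u*1024*1024,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ true, // tensors are not allocated - the dummy ranges are used
    };
    struct ggml_context * ctx = ggml_init(params);

    struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 64, 64);
    struct ggml_tensor * b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 64, 64);

    // two independent mat-muls: neither reads the other's result, so the
    // optimizer is free to keep them in the same concurrent set
    struct ggml_tensor * x = ggml_mul_mat(ctx, a, b);
    struct ggml_tensor * y = ggml_mul_mat(ctx, b, a);

    struct ggml_cgraph * gf = ggml_new_graph(ctx);
    ggml_build_forward_expand(gf, x);
    ggml_build_forward_expand(gf, y);

    ggml_graph_optimize(gf); // reorders gf->nodes in place

    ggml_free(ctx);
}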

View File

@@ -0,0 +1,52 @@
// helper functions for ggml-metal that are too difficult to implement in Objective-C
#pragma once
#include <stdbool.h>
#ifdef __cplusplus
extern "C" {
#endif
struct ggml_tensor;
struct ggml_cgraph;
enum ggml_mem_range_type {
MEM_RANGE_TYPE_SRC = 0,
MEM_RANGE_TYPE_DST = 1,
};
// a helper object that can be used for reordering operations to improve concurrency
//
// the fundamental idea is that a set of tasks (either ggml ops, or something else) can run concurrently if they
// don't write to memory that is being read or written by another task in the set
//
// with this structure, we can add tasks to the set, recording the memory ranges they read and write. we can also check if a new task
// can be added to the set without violating the constraints (i.e. if it can be executed concurrently with the
// tasks already in the set)
//
typedef struct ggml_mem_ranges * ggml_mem_ranges_t;
ggml_mem_ranges_t ggml_mem_ranges_init(int debug);
void ggml_mem_ranges_free(ggml_mem_ranges_t mrs);
// remove all ranges from the set
void ggml_mem_ranges_reset(ggml_mem_ranges_t mrs);
// add src or dst ranges to track
bool ggml_mem_ranges_add(ggml_mem_ranges_t mrs, const struct ggml_tensor * tensor);
// return false if:
// - new src range overlaps with any existing dst range
// - new dst range overlaps with any existing range (src or dst)
bool ggml_mem_ranges_check(ggml_mem_ranges_t mrs, const struct ggml_tensor * tensor);
// reorder the nodes in the graph to improve concurrency, while respecting fusion
//
// note: this implementation is generic and not specific to metal
// if it proves to work well, we can start using it for other backends in the future
void ggml_graph_optimize(struct ggml_cgraph * gf);
#ifdef __cplusplus
}
#endif
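A minimal usage sketch (the helper function and its name are hypothetical; the ggml_mem_ranges_* calls come from this header):

#include "ggml-metal-common.h"

// returns true if op `b` could be encoded concurrently with op `a`
static bool can_run_concurrently(const struct ggml_tensor * a, const struct ggml_tensor * b) {
    ggml_mem_ranges_t mrs = ggml_mem_ranges_init(/*debug =*/ 0);

    // record the src/dst ranges of the first op
    ggml_mem_ranges_add(mrs, a);

    // check the second op against the recorded ranges
    const bool res = ggml_mem_ranges_check(mrs, b);

    ggml_mem_ranges_free(mrs);

    return res;
}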

View File

@@ -0,0 +1,33 @@
#pragma once
#include "ggml-metal-device.h"
#ifdef __cplusplus
extern "C" {
#endif
//
// backend context
//
typedef struct ggml_metal * ggml_metal_t;
ggml_metal_t ggml_metal_init(ggml_metal_device_t dev);
void ggml_metal_free(ggml_metal_t ctx);
void ggml_metal_synchronize(ggml_metal_t ctx);
void ggml_metal_set_tensor_async(ggml_metal_t ctx, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
void ggml_metal_get_tensor_async(ggml_metal_t ctx, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
enum ggml_status ggml_metal_graph_compute (ggml_metal_t ctx, struct ggml_cgraph * gf);
void ggml_metal_graph_optimize(ggml_metal_t ctx, struct ggml_cgraph * gf);
void ggml_metal_set_n_cb (ggml_metal_t ctx, int n_cb);
void ggml_metal_set_abort_callback (ggml_metal_t ctx, ggml_abort_callback abort_callback, void * user_data);
bool ggml_metal_supports_family (ggml_metal_t ctx, int family);
void ggml_metal_capture_next_compute(ggml_metal_t ctx);
#ifdef __cplusplus
}
#endif
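A minimal lifetime sketch (the wrapper function is hypothetical; `gf` is assumed to be an already-built ggml graph):

#include "ggml-metal-context.h"

static enum ggml_status run_graph(struct ggml_cgraph * gf) {
    ggml_metal_device_t dev = ggml_metal_device_get(); // shared device singleton
    ggml_metal_t        ctx = ggml_metal_init(dev);
    if (ctx == NULL) {
        return GGML_STATUS_FAILED;
    }

    ggml_metal_set_n_cb(ctx, 1); // 1 or 2 extra command buffers is the recommended range

    const enum ggml_status status = ggml_metal_graph_compute(ctx, gf); // async submission
    ggml_metal_synchronize(ctx); // wait for the GPU to finish

    ggml_metal_free(ctx);

    return status;
}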

View File

@@ -0,0 +1,609 @@
#import "ggml-metal-context.h"
#import "ggml-impl.h"
#import "ggml-backend-impl.h"
#import "ggml-metal-impl.h"
#import "ggml-metal-common.h"
#import "ggml-metal-ops.h"
#import <Foundation/Foundation.h>
#import <Metal/Metal.h>
#undef MIN
#undef MAX
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))
// max number of MTLCommandBuffer used to submit a graph for processing
#define GGML_METAL_MAX_COMMAND_BUFFERS 8
struct ggml_metal_command_buffer {
id<MTLCommandBuffer> obj;
};
struct ggml_metal {
ggml_metal_device_t dev;
ggml_metal_library_t lib;
dispatch_queue_t d_queue;
// additional, inference-time compiled pipelines
ggml_metal_pipelines_t pipelines_ext;
bool use_fusion;
bool use_concurrency;
bool use_graph_optimize;
int debug_graph;
int debug_fusion;
// how many times a given op was fused
uint64_t fuse_cnt[GGML_OP_COUNT];
// capture state
bool capture_next_compute;
bool capture_started;
id<MTLCaptureScope> capture_scope;
// command buffer state
int n_cb; // number of extra threads used to submit the command buffers
int n_nodes_0; // number of nodes submitted by the main thread
int n_nodes_1; // remaining number of nodes submitted by the n_cb threads
int n_nodes_per_cb;
struct ggml_cgraph * gf;
// the callback given to the thread pool
void (^encode_async)(size_t ith);
// n_cb command buffers + 1 used by the main thread
struct ggml_metal_command_buffer cmd_bufs[GGML_METAL_MAX_COMMAND_BUFFERS + 1];
// extra command buffers for things like getting, setting and copying tensors
NSMutableArray * cmd_bufs_ext;
// the last command buffer queued into the Metal queue with operations relevant to the current Metal backend
id<MTLCommandBuffer> cmd_buf_last;
// abort ggml_metal_graph_compute if callback returns true
ggml_abort_callback abort_callback;
void * abort_callback_data;
};
ggml_metal_t ggml_metal_init(ggml_metal_device_t dev) {
GGML_LOG_INFO("%s: allocating\n", __func__);
#if TARGET_OS_OSX && !GGML_METAL_NDEBUG
// Show all the Metal device instances in the system
NSArray * devices = MTLCopyAllDevices();
for (id<MTLDevice> device in devices) {
GGML_LOG_INFO("%s: found device: %s\n", __func__, [[device name] UTF8String]);
}
[devices release]; // since it was created by a *Copy* C method
#endif
// init context
ggml_metal_t res = calloc(1, sizeof(struct ggml_metal));
id<MTLDevice> device = ggml_metal_device_get_obj(dev);
GGML_LOG_INFO("%s: picking default device: %s\n", __func__, [[device name] UTF8String]);
// TODO: would it be better to have one queue for the backend and one queue for the device?
// the graph encoders and async ops would use the backend queue while the sync ops would use the device queue?
//res->queue = [device newCommandQueue]; [TAG_QUEUE_PER_BACKEND]
id<MTLCommandQueue> queue = ggml_metal_device_get_queue(dev);
if (queue == nil) {
GGML_LOG_ERROR("%s: error: failed to create command queue\n", __func__);
free(res);
return NULL;
}
res->dev = dev;
res->lib = ggml_metal_device_get_library(dev);
if (res->lib == NULL) {
GGML_LOG_WARN("%s: the device does not have a precompiled Metal library - this is unexpected\n", __func__);
GGML_LOG_WARN("%s: will try to compile it on the fly\n", __func__);
res->lib = ggml_metal_library_init(dev);
if (res->lib == NULL) {
GGML_LOG_ERROR("%s: error: failed to initialize the Metal library\n", __func__);
free(res);
return NULL;
}
}
//const struct ggml_metal_device_props * props_dev = ggml_metal_device_get_props(dev);
res->d_queue = dispatch_queue_create("ggml-metal", DISPATCH_QUEUE_CONCURRENT);
res->use_fusion = getenv("GGML_METAL_FUSION_DISABLE") == nil;
res->use_concurrency = getenv("GGML_METAL_CONCURRENCY_DISABLE") == nil;
{
const char * val = getenv("GGML_METAL_GRAPH_DEBUG");
res->debug_graph = val ? atoi(val) : 0;
}
{
const char * val = getenv("GGML_METAL_FUSION_DEBUG");
res->debug_fusion = val ? atoi(val) : 0;
}
res->use_graph_optimize = true;
if (getenv("GGML_METAL_GRAPH_OPTIMIZE_DISABLE") != NULL) {
res->use_graph_optimize = false;
}
memset(res->fuse_cnt, 0, sizeof(res->fuse_cnt));
GGML_LOG_INFO("%s: use fusion = %s\n", __func__, res->use_fusion ? "true" : "false");
GGML_LOG_INFO("%s: use concurrency = %s\n", __func__, res->use_concurrency ? "true" : "false");
GGML_LOG_INFO("%s: use graph optimize = %s\n", __func__, res->use_graph_optimize ? "true" : "false");
res->capture_next_compute = false;
res->capture_started = false;
res->capture_scope = nil;
res->gf = nil;
res->encode_async = nil;
for (int i = 0; i < GGML_METAL_MAX_COMMAND_BUFFERS; ++i) {
res->cmd_bufs[i].obj = nil;
}
res->cmd_bufs_ext = [[NSMutableArray alloc] init];
res->cmd_buf_last = nil;
res->pipelines_ext = ggml_metal_pipelines_init();
return res;
}
void ggml_metal_free(ggml_metal_t ctx) {
GGML_LOG_INFO("%s: deallocating\n", __func__);
for (int i = 0; i < GGML_METAL_MAX_COMMAND_BUFFERS; ++i) {
if (ctx->cmd_bufs[i].obj) {
[ctx->cmd_bufs[i].obj release];
}
}
for (int i = 0; i < (int) ctx->cmd_bufs_ext.count; ++i) {
if (ctx->cmd_bufs_ext[i]) {
[ctx->cmd_bufs_ext[i] release];
}
}
[ctx->cmd_bufs_ext removeAllObjects];
[ctx->cmd_bufs_ext release];
if (ctx->pipelines_ext) {
ggml_metal_pipelines_free(ctx->pipelines_ext);
ctx->pipelines_ext = nil;
}
if (ctx->debug_fusion > 0) {
GGML_LOG_DEBUG("%s: fusion stats:\n", __func__);
for (int i = 0; i < GGML_OP_COUNT; i++) {
if (ctx->fuse_cnt[i] == 0) {
continue;
}
// note: cannot use ggml_log here
GGML_LOG_DEBUG("%s: - %s: %" PRIu64 "\n", __func__, ggml_op_name((enum ggml_op) i), ctx->fuse_cnt[i]);
}
}
Block_release(ctx->encode_async);
//[ctx->queue release]; // [TAG_QUEUE_PER_BACKEND]
dispatch_release(ctx->d_queue);
free(ctx);
}
void ggml_metal_synchronize(ggml_metal_t ctx) {
// wait for any backend operations to finish
if (ctx->cmd_buf_last) {
[ctx->cmd_buf_last waitUntilCompleted];
ctx->cmd_buf_last = nil;
}
// check status of all command buffers
{
const int n_cb = ctx->n_cb;
for (int cb_idx = 0; cb_idx <= n_cb; ++cb_idx) {
id<MTLCommandBuffer> cmd_buf = ctx->cmd_bufs[cb_idx].obj;
if (!cmd_buf) {
continue;
}
MTLCommandBufferStatus status = [cmd_buf status];
if (status != MTLCommandBufferStatusCompleted) {
GGML_LOG_ERROR("%s: error: command buffer %d failed with status %d\n", __func__, cb_idx, (int) status);
if (status == MTLCommandBufferStatusError) {
GGML_LOG_ERROR("error: %s\n", [[cmd_buf error].localizedDescription UTF8String]);
}
GGML_ABORT("fatal error");
}
}
}
// release any completed extra command buffers
if (ctx->cmd_bufs_ext.count > 0) {
for (size_t i = 0; i < ctx->cmd_bufs_ext.count; ++i) {
id<MTLCommandBuffer> cmd_buf = ctx->cmd_bufs_ext[i];
MTLCommandBufferStatus status = [cmd_buf status];
if (status != MTLCommandBufferStatusCompleted) {
GGML_LOG_ERROR("%s: error: command buffer %d failed with status %d\n", __func__, (int) i, (int) status);
if (status == MTLCommandBufferStatusError) {
GGML_LOG_ERROR("error: %s\n", [[cmd_buf error].localizedDescription UTF8String]);
}
GGML_ABORT("fatal error");
}
[cmd_buf release];
}
[ctx->cmd_bufs_ext removeAllObjects];
}
}
static struct ggml_metal_buffer_id ggml_metal_get_buffer_id(const struct ggml_tensor * t) {
if (!t) {
return (struct ggml_metal_buffer_id) { nil, 0 };
}
ggml_backend_buffer_t buffer = t->view_src ? t->view_src->buffer : t->buffer;
return ggml_metal_buffer_get_id(buffer->context, t);
}
void ggml_metal_set_tensor_async(ggml_metal_t ctx, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
@autoreleasepool {
// wrap the source data into a Metal buffer
id<MTLDevice> device = ggml_metal_device_get_obj(ctx->dev);
id<MTLBuffer> buf_src = [device newBufferWithBytes:data
length:size
options:MTLResourceStorageModeShared];
GGML_ASSERT(buf_src);
struct ggml_metal_buffer_id bid_dst = ggml_metal_get_buffer_id(tensor);
if (bid_dst.metal == nil) {
GGML_ABORT("%s: failed to find buffer for tensor '%s'\n", __func__, tensor->name);
}
bid_dst.offs += offset;
// queue the copy operation into the queue of the Metal context
// this will be queued at the end, after any currently ongoing GPU operations
id<MTLCommandQueue> queue = ggml_metal_device_get_queue(ctx->dev);
id<MTLCommandBuffer> cmd_buf = [queue commandBuffer];
id<MTLBlitCommandEncoder> encoder = [cmd_buf blitCommandEncoder];
[encoder copyFromBuffer:buf_src
sourceOffset:0
toBuffer:bid_dst.metal
destinationOffset:bid_dst.offs
size:size];
[encoder endEncoding];
[cmd_buf commit];
[buf_src release];
// do not wait here for completion
//[cmd_buf waitUntilCompleted];
// instead, remember a reference to the command buffer and wait for it later if needed
[ctx->cmd_bufs_ext addObject:cmd_buf];
ctx->cmd_buf_last = cmd_buf;
[cmd_buf retain];
}
}
void ggml_metal_get_tensor_async(ggml_metal_t ctx, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
@autoreleasepool {
id<MTLDevice> device = ggml_metal_device_get_obj(ctx->dev);
id<MTLBuffer> buf_dst = [device newBufferWithBytesNoCopy:data
length:size
options:MTLResourceStorageModeShared
deallocator:nil];
GGML_ASSERT(buf_dst);
struct ggml_metal_buffer_id bid_src = ggml_metal_get_buffer_id(tensor);
if (bid_src.metal == nil) {
GGML_ABORT("%s: failed to find buffer for tensor '%s'\n", __func__, tensor->name);
}
bid_src.offs += offset;
// queue the copy operation into the queue of the Metal context
// this will be queued at the end, after any currently ongoing GPU operations
id<MTLCommandQueue> queue = ggml_metal_device_get_queue(ctx->dev);
id<MTLCommandBuffer> cmd_buf = [queue commandBuffer];
id<MTLBlitCommandEncoder> encoder = [cmd_buf blitCommandEncoder];
[encoder copyFromBuffer:bid_src.metal
sourceOffset:bid_src.offs
toBuffer:buf_dst
destinationOffset:0
size:size];
[encoder endEncoding];
[cmd_buf commit];
[buf_dst release];
// do not wait here for completion
//[cmd_buf waitUntilCompleted];
// instead, remember a reference to the command buffer and wait for it later if needed
[ctx->cmd_bufs_ext addObject:cmd_buf];
ctx->cmd_buf_last = cmd_buf;
[cmd_buf retain];
}
}
enum ggml_status ggml_metal_graph_compute(ggml_metal_t ctx, struct ggml_cgraph * gf) {
// number of nodes encoded by the main thread (empirically determined)
const int n_main = 64;
// number of threads in addition to the main thread
const int n_cb = ctx->n_cb;
// keep the memory wired
ggml_metal_device_rsets_keep_alive(ctx->dev);
// submit the ggml compute graph to the GPU by creating command buffers and encoding the ops in them
// the first n_nodes_0 are encoded and submitted for processing directly by the calling thread
// while these nodes are processing, we start n_cb threads to enqueue the rest of the nodes
// each thread creates its own command buffer and enqueues the ops in parallel
//
// tests on M1 Pro and M2 Ultra using LLaMA models show that the optimal values for n_cb are 1 or 2
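//
// worked example (hypothetical numbers): with gf->n_nodes = 200, n_main = 64 and n_cb = 2:
//   n_nodes_0      = MIN(64, 200)      = 64  (encoded directly by the calling thread)
//   n_nodes_1      = 200 - 64          = 136 (encoded by the n_cb helper threads)
//   n_nodes_per_cb = (136 + 2 - 1) / 2 = 68  (nodes per extra command buffer)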
@autoreleasepool {
ctx->gf = gf;
ctx->n_nodes_0 = MIN(n_main, gf->n_nodes);
ctx->n_nodes_1 = gf->n_nodes - ctx->n_nodes_0;
ctx->n_nodes_per_cb = (ctx->n_nodes_1 + ctx->n_cb - 1) / ctx->n_cb;
const bool use_capture = ctx->capture_next_compute;
if (use_capture) {
ctx->capture_next_compute = false;
// make sure all previous computations have finished before starting the capture
if (ctx->cmd_buf_last) {
[ctx->cmd_buf_last waitUntilCompleted];
ctx->cmd_buf_last = nil;
}
if (!ctx->capture_started) {
// create capture scope
id<MTLDevice> device = ggml_metal_device_get_obj(ctx->dev);
ctx->capture_scope = [[MTLCaptureManager sharedCaptureManager] newCaptureScopeWithDevice:device];
MTLCaptureDescriptor * descriptor = [MTLCaptureDescriptor new];
descriptor.captureObject = ctx->capture_scope;
descriptor.destination = MTLCaptureDestinationGPUTraceDocument;
descriptor.outputURL = [NSURL fileURLWithPath:[NSString stringWithFormat:@"/tmp/perf-metal.gputrace"]];
NSError * error = nil;
if (![[MTLCaptureManager sharedCaptureManager] startCaptureWithDescriptor:descriptor error:&error]) {
GGML_LOG_ERROR("%s: error: unable to start capture '%s'\n", __func__, [[error localizedDescription] UTF8String]);
} else {
[ctx->capture_scope beginScope];
ctx->capture_started = true;
}
}
}
// short-hand
id<MTLCommandQueue> queue = ggml_metal_device_get_queue(ctx->dev);
// the main thread commits the first few commands immediately
// cmd_buf[n_cb]
{
id<MTLCommandBuffer> cmd_buf = [queue commandBufferWithUnretainedReferences];
[cmd_buf retain];
if (ctx->cmd_bufs[n_cb].obj) {
[ctx->cmd_bufs[n_cb].obj release];
}
ctx->cmd_bufs[n_cb].obj = cmd_buf;
[cmd_buf enqueue];
ctx->encode_async(n_cb);
}
// remember the command buffer for the next iteration
ctx->cmd_buf_last = ctx->cmd_bufs[n_cb].obj;
// prepare the rest of the command buffers asynchronously (optional)
// cmd_buf[0.. n_cb)
for (int cb_idx = 0; cb_idx < n_cb; ++cb_idx) {
id<MTLCommandBuffer> cmd_buf = [queue commandBufferWithUnretainedReferences];
[cmd_buf retain];
if (ctx->cmd_bufs[cb_idx].obj) {
[ctx->cmd_bufs[cb_idx].obj release];
}
ctx->cmd_bufs[cb_idx].obj = cmd_buf;
// always enqueue the first two command buffers
// enqueue all of the command buffers if we don't need to abort
if (cb_idx < 2 || ctx->abort_callback == NULL) {
[cmd_buf enqueue];
// update the pointer to the last queued command buffer
// this is needed to implement synchronize()
ctx->cmd_buf_last = cmd_buf;
}
}
dispatch_apply(n_cb, ctx->d_queue, ctx->encode_async);
// for debugging: block until graph is computed
//[ctx->cmd_buf_last waitUntilCompleted];
// enter here only when capturing in order to wait for all computation to finish
// otherwise, we leave the graph to compute asynchronously
if (!use_capture && ctx->capture_started) {
// wait for completion and check status of each command buffer
// needed to detect if the device ran out-of-memory for example (#1881)
{
id<MTLCommandBuffer> cmd_buf = ctx->cmd_bufs[n_cb].obj;
[cmd_buf waitUntilCompleted];
MTLCommandBufferStatus status = [cmd_buf status];
if (status != MTLCommandBufferStatusCompleted) {
GGML_LOG_INFO("%s: command buffer %d failed with status %lu\n", __func__, n_cb, status);
if (status == MTLCommandBufferStatusError) {
GGML_LOG_INFO("error: %s\n", [[cmd_buf error].localizedDescription UTF8String]);
}
return GGML_STATUS_FAILED;
}
}
for (int i = 0; i < n_cb; ++i) {
id<MTLCommandBuffer> cmd_buf = ctx->cmd_bufs[i].obj;
[cmd_buf waitUntilCompleted];
MTLCommandBufferStatus status = [cmd_buf status];
if (status != MTLCommandBufferStatusCompleted) {
GGML_LOG_INFO("%s: command buffer %d failed with status %lu\n", __func__, i, status);
if (status == MTLCommandBufferStatusError) {
GGML_LOG_INFO("error: %s\n", [[cmd_buf error].localizedDescription UTF8String]);
}
return GGML_STATUS_FAILED;
}
id<MTLCommandBuffer> next_buffer = (i + 1 < n_cb ? ctx->cmd_bufs[i + 1].obj : nil);
if (!next_buffer) {
continue;
}
const bool next_queued = ([next_buffer status] != MTLCommandBufferStatusNotEnqueued);
if (next_queued) {
continue;
}
if (ctx->abort_callback && ctx->abort_callback(ctx->abort_callback_data)) {
GGML_LOG_INFO("%s: command buffer %d aborted", __func__, i);
return GGML_STATUS_ABORTED;
}
[next_buffer commit];
}
[ctx->capture_scope endScope];
[[MTLCaptureManager sharedCaptureManager] stopCapture];
}
}
return GGML_STATUS_SUCCESS;
}
void ggml_metal_graph_optimize(ggml_metal_t ctx, struct ggml_cgraph * gf) {
//const int64_t t_start = ggml_time_us();
if (ctx->use_graph_optimize) {
ggml_graph_optimize(gf);
}
//printf("%s: graph optimize took %.3f ms\n", __func__, (ggml_time_us() - t_start) / 1000.0);
}
void ggml_metal_set_n_cb(ggml_metal_t ctx, int n_cb) {
if (ctx->n_cb != n_cb) {
ctx->n_cb = MIN(n_cb, GGML_METAL_MAX_COMMAND_BUFFERS);
if (ctx->n_cb > 2) {
GGML_LOG_WARN("%s: n_cb = %d, using n_cb > 2 is not recommended and can degrade the performance in some cases\n", __func__, n_cb);
}
}
if (ctx->encode_async) {
Block_release(ctx->encode_async);
}
ctx->encode_async = Block_copy(^(size_t iter) {
const int cb_idx = iter;
const int n_cb_l = ctx->n_cb;
const int n_nodes_0 = ctx->n_nodes_0;
const int n_nodes_1 = ctx->n_nodes_1;
const int n_nodes_per_cb = ctx->n_nodes_per_cb;
int idx_start = 0;
int idx_end = n_nodes_0;
if (cb_idx < n_cb_l) {
idx_start = n_nodes_0 + ( (cb_idx + 0) * n_nodes_per_cb);
idx_end = n_nodes_0 + (MIN((cb_idx == n_cb_l - 1) ? n_nodes_1 : (cb_idx + 1) * n_nodes_per_cb, n_nodes_1));
}
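// example (hypothetical numbers): with n_nodes_0 = 64, n_nodes_1 = 136, n_nodes_per_cb = 68 and n_cb_l = 2:
//   cb_idx == 0 encodes nodes [64, 132), cb_idx == 1 encodes nodes [132, 200),
//   and the main thread (cb_idx == n_cb_l) encodes nodes [0, 64)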
id<MTLCommandBuffer> cmd_buf = ctx->cmd_bufs[cb_idx].obj;
ggml_metal_op_t ctx_op = ggml_metal_op_init(
ctx->dev,
cmd_buf,
ctx->gf,
idx_start,
idx_end,
ctx->use_fusion,
ctx->use_concurrency,
ctx->capture_next_compute,
ctx->debug_graph,
ctx->debug_fusion);
for (int idx = 0; idx < ggml_metal_op_n_nodes(ctx_op); ++idx) {
const int res = ggml_metal_op_encode(ctx_op, idx);
if (res == 0) {
break;
}
idx += res - 1;
}
ggml_metal_op_free(ctx_op);
if (cb_idx < 2 || ctx->abort_callback == NULL) {
[cmd_buf commit];
}
});
}
void ggml_metal_set_abort_callback(ggml_metal_t ctx, ggml_abort_callback abort_callback, void * user_data) {
ctx->abort_callback = abort_callback;
ctx->abort_callback_data = user_data;
}
bool ggml_metal_supports_family(ggml_metal_t ctx, int family) {
GGML_ASSERT(ctx->dev != nil);
id<MTLDevice> device = ggml_metal_device_get_obj(ctx->dev);
return [device supportsFamily:(MTLGPUFamilyApple1 + family - 1)];
}
void ggml_metal_capture_next_compute(ggml_metal_t ctx) {
ctx->capture_next_compute = true;
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,273 @@
#pragma once
#include "ggml.h"
#ifdef __cplusplus
extern "C" {
#endif
struct ggml_metal_buffer_id {
void * metal; // id<MTLBuffer>
size_t offs;
};
typedef struct ggml_metal_device * ggml_metal_device_t;
//
// MTLFunctionConstantValues wrapper
//
typedef struct ggml_metal_cv * ggml_metal_cv_t;
ggml_metal_cv_t ggml_metal_cv_init(void);
void ggml_metal_cv_free(ggml_metal_cv_t cv);
void ggml_metal_cv_set_int16(ggml_metal_cv_t cv, int16_t value, int32_t idx);
void ggml_metal_cv_set_int32(ggml_metal_cv_t cv, int32_t value, int32_t idx);
void ggml_metal_cv_set_bool (ggml_metal_cv_t cv, bool value, int32_t idx);
//
// MTLComputePipelineState wrapper
//
typedef struct ggml_metal_pipeline * ggml_metal_pipeline_t;
ggml_metal_pipeline_t ggml_metal_pipeline_init(void);
void ggml_metal_pipeline_free(ggml_metal_pipeline_t pipeline);
// a collection of pipelines
typedef struct ggml_metal_pipelines * ggml_metal_pipelines_t;
ggml_metal_pipelines_t ggml_metal_pipelines_init(void);
void ggml_metal_pipelines_free(ggml_metal_pipelines_t ppls);
void ggml_metal_pipelines_add(ggml_metal_pipelines_t ppls, const char * name, ggml_metal_pipeline_t pipeline);
ggml_metal_pipeline_t ggml_metal_pipelines_get(ggml_metal_pipelines_t ppls, const char * name);
struct ggml_metal_pipeline_with_params {
ggml_metal_pipeline_t pipeline;
int nsg;
int nr0;
int nr1;
size_t smem;
};
int ggml_metal_pipeline_max_theads_per_threadgroup(struct ggml_metal_pipeline_with_params pipeline);
//
// MTLCommandBuffer wrapper
//
typedef void * ggml_metal_cmd_buf_t;
//
// MTLComputeCommandEncoder wrapper
//
typedef struct ggml_metal_encoder * ggml_metal_encoder_t;
ggml_metal_encoder_t ggml_metal_encoder_init(ggml_metal_cmd_buf_t cmd_buf_raw, bool concurrent);
void ggml_metal_encoder_free(ggml_metal_encoder_t encoder);
void ggml_metal_encoder_debug_group_push(ggml_metal_encoder_t encoder, const char * name);
void ggml_metal_encoder_debug_group_pop (ggml_metal_encoder_t encoder);
void ggml_metal_encoder_set_pipeline(ggml_metal_encoder_t encoder, struct ggml_metal_pipeline_with_params pipeline);
void ggml_metal_encoder_set_bytes (ggml_metal_encoder_t encoder, void * data, size_t size, int idx);
void ggml_metal_encoder_set_buffer(ggml_metal_encoder_t encoder, struct ggml_metal_buffer_id buffer, int idx);
void ggml_metal_encoder_set_threadgroup_memory_size(ggml_metal_encoder_t encoder, size_t size, int idx);
void ggml_metal_encoder_dispatch_threadgroups(ggml_metal_encoder_t encoder, int tg0, int tg1, int tg2, int tptg0, int tptg1, int tptg2);
void ggml_metal_encoder_memory_barrier(ggml_metal_encoder_t encoder);
void ggml_metal_encoder_end_encoding(ggml_metal_encoder_t encoder);
//
// MTLLibrary wrapper
//
typedef struct ggml_metal_library * ggml_metal_library_t;
ggml_metal_library_t ggml_metal_library_init (ggml_metal_device_t dev);
ggml_metal_library_t ggml_metal_library_init_from_source(ggml_metal_device_t dev, const char * source, bool verbose);
void ggml_metal_library_free(ggml_metal_library_t lib);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline (ggml_metal_library_t lib, const char * name);
struct ggml_metal_pipeline_with_params ggml_metal_library_compile_pipeline(ggml_metal_library_t lib, const char * base, const char * name, ggml_metal_cv_t cv);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_base (ggml_metal_library_t lib, enum ggml_op op);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_cpy (ggml_metal_library_t lib, enum ggml_type tsrc, enum ggml_type tdst);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_pool_2d (ggml_metal_library_t lib, const struct ggml_tensor * op, enum ggml_op_pool op_pool);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_get_rows (ggml_metal_library_t lib, enum ggml_type tsrc);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_set_rows (ggml_metal_library_t lib, enum ggml_type tidx, enum ggml_type tdst);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_repeat (ggml_metal_library_t lib, enum ggml_type tsrc);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_unary (ggml_metal_library_t lib, const struct ggml_tensor * op);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_glu (ggml_metal_library_t lib, const struct ggml_tensor * op);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_sum (ggml_metal_library_t lib, const struct ggml_tensor * op);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_sum_rows (ggml_metal_library_t lib, const struct ggml_tensor * op);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_cumsum_blk (ggml_metal_library_t lib, const struct ggml_tensor * op);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_cumsum_add (ggml_metal_library_t lib, const struct ggml_tensor * op);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_tri (ggml_metal_library_t lib, const struct ggml_tensor * op);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_soft_max (ggml_metal_library_t lib, const struct ggml_tensor * op);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_ssm_conv (ggml_metal_library_t lib, const struct ggml_tensor * op);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_ssm_conv_batched (ggml_metal_library_t lib, const struct ggml_tensor * op, int ssm_conv_bs);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_ssm_scan (ggml_metal_library_t lib, const struct ggml_tensor * op);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_rwkv (ggml_metal_library_t lib, const struct ggml_tensor * op);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_mul_mv_ext (ggml_metal_library_t lib, enum ggml_type tsrc0, enum ggml_type tsrc1, int nsg, int nxpsg, int r1ptg);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_mul_mm (ggml_metal_library_t lib, const struct ggml_tensor * op);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_mul_mv (ggml_metal_library_t lib, const struct ggml_tensor * op);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_mul_mm_id_map0 (ggml_metal_library_t lib, int ne02, int ne20);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_mul_mm_id (ggml_metal_library_t lib, const struct ggml_tensor * op);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_mul_mv_id (ggml_metal_library_t lib, const struct ggml_tensor * op);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_argmax (ggml_metal_library_t lib, const struct ggml_tensor * op);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_argsort (ggml_metal_library_t lib, const struct ggml_tensor * op);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_argsort_merge (ggml_metal_library_t lib, const struct ggml_tensor * op);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_top_k (ggml_metal_library_t lib, const struct ggml_tensor * op);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_top_k_merge (ggml_metal_library_t lib, const struct ggml_tensor * op);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_bin (ggml_metal_library_t lib, enum ggml_op op, int32_t n_fuse, bool row);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_l2_norm (ggml_metal_library_t lib, const struct ggml_tensor * op);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_group_norm (ggml_metal_library_t lib, const struct ggml_tensor * op);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_norm (ggml_metal_library_t lib, const struct ggml_tensor * op, int32_t n_fuse);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_rope (ggml_metal_library_t lib, const struct ggml_tensor * op);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_im2col (ggml_metal_library_t lib, const struct ggml_tensor * op);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_conv_transpose_1d (ggml_metal_library_t lib, const struct ggml_tensor * op);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_conv_transpose_2d (ggml_metal_library_t lib, const struct ggml_tensor * op);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_conv_2d (ggml_metal_library_t lib, const struct ggml_tensor * op);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_upscale (ggml_metal_library_t lib, const struct ggml_tensor * op);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_pad (ggml_metal_library_t lib, const struct ggml_tensor * op);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_pad_reflect_1d (ggml_metal_library_t lib, const struct ggml_tensor * op);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_arange (ggml_metal_library_t lib, const struct ggml_tensor * op);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_timestep_embedding(ggml_metal_library_t lib, const struct ggml_tensor * op);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_opt_step_adamw (ggml_metal_library_t lib, const struct ggml_tensor * op);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_opt_step_sgd (ggml_metal_library_t lib, const struct ggml_tensor * op);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_memset (ggml_metal_library_t lib, const struct ggml_tensor * op);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_count_equal (ggml_metal_library_t lib, const struct ggml_tensor * op);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_flash_attn_ext_pad(
ggml_metal_library_t lib,
const struct ggml_tensor * op,
bool has_mask,
int32_t ncpsg);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_flash_attn_ext_blk(
ggml_metal_library_t lib,
const struct ggml_tensor * op,
int32_t nqptg,
int32_t ncpsg);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_flash_attn_ext(
ggml_metal_library_t lib,
const struct ggml_tensor * op,
bool has_mask,
bool has_sinks,
bool has_bias,
bool has_scap,
bool has_kvpad,
int32_t nsg);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_flash_attn_ext_vec(
ggml_metal_library_t lib,
const struct ggml_tensor * op,
bool has_mask,
bool has_sinks,
bool has_bias,
bool has_scap,
bool has_kvpad,
int32_t nsg,
int32_t nwg);
struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_flash_attn_ext_vec_reduce(
ggml_metal_library_t lib,
const struct ggml_tensor * op,
int32_t dv,
int32_t nwg);
// MTLResidencySet wrapper
typedef void * ggml_metal_rset_t;
// a collection of residency sets (non-owning)
typedef struct ggml_metal_rsets * ggml_metal_rsets_t;
ggml_metal_rsets_t ggml_metal_rsets_init(void);
void ggml_metal_rsets_free(ggml_metal_rsets_t rsets);
//
// device
//
struct ggml_metal_device_props {
char name[128];
size_t max_buffer_size;
size_t max_working_set_size;
size_t max_theadgroup_memory_size;
bool has_simdgroup_reduction;
bool has_simdgroup_mm;
bool has_unified_memory;
bool has_bfloat;
bool has_tensor;
bool use_residency_sets;
bool use_shared_buffers;
bool supports_gpu_family_apple7;
int op_offload_min_batch_size;
};
ggml_metal_device_t ggml_metal_device_init(void);
void ggml_metal_device_free(ggml_metal_device_t dev);
// return a singleton that is automatically destroyed when the program exits
ggml_metal_device_t ggml_metal_device_get(void);
void * ggml_metal_device_get_obj (ggml_metal_device_t dev); // id<MTLDevice>
void * ggml_metal_device_get_queue(ggml_metal_device_t dev); // id<MTLCommandQueue>
ggml_metal_library_t ggml_metal_device_get_library(ggml_metal_device_t dev);
void ggml_metal_device_rsets_add(ggml_metal_device_t dev, ggml_metal_rset_t rset);
void ggml_metal_device_rsets_rm (ggml_metal_device_t dev, ggml_metal_rset_t rset);
void ggml_metal_device_rsets_keep_alive(ggml_metal_device_t dev);
void ggml_metal_device_get_memory(ggml_metal_device_t dev, size_t * free, size_t * total);
bool ggml_metal_device_supports_op(ggml_metal_device_t dev, const struct ggml_tensor * op);
const struct ggml_metal_device_props * ggml_metal_device_get_props(ggml_metal_device_t dev);
//
// device buffers
//
typedef struct ggml_metal_buffer * ggml_metal_buffer_t;
ggml_metal_buffer_t ggml_metal_buffer_init(ggml_metal_device_t dev, size_t size, bool shared);
ggml_metal_buffer_t ggml_metal_buffer_map (ggml_metal_device_t dev, void * ptr, size_t size, size_t max_tensor_size);
void ggml_metal_buffer_free (ggml_metal_buffer_t buf);
void * ggml_metal_buffer_get_base (ggml_metal_buffer_t buf);
bool ggml_metal_buffer_is_shared(ggml_metal_buffer_t buf);
void ggml_metal_buffer_memset_tensor(ggml_metal_buffer_t buf, struct ggml_tensor * tensor, uint8_t value, size_t offset, size_t size);
void ggml_metal_buffer_set_tensor (ggml_metal_buffer_t buf, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
void ggml_metal_buffer_get_tensor (ggml_metal_buffer_t buf, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
void ggml_metal_buffer_clear (ggml_metal_buffer_t buf, uint8_t value);
// finds the Metal buffer that contains the tensor data on the GPU device
// the assumption is that there is a 1-to-1 mapping between the host and device memory buffers, so we can find the
// Metal buffer based on the host memory pointer
//
struct ggml_metal_buffer_id ggml_metal_buffer_get_id(ggml_metal_buffer_t buf, const struct ggml_tensor * t);
#ifdef __cplusplus
}
#endif
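A minimal query sketch (the function name and the buffer size are hypothetical; all calls used are declared above):

#include "ggml-metal-device.h"

#include <stdio.h>

static void print_device_info(void) {
    ggml_metal_device_t dev = ggml_metal_device_get();

    const struct ggml_metal_device_props * props = ggml_metal_device_get_props(dev);
    printf("device: %s, unified memory: %d\n", props->name, props->has_unified_memory);

    size_t free  = 0;
    size_t total = 0;
    ggml_metal_device_get_memory(dev, &free, &total);
    printf("memory: %zu free / %zu total\n", free, total);

    // a 16 MiB device buffer visible to both CPU and GPU
    ggml_metal_buffer_t buf = ggml_metal_buffer_init(dev, 16u*1024*1024, /*shared =*/ true);
    ggml_metal_buffer_clear(buf, 0);
    ggml_metal_buffer_free(buf);
}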

File diff suppressed because it is too large

View File

@@ -0,0 +1,944 @@
#ifndef GGML_METAL_IMPL
#define GGML_METAL_IMPL
// kernel parameters for mat-vec threadgroups
//
// N_R0: number of src0 rows to process per simdgroup
// N_SG: number of simdgroups per threadgroup
//
// TODO: for optimal performance, these should become functions of the device and work size
#define N_R0_Q4_0 4
#define N_SG_Q4_0 2
#define N_R0_Q4_1 4
#define N_SG_Q4_1 2
#define N_R0_Q5_0 4
#define N_SG_Q5_0 2
#define N_R0_Q5_1 4
#define N_SG_Q5_1 2
#define N_R0_Q8_0 2
#define N_SG_Q8_0 4
#define N_R0_MXFP4 2
#define N_SG_MXFP4 2
#define N_R0_Q2_K 4
#define N_SG_Q2_K 2
#define N_R0_Q3_K 2
#define N_SG_Q3_K 2
#define N_R0_Q4_K 2
#define N_SG_Q4_K 2
#define N_R0_Q5_K 2
#define N_SG_Q5_K 2
#define N_R0_Q6_K 2
#define N_SG_Q6_K 2
#define N_R0_IQ1_S 4
#define N_SG_IQ1_S 2
#define N_R0_IQ1_M 4
#define N_SG_IQ1_M 2
#define N_R0_IQ2_XXS 4
#define N_SG_IQ2_XXS 2
#define N_R0_IQ2_XS 4
#define N_SG_IQ2_XS 2
#define N_R0_IQ2_S 4
#define N_SG_IQ2_S 2
#define N_R0_IQ3_XXS 4
#define N_SG_IQ3_XXS 2
#define N_R0_IQ3_S 4
#define N_SG_IQ3_S 2
#define N_R0_IQ4_NL 2
#define N_SG_IQ4_NL 2
#define N_R0_IQ4_XS 2
#define N_SG_IQ4_XS 2
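// example: for Q4_0 each threadgroup contains N_SG_Q4_0 = 2 simdgroups and each simdgroup
// processes N_R0_Q4_0 = 4 rows of src0, so one threadgroup covers 2*4 = 8 rows per dispatch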
// function constants offsets
#define FC_FLASH_ATTN_EXT_PAD 100
#define FC_FLASH_ATTN_EXT_BLK 200
#define FC_FLASH_ATTN_EXT 300
#define FC_FLASH_ATTN_EXT_VEC 400
#define FC_FLASH_ATTN_EXT_VEC_REDUCE 500
#define FC_MUL_MV 600
#define FC_MUL_MM 700
#define FC_ROPE 800
#define FC_SSM_CONV 900
#define FC_COUNT_EQUAL 1000
// op-specific constants
#define OP_FLASH_ATTN_EXT_NQPTG 8
#define OP_FLASH_ATTN_EXT_NCPSG 64
#define OP_FLASH_ATTN_EXT_VEC_NQPTG 1
#define OP_FLASH_ATTN_EXT_VEC_NCPSG 32
// kernel argument structs
//
// - element counters (e.g. ne00) typically use int32_t to reduce register usage
// however, be careful about int overflows when using them in the kernel implementation
//
// - strides (e.g. nb00) use uint64_t
typedef struct {
int32_t ne00;
int32_t ne01;
int32_t ne02;
int32_t ne03;
uint64_t nb00;
uint64_t nb01;
uint64_t nb02;
uint64_t nb03;
int32_t ne10;
int32_t ne11;
int32_t ne12;
int32_t ne13;
uint64_t nb10;
uint64_t nb11;
uint64_t nb12;
uint64_t nb13;
int32_t ne0;
int32_t ne1;
int32_t ne2;
int32_t ne3;
uint64_t nb0;
uint64_t nb1;
uint64_t nb2;
uint64_t nb3;
int32_t dim;
} ggml_metal_kargs_concat;
typedef struct {
int32_t ne00;
int32_t ne01;
int32_t ne02;
int32_t ne03;
uint64_t nb00;
uint64_t nb01;
uint64_t nb02;
uint64_t nb03;
int32_t ne10;
int32_t ne11;
int32_t ne12;
int32_t ne13;
uint64_t nb10;
uint64_t nb11;
uint64_t nb12;
uint64_t nb13;
int32_t ne0;
int32_t ne1;
int32_t ne2;
int32_t ne3;
uint64_t nb0;
uint64_t nb1;
uint64_t nb2;
uint64_t nb3;
uint64_t offs;
uint64_t o1[8];
} ggml_metal_kargs_bin;
typedef struct {
int64_t ne0;
int64_t ne1;
size_t nb01;
size_t nb02;
size_t nb11;
size_t nb21;
} ggml_metal_kargs_add_id;
typedef struct {
int32_t ne00;
int32_t ne01;
int32_t ne02;
int32_t ne03;
uint64_t nb00;
uint64_t nb01;
uint64_t nb02;
uint64_t nb03;
int32_t ne0;
int32_t ne1;
int32_t ne2;
int32_t ne3;
uint64_t nb0;
uint64_t nb1;
uint64_t nb2;
uint64_t nb3;
} ggml_metal_kargs_repeat;
typedef struct {
float scale;
float bias;
} ggml_metal_kargs_scale;
typedef struct {
float val;
} ggml_metal_kargs_fill;
typedef struct {
float min;
float max;
} ggml_metal_kargs_clamp;
typedef struct {
int64_t nk0;
int64_t ne00;
int64_t ne01;
int64_t ne02;
int64_t ne03;
uint64_t nb00;
uint64_t nb01;
uint64_t nb02;
uint64_t nb03;
int64_t ne0;
int64_t ne1;
int64_t ne2;
int64_t ne3;
uint64_t nb0;
uint64_t nb1;
uint64_t nb2;
uint64_t nb3;
} ggml_metal_kargs_cpy;
typedef struct {
int64_t ne10;
int64_t ne11;
int64_t ne12;
uint64_t nb10;
uint64_t nb11;
uint64_t nb12;
uint64_t nb13;
uint64_t nb1;
uint64_t nb2;
uint64_t nb3;
uint64_t offs;
bool inplace;
} ggml_metal_kargs_set;
typedef struct {
int32_t ne00;
int32_t ne01;
int32_t ne02;
int32_t ne03;
uint64_t nb00;
uint64_t nb01;
uint64_t nb02;
uint64_t nb03;
int32_t ne0;
int32_t ne1;
int32_t ne2;
int32_t ne3;
uint64_t nb0;
uint64_t nb1;
uint64_t nb2;
uint64_t nb3;
int32_t n_past;
int32_t n_dims;
int32_t n_ctx_orig;
float freq_base;
float freq_scale;
float ext_factor;
float attn_factor;
float beta_fast;
float beta_slow;
int32_t sect_0;
int32_t sect_1;
int32_t sect_2;
int32_t sect_3;
bool src2;
} ggml_metal_kargs_rope;
typedef struct {
int32_t ne11;
int32_t ne_12_2; // assume K and V are same shape
int32_t ne_12_3;
uint64_t nb11;
uint64_t nb12;
uint64_t nb13;
uint64_t nb21;
uint64_t nb22;
uint64_t nb23;
int32_t ne31;
int32_t ne32;
int32_t ne33;
uint64_t nb31;
uint64_t nb32;
uint64_t nb33;
} ggml_metal_kargs_flash_attn_ext_pad;
typedef struct {
int32_t ne01;
int32_t ne30;
int32_t ne31;
int32_t ne32;
int32_t ne33;
uint64_t nb31;
uint64_t nb32;
uint64_t nb33;
} ggml_metal_kargs_flash_attn_ext_blk;
typedef struct {
int32_t ne01;
int32_t ne02;
int32_t ne03;
uint64_t nb01;
uint64_t nb02;
uint64_t nb03;
int32_t ne11;
int32_t ne_12_2; // assume K and V are same shape
int32_t ne_12_3;
int32_t ns10;
uint64_t nb11;
uint64_t nb12;
uint64_t nb13;
int32_t ns20;
uint64_t nb21;
uint64_t nb22;
uint64_t nb23;
int32_t ne31;
int32_t ne32;
int32_t ne33;
uint64_t nb31;
uint64_t nb32;
uint64_t nb33;
int32_t ne1;
int32_t ne2;
int32_t ne3;
float scale;
float max_bias;
float m0;
float m1;
int32_t n_head_log2;
float logit_softcap;
} ggml_metal_kargs_flash_attn_ext;
typedef struct {
int32_t ne01;
int32_t ne02;
int32_t ne03;
uint64_t nb01;
uint64_t nb02;
uint64_t nb03;
int32_t ne11;
int32_t ne_12_2; // assume K and V are same shape
int32_t ne_12_3;
int32_t ns10;
uint64_t nb11;
uint64_t nb12;
uint64_t nb13;
int32_t ns20;
uint64_t nb21;
uint64_t nb22;
uint64_t nb23;
int32_t ne31;
int32_t ne32;
int32_t ne33;
uint64_t nb31;
uint64_t nb32;
uint64_t nb33;
int32_t ne1;
int32_t ne2;
int32_t ne3;
float scale;
float max_bias;
float m0;
float m1;
int32_t n_head_log2;
float logit_softcap;
} ggml_metal_kargs_flash_attn_ext_vec;
typedef struct {
int32_t nrows;
} ggml_metal_kargs_flash_attn_ext_vec_reduce;
typedef struct {
int32_t ne00;
int32_t ne02;
uint64_t nb01;
uint64_t nb02;
uint64_t nb03;
int32_t ne12;
uint64_t nb10;
uint64_t nb11;
uint64_t nb12;
uint64_t nb13;
int32_t ne0;
int32_t ne1;
int16_t r2;
int16_t r3;
} ggml_metal_kargs_mul_mm;
typedef struct {
int32_t ne00;
int32_t ne01;
int32_t ne02;
uint64_t nb00;
uint64_t nb01;
uint64_t nb02;
uint64_t nb03;
int32_t ne10;
int32_t ne11;
int32_t ne12;
uint64_t nb10;
uint64_t nb11;
uint64_t nb12;
uint64_t nb13;
int32_t ne0;
int32_t ne1;
int32_t nr0;
int16_t r2;
int16_t r3;
} ggml_metal_kargs_mul_mv;
typedef struct {
int32_t ne00;
int32_t ne01;
int32_t ne02;
uint64_t nb00;
uint64_t nb01;
uint64_t nb02;
uint64_t nb03;
int32_t ne10;
int32_t ne11;
int32_t ne12;
uint64_t nb10;
uint64_t nb11;
uint64_t nb12;
uint64_t nb13;
int32_t ne0;
int32_t ne1;
int16_t r2;
int16_t r3;
} ggml_metal_kargs_mul_mv_ext;
typedef struct {
int32_t ne02;
int32_t ne10;
int32_t ne11; // n_expert_used (bcast)
uint64_t nb11;
uint64_t nb12;
int32_t ne21; // n_tokens
int32_t ne20; // n_expert_used
uint64_t nb21;
} ggml_metal_kargs_mul_mm_id_map0;
typedef struct {
int32_t ne00;
int32_t ne02;
uint64_t nb01;
uint64_t nb02;
uint64_t nb03;
int32_t ne11;
uint64_t nb10;
uint64_t nb11;
uint64_t nb12;
uint64_t nb13;
int32_t ne20;
int32_t ne21;
int32_t ne0;
int32_t ne1;
int16_t r2;
int16_t r3;
} ggml_metal_kargs_mul_mm_id;
typedef struct {
int32_t nei0;
int32_t nei1;
uint64_t nbi1;
int32_t ne00;
int32_t ne01;
int32_t ne02;
uint64_t nb00;
uint64_t nb01;
uint64_t nb02;
int32_t ne10;
int32_t ne11;
int32_t ne12;
int32_t ne13;
uint64_t nb10;
uint64_t nb11;
uint64_t nb12;
int32_t ne0;
int32_t ne1;
uint64_t nb1;
int32_t nr0;
} ggml_metal_kargs_mul_mv_id;
// NORM
// RMS_NORM
typedef struct {
int32_t ne00;
int32_t ne00_t;
uint64_t nb1;
uint64_t nb2;
uint64_t nb3;
float eps;
int32_t nef1[3];
int32_t nef2[3];
int32_t nef3[3];
uint64_t nbf1[3];
uint64_t nbf2[3];
uint64_t nbf3[3];
} ggml_metal_kargs_norm;
typedef struct {
int32_t ne00;
int32_t ne00_4;
uint64_t nb01;
float eps;
} ggml_metal_kargs_l2_norm;
typedef struct {
int64_t ne00;
int64_t ne01;
int64_t ne02;
uint64_t nb00;
uint64_t nb01;
uint64_t nb02;
int32_t ngrp;
float eps;
} ggml_metal_kargs_group_norm;
typedef struct {
int32_t IC;
int32_t IL;
int32_t K;
int32_t s0;
uint64_t nb0;
uint64_t nb1;
} ggml_metal_kargs_conv_transpose_1d;
typedef struct {
int32_t IC;
int32_t IH;
int32_t IW;
int32_t KH;
int32_t KW;
int32_t OC;
int32_t s0;
uint64_t nb0;
uint64_t nb1;
uint64_t nb2;
} ggml_metal_kargs_conv_transpose_2d;
typedef struct {
uint64_t nb00;
uint64_t nb01;
uint64_t nb02;
uint64_t nb03;
uint64_t nb10;
uint64_t nb11;
uint64_t nb12;
uint64_t nb13;
uint64_t nb0;
uint64_t nb1;
uint64_t nb2;
uint64_t nb3;
int32_t IW;
int32_t IH;
int32_t KW;
int32_t KH;
int32_t IC;
int32_t OC;
int32_t OW;
int32_t OH;
int32_t N;
int32_t s0;
int32_t s1;
int32_t p0;
int32_t p1;
int32_t d0;
int32_t d1;
} ggml_metal_kargs_conv_2d;
typedef struct {
uint64_t ofs0;
uint64_t ofs1;
int32_t IW;
int32_t IH;
int32_t CHW;
int32_t s0;
int32_t s1;
int32_t p0;
int32_t p1;
int32_t d0;
int32_t d1;
int32_t N;
int32_t KH;
int32_t KW;
int32_t KHW; // KH * KW, pre-computed on CPU to save GPU resources
} ggml_metal_kargs_im2col;
typedef struct {
int32_t ne00;
uint64_t nb01;
int32_t ne10;
uint64_t nb11;
int32_t ne0;
uint64_t nb1;
int32_t i00;
int32_t i10;
float alpha;
float limit;
} ggml_metal_kargs_glu;
typedef struct {
uint64_t np;
} ggml_metal_kargs_sum;
typedef struct {
int64_t ne00;
int64_t ne01;
int64_t ne02;
int64_t ne03;
uint64_t nb00;
uint64_t nb01;
uint64_t nb02;
uint64_t nb03;
int64_t ne0;
int64_t ne1;
int64_t ne2;
int64_t ne3;
uint64_t nb0;
uint64_t nb1;
uint64_t nb2;
uint64_t nb3;
} ggml_metal_kargs_sum_rows;
typedef struct {
int64_t ne00;
int64_t ne01;
int64_t ne02;
int64_t ne03;
uint64_t nb00;
uint64_t nb01;
uint64_t nb02;
uint64_t nb03;
int64_t net0;
int64_t net1;
int64_t net2;
int64_t net3;
uint64_t nbt0;
uint64_t nbt1;
uint64_t nbt2;
uint64_t nbt3;
bool outb;
} ggml_metal_kargs_cumsum_blk;
typedef struct {
int64_t ne00;
int64_t ne01;
int64_t ne02;
int64_t ne03;
uint64_t nb00;
uint64_t nb01;
uint64_t nb02;
uint64_t nb03;
int64_t net0;
int64_t net1;
int64_t net2;
int64_t net3;
uint64_t nbt0;
uint64_t nbt1;
uint64_t nbt2;
uint64_t nbt3;
} ggml_metal_kargs_cumsum_add;
typedef struct {
int32_t ne00;
int32_t ne01;
int32_t ne02;
uint64_t nb01;
uint64_t nb02;
uint64_t nb03;
int32_t ne11;
int32_t ne12;
int32_t ne13;
uint64_t nb11;
uint64_t nb12;
uint64_t nb13;
uint64_t nb1;
uint64_t nb2;
uint64_t nb3;
float scale;
float max_bias;
float m0;
float m1;
int32_t n_head_log2;
} ggml_metal_kargs_soft_max;
typedef struct {
int64_t ne00;
int64_t ne01;
int64_t ne02;
uint64_t nb00;
uint64_t nb01;
uint64_t nb02;
int64_t ne10;
int64_t ne11;
uint64_t nb10;
uint64_t nb11;
int64_t ne0;
int64_t ne1;
int64_t ne2;
uint64_t nb0;
uint64_t nb1;
uint64_t nb2;
} ggml_metal_kargs_ssm_conv;
typedef struct {
int64_t d_state;
int64_t d_inner;
int64_t n_head;
int64_t n_group;
int64_t n_seq_tokens;
int64_t n_seqs;
uint64_t s_off;
uint64_t nb00;
uint64_t nb01;
uint64_t nb02;
uint64_t nb03;
uint64_t nb10;
uint64_t nb11;
uint64_t nb12;
uint64_t ns12;
uint64_t nb13;
uint64_t nb20;
uint64_t nb21;
uint64_t ns21;
uint64_t nb22;
int64_t ne30;
uint64_t nb31;
uint64_t nb41;
uint64_t nb42;
uint64_t ns42;
uint64_t nb43;
uint64_t nb51;
uint64_t nb52;
uint64_t ns52;
uint64_t nb53;
uint64_t nb0;
} ggml_metal_kargs_ssm_scan;
typedef struct {
int32_t ne00t;
int32_t ne00;
uint64_t nb01;
uint64_t nb02;
uint64_t nb03;
int32_t ne10;
uint64_t nb10;
uint64_t nb11;
uint64_t nb12;
uint64_t nb1;
uint64_t nb2;
uint64_t nb3;
} ggml_metal_kargs_get_rows;
typedef struct {
int32_t nk0;
int32_t ne01;
uint64_t nb01;
uint64_t nb02;
uint64_t nb03;
int32_t ne11;
int32_t ne12;
uint64_t nb10;
uint64_t nb11;
uint64_t nb12;
uint64_t nb1;
uint64_t nb2;
uint64_t nb3;
} ggml_metal_kargs_set_rows;
typedef struct {
int64_t ne00;
int64_t ne01;
int64_t ne02;
int64_t ne03;
uint64_t nb00;
uint64_t nb01;
uint64_t nb02;
uint64_t nb03;
int64_t ne0;
int64_t ne1;
int64_t ne2;
int64_t ne3;
uint64_t nb0;
uint64_t nb1;
uint64_t nb2;
uint64_t nb3;
float sf0;
float sf1;
float sf2;
float sf3;
} ggml_metal_kargs_upscale;
typedef struct {
int64_t ne00;
int64_t ne01;
int64_t ne02;
int64_t ne03;
uint64_t nb00;
uint64_t nb01;
uint64_t nb02;
uint64_t nb03;
int64_t ne0;
int64_t ne1;
int64_t ne2;
int64_t ne3;
uint64_t nb0;
uint64_t nb1;
uint64_t nb2;
uint64_t nb3;
} ggml_metal_kargs_pad;
typedef struct {
int64_t ne00;
int64_t ne01;
int64_t ne02;
int64_t ne03;
uint64_t nb00;
uint64_t nb01;
uint64_t nb02;
uint64_t nb03;
int64_t ne0;
int64_t ne1;
int64_t ne2;
int64_t ne3;
uint64_t nb0;
uint64_t nb1;
uint64_t nb2;
uint64_t nb3;
int32_t p0;
int32_t p1;
} ggml_metal_kargs_pad_reflect_1d;
typedef struct {
uint64_t nb1;
int dim;
int max_period;
} ggml_metal_kargs_timestep_embedding;
typedef struct {
float slope;
} ggml_metal_kargs_leaky_relu;
typedef struct {
int32_t ne00;
int32_t ne01;
int32_t ne02;
int32_t ne03;
uint64_t nb00;
uint64_t nb01;
uint64_t nb02;
uint64_t nb03;
int32_t ne0;
int32_t ne1;
int32_t ne2;
int32_t ne3;
uint64_t nb0;
uint64_t nb1;
uint64_t nb2;
uint64_t nb3;
} ggml_metal_kargs_tri;
typedef struct {
int32_t ne00;
int32_t ne01;
int32_t ne02;
int32_t ne03;
uint64_t nb00;
uint64_t nb01;
uint64_t nb02;
uint64_t nb03;
int32_t ne0;
int32_t ne1;
int32_t ne2;
int32_t ne3;
int32_t top_k;
} ggml_metal_kargs_argsort;
typedef struct {
int64_t ne00;
int64_t ne01;
int64_t ne02;
int64_t ne03;
uint64_t nb00;
uint64_t nb01;
uint64_t nb02;
uint64_t nb03;
int32_t ne0;
int32_t ne1;
int32_t ne2;
int32_t ne3;
int32_t top_k;
int32_t len;
} ggml_metal_kargs_argsort_merge;
typedef struct {
int64_t ne0;
float start;
float step;
} ggml_metal_kargs_arange;
typedef struct {
int64_t val;
} ggml_metal_kargs_memset;
typedef struct {
int32_t ne00;
int32_t ne01;
int32_t ne02;
int32_t ne03;
uint64_t nb00;
uint64_t nb01;
uint64_t nb02;
uint64_t nb03;
uint64_t nb10;
uint64_t nb11;
uint64_t nb12;
uint64_t nb13;
} ggml_metal_kargs_count_equal;
typedef struct {
int32_t k0;
int32_t k1;
int32_t s0;
int32_t s1;
int32_t p0;
int32_t p1;
int64_t IH;
int64_t IW;
int64_t OH;
int64_t OW;
int64_t np;
} ggml_metal_kargs_pool_2d;
typedef struct {
int64_t ne00;
uint64_t nb01;
} ggml_metal_kargs_argmax;
typedef struct {
int64_t np;
} ggml_metal_kargs_opt_step_adamw;
typedef struct {
int64_t np;
} ggml_metal_kargs_opt_step_sgd;
#endif // GGML_METAL_IMPL

File diff suppressed because it is too large

View File

@@ -0,0 +1,94 @@
#pragma once
#include "ggml-metal-device.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef struct ggml_metal_op * ggml_metal_op_t;
ggml_metal_op_t ggml_metal_op_init(
ggml_metal_device_t dev,
ggml_metal_cmd_buf_t cmd_buf,
struct ggml_cgraph * gf,
int idx_start,
int idx_end,
bool use_fusion,
bool use_concurrency,
bool use_capture,
int debug_graph,
int debug_fusion);
void ggml_metal_op_free(ggml_metal_op_t ctx);
int ggml_metal_op_n_nodes(ggml_metal_op_t ctx);
int ggml_metal_op_encode(ggml_metal_op_t ctx, int idx);
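// typical lifecycle (illustrative sketch, not upstream documentation; assumes ggml_metal_op_encode
// returns the number of graph nodes consumed, so fused ops can advance the index by more than one):
//
//   ggml_metal_op_t op = ggml_metal_op_init(dev, cmd_buf, gf, 0, ggml_graph_n_nodes(gf),
//                                           /*use_fusion     =*/ true,
//                                           /*use_concurrency=*/ true,
//                                           /*use_capture    =*/ false,
//                                           /*debug_graph    =*/ 0,
//                                           /*debug_fusion   =*/ 0);
//
//   for (int i = 0; i < ggml_metal_op_n_nodes(op); ) {
//       i += ggml_metal_op_encode(op, i);
//   }
//
//   ggml_metal_op_free(op);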
//
// available ops:
//
// tokens per expert
size_t ggml_metal_op_mul_mat_id_extra_tpe(const struct ggml_tensor * op);
// id map [n_tokens, n_expert]
size_t ggml_metal_op_mul_mat_id_extra_ids(const struct ggml_tensor * op);
// return true if we should use the FA vector kernel for this op
bool ggml_metal_op_flash_attn_ext_use_vec(const struct ggml_tensor * op);
size_t ggml_metal_op_flash_attn_ext_extra_pad(const struct ggml_tensor * op);
size_t ggml_metal_op_flash_attn_ext_extra_blk(const struct ggml_tensor * op);
size_t ggml_metal_op_flash_attn_ext_extra_tmp(const struct ggml_tensor * op);
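// note: the sizes returned by the *_extra_* helpers above are added on top of ggml_nbytes() in
// ggml_backend_metal_buffer_type_get_alloc_size() (see ggml-metal.cpp)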
int ggml_metal_op_concat (ggml_metal_op_t ctx, int idx);
int ggml_metal_op_repeat (ggml_metal_op_t ctx, int idx);
int ggml_metal_op_acc (ggml_metal_op_t ctx, int idx);
int ggml_metal_op_scale (ggml_metal_op_t ctx, int idx);
int ggml_metal_op_fill (ggml_metal_op_t ctx, int idx);
int ggml_metal_op_clamp (ggml_metal_op_t ctx, int idx);
int ggml_metal_op_unary (ggml_metal_op_t ctx, int idx);
int ggml_metal_op_glu (ggml_metal_op_t ctx, int idx);
int ggml_metal_op_sum (ggml_metal_op_t ctx, int idx);
int ggml_metal_op_sum_rows (ggml_metal_op_t ctx, int idx);
int ggml_metal_op_cumsum (ggml_metal_op_t ctx, int idx);
int ggml_metal_op_get_rows (ggml_metal_op_t ctx, int idx);
int ggml_metal_op_set_rows (ggml_metal_op_t ctx, int idx);
int ggml_metal_op_soft_max (ggml_metal_op_t ctx, int idx);
int ggml_metal_op_ssm_conv (ggml_metal_op_t ctx, int idx);
int ggml_metal_op_ssm_scan (ggml_metal_op_t ctx, int idx);
int ggml_metal_op_rwkv (ggml_metal_op_t ctx, int idx);
int ggml_metal_op_cpy (ggml_metal_op_t ctx, int idx);
int ggml_metal_op_pool_2d (ggml_metal_op_t ctx, int idx);
int ggml_metal_op_mul_mat (ggml_metal_op_t ctx, int idx);
int ggml_metal_op_mul_mat_id (ggml_metal_op_t ctx, int idx);
int ggml_metal_op_add_id (ggml_metal_op_t ctx, int idx);
int ggml_metal_op_flash_attn_ext (ggml_metal_op_t ctx, int idx);
int ggml_metal_op_bin (ggml_metal_op_t ctx, int idx);
int ggml_metal_op_l2_norm (ggml_metal_op_t ctx, int idx);
int ggml_metal_op_group_norm (ggml_metal_op_t ctx, int idx);
int ggml_metal_op_norm (ggml_metal_op_t ctx, int idx);
int ggml_metal_op_rope (ggml_metal_op_t ctx, int idx);
int ggml_metal_op_im2col (ggml_metal_op_t ctx, int idx);
int ggml_metal_op_conv_2d (ggml_metal_op_t ctx, int idx);
int ggml_metal_op_conv_transpose_1d (ggml_metal_op_t ctx, int idx);
int ggml_metal_op_conv_transpose_2d (ggml_metal_op_t ctx, int idx);
int ggml_metal_op_upscale (ggml_metal_op_t ctx, int idx);
int ggml_metal_op_pad (ggml_metal_op_t ctx, int idx);
int ggml_metal_op_pad_reflect_1d (ggml_metal_op_t ctx, int idx);
int ggml_metal_op_arange (ggml_metal_op_t ctx, int idx);
int ggml_metal_op_timestep_embedding(ggml_metal_op_t ctx, int idx);
int ggml_metal_op_argmax (ggml_metal_op_t ctx, int idx);
int ggml_metal_op_argsort (ggml_metal_op_t ctx, int idx);
int ggml_metal_op_top_k (ggml_metal_op_t ctx, int idx);
int ggml_metal_op_leaky_relu (ggml_metal_op_t ctx, int idx);
int ggml_metal_op_tri (ggml_metal_op_t ctx, int idx);
int ggml_metal_op_opt_step_adamw (ggml_metal_op_t ctx, int idx);
int ggml_metal_op_opt_step_sgd (ggml_metal_op_t ctx, int idx);
int ggml_metal_op_count_equal (ggml_metal_op_t ctx, int idx);
#ifdef __cplusplus
}
#endif

View File

@@ -0,0 +1,724 @@
#include "ggml-metal.h"
#include "ggml-impl.h"
#include "ggml-backend-impl.h"
#include "ggml-metal-device.h"
#include "ggml-metal-context.h"
#include "ggml-metal-ops.h"
// globals
// initialized in ggml_backend_metal_reg
static ggml_backend_reg g_ggml_metal_reg;
static ggml_backend_device g_ggml_metal_device;
////////////////////////////////////////////////////////////////////////////////
// backend interface
////////////////////////////////////////////////////////////////////////////////
// shared buffer
static void ggml_backend_metal_buffer_shared_free_buffer(ggml_backend_buffer_t buffer) {
ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context;
GGML_ASSERT(ggml_metal_buffer_is_shared(ctx));
ggml_metal_buffer_free(ctx);
}
static void * ggml_backend_metal_buffer_shared_get_base(ggml_backend_buffer_t buffer) {
ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context;
GGML_ASSERT(ggml_metal_buffer_is_shared(ctx));
return ggml_metal_buffer_get_base(ctx);
}
static void ggml_backend_metal_buffer_shared_memset_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) {
ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context;
GGML_ASSERT(ggml_metal_buffer_is_shared(ctx));
ggml_metal_buffer_memset_tensor(ctx, tensor, value, offset, size);
}
static void ggml_backend_metal_buffer_shared_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context;
GGML_ASSERT(ggml_metal_buffer_is_shared(ctx));
ggml_metal_buffer_set_tensor(ctx, tensor, data, offset, size);
}
static void ggml_backend_metal_buffer_shared_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * tensor, void * data, size_t offset, size_t size) {
ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context;
GGML_ASSERT(ggml_metal_buffer_is_shared(ctx));
ggml_metal_buffer_get_tensor(ctx, tensor, data, offset, size);
}
static bool ggml_backend_metal_buffer_shared_cpy_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * src, ggml_tensor * dst) {
ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context;
GGML_ASSERT(ggml_metal_buffer_is_shared(ctx));
GGML_UNUSED(buffer);
GGML_UNUSED(src);
GGML_UNUSED(dst);
return false;
}
static void ggml_backend_metal_buffer_shared_clear(ggml_backend_buffer_t buffer, uint8_t value) {
ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context;
GGML_ASSERT(ggml_metal_buffer_is_shared(ctx));
ggml_metal_buffer_clear(ctx, value);
}
static ggml_backend_buffer_i ggml_backend_metal_buffer_shared_i = {
/* .free_buffer = */ ggml_backend_metal_buffer_shared_free_buffer,
/* .get_base = */ ggml_backend_metal_buffer_shared_get_base,
/* .init_tensor = */ NULL,
/* .memset_tensor = */ ggml_backend_metal_buffer_shared_memset_tensor,
/* .set_tensor = */ ggml_backend_metal_buffer_shared_set_tensor,
/* .get_tensor = */ ggml_backend_metal_buffer_shared_get_tensor,
/* .cpy_tensor = */ ggml_backend_metal_buffer_shared_cpy_tensor,
/* .clear = */ ggml_backend_metal_buffer_shared_clear,
/* .reset = */ NULL,
};
// private buffer
static void ggml_backend_metal_buffer_private_free_buffer(ggml_backend_buffer_t buffer) {
ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context;
GGML_ASSERT(!ggml_metal_buffer_is_shared(ctx));
ggml_metal_buffer_free(ctx);
}
static void * ggml_backend_metal_buffer_private_get_base(ggml_backend_buffer_t buffer) {
ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context;
GGML_ASSERT(!ggml_metal_buffer_is_shared(ctx));
return ggml_metal_buffer_get_base(ctx);
}
static void ggml_backend_metal_buffer_private_memset_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) {
ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context;
GGML_ASSERT(!ggml_metal_buffer_is_shared(ctx));
ggml_metal_buffer_memset_tensor(ctx, tensor, value, offset, size);
}
static void ggml_backend_metal_buffer_private_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context;
GGML_ASSERT(!ggml_metal_buffer_is_shared(ctx));
ggml_metal_buffer_set_tensor(ctx, tensor, data, offset, size);
}
static void ggml_backend_metal_buffer_private_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * tensor, void * data, size_t offset, size_t size) {
ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context;
GGML_ASSERT(!ggml_metal_buffer_is_shared(ctx));
ggml_metal_buffer_get_tensor(ctx, tensor, data, offset, size);
}
static bool ggml_backend_metal_buffer_private_cpy_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * src, ggml_tensor * dst) {
ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context;
GGML_ASSERT(!ggml_metal_buffer_is_shared(ctx));
GGML_UNUSED(buffer);
GGML_UNUSED(src);
GGML_UNUSED(dst);
return false;
}
static void ggml_backend_metal_buffer_private_clear(ggml_backend_buffer_t buffer, uint8_t value) {
ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context;
GGML_ASSERT(!ggml_metal_buffer_is_shared(ctx));
ggml_metal_buffer_clear(ctx, value);
}
static ggml_backend_buffer_i ggml_backend_metal_buffer_private_i = {
/* .free_buffer = */ ggml_backend_metal_buffer_private_free_buffer,
/* .get_base = */ ggml_backend_metal_buffer_private_get_base,
/* .init_tensor = */ NULL,
/* .memset_tensor = */ ggml_backend_metal_buffer_private_memset_tensor,
/* .set_tensor = */ ggml_backend_metal_buffer_private_set_tensor,
/* .get_tensor = */ ggml_backend_metal_buffer_private_get_tensor,
/* .cpy_tensor = */ ggml_backend_metal_buffer_private_cpy_tensor,
/* .clear = */ ggml_backend_metal_buffer_private_clear,
/* .reset = */ NULL,
};
//
// buffer types
//
// common method for allocating shared or private Metal buffers
static ggml_backend_buffer_t ggml_backend_metal_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size, bool shared) {
ggml_metal_device_t ctx_dev = (ggml_metal_device_t)buft->device->context;
ggml_metal_buffer_t res = ggml_metal_buffer_init(ctx_dev, size, shared);
ggml_backend_buffer_i buf_i = ggml_metal_buffer_is_shared(res)
? ggml_backend_metal_buffer_shared_i
: ggml_backend_metal_buffer_private_i;
return ggml_backend_buffer_init(buft, buf_i, res, size);
}
static size_t ggml_backend_metal_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) {
size_t res = ggml_nbytes(tensor);
// some operations require additional memory for fleeting data:
switch (tensor->op) {
case GGML_OP_MUL_MAT_ID:
{
res += ggml_metal_op_mul_mat_id_extra_tpe(tensor);
res += ggml_metal_op_mul_mat_id_extra_ids(tensor);
} break;
case GGML_OP_FLASH_ATTN_EXT:
{
res += ggml_metal_op_flash_attn_ext_extra_pad(tensor);
res += ggml_metal_op_flash_attn_ext_extra_blk(tensor);
res += ggml_metal_op_flash_attn_ext_extra_tmp(tensor);
} break;
case GGML_OP_CUMSUM:
case GGML_OP_ARGSORT:
{
res *= 2;
} break;
case GGML_OP_TOP_K:
{
res = 2*sizeof(int32_t)*ggml_nelements(tensor->src[0]);
} break;
default:
break;
}
return res;
GGML_UNUSED(buft);
}
// default (shared) buffer type
static const char * ggml_backend_metal_buffer_type_shared_get_name(ggml_backend_buffer_type_t buft) {
return "Metal";
GGML_UNUSED(buft);
}
static ggml_backend_buffer_t ggml_backend_metal_buffer_type_shared_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
return ggml_backend_metal_buffer_type_alloc_buffer(buft, size, true);
}
static size_t ggml_backend_metal_buffer_type_shared_get_alignment(ggml_backend_buffer_type_t buft) {
return 32;
GGML_UNUSED(buft);
}
static size_t ggml_backend_metal_buffer_type_shared_get_max_size(ggml_backend_buffer_type_t buft) {
ggml_metal_device_t ctx_dev = (ggml_metal_device_t)buft->device->context;
return ggml_metal_device_get_props(ctx_dev)->max_buffer_size;
}
static size_t ggml_backend_metal_buffer_type_shared_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) {
return ggml_backend_metal_buffer_type_get_alloc_size(buft, tensor);
}
static bool ggml_backend_metal_buffer_type_shared_is_host(ggml_backend_buffer_type_t buft) {
return false;
GGML_UNUSED(buft);
}
static ggml_backend_buffer_type_t ggml_backend_metal_buffer_type_shared(void) {
static ggml_backend_buffer_type ggml_backend_buffer_type_metal = {
/* .iface = */ {
/* .get_name = */ ggml_backend_metal_buffer_type_shared_get_name,
/* .alloc_buffer = */ ggml_backend_metal_buffer_type_shared_alloc_buffer,
/* .get_alignment = */ ggml_backend_metal_buffer_type_shared_get_alignment,
/* .get_max_size = */ ggml_backend_metal_buffer_type_shared_get_max_size,
/* .get_alloc_size = */ ggml_backend_metal_buffer_type_shared_get_alloc_size,
/* .is_host = */ ggml_backend_metal_buffer_type_shared_is_host,
},
/* .device = */ &g_ggml_metal_device,
/* .context = */ NULL,
};
return &ggml_backend_buffer_type_metal;
}
// default (private) buffer type
static const char * ggml_backend_metal_buffer_type_private_get_name(ggml_backend_buffer_type_t buft) {
return "Metal_Private";
GGML_UNUSED(buft);
}
static ggml_backend_buffer_t ggml_backend_metal_buffer_type_private_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
return ggml_backend_metal_buffer_type_alloc_buffer(buft, size, false);
}
static size_t ggml_backend_metal_buffer_type_private_get_alignment(ggml_backend_buffer_type_t buft) {
return 32;
GGML_UNUSED(buft);
}
static size_t ggml_backend_metal_buffer_type_private_get_max_size(ggml_backend_buffer_type_t buft) {
ggml_metal_device_t ctx_dev = (ggml_metal_device_t)buft->device->context;
return ggml_metal_device_get_props(ctx_dev)->max_buffer_size;
}
static size_t ggml_backend_metal_buffer_type_private_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) {
return ggml_backend_metal_buffer_type_get_alloc_size(buft, tensor);
}
static bool ggml_backend_metal_buffer_type_private_is_host(ggml_backend_buffer_type_t buft) {
return false;
GGML_UNUSED(buft);
}
static ggml_backend_buffer_type_t ggml_backend_metal_buffer_type_private(void) {
static ggml_backend_buffer_type ggml_backend_buffer_type_metal = {
/* .iface = */ {
/* .get_name = */ ggml_backend_metal_buffer_type_private_get_name,
/* .alloc_buffer = */ ggml_backend_metal_buffer_type_private_alloc_buffer,
/* .get_alignment = */ ggml_backend_metal_buffer_type_private_get_alignment,
/* .get_max_size = */ ggml_backend_metal_buffer_type_private_get_max_size,
/* .get_alloc_size = */ ggml_backend_metal_buffer_type_private_get_alloc_size,
/* .is_host = */ ggml_backend_metal_buffer_type_private_is_host,
},
/* .device = */ &g_ggml_metal_device,
/* .context = */ NULL,
};
return &ggml_backend_buffer_type_metal;
}
// mapped buffer type
static const char * ggml_backend_metal_buffer_type_mapped_get_name(ggml_backend_buffer_type_t buft) {
return "Metal_Mapped";
GGML_UNUSED(buft);
}
static ggml_backend_buffer_t ggml_backend_metal_buffer_type_mapped_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
// for mapped buffers, prefer shared memory
return ggml_backend_metal_buffer_type_alloc_buffer(buft, size, true);
}
static size_t ggml_backend_metal_buffer_type_mapped_get_alignment(ggml_backend_buffer_type_t buft) {
return 32;
GGML_UNUSED(buft);
}
static size_t ggml_backend_metal_buffer_type_mapped_get_max_size(ggml_backend_buffer_type_t buft) {
ggml_metal_device_t ctx_dev = (ggml_metal_device_t)buft->device->context;
return ggml_metal_device_get_props(ctx_dev)->max_buffer_size;
}
static size_t ggml_backend_metal_buffer_type_mapped_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) {
return ggml_backend_metal_buffer_type_get_alloc_size(buft, tensor);
}
static bool ggml_backend_metal_buffer_type_mapped_is_host(ggml_backend_buffer_type_t buft) {
return false;
GGML_UNUSED(buft);
}
static ggml_backend_buffer_type_t ggml_backend_metal_buffer_type_mapped(void) {
// note: not obvious, but this buffer type still needs to implement .alloc_buffer:
// https://github.com/ggml-org/llama.cpp/pull/15832#discussion_r2333177099
static ggml_backend_buffer_type ggml_backend_buffer_type_mapped_metal = {
/* .iface = */ {
/* .get_name = */ ggml_backend_metal_buffer_type_mapped_get_name,
/* .alloc_buffer = */ ggml_backend_metal_buffer_type_mapped_alloc_buffer,
/* .get_alignment = */ ggml_backend_metal_buffer_type_mapped_get_alignment,
/* .get_max_size = */ ggml_backend_metal_buffer_type_mapped_get_max_size,
/* .get_alloc_size = */ ggml_backend_metal_buffer_type_mapped_get_alloc_size,
/* .is_host = */ ggml_backend_metal_buffer_type_mapped_is_host,
},
/* .device = */ &g_ggml_metal_device,
/* .context = */ NULL,
};
return &ggml_backend_buffer_type_mapped_metal;
}
// backend
static const char * ggml_backend_metal_name(ggml_backend_t backend) {
return "Metal";
GGML_UNUSED(backend);
}
static void ggml_backend_metal_free(ggml_backend_t backend) {
ggml_metal_t ctx = (ggml_metal_t)backend->context;
// wait for any ongoing async operations to finish
ggml_metal_synchronize(ctx);
ggml_metal_free(ctx);
free(backend);
}
static void ggml_backend_metal_synchronize(ggml_backend_t backend) {
ggml_metal_t ctx = (ggml_metal_t)backend->context;
ggml_metal_synchronize(ctx);
}
static void ggml_backend_metal_set_tensor_async(ggml_backend_t backend, ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
ggml_metal_t ctx = (ggml_metal_t)backend->context;
ggml_metal_set_tensor_async(ctx, tensor, data, offset, size);
}
static void ggml_backend_metal_get_tensor_async(ggml_backend_t backend, const ggml_tensor * tensor, void * data, size_t offset, size_t size) {
ggml_metal_t ctx = (ggml_metal_t)backend->context;
ggml_metal_get_tensor_async(ctx, tensor, data, offset, size);
}
static bool ggml_backend_metal_cpy_tensor_async(ggml_backend_t backend_src, ggml_backend_t backend_dst, const ggml_tensor * src, ggml_tensor * dst) {
return false;
GGML_UNUSED(backend_src);
GGML_UNUSED(backend_dst);
GGML_UNUSED(src);
GGML_UNUSED(dst);
}
static enum ggml_status ggml_backend_metal_graph_compute(ggml_backend_t backend, ggml_cgraph * cgraph) {
ggml_metal_t ctx = (ggml_metal_t)backend->context;
return ggml_metal_graph_compute(ctx, cgraph);
}
static void ggml_backend_metal_graph_optimize(ggml_backend_t backend, ggml_cgraph * cgraph) {
ggml_metal_t ctx = (ggml_metal_t)backend->context;
ggml_metal_graph_optimize(ctx, cgraph);
}
static void ggml_backend_metal_set_n_cb(ggml_backend_t backend, int n_cb) {
GGML_ASSERT(ggml_backend_is_metal(backend));
ggml_metal_t ctx = (ggml_metal_t)backend->context;
ggml_metal_set_n_cb(ctx, n_cb);
}
static ggml_backend_i ggml_backend_metal_i = {
/* .get_name = */ ggml_backend_metal_name,
/* .free = */ ggml_backend_metal_free,
/* .set_tensor_async = */ ggml_backend_metal_set_tensor_async,
/* .get_tensor_async = */ ggml_backend_metal_get_tensor_async,
/* .cpy_tensor_async = */ ggml_backend_metal_cpy_tensor_async, // only needed for multi-GPU setups
/* .synchronize = */ ggml_backend_metal_synchronize,
/* .graph_plan_create = */ NULL,
/* .graph_plan_free = */ NULL,
/* .graph_plan_update = */ NULL,
/* .graph_plan_compute = */ NULL,
/* .graph_compute = */ ggml_backend_metal_graph_compute,
// the events API is needed only for multi-GPU setups, so likely no need to implement it for Metal
// in any case, these docs seem relevant if we ever decide to implement it:
// https://developer.apple.com/documentation/metal/mtlcommandbuffer#Synchronizing-Passes-with-Events
/* .event_record = */ NULL,
/* .event_wait = */ NULL,
/* .graph_optimize = */ ggml_backend_metal_graph_optimize,
};
static ggml_guid_t ggml_backend_metal_guid(void) {
static ggml_guid guid = { 0x81, 0xa1, 0x8b, 0x1e, 0x71, 0xec, 0x79, 0xed, 0x2b, 0x85, 0xdc, 0x8a, 0x61, 0x98, 0x30, 0xe6 };
return &guid;
}
ggml_backend_t ggml_backend_metal_init(void) {
ggml_backend_dev_t dev = ggml_backend_reg_dev_get(ggml_backend_metal_reg(), 0);
ggml_metal_device_t ctx_dev = (ggml_metal_device_t)dev->context;
ggml_metal_t ctx = ggml_metal_init(ctx_dev);
if (ctx == NULL) {
GGML_LOG_ERROR("%s: error: failed to allocate context\n", __func__);
return NULL;
}
ggml_backend_t backend = (ggml_backend_t) malloc(sizeof(ggml_backend));
*backend = {
/* .guid = */ ggml_backend_metal_guid(),
/* .interface = */ ggml_backend_metal_i,
/* .device = */ dev,
/* .context = */ ctx,
};
ggml_backend_metal_set_n_cb(backend, 1);
return backend;
}
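// example of bringing up the backend from client code (illustrative sketch; buffer allocation and
// graph construction are omitted; ggml_backend_graph_compute/ggml_backend_free are the generic
// ggml-backend entry points that dispatch to the interface functions above):
//
//   ggml_backend_t backend = ggml_backend_metal_init();
//   if (backend != NULL) {
//       ggml_backend_graph_compute(backend, gf); // -> ggml_backend_metal_graph_compute
//       ggml_backend_free(backend);              // -> ggml_backend_metal_free
//   }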
bool ggml_backend_is_metal(ggml_backend_t backend) {
return backend != NULL && ggml_guid_matches(backend->guid, ggml_backend_metal_guid());
}
void ggml_backend_metal_set_abort_callback(ggml_backend_t backend, ggml_abort_callback abort_callback, void * user_data) {
GGML_ASSERT(ggml_backend_is_metal(backend));
ggml_metal_t ctx = (ggml_metal_t)backend->context;
ggml_metal_set_abort_callback(ctx, abort_callback, user_data);
}
bool ggml_backend_metal_supports_family(ggml_backend_t backend, int family) {
GGML_ASSERT(ggml_backend_is_metal(backend));
ggml_metal_t ctx = (ggml_metal_t)backend->context;
return ggml_metal_supports_family(ctx, family);
}
void ggml_backend_metal_capture_next_compute(ggml_backend_t backend) {
GGML_ASSERT(ggml_backend_is_metal(backend));
ggml_metal_t ctx = (ggml_metal_t)backend->context;
ggml_metal_capture_next_compute(ctx);
}
// backend device
static const char * ggml_backend_metal_device_get_name(ggml_backend_dev_t dev) {
return "Metal";
GGML_UNUSED(dev);
}
static const char * ggml_backend_metal_device_get_description(ggml_backend_dev_t dev) {
ggml_metal_device_t ctx_dev = (ggml_metal_device_t)dev->context;
return ggml_metal_device_get_props(ctx_dev)->name;
}
static void ggml_backend_metal_device_get_memory(ggml_backend_dev_t dev, size_t * free, size_t * total) {
ggml_metal_device_t ctx_dev = (ggml_metal_device_t)dev->context;
ggml_metal_device_get_memory(ctx_dev, free, total);
}
static enum ggml_backend_dev_type ggml_backend_metal_device_get_type(ggml_backend_dev_t dev) {
return GGML_BACKEND_DEVICE_TYPE_GPU;
GGML_UNUSED(dev);
}
static void ggml_backend_metal_device_get_props(ggml_backend_dev_t dev, ggml_backend_dev_props * props) {
props->name = ggml_backend_metal_device_get_name(dev);
props->description = ggml_backend_metal_device_get_description(dev);
props->type = ggml_backend_metal_device_get_type(dev);
ggml_backend_metal_device_get_memory(dev, &props->memory_free, &props->memory_total);
props->caps = {
/* .async = */ true,
/* .host_buffer = */ false,
/* .buffer_from_host_ptr = */ true,
/* .events = */ false,
};
}
static ggml_backend_t ggml_backend_metal_device_init(ggml_backend_dev_t dev, const char * params) {
ggml_metal_device_t ctx_dev = (ggml_metal_device_t)dev->context;
ggml_metal_t ctx = ggml_metal_init(ctx_dev);
if (ctx == NULL) {
GGML_LOG_ERROR("%s: error: failed to allocate context\n", __func__);
return NULL;
}
ggml_backend_t backend = (ggml_backend_t) malloc(sizeof(ggml_backend));
*backend = {
/* .guid = */ ggml_backend_metal_guid(),
/* .interface = */ ggml_backend_metal_i,
/* .device = */ dev,
/* .context = */ ctx,
};
ggml_backend_metal_set_n_cb(backend, 1);
return backend;
GGML_UNUSED(params);
}
static ggml_backend_buffer_type_t ggml_backend_metal_device_get_buffer_type(ggml_backend_dev_t dev) {
ggml_metal_device_t ctx_dev = (ggml_metal_device_t)dev->context;
const ggml_metal_device_props * props_dev = ggml_metal_device_get_props(ctx_dev);
return props_dev->use_shared_buffers ? ggml_backend_metal_buffer_type_shared() : ggml_backend_metal_buffer_type_private();
}
static ggml_backend_buffer_t ggml_backend_metal_device_buffer_mapped(ggml_backend_dev_t dev, void * ptr, size_t size, size_t max_tensor_size) {
ggml_metal_device_t ctx_dev = (ggml_metal_device_t)dev->context;
ggml_metal_buffer_t res = ggml_metal_buffer_map(ctx_dev, ptr, size, max_tensor_size);
return ggml_backend_buffer_init(ggml_backend_metal_buffer_type_mapped(), ggml_backend_metal_buffer_shared_i, res, size);
}
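// note: this is wired to the generic .buffer_from_host_ptr device API (see ggml_backend_metal_device_i
// below), which is typically used to wrap memory that is already host-allocated (e.g. mmap-ed model
// weights) without copying it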
static bool ggml_backend_metal_device_supports_op(ggml_backend_dev_t dev, const ggml_tensor * op) {
ggml_metal_device_t ctx_dev = (ggml_metal_device_t)dev->context;
return ggml_metal_device_supports_op(ctx_dev, op);
}
static bool ggml_backend_metal_device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) {
return
buft->iface.get_name == ggml_backend_metal_buffer_type_shared_get_name ||
buft->iface.get_name == ggml_backend_metal_buffer_type_private_get_name ||
buft->iface.get_name == ggml_backend_metal_buffer_type_mapped_get_name;
GGML_UNUSED(dev);
}
static int64_t get_op_batch_size(const ggml_tensor * op) {
switch (op->op) {
case GGML_OP_MUL_MAT:
return op->ne[1];
case GGML_OP_MUL_MAT_ID:
return op->ne[2];
default:
return ggml_nrows(op);
}
}
static bool ggml_backend_metal_device_offload_op(ggml_backend_dev_t dev, const ggml_tensor * op) {
ggml_metal_device_t ctx_dev = (ggml_metal_device_t)dev->context;
return (op->op == GGML_OP_MUL_MAT ||
op->op == GGML_OP_MUL_MAT_ID) &&
get_op_batch_size(op) >= ggml_metal_device_get_props(ctx_dev)->op_offload_min_batch_size;
}
static ggml_backend_device_i ggml_backend_metal_device_i = {
/* .get_name = */ ggml_backend_metal_device_get_name,
/* .get_description = */ ggml_backend_metal_device_get_description,
/* .get_memory = */ ggml_backend_metal_device_get_memory,
/* .get_type = */ ggml_backend_metal_device_get_type,
/* .get_props = */ ggml_backend_metal_device_get_props,
/* .init_backend = */ ggml_backend_metal_device_init,
/* .get_buffer_type = */ ggml_backend_metal_device_get_buffer_type,
/* .get_host_buffer_type = */ NULL,
/* .buffer_from_host_ptr = */ ggml_backend_metal_device_buffer_mapped,
/* .supports_op = */ ggml_backend_metal_device_supports_op,
/* .supports_buft = */ ggml_backend_metal_device_supports_buft,
/* .offload_op = */ ggml_backend_metal_device_offload_op,
/* .event_new = */ NULL,
/* .event_free = */ NULL,
/* .event_synchronize = */ NULL,
};
// backend registry
static const char * ggml_backend_metal_reg_get_name(ggml_backend_reg_t reg) {
return "Metal";
GGML_UNUSED(reg);
}
static size_t ggml_backend_metal_reg_device_count(ggml_backend_reg_t reg) {
return 1;
GGML_UNUSED(reg);
}
static ggml_backend_dev_t ggml_backend_metal_reg_device_get(ggml_backend_reg_t reg, size_t index) {
GGML_ASSERT(index == 0);
return &g_ggml_metal_device;
GGML_UNUSED(reg);
GGML_UNUSED(index);
}
static ggml_backend_feature g_ggml_backend_metal_features[] = {
#if defined(GGML_METAL_EMBED_LIBRARY)
{ "EMBED_LIBRARY", "1" },
#endif
{ NULL, NULL },
};
static ggml_backend_feature * ggml_backend_metal_get_features(ggml_backend_reg_t reg) {
return g_ggml_backend_metal_features;
GGML_UNUSED(reg);
}
static void * ggml_backend_metal_get_proc_address(ggml_backend_reg_t reg, const char * name) {
if (strcmp(name, "ggml_backend_get_features") == 0) {
return (void *)ggml_backend_metal_get_features;
}
return NULL;
GGML_UNUSED(reg);
}
static ggml_backend_reg_i ggml_backend_metal_reg_i = {
/* .get_name = */ ggml_backend_metal_reg_get_name,
/* .device_count = */ ggml_backend_metal_reg_device_count,
/* .device_get = */ ggml_backend_metal_reg_device_get,
/* .get_proc_address = */ ggml_backend_metal_get_proc_address,
};
ggml_backend_reg_t ggml_backend_metal_reg(void) {
{
g_ggml_metal_reg = {
/* .api_version = */ GGML_BACKEND_API_VERSION,
/* .iface = */ ggml_backend_metal_reg_i,
/* .context = */ NULL,
};
g_ggml_metal_device = {
/* .iface = */ ggml_backend_metal_device_i,
/* .reg = */ &g_ggml_metal_reg,
/* .context = */ ggml_metal_device_get(),
};
}
return &g_ggml_metal_reg;
}
GGML_BACKEND_DL_IMPL(ggml_backend_metal_reg)

File diff suppressed because it is too large