From f6ac08424f490b70b94057deb28ab70e8ebc549c Mon Sep 17 00:00:00 2001 From: bepis Date: Mon, 3 Nov 2025 13:27:40 -0800 Subject: [PATCH 01/39] Feat: Added vulkan circular tiling support --- ggml/include/ggml.h | 82 +++++++++++ ggml/src/ggml-vulkan/ggml-vulkan.cpp | 12 ++ .../ggml-vulkan/vulkan-shaders/conv2d_dw.comp | 76 ++++++++--- .../ggml-vulkan/vulkan-shaders/conv2d_mm.comp | 17 ++- ggml/src/ggml-vulkan/vulkan-shaders/pad.comp | 26 +++- ggml/src/ggml.c | 127 +++++++++++++++++- 6 files changed, 310 insertions(+), 30 deletions(-) diff --git a/ggml/include/ggml.h b/ggml/include/ggml.h index 2311cdabe3b..bb96e1bb5db 100644 --- a/ggml/include/ggml.h +++ b/ggml/include/ggml.h @@ -1943,6 +1943,18 @@ extern "C" { int d0, // dilation dimension 0 int d1); // dilation dimension 1 + + GGML_API struct ggml_tensor * ggml_conv_2d_circular( + struct ggml_context * ctx, + struct ggml_tensor * a, // convolution kernel + struct ggml_tensor * b, // data + int s0, // stride dimension 0 + int s1, // stride dimension 1 + int p0, // padding dimension 0 + int p1, // padding dimension 1 + int d0, // dilation dimension 0 + int d1); // dilation dimension 1 + GGML_API struct ggml_tensor * ggml_im2col_3d( struct ggml_context * ctx, struct ggml_tensor * a, @@ -2016,6 +2028,19 @@ extern "C" { int d0, // dilation dimension 0 int d1); // dilation dimension 1 + + // depthwise (via im2col and mul_mat) + GGML_API struct ggml_tensor * ggml_conv_2d_dw_circular( + struct ggml_context * ctx, + struct ggml_tensor * a, // convolution kernel + struct ggml_tensor * b, // data + int s0, // stride dimension 0 + int s1, // stride dimension 1 + int p0, // padding dimension 0 + int p1, // padding dimension 1 + int d0, // dilation dimension 0 + int d1); // dilation dimension 1 + // Depthwise 2D convolution // may be faster than ggml_conv_2d_dw, but not available in all backends // a: KW KH 1 C convolution kernel @@ -2032,12 +2057,35 @@ extern "C" { int dilation0, int dilation1); + // Depthwise 2D convolution (on a torus) + // may be faster than ggml_conv_2d_dw, but not available in all backends + // a: KW KH 1 C convolution kernel + // b: W H C N input data + // res: W_out H_out C N + GGML_API struct ggml_tensor * ggml_conv_2d_dw_direct_circular( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b, + int stride0, + int stride1, + int pad0, + int pad1, + int dilation0, + int dilation1); + GGML_API struct ggml_tensor * ggml_conv_transpose_2d_p0( struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b, int stride); + // circular (on a torus) + GGML_API struct ggml_tensor * ggml_conv_transpose_2d_p0_circular( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b, + int stride); + GGML_API struct ggml_tensor * ggml_conv_2d_direct( struct ggml_context * ctx, struct ggml_tensor * a, // convolution kernel [KW, KH, IC, OC] @@ -2048,6 +2096,17 @@ extern "C" { int p1, // padding dimension 1 int d0, // dilation dimension 0 int d1); // dilation dimension 1 + + GGML_API struct ggml_tensor * ggml_conv_2d_direct_circular( + struct ggml_context * ctx, + struct ggml_tensor * a, // convolution kernel [KW, KH, IC, OC] + struct ggml_tensor * b, // input data [W, H, C, N] + int s0, // stride dimension 0 + int s1, // stride dimension 1 + int p0, // padding dimension 0 + int p1, // padding dimension 1 + int d0, // dilation dimension 0 + int d1); // dilation dimension 1 GGML_API struct ggml_tensor * ggml_conv_3d_direct( struct ggml_context * ctx, @@ -2156,6 +2215,15 @@ extern "C" { int p2, 
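            /* Illustrative usage sketch, not part of the header above (tensor
             * names are hypothetical): the circular variants keep the same
             * signature as their zero-padded counterparts, e.g.
             *
             *   // kernel: [KW, KH, IC, OC], input: [W, H, C, N]
             *   struct ggml_tensor * out = ggml_conv_2d_circular(
             *       ctx, kernel, input,
             *       1, 1,    // stride
             *       1, 1,    // padding
             *       1, 1);   // dilation
             *
             * Taps that fall outside the input wrap around to the opposite
             * edge (periodic / torus boundary) instead of reading zeros. */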
int p3); + // pad each dimension with values on the other side of the torus (looping around) + GGML_API struct ggml_tensor * ggml_pad_circular( + struct ggml_context * ctx, + struct ggml_tensor * a, + int p0, + int p1, + int p2, + int p3); + GGML_API struct ggml_tensor * ggml_pad_ext( struct ggml_context * ctx, struct ggml_tensor * a, @@ -2169,6 +2237,20 @@ extern "C" { int rp3 ); + // circular padding + GGML_API struct ggml_tensor * ggml_pad_ext_circular( + struct ggml_context * ctx, + struct ggml_tensor * a, + int lp0, + int rp0, + int lp1, + int rp1, + int lp2, + int rp2, + int lp3, + int rp3 + ); + // pad each dimension with reflection: [a, b, c, d] -> [b, a, b, c, d, c] GGML_API struct ggml_tensor * ggml_pad_reflect_1d( struct ggml_context * ctx, diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index 8d1a85c9693..69b58e32950 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -940,6 +940,7 @@ struct vk_op_pad_push_constants { uint32_t ne00; uint32_t ne01; uint32_t ne02; uint32_t ne03; uint32_t nb00; uint32_t nb01; uint32_t nb02; uint32_t nb03; uint32_t ne10; uint32_t ne11; uint32_t ne12; uint32_t ne13; uint32_t nb10; uint32_t nb11; uint32_t nb12; uint32_t nb13; uint32_t misalign_offsets; + uint32_t circular; uint32_t lp0; uint32_t rp0; uint32_t lp1; uint32_t rp1; @@ -982,6 +983,7 @@ static vk_op_pad_push_constants vk_op_pad_push_constants_init(const ggml_tensor p.rp2 = dst->op_params[5]; p.lp3 = dst->op_params[6]; p.rp3 = dst->op_params[7]; + p.circular = dst->op_params[8]; return p; // fastdiv values and offsets are initialized later in ggml_vk_op } @@ -1249,6 +1251,8 @@ struct vk_op_conv2d_push_constants { uint32_t KWKHmp; uint32_t KWKHL; uint32_t OWmp; uint32_t OWL; uint32_t OWOHmp; uint32_t OWOHL; + + uint32_t circular; }; template <> void init_pushconst_fastdiv(vk_op_conv2d_push_constants &p) { @@ -1297,6 +1301,8 @@ struct vk_op_conv_transpose_2d_push_constants { uint32_t OWOHmp; uint32_t OWOHL; uint32_t s0mp; uint32_t s0L; uint32_t s1mp; uint32_t s1L; + + uint32_t circular; }; template <> void init_pushconst_fastdiv(vk_op_conv_transpose_2d_push_constants &p) { @@ -1325,6 +1331,7 @@ struct vk_op_conv2d_dw_push_constants { int32_t pad_y; int32_t dilation_x; int32_t dilation_y; + uint32_t circular; }; struct vk_op_upscale_push_constants { @@ -10420,6 +10427,8 @@ static void ggml_vk_conv_2d(ggml_backend_vk_context * ctx, vk_context & subctx, p.nb2 = static_cast(nb2 / nb0); p.nb3 = static_cast(nb3 / nb0); + p.circular = static_cast(dst->op_params[6]); + GGML_ASSERT(ne03 == ne2); GGML_ASSERT(ne02 == ne12); @@ -10469,6 +10478,8 @@ static void ggml_vk_conv_transpose_2d(ggml_backend_vk_context * ctx, vk_context p.nb2 = static_cast(nb2 / nb0); p.nb3 = static_cast(nb3 / nb0); + p.circular = static_cast(dst->op_params[1]); + GGML_ASSERT(ne02 == ne2); GGML_ASSERT(ne03 == ne12); @@ -10492,6 +10503,7 @@ static void ggml_vk_conv_2d_dw(ggml_backend_vk_context * ctx, vk_context& subctx p.pad_y = dst->op_params[3]; p.dilation_x = dst->op_params[4]; p.dilation_y = dst->op_params[5]; + p.circular = dst->op_params[6]; GGML_ASSERT(src0->ne[3] == p.channels); GGML_ASSERT(src1->ne[3] == p.batches); diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/conv2d_dw.comp b/ggml/src/ggml-vulkan/vulkan-shaders/conv2d_dw.comp index 70a301488eb..88bd1d7a755 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/conv2d_dw.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/conv2d_dw.comp @@ -19,6 +19,7 @@ layout (push_constant) uniform parameter 
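// Note: the `circular` field added to this push-constant block is filled on
// the host from dst->op_params[6] (see ggml_vk_conv_2d_dw above) and selects
// between the original zero-padding path and the wrap/torus path below, where
//   wrap_coord(-1, src_w) == src_w - 1 and wrap_coord(src_w, src_w) == 0.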
int pad_y; int dilation_x; int dilation_y; + uint circular; } p; layout (binding = 0) readonly buffer A {A_TYPE knl_data[];}; @@ -27,6 +28,10 @@ layout (binding = 2) writeonly buffer D {D_TYPE dst_data[];}; layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in; +uint32_t wrap_coord(int coord, uint32_t size) { + return uint32_t((uint(coord + int(size))) % size); +} + FLOAT_TYPE conv_2d_dw_whcn(uint idx) { uint i0 = idx / p.dst_w; uint dst_x = idx - i0 * p.dst_w; @@ -39,19 +44,35 @@ FLOAT_TYPE conv_2d_dw_whcn(uint idx) { uint knl_i = c * p.knl_h * p.knl_w; FLOAT_TYPE sum = 0.0; - for (uint knl_y = 0; knl_y < p.knl_h; ++knl_y) { - uint src_y = dst_y * p.stride_y + knl_y * p.dilation_y - p.pad_y; - if (src_y >= p.src_h) { // src_y < 0 will wrap to a large unsigned int - continue; + + if (p.circular != 0u) { + for (uint knl_y = 0; knl_y < p.knl_h; ++knl_y) { + int raw_y = int(dst_y) * p.stride_y + int(knl_y) * p.dilation_y - p.pad_y; + uint src_y = wrap_coord(raw_y, p.src_h); + for (uint knl_x = 0; knl_x < p.knl_w; ++knl_x) { + int raw_x = int(dst_x) * p.stride_x + int(knl_x) * p.dilation_x - p.pad_x; + uint src_x = wrap_coord(raw_x, p.src_w); + FLOAT_TYPE v = FLOAT_TYPE(src_data[src_i + src_y * p.src_w + src_x]); + FLOAT_TYPE k = FLOAT_TYPE(knl_data[knl_i + knl_y * p.knl_w + knl_x]); + sum = fma(v, k, sum); + } } - for (uint knl_x = 0; knl_x < p.knl_w; ++knl_x) { - uint src_x = dst_x * p.stride_x + knl_x * p.dilation_x - p.pad_x; - if (src_x >= p.src_w) { // src_x < 0 will wrap to a large unsigned int + } + else { + for (uint knl_y = 0; knl_y < p.knl_h; ++knl_y) { + uint src_y = dst_y * p.stride_y + knl_y * p.dilation_y - p.pad_y; + if (src_y >= p.src_h) { // src_y < 0 will wrap to a large unsigned int continue; } - FLOAT_TYPE v = FLOAT_TYPE(src_data[src_i + src_y * p.src_w + src_x]); - FLOAT_TYPE k = FLOAT_TYPE(knl_data[knl_i + knl_y * p.knl_w + knl_x]); - sum = fma(v, k, sum); + for (uint knl_x = 0; knl_x < p.knl_w; ++knl_x) { + uint src_x = dst_x * p.stride_x + knl_x * p.dilation_x - p.pad_x; + if (src_x >= p.src_w) { // src_x < 0 will wrap to a large unsigned int + continue; + } + FLOAT_TYPE v = FLOAT_TYPE(src_data[src_i + src_y * p.src_w + src_x]); + FLOAT_TYPE k = FLOAT_TYPE(knl_data[knl_i + knl_y * p.knl_w + knl_x]); + sum = fma(v, k, sum); + } } } return sum; @@ -70,19 +91,34 @@ FLOAT_TYPE conv_2d_dw_cwhn(uint idx) { uint knl_row = p.knl_w * p.channels; FLOAT_TYPE sum = 0.0; - for (uint knl_y = 0; knl_y < p.knl_h; ++knl_y) { - uint src_y = dst_y * p.stride_y + knl_y * p.dilation_y - p.pad_y; - if (src_y >= p.src_h) { // src_y < 0 will wrap to a large unsigned int - continue; + if (p.circular != 0u) { + for (uint knl_y = 0; knl_y < p.knl_h; ++knl_y) { + int raw_y = int(dst_y) * p.stride_y + int(knl_y) * p.dilation_y - p.pad_y; + uint src_y = wrap_coord(raw_y, p.src_h); + for (uint knl_x = 0; knl_x < p.knl_w; ++knl_x) { + int raw_x = int(dst_x) * p.stride_x + int(knl_x) * p.dilation_x - p.pad_x; + uint src_x = wrap_coord(raw_x, p.src_w); + FLOAT_TYPE v = FLOAT_TYPE(src_data[src_i + src_y * src_row + src_x * p.channels + c]); + FLOAT_TYPE k = FLOAT_TYPE(knl_data[ knl_y * knl_row + knl_x * p.channels + c]); + sum = fma(v, k, sum); + } } - for (uint knl_x = 0; knl_x < p.knl_w; ++knl_x) { - uint src_x = dst_x * p.stride_x + knl_x * p.dilation_x - p.pad_x; - if (src_x >= p.src_w) { // src_x < 0 will wrap to a large unsigned int + } + else { + for (uint knl_y = 0; knl_y < p.knl_h; ++knl_y) { + uint src_y = dst_y * p.stride_y + knl_y * p.dilation_y - p.pad_y; + if (src_y >= p.src_h) 
{ // src_y < 0 will wrap to a large unsigned int continue; } - FLOAT_TYPE v = FLOAT_TYPE(src_data[src_i + src_y * src_row + src_x * p.channels + c]); - FLOAT_TYPE k = FLOAT_TYPE(knl_data[ knl_y * knl_row + knl_x * p.channels + c]); - sum = fma(v, k, sum); + for (uint knl_x = 0; knl_x < p.knl_w; ++knl_x) { + uint src_x = dst_x * p.stride_x + knl_x * p.dilation_x - p.pad_x; + if (src_x >= p.src_w) { // src_x < 0 will wrap to a large unsigned int + continue; + } + FLOAT_TYPE v = FLOAT_TYPE(src_data[src_i + src_y * src_row + src_x * p.channels + c]); + FLOAT_TYPE k = FLOAT_TYPE(knl_data[ knl_y * knl_row + knl_x * p.channels + c]); + sum = fma(v, k, sum); + } } } return sum; diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/conv2d_mm.comp b/ggml/src/ggml-vulkan/vulkan-shaders/conv2d_mm.comp index 0367e80bbfa..c18fd92ccaf 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/conv2d_mm.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/conv2d_mm.comp @@ -70,6 +70,8 @@ layout(push_constant) uniform parameter { uint32_t s0mp; uint32_t s0L; uint32_t s1mp; uint32_t s1L; #endif + + uint32_t circular; } p; @@ -174,6 +176,10 @@ ACC_TYPE perElemOpStore(const in uint32_t r, const in uint32_t c, const in ACC_T } #endif +uint32_t wrap_coord(int coord, uint32_t size) { + return uint32_t((uint(coord + int(size))) % size); +} + void main() { #ifdef COOPMAT2 coopmat matC; @@ -274,7 +280,8 @@ void main() { KH_idx_b = fastdiv(CRS_remainder, p.KWmp, p.KWL); // divide by p.KW; KW_idx_b = CRS_remainder - KH_idx_b * p.KW; #endif - + uint32_t H_pos; + uint32_t W_pos; #ifdef TRANSPOSE uint32_t H_idx_x_s1 = OH_idx - KH_idx_b * p.d1 + p.p1; uint32_t W_idx_x_s0 = OW_idx - KW_idx_b * p.d0 + p.p0; @@ -284,13 +291,15 @@ void main() { uint32_t H_idx = OH_idx * p.s1 + KH_idx_b * p.d1 - p.p1; uint32_t W_idx = OW_idx * p.s0 + KW_idx_b * p.d0 - p.p0; #endif + H_pos = (p.circular != 0) ? wrap_coord(int(H_idx), p.H) : H_idx; + W_pos = (p.circular != 0) ? wrap_coord(int(W_idx), p.W) : W_idx; uint32_t src_idx = - min(max(W_idx + H_idx * p.nb11 + Cin_idx_b * p.nb12 + N_idx * p.nb13, 0), p.Cin * p.N * p.W * p.H - 1); + min(max(W_pos + H_pos * p.nb11 + Cin_idx_b * p.nb12 + N_idx * p.nb13, 0), p.Cin * p.N * p.W * p.H - 1); float val = src_data[src_idx]; if (CRS_idx_b >= CRS || NPQ_idx >= NPQ - || H_idx >= p.H || W_idx >= p.W // Lower bound checks aren't necessary. (idx >= 0x80000000 for such case) + || H_pos >= p.H || W_pos >= p.W // Lower bound checks aren't necessary. 
(idx >= 0x80000000 for such case) #ifdef TRANSPOSE - || (H_idx_x_s1 - H_idx * p.s1 != 0) || (W_idx_x_s0 - W_idx * p.s0 != 0) + || (H_idx_x_s1 - H_pos * p.s1 != 0) || (W_idx_x_s0 - W_pos * p.s0 != 0) #endif ) { val = 0.0; diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/pad.comp b/ggml/src/ggml-vulkan/vulkan-shaders/pad.comp index f3c81768727..f2fd5929bf4 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/pad.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/pad.comp @@ -8,6 +8,7 @@ layout (push_constant) uniform parameter uint ne00; uint ne01; uint ne02; uint ne03; uint nb00; uint nb01; uint nb02; uint nb03; uint ne10; uint ne11; uint ne12; uint ne13; uint nb10; uint nb11; uint nb12; uint nb13; uint misalign_offsets; + uint circular; uint lp0; uint rp0; uint lp1; uint rp1; @@ -18,6 +19,10 @@ layout (push_constant) uniform parameter uint get_aoffset() { return p.misalign_offsets >> 16; } uint get_doffset() { return p.misalign_offsets & 0xFFFF; } +uint wrap_coord(int coord, uint size) { + return (uint(coord + int(size))) % size; // add size to avoid issues with negative +} + layout (binding = 0) readonly buffer A {A_TYPE data_a[];}; layout (binding = 1) writeonly buffer D {D_TYPE data_d[];}; @@ -40,10 +45,21 @@ void main() { const uint src0_idx = (i3 - p.lp3)*p.nb03 + (i2 - p.lp2)*p.nb02 + (i1 - p.lp1)*p.nb01 + (i0 - p.lp0)*p.nb00; const uint dst_idx = i3*p.nb13 + i2*p.nb12 + i1*p.nb11 + i0*p.nb10; - const bool is_src0 = i0 >= p.lp0 && i0 < p.ne10 - p.rp0 && - i1 >= p.lp1 && i1 < p.ne11 - p.rp1 && - i2 >= p.lp2 && i2 < p.ne12 - p.rp2 && - i3 >= p.lp3 && i3 < p.ne13 - p.rp3; + if (p.circular != 0u) { + const uint ci0 = wrap_coord(int(i0) - int(p.lp0), p.ne00); + const uint ci1 = wrap_coord(int(i1) - int(p.lp1), p.ne01); + const uint ci2 = wrap_coord(int(i2) - int(p.lp2), p.ne02); + const uint ci3 = wrap_coord(int(i3) - int(p.lp3), p.ne03); + const uint circular_src_idx = ci3*p.nb03 + ci2*p.nb02 + ci1*p.nb01 + ci0*p.nb00; + data_d[get_doffset() + dst_idx] = D_TYPE(data_a[get_aoffset() + circular_src_idx]); + } + else { + const bool is_src0 = i0 >= p.lp0 && i0 < p.ne10 - p.rp0 && + i1 >= p.lp1 && i1 < p.ne11 - p.rp1 && + i2 >= p.lp2 && i2 < p.ne12 - p.rp2 && + i3 >= p.lp3 && i3 < p.ne13 - p.rp3; + data_d[get_doffset() + dst_idx] = D_TYPE(is_src0 ? data_a[get_aoffset() + src0_idx] : 0.0f); + } + - data_d[get_doffset() + dst_idx] = D_TYPE(is_src0 ? 
data_a[get_aoffset() + src0_idx] : 0.0f); } diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c index 9be35c1be84..1113164f488 100644 --- a/ggml/src/ggml.c +++ b/ggml/src/ggml.c @@ -4457,6 +4457,29 @@ struct ggml_tensor * ggml_conv_2d( return result; } + +// ggml_conv_2d_circular + +// a: [OC,IC, KH, KW] +// b: [N, IC, IH, IW] +// result: [N, OC, OH, OW] +struct ggml_tensor * ggml_conv_2d_circular( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b, + int s0, + int s1, + int p0, + int p1, + int d0, + int d1) { + if (p0 == 0 && p1 == 0) { + return ggml_conv_2d(ctx, a, b, s0, s1, p0, p1, d0, d1); + } + struct ggml_tensor * b_padded = ggml_pad_ext_circular(ctx, b, p0, p0, p1, p1, 0, 0, 0, 0); + return ggml_conv_2d(ctx, a, b_padded, s0, s1, 0, 0, d0, d1); +} + // a: [OC*IC, KD, KH, KW] // b: [N*IC, ID, IH, IW] // result: [N*OD, OH, OW, IC * KD * KH * KW] @@ -4585,6 +4608,25 @@ struct ggml_tensor * ggml_conv_2d_dw( return result; } +// ggml_conv_2d_dw_circular + +struct ggml_tensor * ggml_conv_2d_dw_circular( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b, + int s0, + int s1, + int p0, + int p1, + int d0, + int d1) { + if (p0 == 0 && p1 == 0) { + return ggml_conv_2d_dw(ctx, a, b, s0, s1, p0, p1, d0, d1); + } + struct ggml_tensor * b_padded = ggml_pad_ext_circular(ctx, b, p0, p0, p1, p1, 0, 0, 0, 0); + return ggml_conv_2d_dw(ctx, a, b_padded, s0, s1, 0, 0, d0, d1); +} + // ggml_conv_2d_dw_direct struct ggml_tensor * ggml_conv_2d_dw_direct( @@ -4616,7 +4658,9 @@ struct ggml_tensor * ggml_conv_2d_dw_direct( result->nb[2] = type_size; } - int32_t params[] = { stride0, stride1, pad0, pad1, dilation0, dilation1 }; + int circular = 0; // default not circular + + int32_t params[] = { stride0, stride1, pad0, pad1, dilation0, dilation1, circular }; ggml_set_op_params(result, params, sizeof(params)); result->op = GGML_OP_CONV_2D_DW; @@ -4625,6 +4669,24 @@ struct ggml_tensor * ggml_conv_2d_dw_direct( return result; } +// ggml_conv_2d_dw_direct_circular + +struct ggml_tensor * ggml_conv_2d_dw_direct_circular( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b, + int stride0, + int stride1, + int pad0, + int pad1, + int dilation0, + int dilation1) { + struct ggml_tensor * result = + ggml_conv_2d_dw_direct(ctx, a, b, stride0, stride1, pad0, pad1, dilation0, dilation1); + ggml_set_op_params_i32(result, 6, 1); // circular + return result; +} + // ggml_conv_2d_direct struct ggml_tensor * ggml_conv_2d_direct( @@ -4655,6 +4717,7 @@ struct ggml_tensor * ggml_conv_2d_direct( ggml_set_op_params_i32(result, 3, p1); ggml_set_op_params_i32(result, 4, d0); ggml_set_op_params_i32(result, 5, d1); + ggml_set_op_params_i32(result, 6, 0); // default not circularc result->op = GGML_OP_CONV_2D; result->src[0] = a; @@ -4663,6 +4726,23 @@ struct ggml_tensor * ggml_conv_2d_direct( return result; } +// ggml_conv_2d_direct_circular + +struct ggml_tensor * ggml_conv_2d_direct_circular( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b, + int s0, + int s1, + int p0, + int p1, + int d0, + int d1) { + struct ggml_tensor * result = ggml_conv_2d_direct(ctx, a, b, s0, s1, p0, p1, d0, d1); + ggml_set_op_params_i32(result, 6, 1); // circular + return result; +} + // ggml_conv_3d_direct struct ggml_tensor * ggml_conv_3d_direct( @@ -4735,6 +4815,7 @@ struct ggml_tensor * ggml_conv_transpose_2d_p0( struct ggml_tensor* result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne); ggml_set_op_params_i32(result, 0, stride); + 
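    // Where the circular flag lives in op_params for the ops touched by this
    // patch: GGML_OP_PAD uses index 8 (after the eight lp/rp values),
    // GGML_OP_CONV_2D and GGML_OP_CONV_2D_DW use index 6 (after stride,
    // padding and dilation), and GGML_OP_CONV_TRANSPOSE_2D uses index 1
    // (right after the stride), as set below.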
ggml_set_op_params_i32(result, 1, 0); // circular default off result->op = GGML_OP_CONV_TRANSPOSE_2D; result->src[0] = a; @@ -4743,6 +4824,18 @@ struct ggml_tensor * ggml_conv_transpose_2d_p0( return result; } +// ggml_conv_transpose_2d_p0_circular + +struct ggml_tensor * ggml_conv_transpose_2d_p0_circular( + struct ggml_context * ctx, + struct ggml_tensor * a, + struct ggml_tensor * b, + int stride) { + struct ggml_tensor * result = ggml_conv_transpose_2d_p0(ctx, a, b, stride); + ggml_set_op_params_i32(result, 1, 1); // circular enabled + return result; +} + // ggml_pool_* static int64_t ggml_calc_pool_output_size(int64_t ins, int ks, int s, float p) { @@ -4894,6 +4987,18 @@ struct ggml_tensor * ggml_pad( return ggml_pad_ext(ctx, a, 0, p0, 0, p1, 0, p2, 0, p3); } +// ggml_pad_circular + +struct ggml_tensor * ggml_pad_circular( + struct ggml_context * ctx, + struct ggml_tensor * a, + int p0, + int p1, + int p2, + int p3) { + return ggml_pad_ext_circular(ctx, a, 0, p0, 0, p1, 0, p2, 0, p3); +} + struct ggml_tensor * ggml_pad_ext( struct ggml_context * ctx, struct ggml_tensor * a, @@ -4920,6 +5025,7 @@ struct ggml_tensor * ggml_pad_ext( ggml_set_op_params_i32(result, 5, rp2); ggml_set_op_params_i32(result, 6, lp3); ggml_set_op_params_i32(result, 7, rp3); + ggml_set_op_params_i32(result, 8, 0); // not circular by default result->op = GGML_OP_PAD; @@ -4928,6 +5034,25 @@ struct ggml_tensor * ggml_pad_ext( return result; } +// ggml_pad_ext_circular + +struct ggml_tensor * ggml_pad_ext_circular( + struct ggml_context * ctx, + struct ggml_tensor * a, + int lp0, + int rp0, + int lp1, + int rp1, + int lp2, + int rp2, + int lp3, + int rp3 + ) { + struct ggml_tensor * result = ggml_pad_ext(ctx, a, lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3); + ggml_set_op_params_i32(result, 8, 1); // circular + return result; +} + // ggml_pad_reflect_1d struct ggml_tensor * ggml_pad_reflect_1d( From d7f5958b9e3f3553e211e9cdab83f3c222c504fa Mon Sep 17 00:00:00 2001 From: bepis Date: Mon, 3 Nov 2025 13:50:52 -0800 Subject: [PATCH 02/39] Feat: Added cpu circular --- ggml/src/ggml-cpu/ops.cpp | 324 +++++++++++++++++++++++++++----------- 1 file changed, 232 insertions(+), 92 deletions(-) diff --git a/ggml/src/ggml-cpu/ops.cpp b/ggml/src/ggml-cpu/ops.cpp index f66d36ff62c..92fd172769a 100644 --- a/ggml/src/ggml-cpu/ops.cpp +++ b/ggml/src/ggml-cpu/ops.cpp @@ -6674,6 +6674,10 @@ static void ggml_call_mul_mat(ggml_type type, const ggml_compute_params * params ggml_compute_forward_mul_mat(params, &dst); } +static inline int64_t ggml_wrap_coord(int64_t coord, int64_t size) { + return (coord + size) % size; // adding size avoids negative number weirdness +} + // ggml_compute_forward_conv_2d static void ggml_compute_forward_conv_2d_impl(const ggml_compute_params * params, @@ -6694,6 +6698,7 @@ static void ggml_compute_forward_conv_2d_impl(const ggml_compute_params * params const int32_t pad_y = dst->op_params[3]; const int32_t dilation_x = dst->op_params[4]; const int32_t dilation_y = dst->op_params[5]; + const bool circular = dst->op_params[6]; const int64_t c_in = src->ne[2]; const int64_t c_out = kernel->ne[3]; @@ -6734,40 +6739,73 @@ static void ggml_compute_forward_conv_2d_impl(const ggml_compute_params * params const int64_t patch_end = std::min(patch_start + patch_per_thread, patch_end_batch); //im2col for a patch - for (int64_t p = patch_start; p < patch_end; ++p) { - const int64_t batch_n = p / (dst_w * dst_h); - const int64_t src_x = (p / dst_w) % dst_h; - const int64_t src_y = p % dst_w; + if (circular == 0) { + for 
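        // Both circular branches in this file rely on ggml_wrap_coord() above.
        // Its (coord + size) % size form assumes coord >= -size, i.e. a tap is
        // never more than one full period out of range; the CUDA kernels later
        // in this series use the fully general ((coord % size) + size) % size.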
(int64_t p = patch_start; p < patch_end; ++p) { + const int64_t batch_n = p / (dst_w * dst_h); + const int64_t src_x = (p / dst_w) % dst_h; + const int64_t src_y = p % dst_w; - const float * src_base = (const float *)((const char *)src_data + batch_n * src->nb[3]); - char * dst_row = (char *) tmp + (p % patches_per_batch) * knl_n * traits->type_size; + const float * src_base = (const float *)((const char *)src_data + batch_n * src->nb[3]); + char * dst_row = (char *) tmp + (p % patches_per_batch) * knl_n * traits->type_size; - for (int64_t ic = 0; ic < c_in; ++ic) { - for (int64_t ky = 0; ky < knl_h; ++ky) { - for (int64_t kx = 0; kx < knl_w; ++kx) { - const int64_t sy = src_x * stride_y + ky * dilation_y - pad_y; - const int64_t sx = src_y * stride_x + kx * dilation_x - pad_x; + for (int64_t ic = 0; ic < c_in; ++ic) { + for (int64_t ky = 0; ky < knl_h; ++ky) { + for (int64_t kx = 0; kx < knl_w; ++kx) { + const int64_t sy = src_x * stride_y + ky * dilation_y - pad_y; + const int64_t sx = src_y * stride_x + kx * dilation_x - pad_x; - int64_t dst_idx = ic * (knl_h * knl_w) + ky * knl_w + kx; + int64_t dst_idx = ic * (knl_h * knl_w) + ky * knl_w + kx; - float src_val; - if (sy < 0 || sy >= src_h || sx < 0 || sx >= src_w) { - src_val = 0.0f; - } else { - const float * src_ptr = (const float *)((const char *)src_base + sx * src->nb[0] + sy * src->nb[1] + ic * src->nb[2]); - src_val = *src_ptr; + float src_val; + if (sy < 0 || sy >= src_h || sx < 0 || sx >= src_w) { + src_val = 0.0f; + } else { + const float * src_ptr = (const float *)((const char *)src_base + sx * src->nb[0] + sy * src->nb[1] + ic * src->nb[2]); + src_val = *src_ptr; + } + + char * element_ptr = dst_row + dst_idx * traits->type_size; + if (kernel_type == GGML_TYPE_F32) { + *(float *) element_ptr = src_val; + } else if (kernel_type == GGML_TYPE_F16) { + *(ggml_fp16_t *) element_ptr = GGML_CPU_FP32_TO_FP16(src_val); + } } + } + } + } // patches handled by this thread + } + else { + for (int64_t p = patch_start; p < patch_end; ++p) { + const int64_t batch_n = p / (dst_w * dst_h); + const int64_t src_x = (p / dst_w) % dst_h; + const int64_t src_y = p % dst_w; + + const float * src_base = (const float *)((const char *)src_data + batch_n * src->nb[3]); + char * dst_row = (char *) tmp + (p % patches_per_batch) * knl_n * traits->type_size; + + for (int64_t ic = 0; ic < c_in; ++ic) { + for (int64_t ky = 0; ky < knl_h; ++ky) { + for (int64_t kx = 0; kx < knl_w; ++kx) { + const int64_t sy = ggml_wrap_coord(src_x * stride_y + ky * dilation_y - pad_y, src_h); + const int64_t sx = ggml_wrap_coord(src_y * stride_x + kx * dilation_x - pad_x, src_w); + + int64_t dst_idx = ic * (knl_h * knl_w) + ky * knl_w + kx; - char * element_ptr = dst_row + dst_idx * traits->type_size; - if (kernel_type == GGML_TYPE_F32) { - *(float *) element_ptr = src_val; - } else if (kernel_type == GGML_TYPE_F16) { - *(ggml_fp16_t *) element_ptr = GGML_CPU_FP32_TO_FP16(src_val); + const float * src_ptr = (const float *)((const char *)src_base + sx * src->nb[0] + sy * src->nb[1] + ic * src->nb[2]); + float src_val = *src_ptr; + char * element_ptr = dst_row + dst_idx * traits->type_size; + if (kernel_type == GGML_TYPE_F32) { + *(float *) element_ptr = src_val; + } else if (kernel_type == GGML_TYPE_F16) { + *(ggml_fp16_t *) element_ptr = GGML_CPU_FP32_TO_FP16(src_val); + } } } } - } - } // patches handled by this thread + } // patches handled by this thread + } + ggml_barrier(params->threadpool); @@ -7066,6 +7104,7 @@ struct ggml_conv_2d_dw_params { int pad_y; int 
dilation_x; int dilation_y; + int circular; }; static void ggml_compute_forward_conv_2d_dw_cwhn( @@ -7091,57 +7130,103 @@ static void ggml_compute_forward_conv_2d_dw_cwhn( const int64_t c_pkg_end = 0; #endif - for (int64_t row = row_start; row < row_end; ++row) { - const int64_t dst_y = row % p.dst_h; - const float * src_data = (const float *)src->data + (row / p.dst_h) * p.src_w * p.src_h * c; - for (int64_t dst_x = 0; dst_x < p.dst_w; ++dst_x) { - float * dst_data = (float *)dst->data + (row * p.dst_w + dst_x) * c; - const int64_t src_y_base = dst_y * p.stride_y - p.pad_y; - const int64_t src_x_base = dst_x * p.stride_x - p.pad_x; + const int64_t circular = p.circular; -#ifdef GGML_SIMD - // Vectorized loop - for (int64_t c_i = 0; c_i < c_pkg_end; c_i += pkg_size) { - GGML_F32_VEC sum = GGML_F32_VEC_ZERO; - for (int64_t knl_y = 0; knl_y < p.knl_h; ++knl_y) { - const int64_t src_y = src_y_base + knl_y * p.dilation_y; - if (src_y < 0 || src_y >= p.src_h) { - continue; + if (circular == 0) { + for (int64_t row = row_start; row < row_end; ++row) { + const int64_t dst_y = row % p.dst_h; + const float * src_data = (const float *)src->data + (row / p.dst_h) * p.src_w * p.src_h * c; + for (int64_t dst_x = 0; dst_x < p.dst_w; ++dst_x) { + float * dst_data = (float *)dst->data + (row * p.dst_w + dst_x) * c; + const int64_t src_y_base = dst_y * p.stride_y - p.pad_y; + const int64_t src_x_base = dst_x * p.stride_x - p.pad_x; + + #ifdef GGML_SIMD + // Vectorized loop + for (int64_t c_i = 0; c_i < c_pkg_end; c_i += pkg_size) { + GGML_F32_VEC sum = GGML_F32_VEC_ZERO; + for (int64_t knl_y = 0; knl_y < p.knl_h; ++knl_y) { + const int64_t src_y = src_y_base + knl_y * p.dilation_y; + if (src_y < 0 || src_y >= p.src_h) { + continue; + } + for (int64_t knl_x = 0; knl_x < p.knl_w; ++knl_x) { + const int64_t src_x = src_x_base + knl_x * p.dilation_x; + if (src_x < 0 || src_x >= p.src_w) { + continue; + } + GGML_F32_VEC k = GGML_F32_VEC_LOAD(knl_data + (knl_y * p.knl_w + knl_x) * c + c_i); + GGML_F32_VEC s = GGML_F32_VEC_LOAD(src_data + (src_y * p.src_w + src_x) * c + c_i); + sum = GGML_F32_VEC_FMA(sum, k, s); + } } - for (int64_t knl_x = 0; knl_x < p.knl_w; ++knl_x) { - const int64_t src_x = src_x_base + knl_x * p.dilation_x; - if (src_x < 0 || src_x >= p.src_w) { + GGML_F32_VEC_STORE(dst_data + c_i, sum); + } + #endif + // Scalar loop + for (int64_t c_i = c_pkg_end; c_i < c; ++c_i) { + float sum = 0.0f; + for (int64_t knl_y = 0; knl_y < p.knl_h; ++knl_y) { + const int64_t src_y = src_y_base + knl_y * p.dilation_y; + if (src_y < 0 || src_y >= p.src_h) { continue; } - GGML_F32_VEC k = GGML_F32_VEC_LOAD(knl_data + (knl_y * p.knl_w + knl_x) * c + c_i); - GGML_F32_VEC s = GGML_F32_VEC_LOAD(src_data + (src_y * p.src_w + src_x) * c + c_i); - sum = GGML_F32_VEC_FMA(sum, k, s); + for (int64_t knl_x = 0; knl_x < p.knl_w; ++knl_x) { + const int64_t src_x = src_x_base + knl_x * p.dilation_x; + if (src_x < 0 || src_x >= p.src_w) { + continue; + } + sum += knl_data[(knl_y * p.knl_w + knl_x) * c + c_i] + * src_data[(src_y * p.src_w + src_x) * c + c_i]; + } } + dst_data[c_i] = sum; } - GGML_F32_VEC_STORE(dst_data + c_i, sum); } -#endif - // Scalar loop - for (int64_t c_i = c_pkg_end; c_i < c; ++c_i) { - float sum = 0.0f; - for (int64_t knl_y = 0; knl_y < p.knl_h; ++knl_y) { - const int64_t src_y = src_y_base + knl_y * p.dilation_y; - if (src_y < 0 || src_y >= p.src_h) { - continue; + } + } + else { + for (int64_t row = row_start; row < row_end; ++row) { + const int64_t dst_y = row % p.dst_h; + const float * src_data = (const 
float *)src->data + (row / p.dst_h) * p.src_w * p.src_h * c; + for (int64_t dst_x = 0; dst_x < p.dst_w; ++dst_x) { + float * dst_data = (float *)dst->data + (row * p.dst_w + dst_x) * c; + const int64_t src_y_base = dst_y * p.stride_y - p.pad_y; + const int64_t src_x_base = dst_x * p.stride_x - p.pad_x; + + #ifdef GGML_SIMD + // Vectorized loop + for (int64_t c_i = 0; c_i < c_pkg_end; c_i += pkg_size) { + GGML_F32_VEC sum = GGML_F32_VEC_ZERO; + for (int64_t knl_y = 0; knl_y < p.knl_h; ++knl_y) { + const int64_t src_y = ggml_wrap_coord(src_y_base + knl_y * p.dilation_y, p.src_h); + for (int64_t knl_x = 0; knl_x < p.knl_w; ++knl_x) { + const int64_t src_x = ggml_wrap_coord(src_x_base + knl_x * p.dilation_x, p.src_w); + GGML_F32_VEC k = GGML_F32_VEC_LOAD(knl_data + (knl_y * p.knl_w + knl_x) * c + c_i); + GGML_F32_VEC s = GGML_F32_VEC_LOAD(src_data + (src_y * p.src_w + src_x) * c + c_i); + sum = GGML_F32_VEC_FMA(sum, k, s); + } } - for (int64_t knl_x = 0; knl_x < p.knl_w; ++knl_x) { - const int64_t src_x = src_x_base + knl_x * p.dilation_x; - if (src_x < 0 || src_x >= p.src_w) { - continue; + GGML_F32_VEC_STORE(dst_data + c_i, sum); + } + #endif + // Scalar loop + for (int64_t c_i = c_pkg_end; c_i < c; ++c_i) { + float sum = 0.0f; + for (int64_t knl_y = 0; knl_y < p.knl_h; ++knl_y) { + const int64_t src_y = ggml_wrap_coord(src_y_base + knl_y * p.dilation_y, p.src_h); + for (int64_t knl_x = 0; knl_x < p.knl_w; ++knl_x) { + const int64_t src_x = ggml_wrap_coord(src_x_base + knl_x * p.dilation_x, p.src_w); + sum += knl_data[(knl_y * p.knl_w + knl_x) * c + c_i] + * src_data[(src_y * p.src_w + src_x) * c + c_i]; } - sum += knl_data[(knl_y * p.knl_w + knl_x) * c + c_i] - * src_data[(src_y * p.src_w + src_x) * c + c_i]; } + dst_data[c_i] = sum; } - dst_data[c_i] = sum; } } } + } static void ggml_compute_forward_conv_2d_dw_whcn( @@ -7156,30 +7241,57 @@ static void ggml_compute_forward_conv_2d_dw_whcn( const int64_t start = params->ith * per_thread; const int64_t end = MIN(start + per_thread, n); - for (int64_t i = start; i < end; ++i) { - const float * knl_data = (const float *)kernel->data + (i % p.channels) * p.knl_w * p.knl_h; - const float * src_data = (const float *)src->data + i * p.src_w * p.src_h; - float * dst_data = (float *)dst->data + i * p.dst_w * p.dst_h; + const int64_t circular = p.circular; - for (int64_t dst_y = 0; dst_y < p.dst_h; ++dst_y) { - for (int64_t dst_x = 0; dst_x < p.dst_w; ++dst_x) { + if (circular == 0) { + for (int64_t i = start; i < end; ++i) { + const float * knl_data = (const float *)kernel->data + (i % p.channels) * p.knl_w * p.knl_h; + const float * src_data = (const float *)src->data + i * p.src_w * p.src_h; + float * dst_data = (float *)dst->data + i * p.dst_w * p.dst_h; - float sum = 0.0f; - for (int64_t knl_y = 0; knl_y < p.knl_h; ++knl_y) { - const int64_t src_y = dst_y * p.stride_y + knl_y * p.dilation_y - p.pad_y; - if (src_y < 0 || src_y >= p.src_h) { - continue; - } - for (int64_t knl_x = 0; knl_x < p.knl_w; ++knl_x) { - const int64_t src_x = dst_x * p.stride_x + knl_x * p.dilation_x - p.pad_x; - if (src_x < 0 || src_x >= p.src_w) { + for (int64_t dst_y = 0; dst_y < p.dst_h; ++dst_y) { + for (int64_t dst_x = 0; dst_x < p.dst_w; ++dst_x) { + + float sum = 0.0f; + for (int64_t knl_y = 0; knl_y < p.knl_h; ++knl_y) { + const int64_t src_y = dst_y * p.stride_y + knl_y * p.dilation_y - p.pad_y; + if (src_y < 0 || src_y >= p.src_h) { continue; } - sum += knl_data[knl_y * p.knl_w + knl_x] - * src_data[src_y * p.src_w + src_x]; + for (int64_t knl_x = 0; knl_x < 
p.knl_w; ++knl_x) { + const int64_t src_x = dst_x * p.stride_x + knl_x * p.dilation_x - p.pad_x; + if (src_x < 0 || src_x >= p.src_w) { + continue; + } + sum += knl_data[knl_y * p.knl_w + knl_x] + * src_data[src_y * p.src_w + src_x]; + } } + dst_data[dst_y * p.dst_w + dst_x] = sum; + } + } + } + } + else { + for (int64_t i = start; i < end; ++i) { + const float * knl_data = (const float *)kernel->data + (i % p.channels) * p.knl_w * p.knl_h; + const float * src_data = (const float *)src->data + i * p.src_w * p.src_h; + float * dst_data = (float *)dst->data + i * p.dst_w * p.dst_h; + + for (int64_t dst_y = 0; dst_y < p.dst_h; ++dst_y) { + for (int64_t dst_x = 0; dst_x < p.dst_w; ++dst_x) { + + float sum = 0.0f; + for (int64_t knl_y = 0; knl_y < p.knl_h; ++knl_y) { + const int64_t src_y = ggml_wrap_coord(dst_y * p.stride_y + knl_y * p.dilation_y - p.pad_y, p.src_h); + for (int64_t knl_x = 0; knl_x < p.knl_w; ++knl_x) { + const int64_t src_x = ggml_wrap_coord(dst_x * p.stride_x + knl_x * p.dilation_x - p.pad_x, p.src_w); + sum += knl_data[knl_y * p.knl_w + knl_x] + * src_data[src_y * p.src_w + src_x]; + } + } + dst_data[dst_y * p.dst_w + dst_x] = sum; } - dst_data[dst_y * p.dst_w + dst_x] = sum; } } } @@ -7206,6 +7318,7 @@ void ggml_compute_forward_conv_2d_dw( p.pad_y = dst->op_params[3]; p.dilation_x = dst->op_params[4]; p.dilation_y = dst->op_params[5]; + p.circular = dst->op_params[6]; GGML_ASSERT(kernel->ne[3] == p.channels); GGML_ASSERT(dst->ne[3] == p.batch); @@ -7626,24 +7739,51 @@ static void ggml_compute_forward_pad_f32( const int32_t rp2 = ggml_get_op_params_i32(dst, 5); const int32_t lp3 = ggml_get_op_params_i32(dst, 6); const int32_t rp3 = ggml_get_op_params_i32(dst, 7); + const int32_t circular = ggml_get_op_params_i32(dst, 8); // TODO: optimize - for (int64_t i2 = 0; i2 < ne2; ++i2) { - for (int64_t i1 = ith; i1 < ne1; i1 += nth) { - for (int64_t i0 = 0; i0 < ne0; ++i0) { - for (int64_t i3 = 0; i3 < ne3; ++i3) { - const int64_t dst_idx = i3*(ne0*ne1*ne2) + i2*(ne0*ne1) + i1*ne0 + i0; - if ((i0 >= lp0 && i0 < ne0 - rp0) \ - && (i1 >= lp1 && i1 < ne1 - rp1) \ - && (i2 >= lp2 && i2 < ne2 - rp2) \ - && (i3 >= lp3 && i3 < ne3 - rp3)) { - const int64_t src_idx = (i3 - lp3)*nb03 + (i2 - lp2)*nb02 + (i1 - lp1)*nb01 + (i0 - lp0)*nb00; + if (circular == 0) { + for (int64_t i2 = 0; i2 < ne2; ++i2) { + for (int64_t i1 = ith; i1 < ne1; i1 += nth) { + for (int64_t i0 = 0; i0 < ne0; ++i0) { + for (int64_t i3 = 0; i3 < ne3; ++i3) { + const int64_t dst_idx = i3*(ne0*ne1*ne2) + i2*(ne0*ne1) + i1*ne0 + i0; + if ((i0 >= lp0 && i0 < ne0 - rp0) \ + && (i1 >= lp1 && i1 < ne1 - rp1) \ + && (i2 >= lp2 && i2 < ne2 - rp2) \ + && (i3 >= lp3 && i3 < ne3 - rp3)) { + const int64_t src_idx = (i3 - lp3)*nb03 + (i2 - lp2)*nb02 + (i1 - lp1)*nb01 + (i0 - lp0)*nb00; + const float * src_ptr = (const float *)((char *) src0->data + src_idx); + dst_ptr[dst_idx] = *src_ptr; + } else { + dst_ptr[dst_idx] = 0; + } + } + } + } + } + } + else { + for (int64_t i2 = 0; i2 < ne2; ++i2) { + for (int64_t i1 = ith; i1 < ne1; i1 += nth) { + for (int64_t i0 = 0; i0 < ne0; ++i0) { + for (int64_t i3 = 0; i3 < ne3; ++i3) { + const int64_t dst_idx = i3*(ne0*ne1*ne2) + i2*(ne0*ne1) + i1*ne0 + i0; + const int64_t src_i0 = ggml_wrap_coord(i0 - lp0, ne00); + const int64_t src_i1 = ggml_wrap_coord(i1 - lp1, ne01); + const int64_t src_i2 = ggml_wrap_coord(i2 - lp2, ne02); + const int64_t src_i3 = ggml_wrap_coord(i3 - lp3, ne03); + + const int64_t src_idx = + src_i3*nb03 + + src_i2*nb02 + + src_i1*nb01 + + src_i0*nb00; + const float * src_ptr 
= (const float *)((char *) src0->data + src_idx); dst_ptr[dst_idx] = *src_ptr; - } else { - dst_ptr[dst_idx] = 0; } } } From 1b62b49aa82bf1e2eb69a43f77881ab6b3d6b5f9 Mon Sep 17 00:00:00 2001 From: bepis Date: Mon, 3 Nov 2025 14:34:56 -0800 Subject: [PATCH 03/39] Feat: Added cuda kernels --- ggml/src/ggml-cuda/conv2d-dw.cu | 71 ++++++++++++++++------ ggml/src/ggml-cuda/conv2d-transpose.cu | 82 ++++++++++++++++++-------- ggml/src/ggml-cuda/conv2d.cu | 60 ++++++++++++++----- ggml/src/ggml-cuda/pad.cu | 66 +++++++++++++++------ 4 files changed, 204 insertions(+), 75 deletions(-) diff --git a/ggml/src/ggml-cuda/conv2d-dw.cu b/ggml/src/ggml-cuda/conv2d-dw.cu index 7583233b1b7..255f131a6ae 100644 --- a/ggml/src/ggml-cuda/conv2d-dw.cu +++ b/ggml/src/ggml-cuda/conv2d-dw.cu @@ -8,6 +8,7 @@ struct conv_params { int padding_x, padding_y; int dilation_x, dilation_y; int channels, batches; + int circular; }; struct kernel_bounds { @@ -17,14 +18,23 @@ struct kernel_bounds { __device__ __forceinline__ kernel_bounds calculate_kernel_bounds(int out_x, int out_y, const conv_params & params) { kernel_bounds bounds; - bounds.y_min = max(0, (params.padding_y - out_y * params.stride_y + params.dilation_y - 1) / params.dilation_y); - bounds.y_max = - min(params.kernel_h, - (params.in_h + params.padding_y - out_y * params.stride_y + params.dilation_y - 1) / params.dilation_y); - bounds.x_min = max(0, (params.padding_x - out_x * params.stride_x + params.dilation_x - 1) / params.dilation_x); - bounds.x_max = - min(params.kernel_w, - (params.in_w + params.padding_x - out_x * params.stride_x + params.dilation_x - 1) / params.dilation_x); + if (params.circular) { + bounds.y_min = 0; + bounds.y_max = params.kernel_h; + bounds.x_min = 0; + bounds.x_max = params.kernel_w; + } + else { + bounds.y_min = max(0, (params.padding_y - out_y * params.stride_y + params.dilation_y - 1) / params.dilation_y); + bounds.y_max = + min(params.kernel_h, + (params.in_h + params.padding_y - out_y * params.stride_y + params.dilation_y - 1) / params.dilation_y); + bounds.x_min = max(0, (params.padding_x - out_x * params.stride_x + params.dilation_x - 1) / params.dilation_x); + bounds.x_max = + min(params.kernel_w, + (params.in_w + params.padding_x - out_x * params.stride_x + params.dilation_x - 1) / params.dilation_x); + + } return bounds; } @@ -32,6 +42,10 @@ __device__ __forceinline__ int calculate_input_coord(int out_coord, int kern_coo return out_coord * stride + kern_coord * dilation - padding; } +__device__ __forceinline__ int wrap_coord(int coord, int size) { + return (coord % size + size) % size; +} + struct whcn_layout { __device__ static int input_index(int n, int c, int y, int x, const conv_params & params) { return n * (params.channels * params.in_w * params.in_h) + c * params.in_w * params.in_h + y * params.in_w + x; @@ -83,7 +97,8 @@ __global__ void conv2d_dw_kernel(const T * __restrict__ input, const T * __restr const int in_w, const int in_h, const int out_w, const int out_h, const int kernel_w, const int kernel_h, const int stride_x, const int stride_y, const int padding_x, const int padding_y, const int dilation_x, const int dilation_y, - const int channels, const int batches) { + const int channels, const int batches, + const int circular) { const int global_idx = blockIdx.x * blockDim.x + threadIdx.x; const int total_elements = batches * channels * out_h * out_w; @@ -92,7 +107,7 @@ __global__ void conv2d_dw_kernel(const T * __restrict__ input, const T * __restr } conv_params params = { in_w, in_h, out_w, out_h, kernel_w, 
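    // (In the circular case calculate_kernel_bounds() above returns the full
    //  kernel window: every tap becomes valid once wrap_coord() folds it back
    //  into [0, size), so no per-tap bounds check is needed.)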
kernel_h, stride_x, - stride_y, padding_x, padding_y, dilation_x, dilation_y, channels, batches }; + stride_y, padding_x, padding_y, dilation_x, dilation_y, channels, batches, circular }; int batch_idx, channel_idx, out_y_idx, out_x_idx; Layout::unpack_indices(global_idx, params, batch_idx, channel_idx, out_y_idx, out_x_idx); @@ -100,18 +115,35 @@ __global__ void conv2d_dw_kernel(const T * __restrict__ input, const T * __restr T accumulator = 0; kernel_bounds bounds = calculate_kernel_bounds(out_x_idx, out_y_idx, params); - for (int kern_y = bounds.y_min; kern_y < bounds.y_max; ++kern_y) { - int in_y_idx = calculate_input_coord(out_y_idx, kern_y, params.stride_y, params.dilation_y, params.padding_y); + if (params.circular == 0) { + for (int kern_y = bounds.y_min; kern_y < bounds.y_max; ++kern_y) { + int src_y_idx = calculate_input_coord(out_y_idx, kern_y, params.stride_y, params.dilation_y, params.padding_y); + + for (int kern_x = bounds.x_min; kern_x < bounds.x_max; ++kern_x) { + int src_x_idx = calculate_input_coord(out_x_idx, kern_x, params.stride_x, params.dilation_x, params.padding_x); + + const T input_val = input[Layout::input_index(batch_idx, channel_idx, src_y_idx, src_x_idx, params)]; + const T kernel_val = kernel[Layout::kernel_index(channel_idx, kern_y, kern_x, params)]; + + accumulator += input_val * kernel_val; + } + } + } + else { + for (int kern_y = bounds.y_min; kern_y < bounds.y_max; ++kern_y) { + int in_y_idx = wrap_coord(calculate_input_coord(out_y_idx, kern_y, params.stride_y, params.dilation_y, params.padding_y), params.in_h); - for (int kern_x = bounds.x_min; kern_x < bounds.x_max; ++kern_x) { - int in_x_idx = calculate_input_coord(out_x_idx, kern_x, params.stride_x, params.dilation_x, params.padding_x); + for (int kern_x = bounds.x_min; kern_x < bounds.x_max; ++kern_x) { + int in_x_idx = wrap_coord(calculate_input_coord(out_x_idx, kern_x, params.stride_x, params.dilation_x, params.padding_x), params.in_w); - const T input_val = input[Layout::input_index(batch_idx, channel_idx, in_y_idx, in_x_idx, params)]; - const T kernel_val = kernel[Layout::kernel_index(channel_idx, kern_y, kern_x, params)]; + const T input_val = input[Layout::input_index(batch_idx, channel_idx, src_y_idx, src_x_idx, params)]; + const T kernel_val = kernel[Layout::kernel_index(channel_idx, kern_y, kern_x, params)]; - accumulator += input_val * kernel_val; + accumulator += input_val * kernel_val; + } } } + output[Layout::output_index(batch_idx, channel_idx, out_y_idx, out_x_idx, params)] = accumulator; } @@ -132,6 +164,7 @@ void ggml_cuda_op_conv2d_dw(ggml_backend_cuda_context & ctx, ggml_tensor * dst) const int padding_y = p[3]; const int dilation_x = p[4]; const int dilation_y = p[5]; + const int circular = p[6]; const int in_w = input->ne[0]; const int in_h = input->ne[1]; @@ -150,11 +183,11 @@ void ggml_cuda_op_conv2d_dw(ggml_backend_cuda_context & ctx, ggml_tensor * dst) if (ggml_is_contiguous(input)) { conv2d_dw_kernel<<>>( x_d, w_d, y_d, in_w, in_h, out_w, out_h, kernel_w, kernel_h, stride_x, stride_y, padding_x, padding_y, - dilation_x, dilation_y, channels, batches); + dilation_x, dilation_y, channels, batches, circular); } else if (ggml_is_contiguous_channels(input)) { conv2d_dw_kernel<<>>( x_d, w_d, y_d, in_w, in_h, out_w, out_h, kernel_w, kernel_h, stride_x, stride_y, padding_x, padding_y, - dilation_x, dilation_y, channels, batches); + dilation_x, dilation_y, channels, batches, circular); } else { GGML_ABORT("Unsupported memory layout for conv_2d_dw"); } diff --git 
a/ggml/src/ggml-cuda/conv2d-transpose.cu b/ggml/src/ggml-cuda/conv2d-transpose.cu index 03224e404d3..253f1dc3613 100644 --- a/ggml/src/ggml-cuda/conv2d-transpose.cu +++ b/ggml/src/ggml-cuda/conv2d-transpose.cu @@ -3,10 +3,16 @@ #include "conv2d-transpose.cuh" #include "ggml.h" + +__device__ __forceinline__ int wrap_coord(int coord, int size) { + return (coord % size + size) % size; +} + __global__ void conv2d_transpose_kernel(const float * __restrict__ input, const half * __restrict__ kernel, float * __restrict__ output, const int in_w, const int in_h, const int out_w, const int out_h, const int kernel_w, const int kernel_h, const int stride, - const int c_in, const int c_out, const int batches) { + const int c_in, const int c_out, const int batches, + const int circular) { const int global_idx = blockIdx.x * blockDim.x + threadIdx.x; const int total_elements = out_w * out_h * c_out * batches; @@ -22,28 +28,55 @@ __global__ void conv2d_transpose_kernel(const float * __restrict__ input, const float accumulator = 0; // For each output idx, find the inputs that contribute to it by checking stride alignment and bounds - - for (int c_in_idx = 0; c_in_idx < c_in; c_in_idx++) { - for (int kh = 0; kh < kernel_h; ++kh) { - int in_y = out_y_idx - kh; - if (in_y < 0 || in_y % stride) continue; - in_y /= stride; - if (in_y >= in_h) continue; - - for (int kw = 0; kw < kernel_w; ++kw) { - int in_x = out_x_idx - kw; - if (in_x < 0 || in_x % stride) continue; - in_x /= stride; - if (in_x >= in_w) continue; - - const int input_idx = (in_w * in_h * c_in) * n_idx + (in_w * in_h) * c_in_idx + (in_w) *in_y + in_x; - const int kernel_idx = - (kernel_h * kernel_w * c_out) * c_in_idx + (kernel_h * kernel_w) * c_idx + (kernel_w) *kh + kw; - - float input_val = input[input_idx]; - half kern_val = kernel[kernel_idx]; - - accumulator += input_val * (float) kern_val; + if (circular == 0) { + for (int c_in_idx = 0; c_in_idx < c_in; c_in_idx++) { + for (int kh = 0; kh < kernel_h; ++kh) { + int in_y = out_y_idx - kh; + if (in_y < 0 || in_y % stride) continue; + in_y /= stride; + if (in_y >= in_h) continue; + + for (int kw = 0; kw < kernel_w; ++kw) { + int in_x = out_x_idx - kw; + if (in_x < 0 || in_x % stride) continue; + in_x /= stride; + if (in_x >= in_w) continue; + + const int input_idx = (in_w * in_h * c_in) * n_idx + (in_w * in_h) * c_in_idx + (in_w) *in_y + in_x; + const int kernel_idx = + (kernel_h * kernel_w * c_out) * c_in_idx + (kernel_h * kernel_w) * c_idx + (kernel_w) *kh + kw; + + float input_val = input[input_idx]; + half kern_val = kernel[kernel_idx]; + + accumulator += input_val * (float) kern_val; + } + } + } + } + else { + for (int c_in_idx = 0; c_in_idx < c_in; c_in_idx++) { + for (int kh = 0; kh < kernel_h; ++kh) { + int in_y = out_y_idx - kh; + if (in_y % stride) continue; + in_y /= stride; + in_y = wrap_coord(in_y, in_h); + + for (int kw = 0; kw < kernel_w; ++kw) { + int in_x = out_x_idx - kw; + if (in_x % stride) continue; + in_x /= stride; + in_x = wrap_coord(in_x, in_w); + + const int input_idx = (in_w * in_h * c_in) * n_idx + (in_w * in_h) * c_in_idx + (in_w) *in_y + in_x; + const int kernel_idx = + (kernel_h * kernel_w * c_out) * c_in_idx + (kernel_h * kernel_w) * c_idx + (kernel_w) *kh + kw; + + float input_val = input[input_idx]; + half kern_val = kernel[kernel_idx]; + + accumulator += input_val * (float) kern_val; + } } } } @@ -72,6 +105,7 @@ void ggml_cuda_conv_2d_transpose_p0(ggml_backend_cuda_context & ctx, ggml_tensor const int kernel_h = kernel->ne[1]; const int stride = 
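    // In the circular branch of conv2d_transpose_kernel above, the stride
    // alignment test (in_y % stride, in_x % stride) is kept, but the bounds
    // rejections are replaced by wrap_coord(), so contributions that would
    // fall off one edge of the input are taken from the opposite edge.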
dst->op_params[0]; const int batches = input->ne[3]; + const int circular = dst->op_params[1]; GGML_ASSERT(channels_in == kernel->ne[3]); GGML_ASSERT(stride > 0); @@ -87,5 +121,5 @@ void ggml_cuda_conv_2d_transpose_p0(ggml_backend_cuda_context & ctx, ggml_tensor conv2d_transpose_kernel<<>>( input_data, kernel_data, output_data, input_w, input_h, output_w, output_h, kernel_w, kernel_h, stride, - channels_in, channels_out, batches); + channels_in, channels_out, batches, circular); } diff --git a/ggml/src/ggml-cuda/conv2d.cu b/ggml/src/ggml-cuda/conv2d.cu index 142dd66903a..e9d78df2c88 100644 --- a/ggml/src/ggml-cuda/conv2d.cu +++ b/ggml/src/ggml-cuda/conv2d.cu @@ -11,6 +11,7 @@ struct conv_params { const int64_t IC, OC; const int64_t B; const int64_t TOTAL; + const int64_t CIRCULAR; }; struct kernel_bounds { @@ -26,12 +27,24 @@ __device__ __forceinline__ int64_t min64(int64_t a, int64_t b) { return (a < b) ? a : b; } +__device__ __forceinline__ int wrap_coord(int coord, int size) { + return (coord % size + size) % size; +} + __device__ __forceinline__ kernel_bounds calculate_kernel_bounds(int64_t out_x, int64_t out_y, const conv_params & P) { kernel_bounds bounds; - bounds.y_min = max64(0, (P.PD_Y - out_y * P.ST_Y + P.DL_Y - 1) / P.DL_Y); - bounds.y_max = min64(P.KH, (P.IH + P.PD_Y - out_y * P.ST_Y + P.DL_Y - 1) / P.DL_Y); - bounds.x_min = max64(0, (P.PD_X - out_x * P.ST_X + P.DL_X - 1) / P.DL_X); - bounds.x_max = min64(P.KW, (P.IW + P.PD_X - out_x * P.ST_X + P.DL_X - 1) / P.DL_X); + if (P.CIRCULAR) { + bounds.y_min = 0; + bounds.y_max = P.KH; + bounds.x_min = 0; + bounds.x_max = P.KW; + } + else { + bounds.y_min = max64(0, (P.PD_Y - out_y * P.ST_Y + P.DL_Y - 1) / P.DL_Y); + bounds.y_max = min64(P.KH, (P.IH + P.PD_Y - out_y * P.ST_Y + P.DL_Y - 1) / P.DL_Y); + bounds.x_min = max64(0, (P.PD_X - out_x * P.ST_X + P.DL_X - 1) / P.DL_X); + bounds.x_max = min64(P.KW, (P.IW + P.PD_X - out_x * P.ST_X + P.DL_X - 1) / P.DL_X); + } return bounds; } @@ -84,19 +97,37 @@ static __global__ void conv2d_kernel(const float * __restrict__ input, Layout::unpack_indices(global_idx, P, n, c_out, out_y, out_x); float acc = 0.0f; + if (P.CIRCULAR == 0) { + for (int64_t c_in = 0; c_in < P.IC; ++c_in) { + kernel_bounds bounds = calculate_kernel_bounds(out_x, out_y, P); + + for (int64_t ky = bounds.y_min; ky < bounds.y_max; ++ky) { + const int64_t in_y = calculate_input_coord(out_y, ky, P.ST_Y, P.DL_Y, P.PD_Y); - for (int64_t c_in = 0; c_in < P.IC; ++c_in) { - kernel_bounds bounds = calculate_kernel_bounds(out_x, out_y, P); + for (int64_t kx = bounds.x_min; kx < bounds.x_max; ++kx) { + const int64_t in_x = calculate_input_coord(out_x, kx, P.ST_X, P.DL_X, P.PD_X); + + const float input_val = input[Layout::input_index(n, c_in, in_y, in_x, P)]; + const T kernel_val = kernel[Layout::kernel_index(c_out, c_in, ky, kx, P)]; + acc += (input_val * ggml_cuda_cast(kernel_val)); + } + } + } + } + else { + for (int64_t c_in = 0; c_in < P.IC; ++c_in) { + kernel_bounds bounds = calculate_kernel_bounds(out_x, out_y, P); - for (int64_t ky = bounds.y_min; ky < bounds.y_max; ++ky) { - const int64_t in_y = calculate_input_coord(out_y, ky, P.ST_Y, P.DL_Y, P.PD_Y); + for (int64_t ky = bounds.y_min; ky < bounds.y_max; ++ky) { + const int64_t in_y = wrap_coord(calculate_input_coord(out_y, ky, P.ST_Y, P.DL_Y, P.PD_Y), P.IH); - for (int64_t kx = bounds.x_min; kx < bounds.x_max; ++kx) { - const int64_t in_x = calculate_input_coord(out_x, kx, P.ST_X, P.DL_X, P.PD_X); + for (int64_t kx = bounds.x_min; kx < bounds.x_max; ++kx) { + const int64_t 
in_x = wrap_coord(calculate_input_coord(out_x, kx, P.ST_X, P.DL_X, P.PD_X), P.IW); - const float input_val = input[Layout::input_index(n, c_in, in_y, in_x, P)]; - const T kernel_val = kernel[Layout::kernel_index(c_out, c_in, ky, kx, P)]; - acc += (input_val * ggml_cuda_cast(kernel_val)); + const float input_val = input[Layout::input_index(n, c_in, in_y, in_x, P)]; + const T kernel_val = kernel[Layout::kernel_index(c_out, c_in, ky, kx, P)]; + acc += (input_val * ggml_cuda_cast(kernel_val)); + } } } } @@ -141,6 +172,7 @@ void ggml_cuda_op_conv2d(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const int PD_Y = p[3]; // padding_y const int DL_X = p[4]; // dilation_x const int DL_Y = p[5]; // dilation_y + const int CIRCULAR = p[6]; // No cwhn GGML_ASSERT(p[6] == false); @@ -156,7 +188,7 @@ void ggml_cuda_op_conv2d(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const int B = input->ne[3]; // n_batches const int64_t total = B * OC * OH * OW; - conv_params params = { IW, IH, OW, OH, KW, KH, ST_X, ST_Y, PD_X, PD_Y, DL_X, DL_Y, IC, OC, B, total }; + conv_params params = { IW, IH, OW, OH, KW, KH, ST_X, ST_Y, PD_X, PD_Y, DL_X, DL_Y, IC, OC, B, total, CIRCULAR }; if (kernel->type == GGML_TYPE_F16) { conv2d_cuda_f16(X_D, (half *) K_D, Y_D, params, st); diff --git a/ggml/src/ggml-cuda/pad.cu b/ggml/src/ggml-cuda/pad.cu index 29aef33c1a4..f3f06897e42 100644 --- a/ggml/src/ggml-cuda/pad.cu +++ b/ggml/src/ggml-cuda/pad.cu @@ -1,9 +1,18 @@ +#include + #include "pad.cuh" + + +__device__ __forceinline__ int64_t wrap_coord(int64_t coord, int64_t size) { + return (coord % size + size) % size; +} + static __global__ void pad_f32(const float * src, float * dst, const int lp0, const int rp0, const int lp1, const int rp1, const int lp2, const int rp2, const int lp3, const int rp3, - const int ne0, const int ne1, const int ne2, const int ne3) { + const int ne0, const int ne1, const int ne2, const int ne3, + const int circular) { // blockIdx.z: i3*ne2+i2 // blockIdx.y: i1 // blockIDx.x: i0 / CUDA_PAD_BLOCK_SIZE @@ -12,39 +21,59 @@ static __global__ void pad_f32(const float * src, float * dst, int i1 = blockIdx.y; int i2 = blockIdx.z % ne2; int i3 = blockIdx.z / ne2; + if (i0 >= ne0 || i1 >= ne1 || i2 >= ne2 || i3 >= ne3) { return; } - - // operation + const int64_t dst_idx = i3*(ne0*ne1*ne2) + i2*(ne0*ne1) + i1*ne0 + i0; - if ((i0 >= lp0 && i0 < ne0 - rp0) && - (i1 >= lp1 && i1 < ne1 - rp1) && - (i2 >= lp2 && i2 < ne2 - rp2) && - (i3 >= lp3 && i3 < ne3 - rp3)) { - const int64_t i00 = i0 - lp0; - const int64_t i01 = i1 - lp1; - const int64_t i02 = i2 - lp2; - const int64_t i03 = i3 - lp3; - const int64_t ne02 = ne2 - lp2 - rp2; - const int64_t ne01 = ne1 - lp1 - rp1; + + if (circular == 0) { + // operation + if ((i0 >= lp0 && i0 < ne0 - rp0) && + (i1 >= lp1 && i1 < ne1 - rp1) && + (i2 >= lp2 && i2 < ne2 - rp2) && + (i3 >= lp3 && i3 < ne3 - rp3)) { + const int64_t i00 = i0 - lp0; + const int64_t i01 = i1 - lp1; + const int64_t i02 = i2 - lp2; + const int64_t i03 = i3 - lp3; + const int64_t ne02 = ne2 - lp2 - rp2; + const int64_t ne01 = ne1 - lp1 - rp1; + const int64_t ne00 = ne0 - lp0 - rp0; + + const int64_t src_idx = i03*(ne00*ne01*ne02) + i02*(ne00*ne01) + i01*ne00 + i00; + + dst[dst_idx] = src[src_idx]; + } else { + dst[dst_idx] = 0.0f; + } + } + else { const int64_t ne00 = ne0 - lp0 - rp0; + const int64_t ne01 = ne1 - lp1 - rp1; + const int64_t ne02 = ne2 - lp2 - rp2; + const int64_t ne03 = ne3 - lp3 - rp3; + + const int64_t i00 = wrap_coord(i0 - lp0, ne00); + const int64_t i01 = wrap_coord(i1 - lp1, 
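        // ne00..ne03 recover the source extents from the destination extents
        // minus both pads; each destination coordinate then maps to a wrapped
        // source coordinate. Example (hypothetical values): with lp0 == 2 and
        // a width-5 source, dst i0 == 0 reads src wrap_coord(-2, 5) == 3.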
ne01); + const int64_t i02 = wrap_coord(i2 - lp2, ne02); + const int64_t i03 = wrap_coord(i3 - lp3, ne03); const int64_t src_idx = i03*(ne00*ne01*ne02) + i02*(ne00*ne01) + i01*ne00 + i00; dst[dst_idx] = src[src_idx]; - } else { - dst[dst_idx] = 0.0f; } } static void pad_f32_cuda(const float * src, float * dst, const int lp0, const int rp0, const int lp1, const int rp1, const int lp2, const int rp2, const int lp3, const int rp3, - const int ne0, const int ne1, const int ne2, const int ne3, cudaStream_t stream) { + const int ne0, const int ne1, const int ne2, const int ne3, + const int circular, cudaStream_t stream) { int num_blocks = (ne0 + CUDA_PAD_BLOCK_SIZE - 1) / CUDA_PAD_BLOCK_SIZE; dim3 gridDim(num_blocks, ne1, ne2*ne3); - pad_f32<<>>(src, dst, lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, ne0, ne1, ne2, ne3); + pad_f32<<>>(src, dst, lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, ne0, ne1, ne2, ne3, circular); } void ggml_cuda_op_pad(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { @@ -65,8 +94,9 @@ void ggml_cuda_op_pad(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const int32_t rp2 = ((const int32_t*)(dst->op_params))[5]; const int32_t lp3 = ((const int32_t*)(dst->op_params))[6]; const int32_t rp3 = ((const int32_t*)(dst->op_params))[7]; + const int32_t circular = ((const int32_t*)(dst->op_params))[8]; pad_f32_cuda(src0_d, dst_d, lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, - dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], stream); + dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], circular, stream); } From 60bed3b95c653fc8fdebd5c429f8d5d50a37bf0d Mon Sep 17 00:00:00 2001 From: bepis Date: Mon, 3 Nov 2025 15:03:52 -0800 Subject: [PATCH 04/39] Added tests --- tests/test-backend-ops.cpp | 463 +++++++++++++++++++++++++++++++++++-- 1 file changed, 444 insertions(+), 19 deletions(-) diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp index 967a53c63d8..4e0546cb4d8 100644 --- a/tests/test-backend-ops.cpp +++ b/tests/test-backend-ops.cpp @@ -4454,6 +4454,7 @@ struct test_conv_2d : public test_case { const int dilation1; // Whether the inputs are contiguous in the channel dim or the width dim const bool cwhn; + const bool circular; // If true, the direct CONV_2D will be used in the graph, otherwise it // uses ggml_conv_2d: @@ -4463,7 +4464,7 @@ struct test_conv_2d : public test_case { // IM2COL -> MUL_MM graph will be built. 
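    // When `circular` is true, build_graph() below uses
    // ggml_conv_2d_direct_circular(), which differs from ggml_conv_2d_direct()
    // only in op_params[6]; the output shape is the same as for zero padding
    // with the same stride/padding/dilation.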
std::string vars() override { - return VARS_TO_STR10(ne_input, ne_kernel, type_kernel, stride0, stride1, padding0, padding1, dilation0, dilation1, cwhn); + return VARS_TO_STR11(ne_input, ne_kernel, type_kernel, stride0, stride1, padding0, padding1, dilation0, dilation1, cwhn, circular); } double max_nmse_err() override { @@ -4499,7 +4500,8 @@ struct test_conv_2d : public test_case { test_conv_2d(std::array ne_input = { 64, 64, 16, 1 }, std::array ne_kernel = { 3, 3, 1, 16 }, ggml_type type_kernel = GGML_TYPE_F32, int stride0 = 1, - int stride1 = 1, int padding0 = 0, int padding1 = 0, int dilation0 = 1, int dilation1 = 1, bool cwhn = false) : + int stride1 = 1, int padding0 = 0, int padding1 = 0, int dilation0 = 1, int dilation1 = 1, + bool cwhn = false, bool circular = false) : ne_input(ne_input), ne_kernel(ne_kernel), type_kernel(type_kernel), @@ -4509,7 +4511,8 @@ struct test_conv_2d : public test_case { padding1(padding1), dilation0(dilation0), dilation1(dilation1), - cwhn(cwhn) {} + cwhn(cwhn), + circular(circular) {} ggml_tensor * build_graph(ggml_context * ctx) override { ggml_tensor * input = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne_input.data()); @@ -4527,8 +4530,58 @@ struct test_conv_2d : public test_case { kernel = ggml_permute(ctx, kernel, 3, 2, 0, 1); } - ggml_tensor * out = - ggml_conv_2d_direct(ctx, kernel, input, stride0, stride1, padding0, padding1, dilation0, dilation1); + ggml_tensor * out = circular + ? ggml_conv_2d_direct_circular(ctx, kernel, input, stride0, stride1, padding0, padding1, dilation0, dilation1) + : ggml_conv_2d_direct(ctx, kernel, input, stride0, stride1, padding0, padding1, dilation0, dilation1); + ggml_set_name(out, "out"); + return out; + } +}; + +struct test_conv_2d_im2col : public test_case { + const std::array ne_input; + const std::array ne_kernel; + const ggml_type type_kernel; + const int stride0; + const int stride1; + const int padding0; + const int padding1; + const int dilation0; + const int dilation1; + const bool circular; + + std::string vars() override { + return VARS_TO_STR10(ne_input, ne_kernel, type_kernel, stride0, stride1, padding0, padding1, dilation0, dilation1, circular); + } + + test_conv_2d_im2col(std::array ne_input = { 32, 24, 8, 2 }, + std::array ne_kernel = { 3, 3, 8, 4 }, + ggml_type type_kernel = GGML_TYPE_F32, + int stride0 = 1, int stride1 = 1, + int padding0 = 0, int padding1 = 0, + int dilation0 = 1, int dilation1 = 1, + bool circular = false) + : ne_input(ne_input), + ne_kernel(ne_kernel), + type_kernel(type_kernel), + stride0(stride0), + stride1(stride1), + padding0(padding0), + padding1(padding1), + dilation0(dilation0), + dilation1(dilation1), + circular(circular) {} + + ggml_tensor * build_graph(ggml_context * ctx) override { + ggml_tensor * input = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne_input.data()); + ggml_set_name(input, "input"); + + ggml_tensor * kernel = ggml_new_tensor(ctx, type_kernel, 4, ne_kernel.data()); + ggml_set_name(kernel, "kernel"); + + ggml_tensor * out = circular + ? 
ggml_conv_2d_circular(ctx, kernel, input, stride0, stride1, padding0, padding1, dilation0, dilation1) + : ggml_conv_2d(ctx, kernel, input, stride0, stride1, padding0, padding1, dilation0, dilation1); ggml_set_name(out, "out"); return out; } @@ -4542,15 +4595,16 @@ struct test_conv_2d_dw : public test_case { const int padding; const int dilation; const bool cwhn; + const bool circular; std::string vars() override { - return VARS_TO_STR6(ne_input, ne_kernel, stride, padding, dilation, cwhn); + return VARS_TO_STR7(ne_input, ne_kernel, stride, padding, dilation, cwhn, circular); } test_conv_2d_dw(std::array ne_input = {64, 64, 16, 1}, std::array ne_kernel = {3, 3, 1, 16}, - int stride = 1, int padding = 0, int dilation = 1, bool cwhn = false) - : ne_input(ne_input), ne_kernel(ne_kernel), stride(stride), padding(padding), dilation(dilation), cwhn(cwhn) {} + int stride = 1, int padding = 0, int dilation = 1, bool cwhn = false, bool circular = false) + : ne_input(ne_input), ne_kernel(ne_kernel), stride(stride), padding(padding), dilation(dilation), cwhn(cwhn), circular(circular) {} ggml_tensor * build_graph(ggml_context * ctx) override { ggml_tensor * input = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne_input.data()); @@ -4568,14 +4622,270 @@ struct test_conv_2d_dw : public test_case { kernel = ggml_permute(ctx, kernel, 3, 2, 0, 1); } - ggml_tensor * out = ggml_conv_2d_dw_direct( - ctx, kernel, input, - stride, stride, padding, padding, dilation, dilation); + ggml_tensor * out = circular + ? ggml_conv_2d_dw_direct_circular(ctx, kernel, input, + stride, stride, padding, padding, dilation, dilation) + : ggml_conv_2d_dw_direct(ctx, kernel, input, + stride, stride, padding, padding, dilation, dilation); ggml_set_name(out, "out"); return out; } }; +struct test_conv_2d_direct_circular_manual : public test_case { + const std::array ne_input{5, 4, 1, 1}; + const std::array ne_kernel{3, 3, 1, 1}; + const int stride0 = 1; + const int stride1 = 1; + const int padding0 = 2; + const int padding1 = 1; + const int dilation0 = 1; + const int dilation1 = 1; + + ggml_tensor * input = nullptr; + ggml_tensor * kernel = nullptr; + ggml_tensor * expected = nullptr; + + std::string vars() override { + return "manual_conv2d_direct_circular"; + } + + ggml_tensor * build_graph(ggml_context * ctx) override { + input = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne_input.data()); + kernel = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne_kernel.data()); + ggml_set_name(input, "input"); + ggml_set_name(kernel, "kernel"); + + ggml_tensor * actual = ggml_conv_2d_direct_circular( + ctx, kernel, input, stride0, stride1, padding0, padding1, dilation0, dilation1); + ggml_set_name(actual, "actual"); + + int64_t ne_out[4] = { + conv_out_size(ne_input[0], ne_kernel[0], stride0, padding0, dilation0), + conv_out_size(ne_input[1], ne_kernel[1], stride1, padding1, dilation1), + ne_kernel[3], + ne_input[3], + }; + + expected = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne_out); + ggml_set_name(expected, "expected"); + + ggml_tensor * diff = ggml_sub(ctx, actual, expected); + ggml_tensor * sq = ggml_sqr(ctx, diff); + ggml_tensor * loss = ggml_sum(ctx, sq); + ggml_set_name(loss, "loss"); + return loss; + } + + void initialize_tensors(ggml_context * ctx) override { + test_case::initialize_tensors(ctx); + + std::vector input_data(ggml_nelements(input)); + for (size_t i = 0; i < input_data.size(); ++i) { + input_data[i] = static_cast(std::sin(static_cast(i + 1))); + } + ggml_backend_tensor_set(input, input_data.data(), 0, input_data.size() * sizeof(float)); + + 
std::vector kernel_data(ggml_nelements(kernel)); + for (size_t i = 0; i < kernel_data.size(); ++i) { + kernel_data[i] = static_cast(std::cos(static_cast(i + 1))); + } + ggml_backend_tensor_set(kernel, kernel_data.data(), 0, kernel_data.size() * sizeof(float)); + + int64_t ne_out[4] = { + conv_out_size(ne_input[0], ne_kernel[0], stride0, padding0, dilation0), + conv_out_size(ne_input[1], ne_kernel[1], stride1, padding1, dilation1), + ne_kernel[3], + ne_input[3], + }; + std::vector expected_data(ggml_nelements(expected), 0.0f); + + for (int64_t n = 0; n < ne_input[3]; ++n) { + for (int64_t oc = 0; oc < ne_kernel[3]; ++oc) { + for (int64_t oy = 0; oy < ne_out[1]; ++oy) { + for (int64_t ox = 0; ox < ne_out[0]; ++ox) { + float sum = 0.0f; + for (int64_t ic = 0; ic < ne_kernel[2]; ++ic) { + for (int64_t ky = 0; ky < ne_kernel[1]; ++ky) { + const int64_t in_y = wrap_coord_circular( + oy * stride1 + ky * dilation1 - padding1, ne_input[1]); + for (int64_t kx = 0; kx < ne_kernel[0]; ++kx) { + const int64_t in_x = wrap_coord_circular( + ox * stride0 + kx * dilation0 - padding0, ne_input[0]); + const int64_t src_idx = offset4d(ne_input.data(), in_x, in_y, ic, n); + const int64_t ker_idx = offset4d(ne_kernel.data(), kx, ky, ic, oc); + sum += input_data[src_idx] * kernel_data[ker_idx]; + } + } + } + expected_data[offset4d(ne_out, ox, oy, oc, n)] = sum; + } + } + } + } + + ggml_backend_tensor_set(expected, expected_data.data(), 0, expected_data.size() * sizeof(float)); + } + + double max_nmse_err() override { + return 1e-8; + } +}; + +struct test_conv_2d_dw_direct_circular_manual : public test_case { + const std::array ne_input{4, 3, 2, 1}; + const std::array ne_kernel{3, 2, 1, 2}; + const int stride = 1; + const int padding = 1; + const int dilation = 1; + + ggml_tensor * input = nullptr; + ggml_tensor * kernel = nullptr; + ggml_tensor * expected = nullptr; + + std::string vars() override { + return "manual_conv2d_dw_direct_circular"; + } + + ggml_tensor * build_graph(ggml_context * ctx) override { + input = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne_input.data()); + kernel = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne_kernel.data()); + ggml_set_name(input, "input"); + ggml_set_name(kernel, "kernel"); + + ggml_tensor * actual = ggml_conv_2d_dw_direct_circular( + ctx, kernel, input, stride, stride, padding, padding, dilation, dilation); + ggml_set_name(actual, "actual"); + + int64_t ne_out[4] = { + conv_out_size(ne_input[0], ne_kernel[0], stride, padding, dilation), + conv_out_size(ne_input[1], ne_kernel[1], stride, padding, dilation), + ne_input[2], + ne_input[3], + }; + expected = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne_out); + ggml_set_name(expected, "expected"); + + ggml_tensor * diff = ggml_sub(ctx, actual, expected); + ggml_tensor * sq = ggml_sqr(ctx, diff); + ggml_tensor * loss = ggml_sum(ctx, sq); + ggml_set_name(loss, "loss"); + return loss; + } + + void initialize_tensors(ggml_context * ctx) override { + test_case::initialize_tensors(ctx); + + std::vector input_data(ggml_nelements(input)); + for (size_t i = 0; i < input_data.size(); ++i) { + input_data[i] = static_cast(i % 7); + } + ggml_backend_tensor_set(input, input_data.data(), 0, input_data.size() * sizeof(float)); + + std::vector kernel_data(ggml_nelements(kernel)); + for (size_t i = 0; i < kernel_data.size(); ++i) { + kernel_data[i] = static_cast((i % 5) - 2); + } + ggml_backend_tensor_set(kernel, kernel_data.data(), 0, kernel_data.size() * sizeof(float)); + + int64_t ne_out[4] = { + conv_out_size(ne_input[0], ne_kernel[0], stride, 
padding, dilation), + conv_out_size(ne_input[1], ne_kernel[1], stride, padding, dilation), + ne_input[2], + ne_input[3], + }; + + std::vector expected_data(ggml_nelements(expected), 0.0f); + for (int64_t n = 0; n < ne_input[3]; ++n) { + for (int64_t c = 0; c < ne_input[2]; ++c) { + for (int64_t oy = 0; oy < ne_out[1]; ++oy) { + for (int64_t ox = 0; ox < ne_out[0]; ++ox) { + float sum = 0.0f; + for (int64_t ky = 0; ky < ne_kernel[1]; ++ky) { + const int64_t in_y = wrap_coord_circular( + oy * stride + ky * dilation - padding, ne_input[1]); + for (int64_t kx = 0; kx < ne_kernel[0]; ++kx) { + const int64_t in_x = wrap_coord_circular( + ox * stride + kx * dilation - padding, ne_input[0]); + const int64_t src_idx = offset4d(ne_input.data(), in_x, in_y, c, n); + const int64_t ker_idx = offset4d(ne_kernel.data(), kx, ky, 0, c); + sum += input_data[src_idx] * kernel_data[ker_idx]; + } + } + expected_data[offset4d(ne_out, ox, oy, c, n)] = sum; + } + } + } + } + + ggml_backend_tensor_set(expected, expected_data.data(), 0, expected_data.size() * sizeof(float)); + } + + double max_nmse_err() override { + return 1e-8; + } +}; + +struct test_conv_2d_circular_pipeline : public test_case { + const std::array ne_input{6, 5, 3, 2}; + const std::array ne_kernel{3, 3, 3, 4}; + const int stride0 = 2; + const int stride1 = 1; + const int padding0 = 1; + const int padding1 = 2; + const int dilation0 = 1; + const int dilation1 = 2; + + ggml_tensor * input = nullptr; + ggml_tensor * kernel = nullptr; + + std::string vars() override { + return "conv2d_circular_vs_pipeline"; + } + + ggml_tensor * build_graph(ggml_context * ctx) override { + input = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne_input.data()); + kernel = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne_kernel.data()); + ggml_set_name(input, "input"); + ggml_set_name(kernel, "kernel"); + + ggml_tensor * actual = ggml_conv_2d_circular( + ctx, kernel, input, stride0, stride1, padding0, padding1, dilation0, dilation1); + ggml_set_name(actual, "actual"); + + ggml_tensor * padded = ggml_pad_ext_circular(ctx, input, padding0, padding0, padding1, padding1, 0, 0, 0, 0); + ggml_set_name(padded, "padded"); + ggml_tensor * reference = ggml_conv_2d(ctx, kernel, padded, stride0, stride1, 0, 0, dilation0, dilation1); + ggml_set_name(reference, "reference"); + + ggml_tensor * diff = ggml_sub(ctx, actual, reference); + ggml_tensor * sq = ggml_sqr(ctx, diff); + ggml_tensor * loss = ggml_sum(ctx, sq); + ggml_set_name(loss, "loss"); + return loss; + } + + void initialize_tensors(ggml_context * ctx) override { + test_case::initialize_tensors(ctx); + + std::vector input_data(ggml_nelements(input)); + for (size_t i = 0; i < input_data.size(); ++i) { + input_data[i] = static_cast(std::fmod(static_cast(i * 3 + 1), 17.0)); + } + ggml_backend_tensor_set(input, input_data.data(), 0, input_data.size() * sizeof(float)); + + std::vector kernel_data(ggml_nelements(kernel)); + for (size_t i = 0; i < kernel_data.size(); ++i) { + kernel_data[i] = static_cast(std::fmod(static_cast(i * 7 + 3), 11.0) - 5.0); + } + ggml_backend_tensor_set(kernel, kernel_data.data(), 0, kernel_data.size() * sizeof(float)); + } + + double max_nmse_err() override { + return 1e-8; + } +}; + // GGML_OP_CONV_3D struct test_conv_3d : public test_case { // Logical 5D dimensions @@ -5288,26 +5598,43 @@ struct test_acc : public test_case { }; // GGML_OP_PAD +static inline int64_t wrap_coord_circular(int64_t coord, int64_t size) { + GGML_ASSERT(size > 0); + const int64_t mod = coord % size; + return mod < 0 ? 
mod + size : mod; +} + +static inline int64_t offset4d(const int64_t ne[4], int64_t i0, int64_t i1, int64_t i2, int64_t i3) { + return ((i3 * ne[2] + i2) * ne[1] + i1) * ne[0] + i0; +} + +static inline int64_t conv_out_size(int64_t ins, int64_t ks, int stride, int pad, int dilation) { + return (ins + 2 * pad - dilation * (ks - 1) - 1) / stride + 1; +} + struct test_pad : public test_case { const ggml_type type; const std::array ne_a; const int pad_0; const int pad_1; + const bool circular; std::string vars() override { - return VARS_TO_STR4(type, ne_a, pad_0, pad_1); + return VARS_TO_STR5(type, ne_a, pad_0, pad_1, circular); } test_pad(ggml_type type = GGML_TYPE_F32, std::array ne_a = {512, 512, 1, 1}, - int pad_0 = 1, int pad_1 = 1) - : type(type), ne_a(ne_a), pad_0(pad_0), pad_1(pad_1) {} + int pad_0 = 1, int pad_1 = 1, bool circular = false) + : type(type), ne_a(ne_a), pad_0(pad_0), pad_1(pad_1), circular(circular) {} ggml_tensor * build_graph(ggml_context * ctx) override { ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne_a.data()); ggml_set_name(a, "a"); - ggml_tensor * out = ggml_pad(ctx, a, pad_0, pad_1, 0, 0); + ggml_tensor * out = circular + ? ggml_pad_circular(ctx, a, pad_0, pad_1, 0, 0) + : ggml_pad(ctx, a, pad_0, pad_1, 0, 0); ggml_set_name(out, "out"); return out; @@ -5326,17 +5653,19 @@ struct test_pad_ext : public test_case { const int lp3; const int rp3; const bool v; + const bool circular; std::string vars() override { - return VARS_TO_STR11(type, ne_a, lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, v); + return VARS_TO_STR12(type, ne_a, lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, v, circular); } test_pad_ext(ggml_type type = GGML_TYPE_F32, std::array ne_a = {512, 512, 3, 1}, int lp0 = 1, int rp0 = 1, int lp1 = 1, int rp1 = 1, int lp2 = 1, int rp2 = 1, int lp3 = 1, int rp3 = 1, - bool v = false) - : type(type), ne_a(ne_a), lp0(lp0), rp0(rp0), lp1(lp1), rp1(rp1), lp2(lp2), rp2(rp2), lp3(lp3), rp3(rp3), v(v) {} + bool v = false, bool circular = false) + : type(type), ne_a(ne_a), lp0(lp0), rp0(rp0), lp1(lp1), rp1(rp1), lp2(lp2), rp2(rp2), lp3(lp3), rp3(rp3), + v(v), circular(circular) {} ggml_tensor * build_graph(ggml_context * ctx) override { ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne_a.data()); @@ -5347,7 +5676,9 @@ struct test_pad_ext : public test_case { ggml_set_name(a, "view of a"); } - ggml_tensor * out = ggml_pad_ext(ctx, a, lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3); + ggml_tensor * out = circular + ? 
ggml_pad_ext_circular(ctx, a, lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3) + : ggml_pad_ext(ctx, a, lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3); ggml_set_name(out, "out"); return out; @@ -5355,6 +5686,83 @@ struct test_pad_ext : public test_case { }; // GGML_OP_PAD_REFLECT_1D + +struct test_pad_ext_circular_manual : public test_case { + const std::array ne_src{4, 3, 1, 1}; + const std::array pads_l{1, 2, 0, 0}; + const std::array pads_r{2, 1, 0, 0}; + + ggml_tensor * input = nullptr; + ggml_tensor * expected = nullptr; + + std::string vars() override { + return "manual_pad_ext_circular"; + } + + ggml_tensor * build_graph(ggml_context * ctx) override { + input = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne_src.data()); + ggml_set_name(input, "input"); + + ggml_tensor * actual = ggml_pad_ext_circular(ctx, input, + pads_l[0], pads_r[0], pads_l[1], pads_r[1], pads_l[2], pads_r[2], pads_l[3], pads_r[3]); + ggml_set_name(actual, "actual"); + + int64_t ne_dst[4] = { + ne_src[0] + pads_l[0] + pads_r[0], + ne_src[1] + pads_l[1] + pads_r[1], + ne_src[2] + pads_l[2] + pads_r[2], + ne_src[3] + pads_l[3] + pads_r[3], + }; + + expected = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne_dst); + ggml_set_name(expected, "expected"); + + ggml_tensor * diff = ggml_sub(ctx, actual, expected); + ggml_tensor * sq = ggml_sqr(ctx, diff); + ggml_tensor * loss = ggml_sum(ctx, sq); + ggml_set_name(loss, "loss"); + return loss; + } + + void initialize_tensors(ggml_context * ctx) override { + test_case::initialize_tensors(ctx); + + std::vector src_data(ggml_nelements(input)); + for (size_t i = 0; i < src_data.size(); ++i) { + src_data[i] = static_cast(i + 1); + } + ggml_backend_tensor_set(input, src_data.data(), 0, src_data.size() * sizeof(float)); + + int64_t ne_dst[4] = { + ne_src[0] + pads_l[0] + pads_r[0], + ne_src[1] + pads_l[1] + pads_r[1], + ne_src[2] + pads_l[2] + pads_r[2], + ne_src[3] + pads_l[3] + pads_r[3], + }; + + std::vector exp_data(ggml_nelements(expected)); + for (int64_t i3 = 0; i3 < ne_dst[3]; ++i3) { + for (int64_t i2 = 0; i2 < ne_dst[2]; ++i2) { + for (int64_t i1 = 0; i1 < ne_dst[1]; ++i1) { + for (int64_t i0 = 0; i0 < ne_dst[0]; ++i0) { + const int64_t src_i0 = wrap_coord_circular(i0 - pads_l[0], ne_src[0]); + const int64_t src_i1 = wrap_coord_circular(i1 - pads_l[1], ne_src[1]); + const int64_t src_i2 = wrap_coord_circular(i2 - pads_l[2], ne_src[2]); + const int64_t src_i3 = wrap_coord_circular(i3 - pads_l[3], ne_src[3]); + exp_data[offset4d(ne_dst, i0, i1, i2, i3)] = + src_data[offset4d(ne_src.data(), src_i0, src_i1, src_i2, src_i3)]; + } + } + } + } + ggml_backend_tensor_set(expected, exp_data.data(), 0, exp_data.size() * sizeof(float)); + } + + double max_nmse_err() override { + return 1e-8; + } +}; + struct test_pad_reflect_1d : public test_case { const ggml_type type; const std::array ne_a; @@ -6477,10 +6885,23 @@ static std::vector> make_test_cases_eval() { // test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, {1024, 1024, 256, 1}, {3, 3, 256, 1}, 1, 1, 1, 1, 1, 1, true)); // test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F32, {1024, 1024, 256, 1}, {3, 3, 256, 1}, 1, 1, 1, 1, 1, 1, true)); + test_cases.emplace_back(new test_conv_2d({37, 29, 3, 2}, {3, 2, 3, 5}, GGML_TYPE_F32, 1, 1, 4, 3, 1, 1, false, true)); + test_cases.emplace_back(new test_conv_2d({19, 23, 4, 1}, {5, 3, 4, 4}, GGML_TYPE_F16, 2, 1, 3, 2, 1, 2, false, true)); + test_cases.emplace_back(new test_conv_2d({16, 18, 6, 3}, {3, 3, 6, 8}, GGML_TYPE_F32, 1, 2, 2, 3, 1, 1, true, 
true)); + + test_cases.emplace_back(new test_conv_2d_im2col()); + test_cases.emplace_back(new test_conv_2d_im2col({17, 13, 6, 2}, {3, 3, 6, 4}, GGML_TYPE_F32, 1, 2, 2, 3, 1, 1, true)); + test_cases.emplace_back(new test_conv_2d_im2col({11, 7, 2, 1}, {3, 3, 2, 3}, GGML_TYPE_F16, 1, 1, 1, 1, 2, 1, true)); + test_cases.emplace_back(new test_conv_2d_direct_circular_manual()); + test_cases.emplace_back(new test_conv_2d_dw({17, 34, 9, 1}, {3, 3, 1, 9}, 1, 0, 1, false)); test_cases.emplace_back(new test_conv_2d_dw({17, 34, 9, 1}, {3, 3, 1, 9}, 1, 0, 1, true)); test_cases.emplace_back(new test_conv_2d_dw({32, 8, 64, 1}, {3, 3, 1, 64}, 2, 1, 1, false)); test_cases.emplace_back(new test_conv_2d_dw({32, 8, 64, 1}, {3, 3, 1, 64}, 2, 1, 1, true)); + test_cases.emplace_back(new test_conv_2d_dw({29, 19, 8, 2}, {5, 3, 1, 8}, 1, 2, 1, false, true)); + test_cases.emplace_back(new test_conv_2d_dw({24, 14, 16, 1}, {3, 3, 1, 16}, 2, 1, 2, true, true)); + test_cases.emplace_back(new test_conv_2d_dw_direct_circular_manual()); + test_cases.emplace_back(new test_conv_2d_circular_pipeline()); // CONV_3D auto calc_conv_output_size_3d = [](int64_t ins, int64_t ks, int s, int p, int d) -> int64_t { @@ -7214,7 +7635,10 @@ static std::vector> make_test_cases_eval() { test_cases.emplace_back(new test_group_norm_mul_add(GGML_TYPE_F32, {9, 9, 1280, 1})); test_cases.emplace_back(new test_acc()); test_cases.emplace_back(new test_pad()); + test_cases.emplace_back(new test_pad_ext_circular_manual()); + test_cases.emplace_back(new test_pad(GGML_TYPE_F32, {33, 17, 2, 1}, 4, 3, true)); test_cases.emplace_back(new test_pad_ext()); + test_cases.emplace_back(new test_pad_ext(GGML_TYPE_F32, {19, 11, 5, 2}, 2, 4, 1, 3, 0, 0, 0, 0, false, true)); test_cases.emplace_back(new test_pad_reflect_1d()); test_cases.emplace_back(new test_pad_reflect_1d(GGML_TYPE_F32, {3000, 384, 4, 1})); test_cases.emplace_back(new test_roll()); @@ -7225,6 +7649,7 @@ static std::vector> make_test_cases_eval() { for (bool v : {false, true}) { test_cases.emplace_back(new test_pad_ext(GGML_TYPE_F32, {512, 512, 1, 1}, 0, 1, 0, 1, 0, 0, 0, 0, v)); test_cases.emplace_back(new test_pad_ext(GGML_TYPE_F32, {11, 22, 33, 44}, 1, 2, 3, 4, 5, 6, 7, 8, v)); + test_cases.emplace_back(new test_pad_ext(GGML_TYPE_F32, {23, 17, 7, 3}, 2, 1, 3, 0, 1, 2, 0, 0, v, true)); } for (int hsk : { 40, 64, 72, 80, 96, 128, 192, 256, 576 }) { From 5700a4e7e92200a3119d36a4719390c787cc1831 Mon Sep 17 00:00:00 2001 From: bepis Date: Mon, 3 Nov 2025 15:37:57 -0800 Subject: [PATCH 05/39] Added tests --- tests/test-backend-ops.cpp | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp index 4e0546cb4d8..29b2a77b60e 100644 --- a/tests/test-backend-ops.cpp +++ b/tests/test-backend-ops.cpp @@ -4632,6 +4632,20 @@ struct test_conv_2d_dw : public test_case { } }; +static inline int64_t conv_out_size(int64_t ins, int64_t ks, int stride, int pad, int dilation) { + return (ins + 2 * pad - dilation * (ks - 1) - 1) / stride + 1; +} +// GGML_OP_PAD +static inline int64_t wrap_coord_circular(int64_t coord, int64_t size) { + GGML_ASSERT(size > 0); + const int64_t mod = coord % size; + return mod < 0 ? 
mod + size : mod; +} + +static inline int64_t offset4d(const int64_t ne[4], int64_t i0, int64_t i1, int64_t i2, int64_t i3) { + return ((i3 * ne[2] + i2) * ne[1] + i1) * ne[0] + i0; +} + struct test_conv_2d_direct_circular_manual : public test_case { const std::array ne_input{5, 4, 1, 1}; const std::array ne_kernel{3, 3, 1, 1}; @@ -5597,20 +5611,7 @@ struct test_acc : public test_case { } }; -// GGML_OP_PAD -static inline int64_t wrap_coord_circular(int64_t coord, int64_t size) { - GGML_ASSERT(size > 0); - const int64_t mod = coord % size; - return mod < 0 ? mod + size : mod; -} -static inline int64_t offset4d(const int64_t ne[4], int64_t i0, int64_t i1, int64_t i2, int64_t i3) { - return ((i3 * ne[2] + i2) * ne[1] + i1) * ne[0] + i0; -} - -static inline int64_t conv_out_size(int64_t ins, int64_t ks, int stride, int pad, int dilation) { - return (ins + 2 * pad - dilation * (ks - 1) - 1) / stride + 1; -} struct test_pad : public test_case { const ggml_type type; From 9861a3d0c480509819abf6a67bf6a9cb8a8a2239 Mon Sep 17 00:00:00 2001 From: bepis Date: Fri, 14 Nov 2025 16:35:13 -0800 Subject: [PATCH 06/39] Removed non-pad operations --- ggml/include/ggml.h | 61 +--- ggml/src/ggml-cpu/ops.cpp | 270 ++++++------------ ggml/src/ggml-cuda/conv2d-dw.cu | 71 ++--- ggml/src/ggml-cuda/conv2d-transpose.cu | 84 ++---- ggml/src/ggml-cuda/conv2d.cu | 62 +--- ggml/src/ggml-vulkan/ggml-vulkan.cpp | 10 - .../ggml-vulkan/vulkan-shaders/conv2d_dw.comp | 76 ++--- .../ggml-vulkan/vulkan-shaders/conv2d_mm.comp | 15 +- ggml/src/ggml.c | 95 +----- 9 files changed, 165 insertions(+), 579 deletions(-) diff --git a/ggml/include/ggml.h b/ggml/include/ggml.h index c215fc2bcad..1b714d99214 100644 --- a/ggml/include/ggml.h +++ b/ggml/include/ggml.h @@ -1943,18 +1943,6 @@ extern "C" { int d0, // dilation dimension 0 int d1); // dilation dimension 1 - - GGML_API struct ggml_tensor * ggml_conv_2d_circular( - struct ggml_context * ctx, - struct ggml_tensor * a, // convolution kernel - struct ggml_tensor * b, // data - int s0, // stride dimension 0 - int s1, // stride dimension 1 - int p0, // padding dimension 0 - int p1, // padding dimension 1 - int d0, // dilation dimension 0 - int d1); // dilation dimension 1 - GGML_API struct ggml_tensor * ggml_im2col_3d( struct ggml_context * ctx, struct ggml_tensor * a, @@ -2028,19 +2016,6 @@ extern "C" { int d0, // dilation dimension 0 int d1); // dilation dimension 1 - - // depthwise (via im2col and mul_mat) - GGML_API struct ggml_tensor * ggml_conv_2d_dw_circular( - struct ggml_context * ctx, - struct ggml_tensor * a, // convolution kernel - struct ggml_tensor * b, // data - int s0, // stride dimension 0 - int s1, // stride dimension 1 - int p0, // padding dimension 0 - int p1, // padding dimension 1 - int d0, // dilation dimension 0 - int d1); // dilation dimension 1 - // Depthwise 2D convolution // may be faster than ggml_conv_2d_dw, but not available in all backends // a: KW KH 1 C convolution kernel @@ -2057,35 +2032,12 @@ extern "C" { int dilation0, int dilation1); - // Depthwise 2D convolution (on a torus) - // may be faster than ggml_conv_2d_dw, but not available in all backends - // a: KW KH 1 C convolution kernel - // b: W H C N input data - // res: W_out H_out C N - GGML_API struct ggml_tensor * ggml_conv_2d_dw_direct_circular( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - int stride0, - int stride1, - int pad0, - int pad1, - int dilation0, - int dilation1); - GGML_API struct ggml_tensor * ggml_conv_transpose_2d_p0( struct ggml_context * 
ctx, struct ggml_tensor * a, struct ggml_tensor * b, int stride); - // circular (on a torus) - GGML_API struct ggml_tensor * ggml_conv_transpose_2d_p0_circular( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - int stride); - GGML_API struct ggml_tensor * ggml_conv_2d_direct( struct ggml_context * ctx, struct ggml_tensor * a, // convolution kernel [KW, KH, IC, OC] @@ -2096,17 +2048,6 @@ extern "C" { int p1, // padding dimension 1 int d0, // dilation dimension 0 int d1); // dilation dimension 1 - - GGML_API struct ggml_tensor * ggml_conv_2d_direct_circular( - struct ggml_context * ctx, - struct ggml_tensor * a, // convolution kernel [KW, KH, IC, OC] - struct ggml_tensor * b, // input data [W, H, C, N] - int s0, // stride dimension 0 - int s1, // stride dimension 1 - int p0, // padding dimension 0 - int p1, // padding dimension 1 - int d0, // dilation dimension 0 - int d1); // dilation dimension 1 GGML_API struct ggml_tensor * ggml_conv_3d_direct( struct ggml_context * ctx, @@ -2238,7 +2179,7 @@ extern "C" { int rp3 ); - // circular padding + // pad each dimension with values on the other side of the torus (looping around) GGML_API struct ggml_tensor * ggml_pad_ext_circular( struct ggml_context * ctx, struct ggml_tensor * a, diff --git a/ggml/src/ggml-cpu/ops.cpp b/ggml/src/ggml-cpu/ops.cpp index 700d2f970ba..4058095b8ff 100644 --- a/ggml/src/ggml-cpu/ops.cpp +++ b/ggml/src/ggml-cpu/ops.cpp @@ -6432,6 +6432,7 @@ static inline int64_t ggml_wrap_coord(int64_t coord, int64_t size) { // ggml_compute_forward_conv_2d + static void ggml_compute_forward_conv_2d_impl(const ggml_compute_params * params, const ggml_tensor * kernel, // [KW, KH, IC, OC] const ggml_tensor * src, // [W, H, C, N] @@ -6450,7 +6451,6 @@ static void ggml_compute_forward_conv_2d_impl(const ggml_compute_params * params const int32_t pad_y = dst->op_params[3]; const int32_t dilation_x = dst->op_params[4]; const int32_t dilation_y = dst->op_params[5]; - const bool circular = dst->op_params[6]; const int64_t c_in = src->ne[2]; const int64_t c_out = kernel->ne[3]; @@ -6491,73 +6491,40 @@ static void ggml_compute_forward_conv_2d_impl(const ggml_compute_params * params const int64_t patch_end = std::min(patch_start + patch_per_thread, patch_end_batch); //im2col for a patch - if (circular == 0) { - for (int64_t p = patch_start; p < patch_end; ++p) { - const int64_t batch_n = p / (dst_w * dst_h); - const int64_t src_x = (p / dst_w) % dst_h; - const int64_t src_y = p % dst_w; - - const float * src_base = (const float *)((const char *)src_data + batch_n * src->nb[3]); - char * dst_row = (char *) tmp + (p % patches_per_batch) * knl_n * traits->type_size; + for (int64_t p = patch_start; p < patch_end; ++p) { + const int64_t batch_n = p / (dst_w * dst_h); + const int64_t src_x = (p / dst_w) % dst_h; + const int64_t src_y = p % dst_w; - for (int64_t ic = 0; ic < c_in; ++ic) { - for (int64_t ky = 0; ky < knl_h; ++ky) { - for (int64_t kx = 0; kx < knl_w; ++kx) { - const int64_t sy = src_x * stride_y + ky * dilation_y - pad_y; - const int64_t sx = src_y * stride_x + kx * dilation_x - pad_x; + const float * src_base = (const float *)((const char *)src_data + batch_n * src->nb[3]); + char * dst_row = (char *) tmp + (p % patches_per_batch) * knl_n * traits->type_size; - int64_t dst_idx = ic * (knl_h * knl_w) + ky * knl_w + kx; + for (int64_t ic = 0; ic < c_in; ++ic) { + for (int64_t ky = 0; ky < knl_h; ++ky) { + for (int64_t kx = 0; kx < knl_w; ++kx) { + const int64_t sy = src_x * stride_y + ky * dilation_y - pad_y; 
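+                        // Plain zero padding: out-of-range (sy, sx) fall through the bounds
+                        // check below and write 0.0f into the im2col row (no wrap-around).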
+ const int64_t sx = src_y * stride_x + kx * dilation_x - pad_x; - float src_val; - if (sy < 0 || sy >= src_h || sx < 0 || sx >= src_w) { - src_val = 0.0f; - } else { - const float * src_ptr = (const float *)((const char *)src_base + sx * src->nb[0] + sy * src->nb[1] + ic * src->nb[2]); - src_val = *src_ptr; - } + int64_t dst_idx = ic * (knl_h * knl_w) + ky * knl_w + kx; - char * element_ptr = dst_row + dst_idx * traits->type_size; - if (kernel_type == GGML_TYPE_F32) { - *(float *) element_ptr = src_val; - } else if (kernel_type == GGML_TYPE_F16) { - *(ggml_fp16_t *) element_ptr = GGML_CPU_FP32_TO_FP16(src_val); - } + float src_val; + if (sy < 0 || sy >= src_h || sx < 0 || sx >= src_w) { + src_val = 0.0f; + } else { + const float * src_ptr = (const float *)((const char *)src_base + sx * src->nb[0] + sy * src->nb[1] + ic * src->nb[2]); + src_val = *src_ptr; } - } - } - } // patches handled by this thread - } - else { - for (int64_t p = patch_start; p < patch_end; ++p) { - const int64_t batch_n = p / (dst_w * dst_h); - const int64_t src_x = (p / dst_w) % dst_h; - const int64_t src_y = p % dst_w; - const float * src_base = (const float *)((const char *)src_data + batch_n * src->nb[3]); - char * dst_row = (char *) tmp + (p % patches_per_batch) * knl_n * traits->type_size; - - for (int64_t ic = 0; ic < c_in; ++ic) { - for (int64_t ky = 0; ky < knl_h; ++ky) { - for (int64_t kx = 0; kx < knl_w; ++kx) { - const int64_t sy = ggml_wrap_coord(src_x * stride_y + ky * dilation_y - pad_y, src_h); - const int64_t sx = ggml_wrap_coord(src_y * stride_x + kx * dilation_x - pad_x, src_w); - - int64_t dst_idx = ic * (knl_h * knl_w) + ky * knl_w + kx; - - const float * src_ptr = (const float *)((const char *)src_base + sx * src->nb[0] + sy * src->nb[1] + ic * src->nb[2]); - float src_val = *src_ptr; - char * element_ptr = dst_row + dst_idx * traits->type_size; - if (kernel_type == GGML_TYPE_F32) { - *(float *) element_ptr = src_val; - } else if (kernel_type == GGML_TYPE_F16) { - *(ggml_fp16_t *) element_ptr = GGML_CPU_FP32_TO_FP16(src_val); - } + char * element_ptr = dst_row + dst_idx * traits->type_size; + if (kernel_type == GGML_TYPE_F32) { + *(float *) element_ptr = src_val; + } else if (kernel_type == GGML_TYPE_F16) { + *(ggml_fp16_t *) element_ptr = GGML_CPU_FP32_TO_FP16(src_val); } } } - } // patches handled by this thread - } - + } + } // patches handled by this thread ggml_barrier(params->threadpool); @@ -6856,7 +6823,6 @@ struct ggml_conv_2d_dw_params { int pad_y; int dilation_x; int dilation_y; - int circular; }; static void ggml_compute_forward_conv_2d_dw_cwhn( @@ -6886,103 +6852,57 @@ static void ggml_compute_forward_conv_2d_dw_cwhn( const int64_t c_pkg_end = 0; #endif - const int64_t circular = p.circular; + for (int64_t row = row_start; row < row_end; ++row) { + const int64_t dst_y = row % p.dst_h; + const float * src_data = (const float *)src->data + (row / p.dst_h) * p.src_w * p.src_h * c; + for (int64_t dst_x = 0; dst_x < p.dst_w; ++dst_x) { + float * dst_data = (float *)dst->data + (row * p.dst_w + dst_x) * c; + const int64_t src_y_base = dst_y * p.stride_y - p.pad_y; + const int64_t src_x_base = dst_x * p.stride_x - p.pad_x; - if (circular == 0) { - for (int64_t row = row_start; row < row_end; ++row) { - const int64_t dst_y = row % p.dst_h; - const float * src_data = (const float *)src->data + (row / p.dst_h) * p.src_w * p.src_h * c; - for (int64_t dst_x = 0; dst_x < p.dst_w; ++dst_x) { - float * dst_data = (float *)dst->data + (row * p.dst_w + dst_x) * c; - const int64_t src_y_base = dst_y 
* p.stride_y - p.pad_y; - const int64_t src_x_base = dst_x * p.stride_x - p.pad_x; - - #ifdef GGML_SIMD - // Vectorized loop - for (int64_t c_i = 0; c_i < c_pkg_end; c_i += pkg_size) { - GGML_F32_VEC sum = GGML_F32_VEC_ZERO; - for (int64_t knl_y = 0; knl_y < p.knl_h; ++knl_y) { - const int64_t src_y = src_y_base + knl_y * p.dilation_y; - if (src_y < 0 || src_y >= p.src_h) { - continue; - } - for (int64_t knl_x = 0; knl_x < p.knl_w; ++knl_x) { - const int64_t src_x = src_x_base + knl_x * p.dilation_x; - if (src_x < 0 || src_x >= p.src_w) { - continue; - } - GGML_F32_VEC k = GGML_F32_VEC_LOAD(knl_data + (knl_y * p.knl_w + knl_x) * c + c_i); - GGML_F32_VEC s = GGML_F32_VEC_LOAD(src_data + (src_y * p.src_w + src_x) * c + c_i); - sum = GGML_F32_VEC_FMA(sum, k, s); - } +#ifdef GGML_SIMD + // Vectorized loop + for (int64_t c_i = 0; c_i < c_pkg_end; c_i += pkg_size) { + GGML_F32_VEC sum = GGML_F32_VEC_ZERO; + for (int64_t knl_y = 0; knl_y < p.knl_h; ++knl_y) { + const int64_t src_y = src_y_base + knl_y * p.dilation_y; + if (src_y < 0 || src_y >= p.src_h) { + continue; } - GGML_F32_VEC_STORE(dst_data + c_i, sum); - } - #endif - // Scalar loop - for (int64_t c_i = c_pkg_end; c_i < c; ++c_i) { - float sum = 0.0f; - for (int64_t knl_y = 0; knl_y < p.knl_h; ++knl_y) { - const int64_t src_y = src_y_base + knl_y * p.dilation_y; - if (src_y < 0 || src_y >= p.src_h) { + for (int64_t knl_x = 0; knl_x < p.knl_w; ++knl_x) { + const int64_t src_x = src_x_base + knl_x * p.dilation_x; + if (src_x < 0 || src_x >= p.src_w) { continue; } - for (int64_t knl_x = 0; knl_x < p.knl_w; ++knl_x) { - const int64_t src_x = src_x_base + knl_x * p.dilation_x; - if (src_x < 0 || src_x >= p.src_w) { - continue; - } - sum += knl_data[(knl_y * p.knl_w + knl_x) * c + c_i] - * src_data[(src_y * p.src_w + src_x) * c + c_i]; - } + GGML_F32_VEC k = GGML_F32_VEC_LOAD(knl_data + (knl_y * p.knl_w + knl_x) * c + c_i); + GGML_F32_VEC s = GGML_F32_VEC_LOAD(src_data + (src_y * p.src_w + src_x) * c + c_i); + sum = GGML_F32_VEC_FMA(sum, k, s); } - dst_data[c_i] = sum; } + GGML_F32_VEC_STORE(dst_data + c_i, sum); } - } - } - else { - for (int64_t row = row_start; row < row_end; ++row) { - const int64_t dst_y = row % p.dst_h; - const float * src_data = (const float *)src->data + (row / p.dst_h) * p.src_w * p.src_h * c; - for (int64_t dst_x = 0; dst_x < p.dst_w; ++dst_x) { - float * dst_data = (float *)dst->data + (row * p.dst_w + dst_x) * c; - const int64_t src_y_base = dst_y * p.stride_y - p.pad_y; - const int64_t src_x_base = dst_x * p.stride_x - p.pad_x; - - #ifdef GGML_SIMD - // Vectorized loop - for (int64_t c_i = 0; c_i < c_pkg_end; c_i += pkg_size) { - GGML_F32_VEC sum = GGML_F32_VEC_ZERO; - for (int64_t knl_y = 0; knl_y < p.knl_h; ++knl_y) { - const int64_t src_y = ggml_wrap_coord(src_y_base + knl_y * p.dilation_y, p.src_h); - for (int64_t knl_x = 0; knl_x < p.knl_w; ++knl_x) { - const int64_t src_x = ggml_wrap_coord(src_x_base + knl_x * p.dilation_x, p.src_w); - GGML_F32_VEC k = GGML_F32_VEC_LOAD(knl_data + (knl_y * p.knl_w + knl_x) * c + c_i); - GGML_F32_VEC s = GGML_F32_VEC_LOAD(src_data + (src_y * p.src_w + src_x) * c + c_i); - sum = GGML_F32_VEC_FMA(sum, k, s); - } +#endif + // Scalar loop + for (int64_t c_i = c_pkg_end; c_i < c; ++c_i) { + float sum = 0.0f; + for (int64_t knl_y = 0; knl_y < p.knl_h; ++knl_y) { + const int64_t src_y = src_y_base + knl_y * p.dilation_y; + if (src_y < 0 || src_y >= p.src_h) { + continue; } - GGML_F32_VEC_STORE(dst_data + c_i, sum); - } - #endif - // Scalar loop - for (int64_t c_i = c_pkg_end; c_i < c; 
++c_i) { - float sum = 0.0f; - for (int64_t knl_y = 0; knl_y < p.knl_h; ++knl_y) { - const int64_t src_y = ggml_wrap_coord(src_y_base + knl_y * p.dilation_y, p.src_h); - for (int64_t knl_x = 0; knl_x < p.knl_w; ++knl_x) { - const int64_t src_x = ggml_wrap_coord(src_x_base + knl_x * p.dilation_x, p.src_w); - sum += knl_data[(knl_y * p.knl_w + knl_x) * c + c_i] - * src_data[(src_y * p.src_w + src_x) * c + c_i]; + for (int64_t knl_x = 0; knl_x < p.knl_w; ++knl_x) { + const int64_t src_x = src_x_base + knl_x * p.dilation_x; + if (src_x < 0 || src_x >= p.src_w) { + continue; } + sum += knl_data[(knl_y * p.knl_w + knl_x) * c + c_i] + * src_data[(src_y * p.src_w + src_x) * c + c_i]; } - dst_data[c_i] = sum; } + dst_data[c_i] = sum; } } } - } static void ggml_compute_forward_conv_2d_dw_whcn( @@ -6997,57 +6917,30 @@ static void ggml_compute_forward_conv_2d_dw_whcn( const int64_t start = params->ith * per_thread; const int64_t end = MIN(start + per_thread, n); - const int64_t circular = p.circular; - - if (circular == 0) { - for (int64_t i = start; i < end; ++i) { - const float * knl_data = (const float *)kernel->data + (i % p.channels) * p.knl_w * p.knl_h; - const float * src_data = (const float *)src->data + i * p.src_w * p.src_h; - float * dst_data = (float *)dst->data + i * p.dst_w * p.dst_h; + for (int64_t i = start; i < end; ++i) { + const float * knl_data = (const float *)kernel->data + (i % p.channels) * p.knl_w * p.knl_h; + const float * src_data = (const float *)src->data + i * p.src_w * p.src_h; + float * dst_data = (float *)dst->data + i * p.dst_w * p.dst_h; - for (int64_t dst_y = 0; dst_y < p.dst_h; ++dst_y) { - for (int64_t dst_x = 0; dst_x < p.dst_w; ++dst_x) { + for (int64_t dst_y = 0; dst_y < p.dst_h; ++dst_y) { + for (int64_t dst_x = 0; dst_x < p.dst_w; ++dst_x) { - float sum = 0.0f; - for (int64_t knl_y = 0; knl_y < p.knl_h; ++knl_y) { - const int64_t src_y = dst_y * p.stride_y + knl_y * p.dilation_y - p.pad_y; - if (src_y < 0 || src_y >= p.src_h) { - continue; - } - for (int64_t knl_x = 0; knl_x < p.knl_w; ++knl_x) { - const int64_t src_x = dst_x * p.stride_x + knl_x * p.dilation_x - p.pad_x; - if (src_x < 0 || src_x >= p.src_w) { - continue; - } - sum += knl_data[knl_y * p.knl_w + knl_x] - * src_data[src_y * p.src_w + src_x]; - } + float sum = 0.0f; + for (int64_t knl_y = 0; knl_y < p.knl_h; ++knl_y) { + const int64_t src_y = dst_y * p.stride_y + knl_y * p.dilation_y - p.pad_y; + if (src_y < 0 || src_y >= p.src_h) { + continue; } - dst_data[dst_y * p.dst_w + dst_x] = sum; - } - } - } - } - else { - for (int64_t i = start; i < end; ++i) { - const float * knl_data = (const float *)kernel->data + (i % p.channels) * p.knl_w * p.knl_h; - const float * src_data = (const float *)src->data + i * p.src_w * p.src_h; - float * dst_data = (float *)dst->data + i * p.dst_w * p.dst_h; - - for (int64_t dst_y = 0; dst_y < p.dst_h; ++dst_y) { - for (int64_t dst_x = 0; dst_x < p.dst_w; ++dst_x) { - - float sum = 0.0f; - for (int64_t knl_y = 0; knl_y < p.knl_h; ++knl_y) { - const int64_t src_y = ggml_wrap_coord(dst_y * p.stride_y + knl_y * p.dilation_y - p.pad_y, p.src_h); - for (int64_t knl_x = 0; knl_x < p.knl_w; ++knl_x) { - const int64_t src_x = ggml_wrap_coord(dst_x * p.stride_x + knl_x * p.dilation_x - p.pad_x, p.src_w); - sum += knl_data[knl_y * p.knl_w + knl_x] - * src_data[src_y * p.src_w + src_x]; + for (int64_t knl_x = 0; knl_x < p.knl_w; ++knl_x) { + const int64_t src_x = dst_x * p.stride_x + knl_x * p.dilation_x - p.pad_x; + if (src_x < 0 || src_x >= p.src_w) { + continue; } + sum += 
knl_data[knl_y * p.knl_w + knl_x] + * src_data[src_y * p.src_w + src_x]; } - dst_data[dst_y * p.dst_w + dst_x] = sum; } + dst_data[dst_y * p.dst_w + dst_x] = sum; } } } @@ -7074,7 +6967,6 @@ void ggml_compute_forward_conv_2d_dw( p.pad_y = dst->op_params[3]; p.dilation_x = dst->op_params[4]; p.dilation_y = dst->op_params[5]; - p.circular = dst->op_params[6]; GGML_ASSERT(kernel->ne[3] == p.channels); GGML_ASSERT(dst->ne[3] == p.batch); diff --git a/ggml/src/ggml-cuda/conv2d-dw.cu b/ggml/src/ggml-cuda/conv2d-dw.cu index 255f131a6ae..7583233b1b7 100644 --- a/ggml/src/ggml-cuda/conv2d-dw.cu +++ b/ggml/src/ggml-cuda/conv2d-dw.cu @@ -8,7 +8,6 @@ struct conv_params { int padding_x, padding_y; int dilation_x, dilation_y; int channels, batches; - int circular; }; struct kernel_bounds { @@ -18,23 +17,14 @@ struct kernel_bounds { __device__ __forceinline__ kernel_bounds calculate_kernel_bounds(int out_x, int out_y, const conv_params & params) { kernel_bounds bounds; - if (params.circular) { - bounds.y_min = 0; - bounds.y_max = params.kernel_h; - bounds.x_min = 0; - bounds.x_max = params.kernel_w; - } - else { - bounds.y_min = max(0, (params.padding_y - out_y * params.stride_y + params.dilation_y - 1) / params.dilation_y); - bounds.y_max = - min(params.kernel_h, - (params.in_h + params.padding_y - out_y * params.stride_y + params.dilation_y - 1) / params.dilation_y); - bounds.x_min = max(0, (params.padding_x - out_x * params.stride_x + params.dilation_x - 1) / params.dilation_x); - bounds.x_max = - min(params.kernel_w, - (params.in_w + params.padding_x - out_x * params.stride_x + params.dilation_x - 1) / params.dilation_x); - - } + bounds.y_min = max(0, (params.padding_y - out_y * params.stride_y + params.dilation_y - 1) / params.dilation_y); + bounds.y_max = + min(params.kernel_h, + (params.in_h + params.padding_y - out_y * params.stride_y + params.dilation_y - 1) / params.dilation_y); + bounds.x_min = max(0, (params.padding_x - out_x * params.stride_x + params.dilation_x - 1) / params.dilation_x); + bounds.x_max = + min(params.kernel_w, + (params.in_w + params.padding_x - out_x * params.stride_x + params.dilation_x - 1) / params.dilation_x); return bounds; } @@ -42,10 +32,6 @@ __device__ __forceinline__ int calculate_input_coord(int out_coord, int kern_coo return out_coord * stride + kern_coord * dilation - padding; } -__device__ __forceinline__ int wrap_coord(int coord, int size) { - return (coord % size + size) % size; -} - struct whcn_layout { __device__ static int input_index(int n, int c, int y, int x, const conv_params & params) { return n * (params.channels * params.in_w * params.in_h) + c * params.in_w * params.in_h + y * params.in_w + x; @@ -97,8 +83,7 @@ __global__ void conv2d_dw_kernel(const T * __restrict__ input, const T * __restr const int in_w, const int in_h, const int out_w, const int out_h, const int kernel_w, const int kernel_h, const int stride_x, const int stride_y, const int padding_x, const int padding_y, const int dilation_x, const int dilation_y, - const int channels, const int batches, - const int circular) { + const int channels, const int batches) { const int global_idx = blockIdx.x * blockDim.x + threadIdx.x; const int total_elements = batches * channels * out_h * out_w; @@ -107,7 +92,7 @@ __global__ void conv2d_dw_kernel(const T * __restrict__ input, const T * __restr } conv_params params = { in_w, in_h, out_w, out_h, kernel_w, kernel_h, stride_x, - stride_y, padding_x, padding_y, dilation_x, dilation_y, channels, batches, circular }; + stride_y, padding_x, padding_y, 
dilation_x, dilation_y, channels, batches }; int batch_idx, channel_idx, out_y_idx, out_x_idx; Layout::unpack_indices(global_idx, params, batch_idx, channel_idx, out_y_idx, out_x_idx); @@ -115,35 +100,18 @@ __global__ void conv2d_dw_kernel(const T * __restrict__ input, const T * __restr T accumulator = 0; kernel_bounds bounds = calculate_kernel_bounds(out_x_idx, out_y_idx, params); - if (params.circular == 0) { - for (int kern_y = bounds.y_min; kern_y < bounds.y_max; ++kern_y) { - int src_y_idx = calculate_input_coord(out_y_idx, kern_y, params.stride_y, params.dilation_y, params.padding_y); - - for (int kern_x = bounds.x_min; kern_x < bounds.x_max; ++kern_x) { - int src_x_idx = calculate_input_coord(out_x_idx, kern_x, params.stride_x, params.dilation_x, params.padding_x); - - const T input_val = input[Layout::input_index(batch_idx, channel_idx, src_y_idx, src_x_idx, params)]; - const T kernel_val = kernel[Layout::kernel_index(channel_idx, kern_y, kern_x, params)]; - - accumulator += input_val * kernel_val; - } - } - } - else { - for (int kern_y = bounds.y_min; kern_y < bounds.y_max; ++kern_y) { - int in_y_idx = wrap_coord(calculate_input_coord(out_y_idx, kern_y, params.stride_y, params.dilation_y, params.padding_y), params.in_h); + for (int kern_y = bounds.y_min; kern_y < bounds.y_max; ++kern_y) { + int in_y_idx = calculate_input_coord(out_y_idx, kern_y, params.stride_y, params.dilation_y, params.padding_y); - for (int kern_x = bounds.x_min; kern_x < bounds.x_max; ++kern_x) { - int in_x_idx = wrap_coord(calculate_input_coord(out_x_idx, kern_x, params.stride_x, params.dilation_x, params.padding_x), params.in_w); + for (int kern_x = bounds.x_min; kern_x < bounds.x_max; ++kern_x) { + int in_x_idx = calculate_input_coord(out_x_idx, kern_x, params.stride_x, params.dilation_x, params.padding_x); - const T input_val = input[Layout::input_index(batch_idx, channel_idx, src_y_idx, src_x_idx, params)]; - const T kernel_val = kernel[Layout::kernel_index(channel_idx, kern_y, kern_x, params)]; + const T input_val = input[Layout::input_index(batch_idx, channel_idx, in_y_idx, in_x_idx, params)]; + const T kernel_val = kernel[Layout::kernel_index(channel_idx, kern_y, kern_x, params)]; - accumulator += input_val * kernel_val; - } + accumulator += input_val * kernel_val; } } - output[Layout::output_index(batch_idx, channel_idx, out_y_idx, out_x_idx, params)] = accumulator; } @@ -164,7 +132,6 @@ void ggml_cuda_op_conv2d_dw(ggml_backend_cuda_context & ctx, ggml_tensor * dst) const int padding_y = p[3]; const int dilation_x = p[4]; const int dilation_y = p[5]; - const int circular = p[6]; const int in_w = input->ne[0]; const int in_h = input->ne[1]; @@ -183,11 +150,11 @@ void ggml_cuda_op_conv2d_dw(ggml_backend_cuda_context & ctx, ggml_tensor * dst) if (ggml_is_contiguous(input)) { conv2d_dw_kernel<<>>( x_d, w_d, y_d, in_w, in_h, out_w, out_h, kernel_w, kernel_h, stride_x, stride_y, padding_x, padding_y, - dilation_x, dilation_y, channels, batches, circular); + dilation_x, dilation_y, channels, batches); } else if (ggml_is_contiguous_channels(input)) { conv2d_dw_kernel<<>>( x_d, w_d, y_d, in_w, in_h, out_w, out_h, kernel_w, kernel_h, stride_x, stride_y, padding_x, padding_y, - dilation_x, dilation_y, channels, batches, circular); + dilation_x, dilation_y, channels, batches); } else { GGML_ABORT("Unsupported memory layout for conv_2d_dw"); } diff --git a/ggml/src/ggml-cuda/conv2d-transpose.cu b/ggml/src/ggml-cuda/conv2d-transpose.cu index 253f1dc3613..eff6313f6b1 100644 --- 
a/ggml/src/ggml-cuda/conv2d-transpose.cu +++ b/ggml/src/ggml-cuda/conv2d-transpose.cu @@ -3,16 +3,10 @@ #include "conv2d-transpose.cuh" #include "ggml.h" - -__device__ __forceinline__ int wrap_coord(int coord, int size) { - return (coord % size + size) % size; -} - __global__ void conv2d_transpose_kernel(const float * __restrict__ input, const half * __restrict__ kernel, float * __restrict__ output, const int in_w, const int in_h, const int out_w, const int out_h, const int kernel_w, const int kernel_h, const int stride, - const int c_in, const int c_out, const int batches, - const int circular) { + const int c_in, const int c_out, const int batches) { const int global_idx = blockIdx.x * blockDim.x + threadIdx.x; const int total_elements = out_w * out_h * c_out * batches; @@ -28,55 +22,28 @@ __global__ void conv2d_transpose_kernel(const float * __restrict__ input, const float accumulator = 0; // For each output idx, find the inputs that contribute to it by checking stride alignment and bounds - if (circular == 0) { - for (int c_in_idx = 0; c_in_idx < c_in; c_in_idx++) { - for (int kh = 0; kh < kernel_h; ++kh) { - int in_y = out_y_idx - kh; - if (in_y < 0 || in_y % stride) continue; - in_y /= stride; - if (in_y >= in_h) continue; - - for (int kw = 0; kw < kernel_w; ++kw) { - int in_x = out_x_idx - kw; - if (in_x < 0 || in_x % stride) continue; - in_x /= stride; - if (in_x >= in_w) continue; - - const int input_idx = (in_w * in_h * c_in) * n_idx + (in_w * in_h) * c_in_idx + (in_w) *in_y + in_x; - const int kernel_idx = - (kernel_h * kernel_w * c_out) * c_in_idx + (kernel_h * kernel_w) * c_idx + (kernel_w) *kh + kw; - - float input_val = input[input_idx]; - half kern_val = kernel[kernel_idx]; - - accumulator += input_val * (float) kern_val; - } - } - } - } - else { - for (int c_in_idx = 0; c_in_idx < c_in; c_in_idx++) { - for (int kh = 0; kh < kernel_h; ++kh) { - int in_y = out_y_idx - kh; - if (in_y % stride) continue; - in_y /= stride; - in_y = wrap_coord(in_y, in_h); - - for (int kw = 0; kw < kernel_w; ++kw) { - int in_x = out_x_idx - kw; - if (in_x % stride) continue; - in_x /= stride; - in_x = wrap_coord(in_x, in_w); - - const int input_idx = (in_w * in_h * c_in) * n_idx + (in_w * in_h) * c_in_idx + (in_w) *in_y + in_x; - const int kernel_idx = - (kernel_h * kernel_w * c_out) * c_in_idx + (kernel_h * kernel_w) * c_idx + (kernel_w) *kh + kw; - - float input_val = input[input_idx]; - half kern_val = kernel[kernel_idx]; - - accumulator += input_val * (float) kern_val; - } + + for (int c_in_idx = 0; c_in_idx < c_in; c_in_idx++) { + for (int kh = 0; kh < kernel_h; ++kh) { + int in_y = out_y_idx - kh; + if (in_y < 0 || in_y % stride) continue; + in_y /= stride; + if (in_y >= in_h) continue; + + for (int kw = 0; kw < kernel_w; ++kw) { + int in_x = out_x_idx - kw; + if (in_x < 0 || in_x % stride) continue; + in_x /= stride; + if (in_x >= in_w) continue; + + const int input_idx = (in_w * in_h * c_in) * n_idx + (in_w * in_h) * c_in_idx + (in_w) *in_y + in_x; + const int kernel_idx = + (kernel_h * kernel_w * c_out) * c_in_idx + (kernel_h * kernel_w) * c_idx + (kernel_w) *kh + kw; + + float input_val = input[input_idx]; + half kern_val = kernel[kernel_idx]; + + accumulator += input_val * (float) kern_val; } } } @@ -105,7 +72,6 @@ void ggml_cuda_conv_2d_transpose_p0(ggml_backend_cuda_context & ctx, ggml_tensor const int kernel_h = kernel->ne[1]; const int stride = dst->op_params[0]; const int batches = input->ne[3]; - const int circular = dst->op_params[1]; GGML_ASSERT(channels_in == kernel->ne[3]); 
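Since this commit drops the dedicated circular code paths (the circular op_param read by the transposed conv here, and the circular branches of conv2d.cu and the Vulkan shaders below), circular behavior for the forward convolution can still be obtained by composing the retained ggml_pad_ext_circular with the unchanged operator, the same equivalence that test_conv_2d_circular_pipeline exercises. A minimal graph-building sketch of that composition (assumes ggml.h and a live ggml_context; the helper name is illustrative, not part of the patch):

    #include "ggml.h"

    // Circular 2D convolution expressed as wrap-around padding + ordinary conv.
    // p0/p1 are the circular padding amounts along W and H.
    static struct ggml_tensor * conv_2d_circular_via_pad(
            struct ggml_context * ctx,
            struct ggml_tensor  * kernel,  // [KW, KH, IC, OC]
            struct ggml_tensor  * input,   // [W, H, C, N]
            int s0, int s1, int p0, int p1, int d0, int d1) {
        // Wrap the input on the W/H torus first ...
        struct ggml_tensor * padded =
            ggml_pad_ext_circular(ctx, input, p0, p0, p1, p1, 0, 0, 0, 0);
        // ... then run the standard convolution with no extra zero padding;
        // the output size matches conv_out_size() with padding p0/p1.
        return ggml_conv_2d(ctx, kernel, padded, s0, s1, 0, 0, d0, d1);
    }
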
GGML_ASSERT(stride > 0); @@ -121,5 +87,5 @@ void ggml_cuda_conv_2d_transpose_p0(ggml_backend_cuda_context & ctx, ggml_tensor conv2d_transpose_kernel<<>>( input_data, kernel_data, output_data, input_w, input_h, output_w, output_h, kernel_w, kernel_h, stride, - channels_in, channels_out, batches, circular); -} + channels_in, channels_out, batches); +} \ No newline at end of file diff --git a/ggml/src/ggml-cuda/conv2d.cu b/ggml/src/ggml-cuda/conv2d.cu index e9d78df2c88..744712a0d57 100644 --- a/ggml/src/ggml-cuda/conv2d.cu +++ b/ggml/src/ggml-cuda/conv2d.cu @@ -11,7 +11,6 @@ struct conv_params { const int64_t IC, OC; const int64_t B; const int64_t TOTAL; - const int64_t CIRCULAR; }; struct kernel_bounds { @@ -27,24 +26,12 @@ __device__ __forceinline__ int64_t min64(int64_t a, int64_t b) { return (a < b) ? a : b; } -__device__ __forceinline__ int wrap_coord(int coord, int size) { - return (coord % size + size) % size; -} - __device__ __forceinline__ kernel_bounds calculate_kernel_bounds(int64_t out_x, int64_t out_y, const conv_params & P) { kernel_bounds bounds; - if (P.CIRCULAR) { - bounds.y_min = 0; - bounds.y_max = P.KH; - bounds.x_min = 0; - bounds.x_max = P.KW; - } - else { - bounds.y_min = max64(0, (P.PD_Y - out_y * P.ST_Y + P.DL_Y - 1) / P.DL_Y); - bounds.y_max = min64(P.KH, (P.IH + P.PD_Y - out_y * P.ST_Y + P.DL_Y - 1) / P.DL_Y); - bounds.x_min = max64(0, (P.PD_X - out_x * P.ST_X + P.DL_X - 1) / P.DL_X); - bounds.x_max = min64(P.KW, (P.IW + P.PD_X - out_x * P.ST_X + P.DL_X - 1) / P.DL_X); - } + bounds.y_min = max64(0, (P.PD_Y - out_y * P.ST_Y + P.DL_Y - 1) / P.DL_Y); + bounds.y_max = min64(P.KH, (P.IH + P.PD_Y - out_y * P.ST_Y + P.DL_Y - 1) / P.DL_Y); + bounds.x_min = max64(0, (P.PD_X - out_x * P.ST_X + P.DL_X - 1) / P.DL_X); + bounds.x_max = min64(P.KW, (P.IW + P.PD_X - out_x * P.ST_X + P.DL_X - 1) / P.DL_X); return bounds; } @@ -97,37 +84,19 @@ static __global__ void conv2d_kernel(const float * __restrict__ input, Layout::unpack_indices(global_idx, P, n, c_out, out_y, out_x); float acc = 0.0f; - if (P.CIRCULAR == 0) { - for (int64_t c_in = 0; c_in < P.IC; ++c_in) { - kernel_bounds bounds = calculate_kernel_bounds(out_x, out_y, P); - - for (int64_t ky = bounds.y_min; ky < bounds.y_max; ++ky) { - const int64_t in_y = calculate_input_coord(out_y, ky, P.ST_Y, P.DL_Y, P.PD_Y); - for (int64_t kx = bounds.x_min; kx < bounds.x_max; ++kx) { - const int64_t in_x = calculate_input_coord(out_x, kx, P.ST_X, P.DL_X, P.PD_X); + for (int64_t c_in = 0; c_in < P.IC; ++c_in) { + kernel_bounds bounds = calculate_kernel_bounds(out_x, out_y, P); - const float input_val = input[Layout::input_index(n, c_in, in_y, in_x, P)]; - const T kernel_val = kernel[Layout::kernel_index(c_out, c_in, ky, kx, P)]; - acc += (input_val * ggml_cuda_cast(kernel_val)); - } - } - } - } - else { - for (int64_t c_in = 0; c_in < P.IC; ++c_in) { - kernel_bounds bounds = calculate_kernel_bounds(out_x, out_y, P); + for (int64_t ky = bounds.y_min; ky < bounds.y_max; ++ky) { + const int64_t in_y = calculate_input_coord(out_y, ky, P.ST_Y, P.DL_Y, P.PD_Y); - for (int64_t ky = bounds.y_min; ky < bounds.y_max; ++ky) { - const int64_t in_y = wrap_coord(calculate_input_coord(out_y, ky, P.ST_Y, P.DL_Y, P.PD_Y), P.IH); + for (int64_t kx = bounds.x_min; kx < bounds.x_max; ++kx) { + const int64_t in_x = calculate_input_coord(out_x, kx, P.ST_X, P.DL_X, P.PD_X); - for (int64_t kx = bounds.x_min; kx < bounds.x_max; ++kx) { - const int64_t in_x = wrap_coord(calculate_input_coord(out_x, kx, P.ST_X, P.DL_X, P.PD_X), P.IW); - - const float input_val = 
input[Layout::input_index(n, c_in, in_y, in_x, P)]; - const T kernel_val = kernel[Layout::kernel_index(c_out, c_in, ky, kx, P)]; - acc += (input_val * ggml_cuda_cast(kernel_val)); - } + const float input_val = input[Layout::input_index(n, c_in, in_y, in_x, P)]; + const T kernel_val = kernel[Layout::kernel_index(c_out, c_in, ky, kx, P)]; + acc += (input_val * ggml_cuda_cast(kernel_val)); } } } @@ -172,7 +141,6 @@ void ggml_cuda_op_conv2d(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const int PD_Y = p[3]; // padding_y const int DL_X = p[4]; // dilation_x const int DL_Y = p[5]; // dilation_y - const int CIRCULAR = p[6]; // No cwhn GGML_ASSERT(p[6] == false); @@ -188,11 +156,11 @@ void ggml_cuda_op_conv2d(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const int B = input->ne[3]; // n_batches const int64_t total = B * OC * OH * OW; - conv_params params = { IW, IH, OW, OH, KW, KH, ST_X, ST_Y, PD_X, PD_Y, DL_X, DL_Y, IC, OC, B, total, CIRCULAR }; + conv_params params = { IW, IH, OW, OH, KW, KH, ST_X, ST_Y, PD_X, PD_Y, DL_X, DL_Y, IC, OC, B, total }; if (kernel->type == GGML_TYPE_F16) { conv2d_cuda_f16(X_D, (half *) K_D, Y_D, params, st); } else { conv2d_cuda_f32(X_D, K_D, Y_D, params, st); } -} +} \ No newline at end of file diff --git a/ggml/src/ggml-vulkan/ggml-vulkan.cpp b/ggml/src/ggml-vulkan/ggml-vulkan.cpp index ecd77aa9a98..41fa79283e3 100644 --- a/ggml/src/ggml-vulkan/ggml-vulkan.cpp +++ b/ggml/src/ggml-vulkan/ggml-vulkan.cpp @@ -1282,8 +1282,6 @@ struct vk_op_conv2d_push_constants { // init_fastdiv_values constants for dividing by OW, OW*OH uint32_t OWmp; uint32_t OWL; uint32_t OWOHmp; uint32_t OWOHL; - - uint32_t circular; }; template <> void init_pushconst_fastdiv(vk_op_conv2d_push_constants &p) { @@ -1326,8 +1324,6 @@ struct vk_op_conv_transpose_2d_push_constants { // init_fastdiv_values constants for dividing by OW, OW*OH uint32_t OWmp; uint32_t OWL; uint32_t OWOHmp; uint32_t OWOHL; - - uint32_t circular; }; template <> void init_pushconst_fastdiv(vk_op_conv_transpose_2d_push_constants &p) { @@ -1352,7 +1348,6 @@ struct vk_op_conv2d_dw_push_constants { int32_t pad_y; int32_t dilation_x; int32_t dilation_y; - uint32_t circular; }; struct vk_op_upscale_push_constants { @@ -10228,8 +10223,6 @@ static void ggml_vk_conv_2d(ggml_backend_vk_context * ctx, vk_context & subctx, p.nb2 = static_cast(nb2 / nb0); p.nb3 = static_cast(nb3 / nb0); - p.circular = static_cast(dst->op_params[6]); - GGML_ASSERT(ne03 == ne2); GGML_ASSERT(ne02 == ne12); @@ -10279,8 +10272,6 @@ static void ggml_vk_conv_transpose_2d(ggml_backend_vk_context * ctx, vk_context p.nb2 = static_cast(nb2 / nb0); p.nb3 = static_cast(nb3 / nb0); - p.circular = static_cast(dst->op_params[1]); - GGML_ASSERT(ne02 == ne2); GGML_ASSERT(ne03 == ne12); @@ -10304,7 +10295,6 @@ static void ggml_vk_conv_2d_dw(ggml_backend_vk_context * ctx, vk_context& subctx p.pad_y = dst->op_params[3]; p.dilation_x = dst->op_params[4]; p.dilation_y = dst->op_params[5]; - p.circular = dst->op_params[6]; GGML_ASSERT(src0->ne[3] == p.channels); GGML_ASSERT(src1->ne[3] == p.batches); diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/conv2d_dw.comp b/ggml/src/ggml-vulkan/vulkan-shaders/conv2d_dw.comp index 88bd1d7a755..70a301488eb 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/conv2d_dw.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/conv2d_dw.comp @@ -19,7 +19,6 @@ layout (push_constant) uniform parameter int pad_y; int dilation_x; int dilation_y; - uint circular; } p; layout (binding = 0) readonly buffer A {A_TYPE knl_data[];}; @@ -28,10 +27,6 
@@ layout (binding = 2) writeonly buffer D {D_TYPE dst_data[];}; layout(local_size_x = 512, local_size_y = 1, local_size_z = 1) in; -uint32_t wrap_coord(int coord, uint32_t size) { - return uint32_t((uint(coord + int(size))) % size); -} - FLOAT_TYPE conv_2d_dw_whcn(uint idx) { uint i0 = idx / p.dst_w; uint dst_x = idx - i0 * p.dst_w; @@ -44,35 +39,19 @@ FLOAT_TYPE conv_2d_dw_whcn(uint idx) { uint knl_i = c * p.knl_h * p.knl_w; FLOAT_TYPE sum = 0.0; - - if (p.circular != 0u) { - for (uint knl_y = 0; knl_y < p.knl_h; ++knl_y) { - int raw_y = int(dst_y) * p.stride_y + int(knl_y) * p.dilation_y - p.pad_y; - uint src_y = wrap_coord(raw_y, p.src_h); - for (uint knl_x = 0; knl_x < p.knl_w; ++knl_x) { - int raw_x = int(dst_x) * p.stride_x + int(knl_x) * p.dilation_x - p.pad_x; - uint src_x = wrap_coord(raw_x, p.src_w); - FLOAT_TYPE v = FLOAT_TYPE(src_data[src_i + src_y * p.src_w + src_x]); - FLOAT_TYPE k = FLOAT_TYPE(knl_data[knl_i + knl_y * p.knl_w + knl_x]); - sum = fma(v, k, sum); - } + for (uint knl_y = 0; knl_y < p.knl_h; ++knl_y) { + uint src_y = dst_y * p.stride_y + knl_y * p.dilation_y - p.pad_y; + if (src_y >= p.src_h) { // src_y < 0 will wrap to a large unsigned int + continue; } - } - else { - for (uint knl_y = 0; knl_y < p.knl_h; ++knl_y) { - uint src_y = dst_y * p.stride_y + knl_y * p.dilation_y - p.pad_y; - if (src_y >= p.src_h) { // src_y < 0 will wrap to a large unsigned int + for (uint knl_x = 0; knl_x < p.knl_w; ++knl_x) { + uint src_x = dst_x * p.stride_x + knl_x * p.dilation_x - p.pad_x; + if (src_x >= p.src_w) { // src_x < 0 will wrap to a large unsigned int continue; } - for (uint knl_x = 0; knl_x < p.knl_w; ++knl_x) { - uint src_x = dst_x * p.stride_x + knl_x * p.dilation_x - p.pad_x; - if (src_x >= p.src_w) { // src_x < 0 will wrap to a large unsigned int - continue; - } - FLOAT_TYPE v = FLOAT_TYPE(src_data[src_i + src_y * p.src_w + src_x]); - FLOAT_TYPE k = FLOAT_TYPE(knl_data[knl_i + knl_y * p.knl_w + knl_x]); - sum = fma(v, k, sum); - } + FLOAT_TYPE v = FLOAT_TYPE(src_data[src_i + src_y * p.src_w + src_x]); + FLOAT_TYPE k = FLOAT_TYPE(knl_data[knl_i + knl_y * p.knl_w + knl_x]); + sum = fma(v, k, sum); } } return sum; @@ -91,34 +70,19 @@ FLOAT_TYPE conv_2d_dw_cwhn(uint idx) { uint knl_row = p.knl_w * p.channels; FLOAT_TYPE sum = 0.0; - if (p.circular != 0u) { - for (uint knl_y = 0; knl_y < p.knl_h; ++knl_y) { - int raw_y = int(dst_y) * p.stride_y + int(knl_y) * p.dilation_y - p.pad_y; - uint src_y = wrap_coord(raw_y, p.src_h); - for (uint knl_x = 0; knl_x < p.knl_w; ++knl_x) { - int raw_x = int(dst_x) * p.stride_x + int(knl_x) * p.dilation_x - p.pad_x; - uint src_x = wrap_coord(raw_x, p.src_w); - FLOAT_TYPE v = FLOAT_TYPE(src_data[src_i + src_y * src_row + src_x * p.channels + c]); - FLOAT_TYPE k = FLOAT_TYPE(knl_data[ knl_y * knl_row + knl_x * p.channels + c]); - sum = fma(v, k, sum); - } + for (uint knl_y = 0; knl_y < p.knl_h; ++knl_y) { + uint src_y = dst_y * p.stride_y + knl_y * p.dilation_y - p.pad_y; + if (src_y >= p.src_h) { // src_y < 0 will wrap to a large unsigned int + continue; } - } - else { - for (uint knl_y = 0; knl_y < p.knl_h; ++knl_y) { - uint src_y = dst_y * p.stride_y + knl_y * p.dilation_y - p.pad_y; - if (src_y >= p.src_h) { // src_y < 0 will wrap to a large unsigned int + for (uint knl_x = 0; knl_x < p.knl_w; ++knl_x) { + uint src_x = dst_x * p.stride_x + knl_x * p.dilation_x - p.pad_x; + if (src_x >= p.src_w) { // src_x < 0 will wrap to a large unsigned int continue; } - for (uint knl_x = 0; knl_x < p.knl_w; ++knl_x) { - uint src_x = dst_x * 
p.stride_x + knl_x * p.dilation_x - p.pad_x; - if (src_x >= p.src_w) { // src_x < 0 will wrap to a large unsigned int - continue; - } - FLOAT_TYPE v = FLOAT_TYPE(src_data[src_i + src_y * src_row + src_x * p.channels + c]); - FLOAT_TYPE k = FLOAT_TYPE(knl_data[ knl_y * knl_row + knl_x * p.channels + c]); - sum = fma(v, k, sum); - } + FLOAT_TYPE v = FLOAT_TYPE(src_data[src_i + src_y * src_row + src_x * p.channels + c]); + FLOAT_TYPE k = FLOAT_TYPE(knl_data[ knl_y * knl_row + knl_x * p.channels + c]); + sum = fma(v, k, sum); } } return sum; diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/conv2d_mm.comp b/ggml/src/ggml-vulkan/vulkan-shaders/conv2d_mm.comp index b4c606f6389..e9bdbf7db5e 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/conv2d_mm.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/conv2d_mm.comp @@ -64,8 +64,6 @@ layout(push_constant) uniform parameter { // fastdiv helper values uint32_t OWmp; uint32_t OWL; uint32_t OWOHmp; uint32_t OWOHL; - - uint32_t circular; } p; @@ -179,10 +177,6 @@ ACC_TYPE perElemOpStore(const in uint32_t r, const in uint32_t c, const in ACC_T } #endif -uint32_t wrap_coord(int coord, uint32_t size) { - return uint32_t((uint(coord + int(size))) % size); -} - void main() { #ifdef COOPMAT2 coopmat matC; @@ -283,8 +277,7 @@ void main() { KH_idx_b = CRS_remainder / KW; KW_idx_b = CRS_remainder % KW; #endif - uint32_t H_pos; - uint32_t W_pos; + #ifdef TRANSPOSE uint32_t H_idx_x_s1 = OH_idx - KH_idx_b * d1 + p1; uint32_t W_idx_x_s0 = OW_idx - KW_idx_b * d0 + p0; @@ -294,13 +287,11 @@ void main() { uint32_t H_idx = OH_idx * s1 + KH_idx_b * d1 - p1; uint32_t W_idx = OW_idx * s0 + KW_idx_b * d0 - p0; #endif - H_pos = (p.circular != 0) ? wrap_coord(int(H_idx), p.H) : H_idx; - W_pos = (p.circular != 0) ? wrap_coord(int(W_idx), p.W) : W_idx; uint32_t src_idx = - min(max(W_pos + H_pos * p.nb11 + Cin_idx_b * p.nb12 + N_idx * p.nb13, 0), p.Cin * p.N * p.W * p.H - 1); + min(max(W_idx + H_idx * p.nb11 + Cin_idx_b * p.nb12 + N_idx * p.nb13, 0), p.Cin * p.N * p.W * p.H - 1); float val = src_data[src_idx]; if (CRS_idx_b >= CRS || NPQ_idx >= NPQ - || H_pos >= p.H || W_pos >= p.W // Lower bound checks aren't necessary. (idx >= 0x80000000 for such case) + || H_idx >= p.H || W_idx >= p.W // Lower bound checks aren't necessary. 
(idx >= 0x80000000 for such case) #ifdef TRANSPOSE || (H_idx_x_s1 - H_idx * s1 != 0) || (W_idx_x_s0 - W_idx * s0 != 0) #endif diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c index 1113164f488..14729d304a7 100644 --- a/ggml/src/ggml.c +++ b/ggml/src/ggml.c @@ -4457,29 +4457,6 @@ struct ggml_tensor * ggml_conv_2d( return result; } - -// ggml_conv_2d_circular - -// a: [OC,IC, KH, KW] -// b: [N, IC, IH, IW] -// result: [N, OC, OH, OW] -struct ggml_tensor * ggml_conv_2d_circular( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - int s0, - int s1, - int p0, - int p1, - int d0, - int d1) { - if (p0 == 0 && p1 == 0) { - return ggml_conv_2d(ctx, a, b, s0, s1, p0, p1, d0, d1); - } - struct ggml_tensor * b_padded = ggml_pad_ext_circular(ctx, b, p0, p0, p1, p1, 0, 0, 0, 0); - return ggml_conv_2d(ctx, a, b_padded, s0, s1, 0, 0, d0, d1); -} - // a: [OC*IC, KD, KH, KW] // b: [N*IC, ID, IH, IW] // result: [N*OD, OH, OW, IC * KD * KH * KW] @@ -4608,25 +4585,6 @@ struct ggml_tensor * ggml_conv_2d_dw( return result; } -// ggml_conv_2d_dw_circular - -struct ggml_tensor * ggml_conv_2d_dw_circular( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - int s0, - int s1, - int p0, - int p1, - int d0, - int d1) { - if (p0 == 0 && p1 == 0) { - return ggml_conv_2d_dw(ctx, a, b, s0, s1, p0, p1, d0, d1); - } - struct ggml_tensor * b_padded = ggml_pad_ext_circular(ctx, b, p0, p0, p1, p1, 0, 0, 0, 0); - return ggml_conv_2d_dw(ctx, a, b_padded, s0, s1, 0, 0, d0, d1); -} - // ggml_conv_2d_dw_direct struct ggml_tensor * ggml_conv_2d_dw_direct( @@ -4658,9 +4616,7 @@ struct ggml_tensor * ggml_conv_2d_dw_direct( result->nb[2] = type_size; } - int circular = 0; // default not circular - - int32_t params[] = { stride0, stride1, pad0, pad1, dilation0, dilation1, circular }; + int32_t params[] = { stride0, stride1, pad0, pad1, dilation0, dilation1 }; ggml_set_op_params(result, params, sizeof(params)); result->op = GGML_OP_CONV_2D_DW; @@ -4669,24 +4625,6 @@ struct ggml_tensor * ggml_conv_2d_dw_direct( return result; } -// ggml_conv_2d_dw_direct_circular - -struct ggml_tensor * ggml_conv_2d_dw_direct_circular( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - int stride0, - int stride1, - int pad0, - int pad1, - int dilation0, - int dilation1) { - struct ggml_tensor * result = - ggml_conv_2d_dw_direct(ctx, a, b, stride0, stride1, pad0, pad1, dilation0, dilation1); - ggml_set_op_params_i32(result, 6, 1); // circular - return result; -} - // ggml_conv_2d_direct struct ggml_tensor * ggml_conv_2d_direct( @@ -4717,7 +4655,6 @@ struct ggml_tensor * ggml_conv_2d_direct( ggml_set_op_params_i32(result, 3, p1); ggml_set_op_params_i32(result, 4, d0); ggml_set_op_params_i32(result, 5, d1); - ggml_set_op_params_i32(result, 6, 0); // default not circularc result->op = GGML_OP_CONV_2D; result->src[0] = a; @@ -4726,23 +4663,6 @@ struct ggml_tensor * ggml_conv_2d_direct( return result; } -// ggml_conv_2d_direct_circular - -struct ggml_tensor * ggml_conv_2d_direct_circular( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - int s0, - int s1, - int p0, - int p1, - int d0, - int d1) { - struct ggml_tensor * result = ggml_conv_2d_direct(ctx, a, b, s0, s1, p0, p1, d0, d1); - ggml_set_op_params_i32(result, 6, 1); // circular - return result; -} - // ggml_conv_3d_direct struct ggml_tensor * ggml_conv_3d_direct( @@ -4815,7 +4735,6 @@ struct ggml_tensor * ggml_conv_transpose_2d_p0( struct ggml_tensor* result = ggml_new_tensor(ctx, 
GGML_TYPE_F32, 4, ne); ggml_set_op_params_i32(result, 0, stride); - ggml_set_op_params_i32(result, 1, 0); // circular default off result->op = GGML_OP_CONV_TRANSPOSE_2D; result->src[0] = a; @@ -4824,18 +4743,6 @@ struct ggml_tensor * ggml_conv_transpose_2d_p0( return result; } -// ggml_conv_transpose_2d_p0_circular - -struct ggml_tensor * ggml_conv_transpose_2d_p0_circular( - struct ggml_context * ctx, - struct ggml_tensor * a, - struct ggml_tensor * b, - int stride) { - struct ggml_tensor * result = ggml_conv_transpose_2d_p0(ctx, a, b, stride); - ggml_set_op_params_i32(result, 1, 1); // circular enabled - return result; -} - // ggml_pool_* static int64_t ggml_calc_pool_output_size(int64_t ins, int ks, int s, float p) { From 38f872435d2e894943acb74ae7fbace65f415216 Mon Sep 17 00:00:00 2001 From: bepis Date: Fri, 14 Nov 2025 16:40:21 -0800 Subject: [PATCH 07/39] Removed unneded changes --- ggml/src/ggml-cuda/conv2d-transpose.cu | 2 +- ggml/src/ggml-cuda/conv2d.cu | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ggml/src/ggml-cuda/conv2d-transpose.cu b/ggml/src/ggml-cuda/conv2d-transpose.cu index eff6313f6b1..03224e404d3 100644 --- a/ggml/src/ggml-cuda/conv2d-transpose.cu +++ b/ggml/src/ggml-cuda/conv2d-transpose.cu @@ -88,4 +88,4 @@ void ggml_cuda_conv_2d_transpose_p0(ggml_backend_cuda_context & ctx, ggml_tensor conv2d_transpose_kernel<<>>( input_data, kernel_data, output_data, input_w, input_h, output_w, output_h, kernel_w, kernel_h, stride, channels_in, channels_out, batches); -} \ No newline at end of file +} diff --git a/ggml/src/ggml-cuda/conv2d.cu b/ggml/src/ggml-cuda/conv2d.cu index 744712a0d57..142dd66903a 100644 --- a/ggml/src/ggml-cuda/conv2d.cu +++ b/ggml/src/ggml-cuda/conv2d.cu @@ -163,4 +163,4 @@ void ggml_cuda_op_conv2d(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { } else { conv2d_cuda_f32(X_D, K_D, Y_D, params, st); } -} \ No newline at end of file +} From d4a664bffbfe3e4654dce02691f80abc47c1fde1 Mon Sep 17 00:00:00 2001 From: bepis Date: Fri, 14 Nov 2025 16:49:14 -0800 Subject: [PATCH 08/39] removed backend non pad tests --- tests/test-backend-ops.cpp | 606 +++++++++++++++---------------------- 1 file changed, 248 insertions(+), 358 deletions(-) diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp index 54f0bf00bb5..b702992d83b 100644 --- a/tests/test-backend-ops.cpp +++ b/tests/test-backend-ops.cpp @@ -175,6 +175,38 @@ static void init_tensor_kq_mask(ggml_tensor * tensor, float min = -1.0f, float m ggml_backend_tensor_set(tensor, data_f16.data(), 0, data_f16.size()*sizeof(ggml_fp16_t)); } +// generate a lower triangular matrix +static void init_tensor_tril(ggml_tensor * tensor, float min = -1.0f, float max = 1.0f) { + GGML_ASSERT(tensor->type == GGML_TYPE_F32); + GGML_ASSERT(tensor->ne[0] == tensor->ne[1]); + + GGML_TENSOR_LOCALS(int32_t, ne, tensor, ne); + GGML_TENSOR_LOCALS(size_t, nb, tensor, nb); + + std::vector data_f32(ne0*ne1*ne2*ne3); + + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_real_distribution dis(min, max); + + for (int64_t i3 = 0; i3 < ne3; i3++) { + for (int64_t i2 = 0; i2 < ne2; i2++) { + for (int64_t i1 = 0; i1 < ne1; i1++) { + for (int64_t i0 = 0; i0 < ne0; i0++) { + int64_t idx = (i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3) / sizeof(float); + if (i0 <= i1) { + data_f32[idx] = dis(gen); + } else { + data_f32[idx] = 0.0f; + } + } + } + } + } + + ggml_backend_tensor_set(tensor, data_f32.data(), 0, ggml_nbytes(tensor)); +} + static std::vector tensor_to_float(const ggml_tensor * t) { std::vector 
tv; tv.reserve(ggml_nelements(t)); @@ -1804,7 +1836,8 @@ struct test_unary : public test_case { ggml_tensor * build_graph(ggml_context * ctx) override { const bool grad_supported = op == GGML_UNARY_OP_ABS || op == GGML_UNARY_OP_SGN || op == GGML_UNARY_OP_NEG || - op == GGML_UNARY_OP_STEP || op == GGML_UNARY_OP_RELU || op == GGML_UNARY_OP_SILU; + op == GGML_UNARY_OP_STEP || op == GGML_UNARY_OP_RELU || op == GGML_UNARY_OP_SILU || + op == GGML_UNARY_OP_EXPM1 || op == GGML_UNARY_OP_SOFTPLUS; ggml_tensor * a; if (v & 1) { @@ -2779,7 +2812,7 @@ struct test_bin_bcast : public test_case { const std::array nr; int nf; // number of fused ops, nf == 1 -> single op (no fusion) - bool run_whole_graph() override { return true; } + bool run_whole_graph() override { return nf > 1; } std::string vars() override { return VARS_TO_STR4(type, ne, nr, nf); @@ -4602,7 +4635,6 @@ struct test_conv_2d : public test_case { const int dilation1; // Whether the inputs are contiguous in the channel dim or the width dim const bool cwhn; - const bool circular; // If true, the direct CONV_2D will be used in the graph, otherwise it // uses ggml_conv_2d: @@ -4612,7 +4644,7 @@ struct test_conv_2d : public test_case { // IM2COL -> MUL_MM graph will be built. std::string vars() override { - return VARS_TO_STR11(ne_input, ne_kernel, type_kernel, stride0, stride1, padding0, padding1, dilation0, dilation1, cwhn, circular); + return VARS_TO_STR10(ne_input, ne_kernel, type_kernel, stride0, stride1, padding0, padding1, dilation0, dilation1, cwhn); } double max_nmse_err() override { @@ -4648,8 +4680,7 @@ struct test_conv_2d : public test_case { test_conv_2d(std::array ne_input = { 64, 64, 16, 1 }, std::array ne_kernel = { 3, 3, 1, 16 }, ggml_type type_kernel = GGML_TYPE_F32, int stride0 = 1, - int stride1 = 1, int padding0 = 0, int padding1 = 0, int dilation0 = 1, int dilation1 = 1, - bool cwhn = false, bool circular = false) : + int stride1 = 1, int padding0 = 0, int padding1 = 0, int dilation0 = 1, int dilation1 = 1, bool cwhn = false) : ne_input(ne_input), ne_kernel(ne_kernel), type_kernel(type_kernel), @@ -4659,8 +4690,7 @@ struct test_conv_2d : public test_case { padding1(padding1), dilation0(dilation0), dilation1(dilation1), - cwhn(cwhn), - circular(circular) {} + cwhn(cwhn) {} ggml_tensor * build_graph(ggml_context * ctx) override { ggml_tensor * input = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne_input.data()); @@ -4678,58 +4708,8 @@ struct test_conv_2d : public test_case { kernel = ggml_permute(ctx, kernel, 3, 2, 0, 1); } - ggml_tensor * out = circular - ? 
ggml_conv_2d_direct_circular(ctx, kernel, input, stride0, stride1, padding0, padding1, dilation0, dilation1) - : ggml_conv_2d_direct(ctx, kernel, input, stride0, stride1, padding0, padding1, dilation0, dilation1); - ggml_set_name(out, "out"); - return out; - } -}; - -struct test_conv_2d_im2col : public test_case { - const std::array ne_input; - const std::array ne_kernel; - const ggml_type type_kernel; - const int stride0; - const int stride1; - const int padding0; - const int padding1; - const int dilation0; - const int dilation1; - const bool circular; - - std::string vars() override { - return VARS_TO_STR10(ne_input, ne_kernel, type_kernel, stride0, stride1, padding0, padding1, dilation0, dilation1, circular); - } - - test_conv_2d_im2col(std::array ne_input = { 32, 24, 8, 2 }, - std::array ne_kernel = { 3, 3, 8, 4 }, - ggml_type type_kernel = GGML_TYPE_F32, - int stride0 = 1, int stride1 = 1, - int padding0 = 0, int padding1 = 0, - int dilation0 = 1, int dilation1 = 1, - bool circular = false) - : ne_input(ne_input), - ne_kernel(ne_kernel), - type_kernel(type_kernel), - stride0(stride0), - stride1(stride1), - padding0(padding0), - padding1(padding1), - dilation0(dilation0), - dilation1(dilation1), - circular(circular) {} - - ggml_tensor * build_graph(ggml_context * ctx) override { - ggml_tensor * input = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne_input.data()); - ggml_set_name(input, "input"); - - ggml_tensor * kernel = ggml_new_tensor(ctx, type_kernel, 4, ne_kernel.data()); - ggml_set_name(kernel, "kernel"); - - ggml_tensor * out = circular - ? ggml_conv_2d_circular(ctx, kernel, input, stride0, stride1, padding0, padding1, dilation0, dilation1) - : ggml_conv_2d(ctx, kernel, input, stride0, stride1, padding0, padding1, dilation0, dilation1); + ggml_tensor * out = + ggml_conv_2d_direct(ctx, kernel, input, stride0, stride1, padding0, padding1, dilation0, dilation1); ggml_set_name(out, "out"); return out; } @@ -4743,16 +4723,15 @@ struct test_conv_2d_dw : public test_case { const int padding; const int dilation; const bool cwhn; - const bool circular; std::string vars() override { - return VARS_TO_STR7(ne_input, ne_kernel, stride, padding, dilation, cwhn, circular); + return VARS_TO_STR6(ne_input, ne_kernel, stride, padding, dilation, cwhn); } test_conv_2d_dw(std::array ne_input = {64, 64, 16, 1}, std::array ne_kernel = {3, 3, 1, 16}, - int stride = 1, int padding = 0, int dilation = 1, bool cwhn = false, bool circular = false) - : ne_input(ne_input), ne_kernel(ne_kernel), stride(stride), padding(padding), dilation(dilation), cwhn(cwhn), circular(circular) {} + int stride = 1, int padding = 0, int dilation = 1, bool cwhn = false) + : ne_input(ne_input), ne_kernel(ne_kernel), stride(stride), padding(padding), dilation(dilation), cwhn(cwhn) {} ggml_tensor * build_graph(ggml_context * ctx) override { ggml_tensor * input = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne_input.data()); @@ -4770,284 +4749,14 @@ struct test_conv_2d_dw : public test_case { kernel = ggml_permute(ctx, kernel, 3, 2, 0, 1); } - ggml_tensor * out = circular - ? 
ggml_conv_2d_dw_direct_circular(ctx, kernel, input, - stride, stride, padding, padding, dilation, dilation) - : ggml_conv_2d_dw_direct(ctx, kernel, input, - stride, stride, padding, padding, dilation, dilation); + ggml_tensor * out = ggml_conv_2d_dw_direct( + ctx, kernel, input, + stride, stride, padding, padding, dilation, dilation); ggml_set_name(out, "out"); return out; } }; -static inline int64_t conv_out_size(int64_t ins, int64_t ks, int stride, int pad, int dilation) { - return (ins + 2 * pad - dilation * (ks - 1) - 1) / stride + 1; -} -// GGML_OP_PAD -static inline int64_t wrap_coord_circular(int64_t coord, int64_t size) { - GGML_ASSERT(size > 0); - const int64_t mod = coord % size; - return mod < 0 ? mod + size : mod; -} - -static inline int64_t offset4d(const int64_t ne[4], int64_t i0, int64_t i1, int64_t i2, int64_t i3) { - return ((i3 * ne[2] + i2) * ne[1] + i1) * ne[0] + i0; -} - -struct test_conv_2d_direct_circular_manual : public test_case { - const std::array ne_input{5, 4, 1, 1}; - const std::array ne_kernel{3, 3, 1, 1}; - const int stride0 = 1; - const int stride1 = 1; - const int padding0 = 2; - const int padding1 = 1; - const int dilation0 = 1; - const int dilation1 = 1; - - ggml_tensor * input = nullptr; - ggml_tensor * kernel = nullptr; - ggml_tensor * expected = nullptr; - - std::string vars() override { - return "manual_conv2d_direct_circular"; - } - - ggml_tensor * build_graph(ggml_context * ctx) override { - input = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne_input.data()); - kernel = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne_kernel.data()); - ggml_set_name(input, "input"); - ggml_set_name(kernel, "kernel"); - - ggml_tensor * actual = ggml_conv_2d_direct_circular( - ctx, kernel, input, stride0, stride1, padding0, padding1, dilation0, dilation1); - ggml_set_name(actual, "actual"); - - int64_t ne_out[4] = { - conv_out_size(ne_input[0], ne_kernel[0], stride0, padding0, dilation0), - conv_out_size(ne_input[1], ne_kernel[1], stride1, padding1, dilation1), - ne_kernel[3], - ne_input[3], - }; - - expected = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne_out); - ggml_set_name(expected, "expected"); - - ggml_tensor * diff = ggml_sub(ctx, actual, expected); - ggml_tensor * sq = ggml_sqr(ctx, diff); - ggml_tensor * loss = ggml_sum(ctx, sq); - ggml_set_name(loss, "loss"); - return loss; - } - - void initialize_tensors(ggml_context * ctx) override { - test_case::initialize_tensors(ctx); - - std::vector input_data(ggml_nelements(input)); - for (size_t i = 0; i < input_data.size(); ++i) { - input_data[i] = static_cast(std::sin(static_cast(i + 1))); - } - ggml_backend_tensor_set(input, input_data.data(), 0, input_data.size() * sizeof(float)); - - std::vector kernel_data(ggml_nelements(kernel)); - for (size_t i = 0; i < kernel_data.size(); ++i) { - kernel_data[i] = static_cast(std::cos(static_cast(i + 1))); - } - ggml_backend_tensor_set(kernel, kernel_data.data(), 0, kernel_data.size() * sizeof(float)); - - int64_t ne_out[4] = { - conv_out_size(ne_input[0], ne_kernel[0], stride0, padding0, dilation0), - conv_out_size(ne_input[1], ne_kernel[1], stride1, padding1, dilation1), - ne_kernel[3], - ne_input[3], - }; - std::vector expected_data(ggml_nelements(expected), 0.0f); - - for (int64_t n = 0; n < ne_input[3]; ++n) { - for (int64_t oc = 0; oc < ne_kernel[3]; ++oc) { - for (int64_t oy = 0; oy < ne_out[1]; ++oy) { - for (int64_t ox = 0; ox < ne_out[0]; ++ox) { - float sum = 0.0f; - for (int64_t ic = 0; ic < ne_kernel[2]; ++ic) { - for (int64_t ky = 0; ky < ne_kernel[1]; ++ky) { - const 
int64_t in_y = wrap_coord_circular( - oy * stride1 + ky * dilation1 - padding1, ne_input[1]); - for (int64_t kx = 0; kx < ne_kernel[0]; ++kx) { - const int64_t in_x = wrap_coord_circular( - ox * stride0 + kx * dilation0 - padding0, ne_input[0]); - const int64_t src_idx = offset4d(ne_input.data(), in_x, in_y, ic, n); - const int64_t ker_idx = offset4d(ne_kernel.data(), kx, ky, ic, oc); - sum += input_data[src_idx] * kernel_data[ker_idx]; - } - } - } - expected_data[offset4d(ne_out, ox, oy, oc, n)] = sum; - } - } - } - } - - ggml_backend_tensor_set(expected, expected_data.data(), 0, expected_data.size() * sizeof(float)); - } - - double max_nmse_err() override { - return 1e-8; - } -}; - -struct test_conv_2d_dw_direct_circular_manual : public test_case { - const std::array ne_input{4, 3, 2, 1}; - const std::array ne_kernel{3, 2, 1, 2}; - const int stride = 1; - const int padding = 1; - const int dilation = 1; - - ggml_tensor * input = nullptr; - ggml_tensor * kernel = nullptr; - ggml_tensor * expected = nullptr; - - std::string vars() override { - return "manual_conv2d_dw_direct_circular"; - } - - ggml_tensor * build_graph(ggml_context * ctx) override { - input = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne_input.data()); - kernel = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne_kernel.data()); - ggml_set_name(input, "input"); - ggml_set_name(kernel, "kernel"); - - ggml_tensor * actual = ggml_conv_2d_dw_direct_circular( - ctx, kernel, input, stride, stride, padding, padding, dilation, dilation); - ggml_set_name(actual, "actual"); - - int64_t ne_out[4] = { - conv_out_size(ne_input[0], ne_kernel[0], stride, padding, dilation), - conv_out_size(ne_input[1], ne_kernel[1], stride, padding, dilation), - ne_input[2], - ne_input[3], - }; - expected = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne_out); - ggml_set_name(expected, "expected"); - - ggml_tensor * diff = ggml_sub(ctx, actual, expected); - ggml_tensor * sq = ggml_sqr(ctx, diff); - ggml_tensor * loss = ggml_sum(ctx, sq); - ggml_set_name(loss, "loss"); - return loss; - } - - void initialize_tensors(ggml_context * ctx) override { - test_case::initialize_tensors(ctx); - - std::vector input_data(ggml_nelements(input)); - for (size_t i = 0; i < input_data.size(); ++i) { - input_data[i] = static_cast(i % 7); - } - ggml_backend_tensor_set(input, input_data.data(), 0, input_data.size() * sizeof(float)); - - std::vector kernel_data(ggml_nelements(kernel)); - for (size_t i = 0; i < kernel_data.size(); ++i) { - kernel_data[i] = static_cast((i % 5) - 2); - } - ggml_backend_tensor_set(kernel, kernel_data.data(), 0, kernel_data.size() * sizeof(float)); - - int64_t ne_out[4] = { - conv_out_size(ne_input[0], ne_kernel[0], stride, padding, dilation), - conv_out_size(ne_input[1], ne_kernel[1], stride, padding, dilation), - ne_input[2], - ne_input[3], - }; - - std::vector expected_data(ggml_nelements(expected), 0.0f); - for (int64_t n = 0; n < ne_input[3]; ++n) { - for (int64_t c = 0; c < ne_input[2]; ++c) { - for (int64_t oy = 0; oy < ne_out[1]; ++oy) { - for (int64_t ox = 0; ox < ne_out[0]; ++ox) { - float sum = 0.0f; - for (int64_t ky = 0; ky < ne_kernel[1]; ++ky) { - const int64_t in_y = wrap_coord_circular( - oy * stride + ky * dilation - padding, ne_input[1]); - for (int64_t kx = 0; kx < ne_kernel[0]; ++kx) { - const int64_t in_x = wrap_coord_circular( - ox * stride + kx * dilation - padding, ne_input[0]); - const int64_t src_idx = offset4d(ne_input.data(), in_x, in_y, c, n); - const int64_t ker_idx = offset4d(ne_kernel.data(), kx, ky, 0, c); - sum += 
input_data[src_idx] * kernel_data[ker_idx]; - } - } - expected_data[offset4d(ne_out, ox, oy, c, n)] = sum; - } - } - } - } - - ggml_backend_tensor_set(expected, expected_data.data(), 0, expected_data.size() * sizeof(float)); - } - - double max_nmse_err() override { - return 1e-8; - } -}; - -struct test_conv_2d_circular_pipeline : public test_case { - const std::array ne_input{6, 5, 3, 2}; - const std::array ne_kernel{3, 3, 3, 4}; - const int stride0 = 2; - const int stride1 = 1; - const int padding0 = 1; - const int padding1 = 2; - const int dilation0 = 1; - const int dilation1 = 2; - - ggml_tensor * input = nullptr; - ggml_tensor * kernel = nullptr; - - std::string vars() override { - return "conv2d_circular_vs_pipeline"; - } - - ggml_tensor * build_graph(ggml_context * ctx) override { - input = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne_input.data()); - kernel = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne_kernel.data()); - ggml_set_name(input, "input"); - ggml_set_name(kernel, "kernel"); - - ggml_tensor * actual = ggml_conv_2d_circular( - ctx, kernel, input, stride0, stride1, padding0, padding1, dilation0, dilation1); - ggml_set_name(actual, "actual"); - - ggml_tensor * padded = ggml_pad_ext_circular(ctx, input, padding0, padding0, padding1, padding1, 0, 0, 0, 0); - ggml_set_name(padded, "padded"); - ggml_tensor * reference = ggml_conv_2d(ctx, kernel, padded, stride0, stride1, 0, 0, dilation0, dilation1); - ggml_set_name(reference, "reference"); - - ggml_tensor * diff = ggml_sub(ctx, actual, reference); - ggml_tensor * sq = ggml_sqr(ctx, diff); - ggml_tensor * loss = ggml_sum(ctx, sq); - ggml_set_name(loss, "loss"); - return loss; - } - - void initialize_tensors(ggml_context * ctx) override { - test_case::initialize_tensors(ctx); - - std::vector input_data(ggml_nelements(input)); - for (size_t i = 0; i < input_data.size(); ++i) { - input_data[i] = static_cast(std::fmod(static_cast(i * 3 + 1), 17.0)); - } - ggml_backend_tensor_set(input, input_data.data(), 0, input_data.size() * sizeof(float)); - - std::vector kernel_data(ggml_nelements(kernel)); - for (size_t i = 0; i < kernel_data.size(); ++i) { - kernel_data[i] = static_cast(std::fmod(static_cast(i * 7 + 3), 11.0) - 5.0); - } - ggml_backend_tensor_set(kernel, kernel_data.data(), 0, kernel_data.size() * sizeof(float)); - } - - double max_nmse_err() override { - return 1e-8; - } -}; - // GGML_OP_CONV_3D struct test_conv_3d : public test_case { // Logical 5D dimensions @@ -5692,8 +5401,7 @@ struct test_acc : public test_case { } }; - - +// GGML_OP_PAD struct test_pad : public test_case { const ggml_type type; const std::array ne_a; @@ -5769,7 +5477,7 @@ struct test_pad_ext : public test_case { // GGML_OP_PAD_REFLECT_1D -struct test_pad_ext_circular_manual : public test_case { +struct test_pad_ext_circular : public test_case { const std::array ne_src{4, 3, 1, 1}; const std::array pads_l{1, 2, 0, 0}; const std::array pads_r{2, 1, 0, 0}; @@ -5845,6 +5553,8 @@ struct test_pad_ext_circular_manual : public test_case { } }; + +// GGML_OP_PAD_REFLECT_1D struct test_pad_reflect_1d : public test_case { const ggml_type type; const std::array ne_a; @@ -6211,6 +5921,7 @@ struct test_opt_step_adamw : public test_case { } }; +// GGML_OP_OPT_STEP_SGD struct test_opt_step_sgd : public test_case { const ggml_type type; const std::array ne; @@ -6250,6 +5961,170 @@ struct test_opt_step_sgd : public test_case { } }; +// GGML_OP_CUMSUM +struct test_cumsum : public test_case { + const ggml_type type; + const std::array ne; + + std::string vars() override { return 
VARS_TO_STR2(type, ne); } + + test_cumsum(ggml_type type = GGML_TYPE_F32, + std::array ne = { 10, 5, 4, 3 }) + : type(type), ne(ne) {} + + ggml_tensor * build_graph(ggml_context * ctx) override { + ggml_tensor * a = ggml_new_tensor_4d(ctx, type, ne[0], ne[1], ne[2], ne[3]); + ggml_set_param(a); + ggml_set_name(a, "a"); + + ggml_tensor * out = ggml_cumsum(ctx, a); + + ggml_set_name(out, "out"); + + return out; + } + + void initialize_tensors(ggml_context * ctx) override { + for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) { + init_tensor_uniform(t, -1.0f, 1.0f); + } + } +}; + +// GGML_OP_XIELU +struct test_xielu : public test_case { + const ggml_type type; + const std::array ne; + + std::string vars() override { return VARS_TO_STR2(type, ne); } + + test_xielu(ggml_type type = GGML_TYPE_F32, + std::array ne = { 10, 5, 4, 3 }) + : type(type), ne(ne) {} + + ggml_tensor * build_graph(ggml_context * ctx) override { + ggml_tensor * a = ggml_new_tensor_4d(ctx, type, ne[0], ne[1], ne[2], ne[3]); + ggml_set_param(a); + ggml_set_name(a, "a"); + + float alpha_n = 4.0f; + float alpha_p = 20.0f; + float beta = 0.5f; + float eps = 0.0000001f; + + ggml_tensor * out = ggml_xielu(ctx, a, alpha_n, alpha_p, beta, eps); + + ggml_set_name(out, "out"); + + return out; + } + + void initialize_tensors(ggml_context * ctx) override { + for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) { + init_tensor_uniform(t, -1.0f, 1.0f); + } + } +}; + +// GGML_OP_TRI +struct test_tri : public test_case { + const ggml_type type; + const std::array ne; + const ggml_tri_type tri_type; + + std::string vars() override { return VARS_TO_STR3(type, ne, tri_type); } + + test_tri(ggml_tri_type tri_type, ggml_type type = GGML_TYPE_F32, + std::array ne = { 10, 10, 4, 3 }) + : type(type), ne(ne), tri_type(tri_type) { + GGML_ASSERT(ne[0] == ne[1]); + } + + ggml_tensor * build_graph(ggml_context * ctx) override { + ggml_tensor * a = ggml_new_tensor_4d(ctx, type, ne[0], ne[1], ne[2], ne[3]); + ggml_set_param(a); + ggml_set_name(a, "a"); + + ggml_tensor * out = ggml_tri(ctx, a, tri_type); + + ggml_set_name(out, "out"); + + return out; + } + + void initialize_tensors(ggml_context * ctx) override { + for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) { + init_tensor_uniform(t, -1.0f, 1.0f); + } + } +}; + +// GGML_OP_FILL +struct test_fill : public test_case { + const ggml_type type; + const std::array ne; + float c; + + std::string vars() override { return VARS_TO_STR3(type, ne, c); } + + test_fill(float c, ggml_type type = GGML_TYPE_F32, + std::array ne = { 10, 10, 4, 3 }) + : type(type), ne(ne), c(c) {} + + ggml_tensor * build_graph(ggml_context * ctx) override { + ggml_tensor * a = ggml_new_tensor_4d(ctx, type, ne[0], ne[1], ne[2], ne[3]); + ggml_set_param(a); + ggml_set_name(a, "a"); + + ggml_tensor * out = ggml_fill(ctx, a, c); + + ggml_set_name(out, "out"); + + return out; + } +}; + +// GGML_OP_SOLVE_TRI +struct test_solve_tri : public test_case { + const ggml_type type; + const std::array ne_lhs; + const std::array ne_rhs; + + std::string vars() override { return VARS_TO_STR3(type, ne_lhs, ne_rhs); } + + test_solve_tri(ggml_type type = GGML_TYPE_F32, + std::array ne_lhs = { 10, 10, 4, 3 }, + std::array ne_rhs = { 3, 10, 4, 3 } + ) + : type(type), ne_lhs(ne_lhs), ne_rhs(ne_rhs) {} + + ggml_tensor * build_graph(ggml_context * ctx) override { + ggml_tensor * a = ggml_new_tensor_4d(ctx, type, ne_lhs[0], 
ne_lhs[1], ne_lhs[2], ne_lhs[3]); + ggml_set_param(a); + ggml_set_name(a, "a"); + + ggml_tensor * b = ggml_new_tensor_4d(ctx, type, ne_rhs[0], ne_rhs[1], ne_rhs[2], ne_rhs[3]); + ggml_set_param(b); + ggml_set_name(b, "b"); + + ggml_tensor * out = ggml_solve_tri(ctx, a, b, true, true, false); + ggml_set_name(out, "out"); + + return out; + } + + void initialize_tensors(ggml_context * ctx) override { + for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) { + if (strcmp(t->name, "a") == 0) { + // note: avoid zeros in the diagonal + init_tensor_tril(t, 0.1, 1.0f); + } else { + init_tensor_uniform(t, -1.0f, 1.0f); + } + } + } +}; + enum llm_norm_type { LLM_NORM, LLM_NORM_RMS, @@ -6691,6 +6566,9 @@ static std::vector> make_test_cases_eval() { for (ggml_type type : {GGML_TYPE_F16, GGML_TYPE_F32}) { for (int v : {0, 1}) { for (int op = 0; op < GGML_UNARY_OP_COUNT; op++) { + if (op == GGML_UNARY_OP_XIELU) { + continue; // need extra params, separate test + } test_cases.emplace_back(new test_unary((ggml_unary_op) op, type, { 128, 2, 2, 2 }, v)); test_cases.emplace_back(new test_unary((ggml_unary_op) op, type, { 5, 7, 11, 13 }, v)); } @@ -6967,23 +6845,10 @@ static std::vector> make_test_cases_eval() { // test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16, {1024, 1024, 256, 1}, {3, 3, 256, 1}, 1, 1, 1, 1, 1, 1, true)); // test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F32, {1024, 1024, 256, 1}, {3, 3, 256, 1}, 1, 1, 1, 1, 1, 1, true)); - test_cases.emplace_back(new test_conv_2d({37, 29, 3, 2}, {3, 2, 3, 5}, GGML_TYPE_F32, 1, 1, 4, 3, 1, 1, false, true)); - test_cases.emplace_back(new test_conv_2d({19, 23, 4, 1}, {5, 3, 4, 4}, GGML_TYPE_F16, 2, 1, 3, 2, 1, 2, false, true)); - test_cases.emplace_back(new test_conv_2d({16, 18, 6, 3}, {3, 3, 6, 8}, GGML_TYPE_F32, 1, 2, 2, 3, 1, 1, true, true)); - - test_cases.emplace_back(new test_conv_2d_im2col()); - test_cases.emplace_back(new test_conv_2d_im2col({17, 13, 6, 2}, {3, 3, 6, 4}, GGML_TYPE_F32, 1, 2, 2, 3, 1, 1, true)); - test_cases.emplace_back(new test_conv_2d_im2col({11, 7, 2, 1}, {3, 3, 2, 3}, GGML_TYPE_F16, 1, 1, 1, 1, 2, 1, true)); - test_cases.emplace_back(new test_conv_2d_direct_circular_manual()); - test_cases.emplace_back(new test_conv_2d_dw({17, 34, 9, 1}, {3, 3, 1, 9}, 1, 0, 1, false)); test_cases.emplace_back(new test_conv_2d_dw({17, 34, 9, 1}, {3, 3, 1, 9}, 1, 0, 1, true)); test_cases.emplace_back(new test_conv_2d_dw({32, 8, 64, 1}, {3, 3, 1, 64}, 2, 1, 1, false)); test_cases.emplace_back(new test_conv_2d_dw({32, 8, 64, 1}, {3, 3, 1, 64}, 2, 1, 1, true)); - test_cases.emplace_back(new test_conv_2d_dw({29, 19, 8, 2}, {5, 3, 1, 8}, 1, 2, 1, false, true)); - test_cases.emplace_back(new test_conv_2d_dw({24, 14, 16, 1}, {3, 3, 1, 16}, 2, 1, 2, true, true)); - test_cases.emplace_back(new test_conv_2d_dw_direct_circular_manual()); - test_cases.emplace_back(new test_conv_2d_circular_pipeline()); // CONV_3D auto calc_conv_output_size_3d = [](int64_t ins, int64_t ks, int s, int p, int d) -> int64_t { @@ -7712,8 +7577,13 @@ static std::vector> make_test_cases_eval() { test_cases.emplace_back(new test_argsort(GGML_TYPE_F32, {8, 1, 1, 1}, order)); test_cases.emplace_back(new test_argsort(GGML_TYPE_F32, {16, 10, 10, 10}, order)); test_cases.emplace_back(new test_argsort(GGML_TYPE_F32, {60, 10, 10, 10}, order)); // qwen - test_cases.emplace_back(new test_argsort(GGML_TYPE_F32, {1024, 1, 1, 1}, order)); + test_cases.emplace_back(new 
test_argsort(GGML_TYPE_F32, {1023, 2, 1, 3}, order)); + test_cases.emplace_back(new test_argsort(GGML_TYPE_F32, {1024, 2, 1, 3}, order)); + test_cases.emplace_back(new test_argsort(GGML_TYPE_F32, {1025, 2, 1, 3}, order)); test_cases.emplace_back(new test_argsort(GGML_TYPE_F32, {16384, 1, 1, 1}, order)); // many backends only handle up to 1024 + test_cases.emplace_back(new test_argsort(GGML_TYPE_F32, {2047, 2, 1, 3}, order)); + test_cases.emplace_back(new test_argsort(GGML_TYPE_F32, {2048, 2, 1, 3}, order)); + test_cases.emplace_back(new test_argsort(GGML_TYPE_F32, {2049, 2, 1, 3}, order)); test_cases.emplace_back(new test_argsort(GGML_TYPE_F32, {2, 8, 8192, 1}, order)); // bailingmoe2 (group selection) } @@ -7754,21 +7624,39 @@ static std::vector> make_test_cases_eval() { test_cases.emplace_back(new test_group_norm_mul_add(GGML_TYPE_F32, {9, 9, 1280, 1})); test_cases.emplace_back(new test_acc()); test_cases.emplace_back(new test_pad()); - test_cases.emplace_back(new test_pad_ext_circular_manual()); - test_cases.emplace_back(new test_pad(GGML_TYPE_F32, {33, 17, 2, 1}, 4, 3, true)); + test_cases.emplace_back(new test_pad(GGML_TYPE_F32, {33, 17, 2, 1}, 4, 3, true)); // circular test_cases.emplace_back(new test_pad_ext()); - test_cases.emplace_back(new test_pad_ext(GGML_TYPE_F32, {19, 11, 5, 2}, 2, 4, 1, 3, 0, 0, 0, 0, false, true)); + test_cases.emplace_back(new test_pad_ext_circular()); test_cases.emplace_back(new test_pad_reflect_1d()); test_cases.emplace_back(new test_pad_reflect_1d(GGML_TYPE_F32, {3000, 384, 4, 1})); test_cases.emplace_back(new test_roll()); test_cases.emplace_back(new test_arange()); test_cases.emplace_back(new test_timestep_embedding()); test_cases.emplace_back(new test_leaky_relu()); + test_cases.emplace_back(new test_cumsum()); + + test_cases.emplace_back(new test_xielu()); + + test_cases.emplace_back(new test_tri(GGML_TRI_TYPE_LOWER)); + test_cases.emplace_back(new test_tri(GGML_TRI_TYPE_LOWER_DIAG)); + test_cases.emplace_back(new test_tri(GGML_TRI_TYPE_UPPER)); + test_cases.emplace_back(new test_tri(GGML_TRI_TYPE_UPPER_DIAG)); + + test_cases.emplace_back(new test_fill(0.0f)); + test_cases.emplace_back(new test_fill(2.0f, GGML_TYPE_F32, { 303, 207, 11, 3 })); + test_cases.emplace_back(new test_fill(-152.0f, GGML_TYPE_F32, { 800, 600, 4, 4 })); + + test_cases.emplace_back(new test_solve_tri()); + test_cases.emplace_back(new test_solve_tri(GGML_TYPE_F32, { 11, 11, 1, 1 }, { 5, 11, 1, 1 })); + test_cases.emplace_back(new test_solve_tri(GGML_TYPE_F32, { 17, 17, 2, 4 }, { 9, 17, 2, 4 })); + test_cases.emplace_back(new test_solve_tri(GGML_TYPE_F32, { 30, 30, 7, 1 }, { 8, 30, 7, 1 })); + test_cases.emplace_back(new test_solve_tri(GGML_TYPE_F32, { 42, 42, 5, 2 }, { 10, 42, 5, 2 })); + test_cases.emplace_back(new test_solve_tri(GGML_TYPE_F32, { 64, 64, 2, 2 }, { 10, 64, 2, 2 })); + test_cases.emplace_back(new test_solve_tri(GGML_TYPE_F32, { 100, 100, 4, 4 }, { 41, 100, 4, 4 })); for (bool v : {false, true}) { test_cases.emplace_back(new test_pad_ext(GGML_TYPE_F32, {512, 512, 1, 1}, 0, 1, 0, 1, 0, 0, 0, 0, v)); test_cases.emplace_back(new test_pad_ext(GGML_TYPE_F32, {11, 22, 33, 44}, 1, 2, 3, 4, 5, 6, 7, 8, v)); - test_cases.emplace_back(new test_pad_ext(GGML_TYPE_F32, {23, 17, 7, 3}, 2, 1, 3, 0, 1, 2, 0, 0, v, true)); } for (int hsk : { 40, 64, 72, 80, 96, 128, 192, 256, 576 }) { @@ -8057,6 +7945,8 @@ static std::vector> make_test_cases_perf() { test_cases.emplace_back(new test_sum(GGML_TYPE_F32, it)); } + test_cases.emplace_back(new test_argsort(GGML_TYPE_F32, {65000, 16, 1, 
1})); + return test_cases; } From 552e5b2b61a719fa7146db24bda44e3e40ea1199 Mon Sep 17 00:00:00 2001 From: Phylliida Dev Date: Tue, 18 Nov 2025 13:25:03 -0800 Subject: [PATCH 09/39] Update test-backend-ops.cpp --- tests/test-backend-ops.cpp | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp index 27402ad4d74..cf5acad0edc 100644 --- a/tests/test-backend-ops.cpp +++ b/tests/test-backend-ops.cpp @@ -5490,6 +5490,16 @@ struct test_pad_ext : public test_case { // GGML_OP_PAD_REFLECT_1D +static inline int64_t wrap_coord_circular(int64_t coord, int64_t size) { + GGML_ASSERT(size > 0); + const int64_t mod = coord % size; + return mod < 0 ? mod + size : mod; +} + +static inline int64_t offset4d(const int64_t ne[4], int64_t i0, int64_t i1, int64_t i2, int64_t i3) { + return ((i3 * ne[2] + i2) * ne[1] + i1) * ne[0] + i0; +} + struct test_pad_ext_circular : public test_case { const std::array ne_src{4, 3, 1, 1}; const std::array pads_l{1, 2, 0, 0}; From 1c69e4ed8581b60f790edd981ccc2fb51b9dd182 Mon Sep 17 00:00:00 2001 From: Phylliida Dev Date: Wed, 19 Nov 2025 13:24:32 -0800 Subject: [PATCH 10/39] Fixed comment on pad test --- tests/test-backend-ops.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp index cf5acad0edc..93a2d50af7d 100644 --- a/tests/test-backend-ops.cpp +++ b/tests/test-backend-ops.cpp @@ -5488,8 +5488,6 @@ struct test_pad_ext : public test_case { } }; -// GGML_OP_PAD_REFLECT_1D - static inline int64_t wrap_coord_circular(int64_t coord, int64_t size) { GGML_ASSERT(size > 0); const int64_t mod = coord % size; @@ -5500,6 +5498,7 @@ static inline int64_t offset4d(const int64_t ne[4], int64_t i0, int64_t i1, int6 return ((i3 * ne[2] + i2) * ne[1] + i1) * ne[0] + i0; } +// GGML_OP_PAD (with extension and circular) struct test_pad_ext_circular : public test_case { const std::array ne_src{4, 3, 1, 1}; const std::array pads_l{1, 2, 0, 0}; From 429854b2a49c602af904e133105b11b58d0e5c85 Mon Sep 17 00:00:00 2001 From: Phylliida Date: Wed, 19 Nov 2025 13:31:39 -0800 Subject: [PATCH 11/39] removed trailing whitespace --- ggml/src/ggml-cpu/ops.cpp | 2 +- ggml/src/ggml-cuda/pad.cu | 4 ++-- ggml/src/ggml-vulkan/vulkan-shaders/pad.comp | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ggml/src/ggml-cpu/ops.cpp b/ggml/src/ggml-cpu/ops.cpp index c7207ec1c0a..76e94e760e0 100644 --- a/ggml/src/ggml-cpu/ops.cpp +++ b/ggml/src/ggml-cpu/ops.cpp @@ -7602,7 +7602,7 @@ static void ggml_compute_forward_pad_f32( src_i2*nb02 + src_i1*nb01 + src_i0*nb00; - + const float * src_ptr = (const float *)((char *) src0->data + src_idx); dst_ptr[dst_idx] = *src_ptr; } diff --git a/ggml/src/ggml-cuda/pad.cu b/ggml/src/ggml-cuda/pad.cu index f3f06897e42..67eefe565c9 100644 --- a/ggml/src/ggml-cuda/pad.cu +++ b/ggml/src/ggml-cuda/pad.cu @@ -25,7 +25,7 @@ static __global__ void pad_f32(const float * src, float * dst, if (i0 >= ne0 || i1 >= ne1 || i2 >= ne2 || i3 >= ne3) { return; } - + const int64_t dst_idx = i3*(ne0*ne1*ne2) + i2*(ne0*ne1) + i1*ne0 + i0; if (circular == 0) { @@ -94,7 +94,7 @@ void ggml_cuda_op_pad(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const int32_t rp2 = ((const int32_t*)(dst->op_params))[5]; const int32_t lp3 = ((const int32_t*)(dst->op_params))[6]; const int32_t rp3 = ((const int32_t*)(dst->op_params))[7]; - const int32_t circular = ((const int32_t*)(dst->op_params))[8]; + const int32_t circular = ((const int32_t*)(dst->op_params))[8]; 
pad_f32_cuda(src0_d, dst_d, lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/pad.comp b/ggml/src/ggml-vulkan/vulkan-shaders/pad.comp index f2fd5929bf4..7c2494c2ea2 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/pad.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/pad.comp @@ -60,6 +60,6 @@ void main() { i3 >= p.lp3 && i3 < p.ne13 - p.rp3; data_d[get_doffset() + dst_idx] = D_TYPE(is_src0 ? data_a[get_aoffset() + src0_idx] : 0.0f); } - + } From cf720e888b8c4a1096d45c79fc12f29c7d786644 Mon Sep 17 00:00:00 2001 From: Phylliida Date: Tue, 25 Nov 2025 13:19:08 -0800 Subject: [PATCH 12/39] Removed unneded test in test-backend-ops --- tests/test-backend-ops.cpp | 88 -------------------------------------- 1 file changed, 88 deletions(-) diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp index 47d6f3eb075..2697cb73e8f 100644 --- a/tests/test-backend-ops.cpp +++ b/tests/test-backend-ops.cpp @@ -5498,94 +5498,6 @@ struct test_pad_ext : public test_case { } }; -static inline int64_t wrap_coord_circular(int64_t coord, int64_t size) { - GGML_ASSERT(size > 0); - const int64_t mod = coord % size; - return mod < 0 ? mod + size : mod; -} - -static inline int64_t offset4d(const int64_t ne[4], int64_t i0, int64_t i1, int64_t i2, int64_t i3) { - return ((i3 * ne[2] + i2) * ne[1] + i1) * ne[0] + i0; -} - -// GGML_OP_PAD (with extension and circular) -struct test_pad_ext_circular : public test_case { - const std::array ne_src{4, 3, 1, 1}; - const std::array pads_l{1, 2, 0, 0}; - const std::array pads_r{2, 1, 0, 0}; - - ggml_tensor * input = nullptr; - ggml_tensor * expected = nullptr; - - std::string vars() override { - return "manual_pad_ext_circular"; - } - - ggml_tensor * build_graph(ggml_context * ctx) override { - input = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne_src.data()); - ggml_set_name(input, "input"); - - ggml_tensor * actual = ggml_pad_ext_circular(ctx, input, - pads_l[0], pads_r[0], pads_l[1], pads_r[1], pads_l[2], pads_r[2], pads_l[3], pads_r[3]); - ggml_set_name(actual, "actual"); - - int64_t ne_dst[4] = { - ne_src[0] + pads_l[0] + pads_r[0], - ne_src[1] + pads_l[1] + pads_r[1], - ne_src[2] + pads_l[2] + pads_r[2], - ne_src[3] + pads_l[3] + pads_r[3], - }; - - expected = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne_dst); - ggml_set_name(expected, "expected"); - - ggml_tensor * diff = ggml_sub(ctx, actual, expected); - ggml_tensor * sq = ggml_sqr(ctx, diff); - ggml_tensor * loss = ggml_sum(ctx, sq); - ggml_set_name(loss, "loss"); - return loss; - } - - void initialize_tensors(ggml_context * ctx) override { - test_case::initialize_tensors(ctx); - - std::vector src_data(ggml_nelements(input)); - for (size_t i = 0; i < src_data.size(); ++i) { - src_data[i] = static_cast(i + 1); - } - ggml_backend_tensor_set(input, src_data.data(), 0, src_data.size() * sizeof(float)); - - int64_t ne_dst[4] = { - ne_src[0] + pads_l[0] + pads_r[0], - ne_src[1] + pads_l[1] + pads_r[1], - ne_src[2] + pads_l[2] + pads_r[2], - ne_src[3] + pads_l[3] + pads_r[3], - }; - - std::vector exp_data(ggml_nelements(expected)); - for (int64_t i3 = 0; i3 < ne_dst[3]; ++i3) { - for (int64_t i2 = 0; i2 < ne_dst[2]; ++i2) { - for (int64_t i1 = 0; i1 < ne_dst[1]; ++i1) { - for (int64_t i0 = 0; i0 < ne_dst[0]; ++i0) { - const int64_t src_i0 = wrap_coord_circular(i0 - pads_l[0], ne_src[0]); - const int64_t src_i1 = wrap_coord_circular(i1 - pads_l[1], ne_src[1]); - const int64_t src_i2 = wrap_coord_circular(i2 - pads_l[2], ne_src[2]); - const int64_t src_i3 = wrap_coord_circular(i3 - 
pads_l[3], ne_src[3]); - exp_data[offset4d(ne_dst, i0, i1, i2, i3)] = - src_data[offset4d(ne_src.data(), src_i0, src_i1, src_i2, src_i3)]; - } - } - } - } - ggml_backend_tensor_set(expected, exp_data.data(), 0, exp_data.size() * sizeof(float)); - } - - double max_nmse_err() override { - return 1e-8; - } -}; - - // GGML_OP_PAD_REFLECT_1D struct test_pad_reflect_1d : public test_case { const ggml_type type; From a0bbbc29e25b17be9387ba36094f999e53f0de66 Mon Sep 17 00:00:00 2001 From: Phylliida Date: Tue, 25 Nov 2025 13:29:45 -0800 Subject: [PATCH 13/39] Removed removed test from calls --- tests/test-backend-ops.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp index 61453ed46dc..0a8a3f0255a 100644 --- a/tests/test-backend-ops.cpp +++ b/tests/test-backend-ops.cpp @@ -7698,7 +7698,6 @@ static std::vector> make_test_cases_eval() { test_cases.emplace_back(new test_pad()); test_cases.emplace_back(new test_pad(GGML_TYPE_F32, {33, 17, 2, 1}, 4, 3, true)); // circular test_cases.emplace_back(new test_pad_ext()); - test_cases.emplace_back(new test_pad_ext_circular()); test_cases.emplace_back(new test_pad_reflect_1d()); test_cases.emplace_back(new test_pad_reflect_1d(GGML_TYPE_F32, {3000, 384, 4, 1})); test_cases.emplace_back(new test_roll()); From c9513b4cfa9242c6c12b33de8363fcd51524eda5 Mon Sep 17 00:00:00 2001 From: Phylliida Dev Date: Fri, 28 Nov 2025 20:02:25 -0800 Subject: [PATCH 14/39] Update ggml/src/ggml-vulkan/vulkan-shaders/pad.comp Co-authored-by: Ruben Ortlam --- ggml/src/ggml-vulkan/vulkan-shaders/pad.comp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/pad.comp b/ggml/src/ggml-vulkan/vulkan-shaders/pad.comp index 7c2494c2ea2..ae614c407dc 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/pad.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/pad.comp @@ -52,8 +52,7 @@ void main() { const uint ci3 = wrap_coord(int(i3) - int(p.lp3), p.ne03); const uint circular_src_idx = ci3*p.nb03 + ci2*p.nb02 + ci1*p.nb01 + ci0*p.nb00; data_d[get_doffset() + dst_idx] = D_TYPE(data_a[get_aoffset() + circular_src_idx]); - } - else { + } else { const bool is_src0 = i0 >= p.lp0 && i0 < p.ne10 - p.rp0 && i1 >= p.lp1 && i1 < p.ne11 - p.rp1 && i2 >= p.lp2 && i2 < p.ne12 - p.rp2 && From df6635f00b21f216ced2f107db81ad353587b590 Mon Sep 17 00:00:00 2001 From: Phylliida Dev Date: Fri, 28 Nov 2025 20:04:19 -0800 Subject: [PATCH 15/39] Fixed alignment --- ggml/src/ggml-vulkan/vulkan-shaders/pad.comp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/pad.comp b/ggml/src/ggml-vulkan/vulkan-shaders/pad.comp index ae614c407dc..7edc524490e 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/pad.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/pad.comp @@ -54,9 +54,9 @@ void main() { data_d[get_doffset() + dst_idx] = D_TYPE(data_a[get_aoffset() + circular_src_idx]); } else { const bool is_src0 = i0 >= p.lp0 && i0 < p.ne10 - p.rp0 && - i1 >= p.lp1 && i1 < p.ne11 - p.rp1 && - i2 >= p.lp2 && i2 < p.ne12 - p.rp2 && - i3 >= p.lp3 && i3 < p.ne13 - p.rp3; + i1 >= p.lp1 && i1 < p.ne11 - p.rp1 && + i2 >= p.lp2 && i2 < p.ne12 - p.rp2 && + i3 >= p.lp3 && i3 < p.ne13 - p.rp3; data_d[get_doffset() + dst_idx] = D_TYPE(is_src0 ? 
data_a[get_aoffset() + src0_idx] : 0.0f); } From 893065debcf7480f6264cc4f1da6c6eecec6ac85 Mon Sep 17 00:00:00 2001 From: Phylliida Dev Date: Sat, 29 Nov 2025 11:56:47 -0800 Subject: [PATCH 16/39] Formatting Co-authored-by: Aman Gupta --- ggml/src/ggml-cuda/pad.cu | 1 - 1 file changed, 1 deletion(-) diff --git a/ggml/src/ggml-cuda/pad.cu b/ggml/src/ggml-cuda/pad.cu index 67eefe565c9..c99885e9fce 100644 --- a/ggml/src/ggml-cuda/pad.cu +++ b/ggml/src/ggml-cuda/pad.cu @@ -3,7 +3,6 @@ #include "pad.cuh" - __device__ __forceinline__ int64_t wrap_coord(int64_t coord, int64_t size) { return (coord % size + size) % size; } From 3bfacc8a0ce39344bbc76de3d20bb7fdc51a85ab Mon Sep 17 00:00:00 2001 From: Phylliida Date: Sat, 29 Nov 2025 18:48:46 -0800 Subject: [PATCH 17/39] Format pad --- ggml/src/ggml-cuda/pad.cu | 101 -------------------------------------- 1 file changed, 101 deletions(-) diff --git a/ggml/src/ggml-cuda/pad.cu b/ggml/src/ggml-cuda/pad.cu index c99885e9fce..e69de29bb2d 100644 --- a/ggml/src/ggml-cuda/pad.cu +++ b/ggml/src/ggml-cuda/pad.cu @@ -1,101 +0,0 @@ -#include - -#include "pad.cuh" - - -__device__ __forceinline__ int64_t wrap_coord(int64_t coord, int64_t size) { - return (coord % size + size) % size; -} - -static __global__ void pad_f32(const float * src, float * dst, - const int lp0, const int rp0, const int lp1, const int rp1, - const int lp2, const int rp2, const int lp3, const int rp3, - const int ne0, const int ne1, const int ne2, const int ne3, - const int circular) { - // blockIdx.z: i3*ne2+i2 - // blockIdx.y: i1 - // blockIDx.x: i0 / CUDA_PAD_BLOCK_SIZE - // gridDim.y: ne1 - int i0 = threadIdx.x + blockIdx.x * blockDim.x; - int i1 = blockIdx.y; - int i2 = blockIdx.z % ne2; - int i3 = blockIdx.z / ne2; - - if (i0 >= ne0 || i1 >= ne1 || i2 >= ne2 || i3 >= ne3) { - return; - } - - const int64_t dst_idx = i3*(ne0*ne1*ne2) + i2*(ne0*ne1) + i1*ne0 + i0; - - if (circular == 0) { - // operation - if ((i0 >= lp0 && i0 < ne0 - rp0) && - (i1 >= lp1 && i1 < ne1 - rp1) && - (i2 >= lp2 && i2 < ne2 - rp2) && - (i3 >= lp3 && i3 < ne3 - rp3)) { - const int64_t i00 = i0 - lp0; - const int64_t i01 = i1 - lp1; - const int64_t i02 = i2 - lp2; - const int64_t i03 = i3 - lp3; - const int64_t ne02 = ne2 - lp2 - rp2; - const int64_t ne01 = ne1 - lp1 - rp1; - const int64_t ne00 = ne0 - lp0 - rp0; - - const int64_t src_idx = i03*(ne00*ne01*ne02) + i02*(ne00*ne01) + i01*ne00 + i00; - - dst[dst_idx] = src[src_idx]; - } else { - dst[dst_idx] = 0.0f; - } - } - else { - const int64_t ne00 = ne0 - lp0 - rp0; - const int64_t ne01 = ne1 - lp1 - rp1; - const int64_t ne02 = ne2 - lp2 - rp2; - const int64_t ne03 = ne3 - lp3 - rp3; - - const int64_t i00 = wrap_coord(i0 - lp0, ne00); - const int64_t i01 = wrap_coord(i1 - lp1, ne01); - const int64_t i02 = wrap_coord(i2 - lp2, ne02); - const int64_t i03 = wrap_coord(i3 - lp3, ne03); - - const int64_t src_idx = i03*(ne00*ne01*ne02) + i02*(ne00*ne01) + i01*ne00 + i00; - - dst[dst_idx] = src[src_idx]; - } -} - -static void pad_f32_cuda(const float * src, float * dst, - const int lp0, const int rp0, const int lp1, const int rp1, - const int lp2, const int rp2, const int lp3, const int rp3, - const int ne0, const int ne1, const int ne2, const int ne3, - const int circular, cudaStream_t stream) { - int num_blocks = (ne0 + CUDA_PAD_BLOCK_SIZE - 1) / CUDA_PAD_BLOCK_SIZE; - dim3 gridDim(num_blocks, ne1, ne2*ne3); - pad_f32<<>>(src, dst, lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, ne0, ne1, ne2, ne3, circular); -} - -void ggml_cuda_op_pad(ggml_backend_cuda_context & ctx, 
ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const float * src0_d = (const float *)src0->data; - float * dst_d = (float *)dst->data; - cudaStream_t stream = ctx.stream(); - - GGML_ASSERT(src0->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); - GGML_ASSERT(ggml_is_contiguous(src0)); - - const int32_t lp0 = ((const int32_t*)(dst->op_params))[0]; - const int32_t rp0 = ((const int32_t*)(dst->op_params))[1]; - const int32_t lp1 = ((const int32_t*)(dst->op_params))[2]; - const int32_t rp1 = ((const int32_t*)(dst->op_params))[3]; - const int32_t lp2 = ((const int32_t*)(dst->op_params))[4]; - const int32_t rp2 = ((const int32_t*)(dst->op_params))[5]; - const int32_t lp3 = ((const int32_t*)(dst->op_params))[6]; - const int32_t rp3 = ((const int32_t*)(dst->op_params))[7]; - const int32_t circular = ((const int32_t*)(dst->op_params))[8]; - - pad_f32_cuda(src0_d, dst_d, - lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, - dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], circular, stream); -} From 4cdba9f200259e0c053441ff9db77e5642f48daa Mon Sep 17 00:00:00 2001 From: Phylliida Date: Sat, 29 Nov 2025 18:49:52 -0800 Subject: [PATCH 18/39] Format --- ggml/src/ggml-cuda/pad.cu | 103 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 103 insertions(+) diff --git a/ggml/src/ggml-cuda/pad.cu b/ggml/src/ggml-cuda/pad.cu index e69de29bb2d..64988899656 100644 --- a/ggml/src/ggml-cuda/pad.cu +++ b/ggml/src/ggml-cuda/pad.cu @@ -0,0 +1,103 @@ +#include + +#include "pad.cuh" + + +__device__ __forceinline__ int64_t wrap_around(int64_t coord, int64_t size) { + // + size ensures negatives are handled properly + return (coord + size) % size; +} + +static __global__ void pad_f32(const float * src, float * dst, + const int lp0, const int rp0, const int lp1, const int rp1, + const int lp2, const int rp2, const int lp3, const int rp3, + const int ne0, const int ne1, const int ne2, const int ne3, + const bool circular) { + // blockIdx.z: i3*ne2+i2 + // blockIdx.y: i1 + // blockIDx.x: i0 / CUDA_PAD_BLOCK_SIZE + // gridDim.y: ne1 + int i0 = threadIdx.x + blockIdx.x * blockDim.x; + int i1 = blockIdx.y; + int i2 = blockIdx.z % ne2; + int i3 = blockIdx.z / ne2; + + if (i0 >= ne0 || i1 >= ne1 || i2 >= ne2 || i3 >= ne3) { + return; + } + + const int64_t dst_idx = i3*(ne0*ne1*ne2) + i2*(ne0*ne1) + i1*ne0 + i0; + + if (!circular) { + // operation + if ((i0 >= lp0 && i0 < ne0 - rp0) && + (i1 >= lp1 && i1 < ne1 - rp1) && + (i2 >= lp2 && i2 < ne2 - rp2) && + (i3 >= lp3 && i3 < ne3 - rp3)) { + const int64_t i00 = i0 - lp0; + const int64_t i01 = i1 - lp1; + const int64_t i02 = i2 - lp2; + const int64_t i03 = i3 - lp3; + const int64_t ne02 = ne2 - lp2 - rp2; + const int64_t ne01 = ne1 - lp1 - rp1; + const int64_t ne00 = ne0 - lp0 - rp0; + + const int64_t src_idx = i03*(ne00*ne01*ne02) + i02*(ne00*ne01) + i01*ne00 + i00; + + dst[dst_idx] = src[src_idx]; + } else { + dst[dst_idx] = 0.0f; + } + } + // circular means on a torus, so x and y wrap around + else { + const int64_t ne00 = ne0 - lp0 - rp0; + const int64_t ne01 = ne1 - lp1 - rp1; + const int64_t ne02 = ne2 - lp2 - rp2; + const int64_t ne03 = ne3 - lp3 - rp3; + + const int64_t i00 = wrap_around(i0 - lp0, ne00); + const int64_t i01 = wrap_around(i1 - lp1, ne01); + const int64_t i02 = wrap_around(i2 - lp2, ne02); + const int64_t i03 = wrap_around(i3 - lp3, ne03); + + const int64_t src_idx = i03*(ne00*ne01*ne02) + i02*(ne00*ne01) + i01*ne00 + i00; + + dst[dst_idx] = src[src_idx]; + } +} + +static void pad_f32_cuda(const float * src, float * dst, + const 
int lp0, const int rp0, const int lp1, const int rp1, + const int lp2, const int rp2, const int lp3, const int rp3, + const int ne0, const int ne1, const int ne2, const int ne3, + const int circular, cudaStream_t stream) { + int num_blocks = (ne0 + CUDA_PAD_BLOCK_SIZE - 1) / CUDA_PAD_BLOCK_SIZE; + dim3 gridDim(num_blocks, ne1, ne2*ne3); + pad_f32<<>>(src, dst, lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, ne0, ne1, ne2, ne3, circular); +} + +void ggml_cuda_op_pad(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const float * src0_d = (const float *)src0->data; + float * dst_d = (float *)dst->data; + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_F32); + GGML_ASSERT(ggml_is_contiguous(src0)); + + const int32_t lp0 = ((const int32_t*)(dst->op_params))[0]; + const int32_t rp0 = ((const int32_t*)(dst->op_params))[1]; + const int32_t lp1 = ((const int32_t*)(dst->op_params))[2]; + const int32_t rp1 = ((const int32_t*)(dst->op_params))[3]; + const int32_t lp2 = ((const int32_t*)(dst->op_params))[4]; + const int32_t rp2 = ((const int32_t*)(dst->op_params))[5]; + const int32_t lp3 = ((const int32_t*)(dst->op_params))[6]; + const int32_t rp3 = ((const int32_t*)(dst->op_params))[7]; + const int32_t circular = ((const int32_t*)(dst->op_params))[8]; + + pad_f32_cuda(src0_d, dst_d, + lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, + dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], (bool)circular, stream); +} From 606dd62885cf63f0965ce17ea1358fc74ef7fc40 Mon Sep 17 00:00:00 2001 From: Phylliida Date: Sat, 29 Nov 2025 18:50:29 -0800 Subject: [PATCH 19/39] Clang format --- ggml/src/ggml-cuda/pad.cu | 121 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 121 insertions(+) diff --git a/ggml/src/ggml-cuda/pad.cu b/ggml/src/ggml-cuda/pad.cu index 64988899656..4361c0596d9 100644 --- a/ggml/src/ggml-cuda/pad.cu +++ b/ggml/src/ggml-cuda/pad.cu @@ -101,3 +101,124 @@ void ggml_cuda_op_pad(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], (bool)circular, stream); } +#include "pad.cuh" + +#include + +__device__ __forceinline__ int64_t wrap_around(int64_t coord, int64_t size) { + // + size ensures negatives are handled properly + return (coord + size) % size; +} + +static __global__ void pad_f32(const float * src, + float * dst, + const int lp0, + const int rp0, + const int lp1, + const int rp1, + const int lp2, + const int rp2, + const int lp3, + const int rp3, + const int ne0, + const int ne1, + const int ne2, + const int ne3, + const bool circular) { + // blockIdx.z: i3*ne2+i2 + // blockIdx.y: i1 + // blockIDx.x: i0 / CUDA_PAD_BLOCK_SIZE + // gridDim.y: ne1 + int i0 = threadIdx.x + blockIdx.x * blockDim.x; + int i1 = blockIdx.y; + int i2 = blockIdx.z % ne2; + int i3 = blockIdx.z / ne2; + + if (i0 >= ne0 || i1 >= ne1 || i2 >= ne2 || i3 >= ne3) { + return; + } + + const int64_t dst_idx = i3 * (ne0 * ne1 * ne2) + i2 * (ne0 * ne1) + i1 * ne0 + i0; + + if (!circular) { + // operation + if ((i0 >= lp0 && i0 < ne0 - rp0) && (i1 >= lp1 && i1 < ne1 - rp1) && (i2 >= lp2 && i2 < ne2 - rp2) && + (i3 >= lp3 && i3 < ne3 - rp3)) { + const int64_t i00 = i0 - lp0; + const int64_t i01 = i1 - lp1; + const int64_t i02 = i2 - lp2; + const int64_t i03 = i3 - lp3; + const int64_t ne02 = ne2 - lp2 - rp2; + const int64_t ne01 = ne1 - lp1 - rp1; + const int64_t ne00 = ne0 - lp0 - rp0; + + const int64_t src_idx = i03 * (ne00 * ne01 * ne02) + 
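
[Editor's note] On the launch geometry used by pad_f32_cuda above: the two outer tensor dimensions are packed into gridDim.z as ne2*ne3 blocks, and the kernel unpacks blockIdx.z with a modulo/divide pair (i2 = z % ne2, i3 = z / ne2). A tiny host-side round-trip check of that encoding; ne2 and ne3 here are arbitrary example sizes, nothing is taken from a real launch:

    #include <assert.h>
    #include <stdio.h>

    int main(void) {
        const int ne2 = 3, ne3 = 4;            // arbitrary sizes for the two outer dims
        for (int i3 = 0; i3 < ne3; ++i3) {
            for (int i2 = 0; i2 < ne2; ++i2) {
                const int z = i3 * ne2 + i2;   // the value blockIdx.z takes for this (i2, i3)
                assert(z % ne2 == i2);         // what the kernel recovers as i2
                assert(z / ne2 == i3);         // ... and as i3
            }
        }
        printf("blockIdx.z <-> (i2, i3) mapping round-trips for ne2=%d, ne3=%d\n", ne2, ne3);
        return 0;
    }
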
i02 * (ne00 * ne01) + i01 * ne00 + i00; + + dst[dst_idx] = src[src_idx]; + } else { + dst[dst_idx] = 0.0f; + } + } + // circular means on a torus, so x and y wrap around + else { + const int64_t ne00 = ne0 - lp0 - rp0; + const int64_t ne01 = ne1 - lp1 - rp1; + const int64_t ne02 = ne2 - lp2 - rp2; + const int64_t ne03 = ne3 - lp3 - rp3; + + const int64_t i00 = wrap_around(i0 - lp0, ne00); + const int64_t i01 = wrap_around(i1 - lp1, ne01); + const int64_t i02 = wrap_around(i2 - lp2, ne02); + const int64_t i03 = wrap_around(i3 - lp3, ne03); + + const int64_t src_idx = i03 * (ne00 * ne01 * ne02) + i02 * (ne00 * ne01) + i01 * ne00 + i00; + + dst[dst_idx] = src[src_idx]; + } +} + +static void pad_f32_cuda(const float * src, + float * dst, + const int lp0, + const int rp0, + const int lp1, + const int rp1, + const int lp2, + const int rp2, + const int lp3, + const int rp3, + const int ne0, + const int ne1, + const int ne2, + const int ne3, + const int circular, + cudaStream_t stream) { + int num_blocks = (ne0 + CUDA_PAD_BLOCK_SIZE - 1) / CUDA_PAD_BLOCK_SIZE; + dim3 gridDim(num_blocks, ne1, ne2 * ne3); + pad_f32<<>>(src, dst, lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, ne0, ne1, + ne2, ne3, circular); +} + +void ggml_cuda_op_pad(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const float * src0_d = (const float *) src0->data; + float * dst_d = (float *) dst->data; + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_F32); + GGML_ASSERT(ggml_is_contiguous(src0)); + + const int32_t lp0 = ((const int32_t *) (dst->op_params))[0]; + const int32_t rp0 = ((const int32_t *) (dst->op_params))[1]; + const int32_t lp1 = ((const int32_t *) (dst->op_params))[2]; + const int32_t rp1 = ((const int32_t *) (dst->op_params))[3]; + const int32_t lp2 = ((const int32_t *) (dst->op_params))[4]; + const int32_t rp2 = ((const int32_t *) (dst->op_params))[5]; + const int32_t lp3 = ((const int32_t *) (dst->op_params))[6]; + const int32_t rp3 = ((const int32_t *) (dst->op_params))[7]; + const int32_t circular = ((const int32_t *) (dst->op_params))[8]; + + pad_f32_cuda(src0_d, dst_d, lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], + (bool) circular, stream); +} From 6dc7169d4ca5f1c486a7706bd73afc989bcd9e16 Mon Sep 17 00:00:00 2001 From: Phylliida Date: Sat, 29 Nov 2025 18:54:27 -0800 Subject: [PATCH 20/39] format --- ggml/src/ggml-cpu/ops.cpp | 10077 +++++++++++++++++++++++++++++++++++- 1 file changed, 10072 insertions(+), 5 deletions(-) diff --git a/ggml/src/ggml-cpu/ops.cpp b/ggml/src/ggml-cpu/ops.cpp index f336c46637e..a3a217493fb 100644 --- a/ggml/src/ggml-cpu/ops.cpp +++ b/ggml/src/ggml-cpu/ops.cpp @@ -6554,7 +6554,7 @@ static void ggml_call_mul_mat(ggml_type type, const ggml_compute_params * params ggml_compute_forward_mul_mat(params, &dst); } -static inline int64_t ggml_wrap_coord(int64_t coord, int64_t size) { +static inline int64_t ggml_wrap_around(int64_t coord, int64_t size) { return (coord + size) % size; // adding size avoids negative number weirdness } @@ -7592,10 +7592,10 @@ static void ggml_compute_forward_pad_f32( for (int64_t i0 = 0; i0 < ne0; ++i0) { for (int64_t i3 = 0; i3 < ne3; ++i3) { const int64_t dst_idx = i3*(ne0*ne1*ne2) + i2*(ne0*ne1) + i1*ne0 + i0; - const int64_t src_i0 = ggml_wrap_coord(i0 - lp0, ne00); - const int64_t src_i1 = ggml_wrap_coord(i1 - lp1, ne01); - const int64_t src_i2 = ggml_wrap_coord(i2 - lp2, ne02); - const int64_t src_i3 = 
ggml_wrap_coord(i3 - lp3, ne03); + const int64_t src_i0 = ggml_wrap_around(i0 - lp0, ne00); + const int64_t src_i1 = ggml_wrap_around(i1 - lp1, ne01); + const int64_t src_i2 = ggml_wrap_around(i2 - lp2, ne02); + const int64_t src_i3 = ggml_wrap_around(i3 - lp3, ne03); const int64_t src_idx = src_i3*nb03 + @@ -10416,3 +10416,10070 @@ void ggml_compute_forward_opt_step_sgd(const ggml_compute_params * params, ggml_ } } } +#include "ops.h" + +#include "binary-ops.h" +#include "ggml-cpu.h" +#include "ggml-impl.h" +#include "ggml.h" +#include "unary-ops.h" +#include "vec.h" + +#include +#include +#include +#include + +// ggml_compute_forward_dup + +static void ggml_compute_forward_dup_same_cont(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); + GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0)); + GGML_ASSERT(src0->type == dst->type); + + const size_t nb0 = ggml_type_size(src0->type); + + const int ith = params->ith; // thread index + const int nth = params->nth; // number of threads + + // parallelize by blocks + const int nk = ggml_nelements(src0) / ggml_blck_size(src0->type); + const int dr = (nk + nth - 1) / nth; + const int k0 = dr * ith; + const int k1 = MIN(k0 + dr, nk); + + if (k0 < k1) { + memcpy(((char *) dst->data + k0 * nb0), ((char *) src0->data + k0 * nb0), (k1 - k0) * nb0); + } +} + +template +static void ggml_compute_forward_dup_flt(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); + GGML_ASSERT(!ggml_is_quantized(src0->type) && !ggml_is_quantized(dst->type)); + + GGML_TENSOR_UNARY_OP_LOCALS + + const int ith = params->ith; // thread index + const int nth = params->nth; // number of threads + + // parallelize by rows + const int nr = ne01; + // number of rows per thread + const int dr = (nr + nth - 1) / nth; + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + // case: type & row size equal + if (src0->type == dst->type && ne00 == ne0 && nb00 == ggml_type_size(src0->type) && + nb0 == ggml_type_size(dst->type)) { + // copy by rows + const size_t rs = ne00 * nb00; + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = ir0; i01 < ir1; i01++) { + memcpy(((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3), + ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03), rs); + } + } + } + return; + } + + // case: dst tensor is contiguous + if (ggml_is_contiguous(dst)) { + if (nb00 == sizeof(src_t)) { + if constexpr (std::is_same_v) { + // same type + size_t id = 0; + const size_t rs = ne00 * nb00; + char * dst_ptr = (char *) dst->data; + + for (int i03 = 0; i03 < ne03; i03++) { + for (int i02 = 0; i02 < ne02; i02++) { + id += rs * ir0; + for (int i01 = ir0; i01 < ir1; i01++) { + const char * src0_ptr = (char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03; + memcpy(dst_ptr + id, src0_ptr, rs); + id += rs; + } + id += rs * (ne01 - ir1); + } + } + } else { + // casting between non-quantized types + size_t id = 0; + dst_t * dst_ptr = (dst_t *) dst->data; + + for (int i03 = 0; i03 < ne03; i03++) { + for (int i02 = 0; i02 < ne02; i02++) { + id += ne00 * ir0; + for (int i01 = ir0; i01 < ir1; i01++) { + const src_t * src0_ptr = + (src_t *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); + for (int i00 = 0; i00 < ne00; i00++) { + float 
tmp = type_conversion_table::to_f32(src0_ptr[i00]); + dst_ptr[id] = type_conversion_table::from_f32(tmp); + id++; + } + } + id += ne00 * (ne01 - ir1); + } + } + } + } else { + //printf("%s: this is not optimal - fix me\n", __func__); + + size_t id = 0; + dst_t * dst_ptr = (dst_t *) dst->data; + + for (int i03 = 0; i03 < ne03; i03++) { + for (int i02 = 0; i02 < ne02; i02++) { + id += ne00 * ir0; + for (int i01 = ir0; i01 < ir1; i01++) { + for (int i00 = 0; i00 < ne00; i00++) { + const src_t * src0_ptr = + (src_t *) ((char *) src0->data + i00 * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03); + + float tmp = type_conversion_table::to_f32(*src0_ptr); + dst_ptr[id] = type_conversion_table::from_f32(tmp); + id++; + } + } + id += ne00 * (ne01 - ir1); + } + } + } + return; + } + + // dst counters + int64_t i10 = 0; + int64_t i11 = 0; + int64_t i12 = 0; + int64_t i13 = 0; + + if constexpr (std::is_same_v) { + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + i10 += ne00 * ir0; + while (i10 >= ne0) { + i10 -= ne0; + if (++i11 == ne1) { + i11 = 0; + if (++i12 == ne2) { + i12 = 0; + if (++i13 == ne3) { + i13 = 0; + } + } + } + } + for (int64_t i01 = ir0; i01 < ir1; i01++) { + for (int64_t i00 = 0; i00 < ne00; i00++) { + const char * src0_ptr = + ((char *) src0->data + i00 * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03); + char * dst_ptr = ((char *) dst->data + i10 * nb0 + i11 * nb1 + i12 * nb2 + i13 * nb3); + + memcpy(dst_ptr, src0_ptr, sizeof(dst_t)); + + if (++i10 == ne00) { + i10 = 0; + if (++i11 == ne01) { + i11 = 0; + if (++i12 == ne02) { + i12 = 0; + if (++i13 == ne03) { + i13 = 0; + } + } + } + } + } + } + i10 += ne00 * (ne01 - ir1); + while (i10 >= ne0) { + i10 -= ne0; + if (++i11 == ne1) { + i11 = 0; + if (++i12 == ne2) { + i12 = 0; + if (++i13 == ne3) { + i13 = 0; + } + } + } + } + } + } + + } else { + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + i10 += ne00 * ir0; + while (i10 >= ne0) { + i10 -= ne0; + if (++i11 == ne1) { + i11 = 0; + if (++i12 == ne2) { + i12 = 0; + if (++i13 == ne3) { + i13 = 0; + } + } + } + } + for (int64_t i01 = ir0; i01 < ir1; i01++) { + for (int64_t i00 = 0; i00 < ne00; i00++) { + const char * src0_ptr = + ((char *) src0->data + i00 * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03); + char * dst_ptr = ((char *) dst->data + i10 * nb0 + i11 * nb1 + i12 * nb2 + i13 * nb3); + + float tmp = type_conversion_table::to_f32(*(const src_t *) src0_ptr); + *(dst_t *) dst_ptr = type_conversion_table::from_f32(tmp); + + if (++i10 == ne0) { + i10 = 0; + if (++i11 == ne1) { + i11 = 0; + if (++i12 == ne2) { + i12 = 0; + if (++i13 == ne3) { + i13 = 0; + } + } + } + } + } + } + i10 += ne00 * (ne01 - ir1); + while (i10 >= ne0) { + i10 -= ne0; + if (++i11 == ne1) { + i11 = 0; + if (++i12 == ne2) { + i12 = 0; + if (++i13 == ne3) { + i13 = 0; + } + } + } + } + } + } + } +} + +template +static void ggml_compute_forward_dup_to_q(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); + GGML_ASSERT(!ggml_is_quantized(src0->type)); + + GGML_TENSOR_UNARY_OP_LOCALS + + const int ith = params->ith; // thread index + const int nth = params->nth; // number of threads + + // parallelize by rows + const int nr = ne01; + // number of rows per thread + const int dr = (nr + nth - 1) / nth; + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + if (ggml_is_contiguous(dst) && nb00 
== sizeof(src_t) && ggml_get_type_traits_cpu(dst->type)->from_float) { + // casting non-quantized types --> intermediate f32 --> quantized + const ggml_from_float_t quantize_row_q = ggml_get_type_traits_cpu(dst->type)->from_float; + float * src0_f32 = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith; + + size_t id = 0; + size_t rs = nb0 * (ne00 / ggml_blck_size(dst->type)); + char * dst_ptr = (char *) dst->data; + + for (int i03 = 0; i03 < ne03; i03++) { + for (int i02 = 0; i02 < ne02; i02++) { + id += rs * ir0; + for (int i01 = ir0; i01 < ir1; i01++) { + const src_t * src0_ptr = (src_t *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); + + for (int i00 = 0; i00 < ne00; i00++) { + src0_f32[i00] = type_conversion_table::to_f32(src0_ptr[i00]); + } + + quantize_row_q(src0_f32, dst_ptr + id, ne00); + id += rs; + } + id += rs * (ne01 - ir1); + } + } + } else { + // printf("%s %s\n", ggml_type_name(src0->type), ggml_type_name(dst->type)); + GGML_ABORT("not implemented"); + } +} + +// A simplified version of ggml_compute_forward_dup that doesn't do float upcasting, and just plain old memcpy. +static void ggml_compute_forward_dup_bytes(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); + GGML_ASSERT(src0->type == dst->type); + + GGML_TENSOR_UNARY_OP_LOCALS; + + if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst)) { + ggml_compute_forward_dup_same_cont(params, dst); + return; + } + + const size_t type_size = ggml_type_size(src0->type); + + const int ith = params->ith; // thread index + const int nth = params->nth; // number of threads + + // parallelize by rows + const int nr = ne01; + // number of rows per thread + const int dr = (nr + nth - 1) / nth; + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + if (src0->type == dst->type && ggml_are_same_shape(src0, dst) && nb00 == type_size && nb0 == type_size) { + // copy by rows + const size_t rs = ggml_row_size(src0->type, ne00); + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = ir0; i01 < ir1; i01++) { + memcpy(((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3), + ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03), rs); + } + } + } + return; + } + + if (ggml_is_contiguous(dst)) { + size_t id = 0; + char * dst_ptr = (char *) dst->data; + const size_t rs = ne00 * type_size; + + if (nb00 == type_size) { + // src0 is contigous on first dimension, copy by rows + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + id += rs * ir0; + for (int64_t i01 = ir0; i01 < ir1; i01++) { + const char * src0_ptr = (char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03; + memcpy(dst_ptr + id, src0_ptr, rs); + id += rs; + } + id += rs * (ne01 - ir1); + } + } + } else { + //printf("%s: this is not optimal - fix me\n", __func__); + + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + id += rs * ir0; + for (int64_t i01 = ir0; i01 < ir1; i01++) { + for (int64_t i00 = 0; i00 < ne00; i00++) { + const char * src0_ptr = + (char *) src0->data + i00 * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03; + memcpy(dst_ptr + id, src0_ptr, type_size); + + id += type_size; + } + } + id += rs * (ne01 - ir1); + } + } + } + + return; + } + + // dst counters + int64_t k10 = 0; + int64_t i11 = 0; + int64_t i12 = 0; + int64_t i13 = 0; + + // number of blocks in 
a row + const int64_t nk00 = ne00 / ggml_blck_size(src0->type); + const int64_t nk0 = ne0 / ggml_blck_size(dst->type); + + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + k10 += nk00 * ir0; + while (k10 >= nk0) { + k10 -= nk0; + if (++i11 == ne1) { + i11 = 0; + if (++i12 == ne2) { + i12 = 0; + if (++i13 == ne3) { + i13 = 0; + } + } + } + } + for (int64_t i01 = ir0; i01 < ir1; i01++) { + for (int64_t k00 = 0; k00 < nk00; k00++) { + const char * src0_ptr = ((char *) src0->data + k00 * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03); + char * dst_ptr = ((char *) dst->data + k10 * nb0 + i11 * nb1 + i12 * nb2 + i13 * nb3); + + memcpy(dst_ptr, src0_ptr, type_size); + + if (++k10 == nk0) { + k10 = 0; + if (++i11 == ne1) { + i11 = 0; + if (++i12 == ne2) { + i12 = 0; + if (++i13 == ne3) { + i13 = 0; + } + } + } + } + } + } + k10 += nk00 * (ne01 - ir1); + while (k10 >= nk0) { + k10 -= nk0; + if (++i11 == ne1) { + i11 = 0; + if (++i12 == ne2) { + i12 = 0; + if (++i13 == ne3) { + i13 = 0; + } + } + } + } + } + } +} + +static void ggml_compute_forward_dup_from_q(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_TENSOR_BINARY_OP_LOCALS + + const ggml_type type = src0->type; + const ggml_to_float_t dequantize_row_q = ggml_get_type_traits(type)->to_float; + + size_t qk = ggml_blck_size(type); + const int64_t nr = ggml_nelements(src1) / qk; + + // destination must be contiguous in the first dimension + GGML_ASSERT(nb10 == ggml_type_size(dst->type)); + // must either have first dimension large enough to hold a row, or fully contiguous + GGML_ASSERT((ne10 % qk) == 0 || ggml_is_contiguous(dst)); + + const int ith = params->ith; + const int nth = params->nth; + + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int64_t ir = ir0; ir < ir1; ++ir) { + uint32_t i = ir * qk; + + const int64_t i03 = i / (ne00 * ne01 * ne02); + const int64_t i02 = (i - i03 * ne00 * ne01 * ne02) / (ne00 * ne01); + const int64_t i01 = (i - i03 * ne00 * ne01 * ne02 - i02 * ne01 * ne00) / ne00; + const int64_t i00 = i - i03 * ne00 * ne01 * ne02 - i02 * ne01 * ne00 - i01 * ne00; + const int64_t x_offset = (i00 / qk) * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03; + + const int64_t i13 = i / (ne10 * ne11 * ne12); + const int64_t i12 = (i - i13 * ne10 * ne11 * ne12) / (ne10 * ne11); + const int64_t i11 = (i - i13 * ne10 * ne11 * ne12 - i12 * ne10 * ne11) / ne10; + const int64_t i10 = i - i13 * ne10 * ne11 * ne12 - i12 * ne10 * ne11 - i11 * ne10; + const int64_t dst_offset = i10 * nb10 + i11 * nb11 + i12 * nb12 + i13 * nb13; + + dequantize_row_q((const void *) ((char *) src0->data + x_offset), (float *) ((char *) dst->data + dst_offset), + qk); + } +} + +void ggml_compute_forward_dup(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + if (src0->type == dst->type) { + ggml_compute_forward_dup_bytes(params, dst); + return; + } + + switch (src0->type) { + case GGML_TYPE_F16: + { + /**/ if (dst->type == GGML_TYPE_F16) { + ggml_compute_forward_dup_flt(params, dst); + } else if (dst->type == GGML_TYPE_BF16) { + ggml_compute_forward_dup_flt(params, dst); + } else if (dst->type == GGML_TYPE_F32) { + ggml_compute_forward_dup_flt(params, dst); + } else { + ggml_compute_forward_dup_to_q(params, dst); + } + } + break; + case GGML_TYPE_BF16: + { + /**/ if (dst->type == 
GGML_TYPE_F16) { + ggml_compute_forward_dup_flt(params, dst); + } else if (dst->type == GGML_TYPE_BF16) { + ggml_compute_forward_dup_flt(params, dst); + } else if (dst->type == GGML_TYPE_F32) { + ggml_compute_forward_dup_flt(params, dst); + } else { + ggml_compute_forward_dup_to_q(params, dst); + } + } + break; + case GGML_TYPE_F32: + { + /**/ if (dst->type == GGML_TYPE_F16) { + ggml_compute_forward_dup_flt(params, dst); + } else if (dst->type == GGML_TYPE_BF16) { + ggml_compute_forward_dup_flt(params, dst); + } else if (dst->type == GGML_TYPE_F32) { + ggml_compute_forward_dup_flt(params, dst); + } else if (dst->type == GGML_TYPE_I32) { + ggml_compute_forward_dup_flt(params, dst); + } else { + ggml_compute_forward_dup_to_q(params, dst); + } + } + break; + case GGML_TYPE_I32: + { + if (dst->type == GGML_TYPE_F32) { + ggml_compute_forward_dup_flt(params, dst); + } else { + GGML_ABORT("not implemented"); + } + } + break; + default: + { + if (ggml_is_quantized(src0->type) && dst->type == GGML_TYPE_F32) { + ggml_compute_forward_dup_from_q(params, dst); + break; + } + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_add + +static void ggml_compute_forward_add_q_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst)); + + const int nr = ggml_nrows(src0); + + GGML_TENSOR_BINARY_OP_LOCALS + + const int ith = params->ith; + const int nth = params->nth; + + const ggml_type type = src0->type; + const ggml_type dtype = dst->type; + const ggml_to_float_t dequantize_row_q = ggml_get_type_traits(type)->to_float; + const ggml_from_float_t quantize_row_q = ggml_get_type_traits_cpu(dtype)->from_float; + + // we don't support permuted src0 or src1 + GGML_ASSERT(nb00 == ggml_type_size(type)); + GGML_ASSERT(nb10 == sizeof(float)); + + // dst cannot be transposed or permuted + GGML_ASSERT(nb0 <= nb1); + GGML_ASSERT(nb1 <= nb2); + GGML_ASSERT(nb2 <= nb3); + + GGML_ASSERT(ggml_is_quantized(src0->type)); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + float * wdata = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith; + + for (int ir = ir0; ir < ir1; ++ir) { + // src0 indices + const int i03 = ir / (ne02 * ne01); + const int i02 = (ir - i03 * ne02 * ne01) / ne01; + const int i01 = (ir - i03 * ne02 * ne01 - i02 * ne01); + + // src1 and dst are same shape as src0 => same indices + const int i13 = i03; + const int i12 = i02; + const int i11 = i01; + + const int i3 = i03; + const int i2 = i02; + const int i1 = i01; + + void * src0_row = (void *) ((char *) src0->data + (i01 * nb01 + i02 * nb02 + i03 * nb03)); + float * src1_row = (float *) ((char *) src1->data + (i11 * nb11 + i12 * nb12 + i13 * nb13)); + void * dst_row = (void *) ((char *) dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb3)); + + assert(ne00 % 32 == 0); + + // unquantize row from src0 to temp buffer + dequantize_row_q(src0_row, wdata, ne00); + // add src1 + ggml_vec_acc_f32(ne00, wdata, src1_row); + // quantize row to dst + if (quantize_row_q != NULL) { + quantize_row_q(wdata, dst_row, ne00); + } else { + memcpy(dst_row, wdata, ne0 * nb0); + } + } +} + +void ggml_compute_forward_add(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + 
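
[Editor's note] ggml_compute_forward_add_q_f32 above processes each quantized row as quantized -> f32 scratch -> add -> re-quantized. A self-contained sketch of that flow; dequantize_row_stub and quantize_row_stub are trivial stand-ins invented here so the example compiles on its own, where the real code uses the to_float/from_float callbacks from the type traits:

    #include <stdio.h>

    #define NE00 8

    // Stand-ins for dequantize_row_q / quantize_row_q (the real ones come from ggml's type traits).
    static void dequantize_row_stub(const float * q, float * out, int n) { for (int i = 0; i < n; ++i) out[i] = q[i]; }
    static void quantize_row_stub(const float * in, float * q, int n)    { for (int i = 0; i < n; ++i) q[i] = in[i]; }

    int main(void) {
        float src0_row[NE00] = { 1, 2, 3, 4, 5, 6, 7, 8 };   // pretend this row is quantized
        float src1_row[NE00] = { 10, 10, 10, 10, 10, 10, 10, 10 };
        float dst_row[NE00];
        float wdata[NE00];                                    // per-thread f32 scratch, as in params->wdata

        dequantize_row_stub(src0_row, wdata, NE00);           // unquantize row from src0 to temp buffer
        for (int i = 0; i < NE00; ++i) {                      // add src1 (ggml_vec_acc_f32)
            wdata[i] += src1_row[i];
        }
        quantize_row_stub(wdata, dst_row, NE00);              // quantize row to dst

        printf("dst_row[0] = %g\n", dst_row[0]);              // 11
        return 0;
    }
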
case GGML_TYPE_F32: + case GGML_TYPE_F16: + case GGML_TYPE_BF16: + { + ggml_compute_forward_add_non_quantized(params, dst); + } + break; + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: + case GGML_TYPE_Q5_0: + case GGML_TYPE_Q5_1: + case GGML_TYPE_Q8_0: + case GGML_TYPE_MXFP4: + case GGML_TYPE_Q2_K: + case GGML_TYPE_Q3_K: + case GGML_TYPE_Q4_K: + case GGML_TYPE_Q5_K: + case GGML_TYPE_Q6_K: + case GGML_TYPE_TQ1_0: + case GGML_TYPE_TQ2_0: + case GGML_TYPE_IQ2_XXS: + case GGML_TYPE_IQ2_XS: + case GGML_TYPE_IQ3_XXS: + case GGML_TYPE_IQ1_S: + case GGML_TYPE_IQ1_M: + case GGML_TYPE_IQ4_NL: + case GGML_TYPE_IQ4_XS: + case GGML_TYPE_IQ3_S: + case GGML_TYPE_IQ2_S: + { + ggml_compute_forward_add_q_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_add_id + +static void ggml_compute_forward_add_id_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + const ggml_tensor * src2 = dst->src[2]; + + GGML_ASSERT(dst->type == GGML_TYPE_F32); + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT(src2->type == GGML_TYPE_I32); + + GGML_ASSERT(src0->nb[0] == sizeof(float)); + GGML_ASSERT(src1->nb[0] == sizeof(float)); + + const int ith = params->ith; + const int nth = params->nth; + + const int nr = ggml_nrows(src0); + + GGML_TENSOR_TERNARY_OP_LOCALS + + GGML_ASSERT(nb0 == sizeof(float)); + GGML_ASSERT(nb10 == sizeof(float)); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int ir = ir0; ir < ir1; ++ir) { + // src0 indices + const int i3 = ir / (ne2 * ne1); + const int i2 = (ir - i3 * ne2 * ne1) / ne1; + const int i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); + + // src1 indices + const int i11 = *(int32_t *) ((char *) src2->data + i1 * nb20 + i2 * nb21); + + GGML_ASSERT(i11 >= 0 && i11 < ne11); + + ggml_vec_add_f32(ne0, (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1), + (float *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01), + (float *) ((char *) src1->data + i11 * nb11)); + } +} + +void ggml_compute_forward_add_id(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_add_id_f32(params, dst); + } + break; + default: + { + GGML_ABORT("unsupported type for ggml_compute_forward_add_id: %s", ggml_type_name(src0->type)); + } + } +} + +// ggml_compute_forward_add1 + +static void ggml_compute_forward_add1_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + GGML_ASSERT(ggml_is_scalar(src1)); + + const int ith = params->ith; + const int nth = params->nth; + + const int nr = ggml_nrows(src0); + + GGML_TENSOR_UNARY_OP_LOCALS + + GGML_ASSERT(nb0 == sizeof(float)); + GGML_ASSERT(nb00 == sizeof(float)); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int ir = ir0; ir < ir1; ++ir) { + // src0 and dst are same shape => same indices + const int i3 = ir / (ne2 * ne1); + const int i2 = (ir - i3 * ne2 * ne1) / ne1; + const int i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); + +#ifdef GGML_USE_ACCELERATE + 
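
[Editor's note] Nearly every op in this file splits work with the idiom that appears a few lines up: ceil-divide the row count by the thread count, then clamp the last range to nr. A small check of the property this relies on, namely that the per-thread ranges tile [0, nr) exactly; nr and nth are arbitrary example values:

    #include <assert.h>
    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void) {
        const int nr = 10, nth = 4;              // example: 10 rows over 4 threads
        const int dr = (nr + nth - 1) / nth;     // rows per thread, rounded up (3 here)
        int covered = 0;
        for (int ith = 0; ith < nth; ++ith) {
            const int ir0 = dr * ith;
            const int ir1 = MIN(ir0 + dr, nr);   // last thread gets the remainder (possibly empty)
            printf("thread %d: rows [%d, %d)\n", ith, ir0, ir1);
            covered += (ir1 > ir0) ? (ir1 - ir0) : 0;
        }
        assert(covered == nr);                   // ranges cover every row exactly once
        return 0;
    }
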
GGML_UNUSED(ggml_vec_add1_f32); + + vDSP_vadd((float *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01), 1, + (float *) ((char *) src1->data), 0, (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1), + 1, ne0); +#else + ggml_vec_add1_f32(ne0, (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1), + (float *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01), *(float *) src1->data); +#endif + } +} + +static void ggml_compute_forward_add1_f16_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + GGML_ASSERT(ggml_is_scalar(src1)); + + // scalar to add + const float v = *(float *) src1->data; + + const int ith = params->ith; + const int nth = params->nth; + + const int nr = ggml_nrows(src0); + + GGML_TENSOR_UNARY_OP_LOCALS + + GGML_ASSERT(src0->type == GGML_TYPE_F16); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_F16); + + GGML_ASSERT(nb0 == sizeof(ggml_fp16_t)); + GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int ir = ir0; ir < ir1; ++ir) { + // src0 and dst are same shape => same indices + const int i3 = ir / (ne2 * ne1); + const int i2 = (ir - i3 * ne2 * ne1) / ne1; + const int i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); + + ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1); + ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01); + for (int i = 0; i < ne0; i++) { + dst_ptr[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(src0_ptr[i]) + v); + } + } +} + +static void ggml_compute_forward_add1_f16_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + GGML_ASSERT(ggml_is_scalar(src1)); + + // scalar to add + const float v = GGML_CPU_FP16_TO_FP32(*(ggml_fp16_t *) src1->data); + + const int ith = params->ith; + const int nth = params->nth; + + const int nr = ggml_nrows(src0); + + GGML_TENSOR_UNARY_OP_LOCALS + + GGML_ASSERT(src0->type == GGML_TYPE_F16); + GGML_ASSERT(src1->type == GGML_TYPE_F16); + GGML_ASSERT(dst->type == GGML_TYPE_F16); + + GGML_ASSERT(nb0 == sizeof(ggml_fp16_t)); + GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int ir = ir0; ir < ir1; ++ir) { + // src0 and dst are same shape => same indices + const int i3 = ir / (ne2 * ne1); + const int i2 = (ir - i3 * ne2 * ne1) / ne1; + const int i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); + + ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1); + ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01); + for (int i = 0; i < ne0; i++) { + dst_ptr[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(src0_ptr[i]) + v); + } + } +} + +static void ggml_compute_forward_add1_q_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + GGML_ASSERT(ggml_is_scalar(src1)); + + // scalar 
to add + const float v = *(float *) src1->data; + + const int ith = params->ith; + const int nth = params->nth; + + const int nr = ggml_nrows(src0); + + GGML_TENSOR_UNARY_OP_LOCALS + + const ggml_type type = src0->type; + const ggml_to_float_t dequantize_row_q = ggml_get_type_traits(type)->to_float; + const ggml_from_float_t quantize_row_q = ggml_get_type_traits_cpu(type)->from_float; + + // we don't support permuted src0 + GGML_ASSERT(nb00 == ggml_type_size(type)); + + // dst cannot be transposed or permuted + GGML_ASSERT(nb0 <= nb1); + GGML_ASSERT(nb1 <= nb2); + GGML_ASSERT(nb2 <= nb3); + + GGML_ASSERT(ggml_is_quantized(src0->type)); + GGML_ASSERT(dst->type == src0->type); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + float * wdata = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32) * ith; + + for (int ir = ir0; ir < ir1; ++ir) { + // src0 and dst are same shape => same indices + const int i3 = ir / (ne2 * ne1); + const int i2 = (ir - i3 * ne2 * ne1) / ne1; + const int i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); + + void * src0_row = (void *) ((char *) src0->data + (i1 * nb01 + i2 * nb02 + i3 * nb03)); + void * dst_row = (void *) ((char *) dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb0)); + + assert(ne0 % 32 == 0); + + // unquantize row from src0 to temp buffer + dequantize_row_q(src0_row, wdata, ne0); + // add src1 + ggml_vec_acc1_f32(ne0, wdata, v); + // quantize row to dst + quantize_row_q(wdata, dst_row, ne0); + } +} + +static void ggml_compute_forward_add1_bf16_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + GGML_ASSERT(ggml_is_scalar(src1)); + + // scalar to add + const float v = *(float *) src1->data; + + const int ith = params->ith; + const int nth = params->nth; + + const int nr = ggml_nrows(src0); + + GGML_TENSOR_UNARY_OP_LOCALS + + GGML_ASSERT(src0->type == GGML_TYPE_BF16); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_BF16); + + GGML_ASSERT(nb0 == sizeof(ggml_bf16_t)); + GGML_ASSERT(nb00 == sizeof(ggml_bf16_t)); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int ir = ir0; ir < ir1; ++ir) { + // src0 and dst are same shape => same indices + const int i3 = ir / (ne2 * ne1); + const int i2 = (ir - i3 * ne2 * ne1) / ne1; + const int i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); + + ggml_bf16_t * dst_ptr = (ggml_bf16_t *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1); + ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01); + for (int i = 0; i < ne0; i++) { + dst_ptr[i] = GGML_FP32_TO_BF16(GGML_BF16_TO_FP32(src0_ptr[i]) + v); + } + } +} + +static void ggml_compute_forward_add1_bf16_bf16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + GGML_ASSERT(ggml_is_scalar(src1)); + + // scalar to add + const float v = GGML_BF16_TO_FP32(*(ggml_bf16_t *) src1->data); + + const int ith = params->ith; + const int nth = params->nth; + + const int nr = ggml_nrows(src0); + + GGML_TENSOR_UNARY_OP_LOCALS + + GGML_ASSERT(src0->type == GGML_TYPE_BF16); + 
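
[Editor's note] The f16/bf16 add1 variants above all share the convert-up, operate, convert-down shape: dst[i] = FP32_TO_T(T_TO_FP32(src[i]) + v). A standalone sketch of the same shape; the bf16 helpers here are plain truncating conversions written only for illustration, the real GGML_FP32_TO_BF16 conversion is more careful:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    // Illustrative bf16 <-> f32 helpers: bf16 keeps the top 16 bits of an IEEE f32.
    static uint16_t f32_to_bf16(float f)    { uint32_t u; memcpy(&u, &f, 4); return (uint16_t) (u >> 16); }
    static float    bf16_to_f32(uint16_t h) { uint32_t u = (uint32_t) h << 16; float f; memcpy(&f, &u, 4); return f; }

    int main(void) {
        const float v = 0.5f;                   // scalar to add, as in add1
        uint16_t row[4] = { f32_to_bf16(1.0f), f32_to_bf16(2.0f), f32_to_bf16(3.0f), f32_to_bf16(4.0f) };
        for (int i = 0; i < 4; ++i) {
            // same shape as: dst_ptr[i] = GGML_FP32_TO_BF16(GGML_BF16_TO_FP32(src0_ptr[i]) + v);
            row[i] = f32_to_bf16(bf16_to_f32(row[i]) + v);
        }
        printf("%g %g %g %g\n", bf16_to_f32(row[0]), bf16_to_f32(row[1]), bf16_to_f32(row[2]), bf16_to_f32(row[3]));
        return 0;                               // prints 1.5 2.5 3.5 4.5
    }
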
GGML_ASSERT(src1->type == GGML_TYPE_BF16); + GGML_ASSERT(dst->type == GGML_TYPE_BF16); + + GGML_ASSERT(nb0 == sizeof(ggml_bf16_t)); + GGML_ASSERT(nb00 == sizeof(ggml_bf16_t)); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int ir = ir0; ir < ir1; ++ir) { + // src0 and dst are same shape => same indices + const int i3 = ir / (ne2 * ne1); + const int i2 = (ir - i3 * ne2 * ne1) / ne1; + const int i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); + + ggml_bf16_t * dst_ptr = (ggml_bf16_t *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1); + ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01); + for (int i = 0; i < ne0; i++) { + dst_ptr[i] = GGML_FP32_TO_BF16(GGML_BF16_TO_FP32(src0_ptr[i]) + v); + } + } +} + +void ggml_compute_forward_add1(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_add1_f32(params, dst); + } + break; + case GGML_TYPE_F16: + { + if (src1->type == GGML_TYPE_F16) { + ggml_compute_forward_add1_f16_f16(params, dst); + } else if (src1->type == GGML_TYPE_F32) { + ggml_compute_forward_add1_f16_f32(params, dst); + } else { + GGML_ABORT("fatal error"); + } + } + break; + case GGML_TYPE_BF16: + { + if (src1->type == GGML_TYPE_BF16) { + ggml_compute_forward_add1_bf16_bf16(params, dst); + } else if (src1->type == GGML_TYPE_F32) { + ggml_compute_forward_add1_bf16_f32(params, dst); + } else { + GGML_ABORT("fatal error"); + } + } + break; + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: + case GGML_TYPE_Q5_0: + case GGML_TYPE_Q5_1: + case GGML_TYPE_Q8_0: + case GGML_TYPE_Q8_1: + case GGML_TYPE_MXFP4: + case GGML_TYPE_Q2_K: + case GGML_TYPE_Q3_K: + case GGML_TYPE_Q4_K: + case GGML_TYPE_Q5_K: + case GGML_TYPE_Q6_K: + case GGML_TYPE_TQ1_0: + case GGML_TYPE_TQ2_0: + case GGML_TYPE_IQ2_XXS: + case GGML_TYPE_IQ2_XS: + case GGML_TYPE_IQ3_XXS: + case GGML_TYPE_IQ1_S: + case GGML_TYPE_IQ1_M: + case GGML_TYPE_IQ4_NL: + case GGML_TYPE_IQ4_XS: + case GGML_TYPE_IQ3_S: + case GGML_TYPE_IQ2_S: + { + ggml_compute_forward_add1_q_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_acc + +static void ggml_compute_forward_acc_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0)); + + // view src0 and dst with these strides and data offset inbytes during acc + // nb0 is implicitly element_size because src0 and dst are contiguous + size_t nb1 = ((int32_t *) dst->op_params)[0]; + size_t nb2 = ((int32_t *) dst->op_params)[1]; + size_t nb3 = ((int32_t *) dst->op_params)[2]; + size_t offset = ((int32_t *) dst->op_params)[3]; + bool inplace = (bool) ((int32_t *) dst->op_params)[4]; + + if (!inplace) { + if (params->ith == 0) { + // memcpy needs to be synchronized across threads to avoid race conditions. 
+ // => do it in INIT phase + memcpy(((char *) dst->data), ((char *) src0->data), ggml_nbytes(dst)); + } + ggml_barrier(params->threadpool); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nr = ggml_nrows(src1); + const int nc = src1->ne[0]; + + GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) + GGML_TENSOR_LOCALS(size_t, nb1, src1, nb) + + // src0 and dst as viewed during acc + const size_t nb0 = ggml_element_size(src0); + + const size_t nb00 = nb0; + const size_t nb01 = nb1; + const size_t nb02 = nb2; + const size_t nb03 = nb3; + + GGML_ASSERT(offset + (ne10 == 0 ? 0 : ne10 - 1) * nb0 + (ne11 == 0 ? 0 : ne11 - 1) * nb1 + + (ne12 == 0 ? 0 : ne12 - 1) * nb2 + (ne13 == 0 ? 0 : ne13 - 1) * nb3 < + ggml_nbytes(dst)); + GGML_ASSERT(offset + (ne10 == 0 ? 0 : ne10 - 1) * nb00 + (ne11 == 0 ? 0 : ne11 - 1) * nb01 + + (ne12 == 0 ? 0 : ne12 - 1) * nb02 + (ne13 == 0 ? 0 : ne13 - 1) * nb03 < + ggml_nbytes(src0)); + + GGML_ASSERT(nb10 == sizeof(float)); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int ir = ir0; ir < ir1; ++ir) { + // src0 and dst are viewed with shape of src1 and offset + // => same indices + const int i3 = ir / (ne12 * ne11); + const int i2 = (ir - i3 * ne12 * ne11) / ne11; + const int i1 = (ir - i3 * ne12 * ne11 - i2 * ne11); + +#ifdef GGML_USE_ACCELERATE + vDSP_vadd((float *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01 + offset), 1, + (float *) ((char *) src1->data + i3 * nb13 + i2 * nb12 + i1 * nb11), 1, + (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1 + offset), 1, nc); +#else + ggml_vec_add_f32(nc, (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1 + offset), + (float *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01 + offset), + (float *) ((char *) src1->data + i3 * nb13 + i2 * nb12 + i1 * nb11)); +#endif + } +} + +void ggml_compute_forward_acc(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_acc_f32(params, dst); + } + break; + case GGML_TYPE_F16: + case GGML_TYPE_BF16: + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: + case GGML_TYPE_Q5_0: + case GGML_TYPE_Q5_1: + case GGML_TYPE_Q8_0: + case GGML_TYPE_Q8_1: + case GGML_TYPE_MXFP4: + case GGML_TYPE_Q2_K: + case GGML_TYPE_Q3_K: + case GGML_TYPE_Q4_K: + case GGML_TYPE_Q5_K: + case GGML_TYPE_Q6_K: + case GGML_TYPE_TQ1_0: + case GGML_TYPE_TQ2_0: + case GGML_TYPE_IQ2_XXS: + case GGML_TYPE_IQ2_XS: + case GGML_TYPE_IQ3_XXS: + case GGML_TYPE_IQ1_S: + case GGML_TYPE_IQ1_M: + case GGML_TYPE_IQ4_NL: + case GGML_TYPE_IQ4_XS: + case GGML_TYPE_IQ3_S: + case GGML_TYPE_IQ2_S: + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_sum + +static void ggml_compute_forward_sum_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + if (params->ith != 0) { + return; + } + + assert(ggml_is_scalar(dst)); + assert(src0->nb[0] == sizeof(float)); + + GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) + GGML_TENSOR_LOCALS(size_t, nb0, src0, nb) + + ggml_float sum = 0; + ggml_float row_sum = 0; + + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = 0; i01 < ne01; i01++) { + ggml_vec_sum_f32_ggf(ne00, &row_sum, + (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03)); + sum += row_sum; + } + } + 
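
[Editor's note] ggml_compute_forward_sum_f32 above runs on thread 0 only (the early ith != 0 return) and accumulates into ggml_float, a wider type than the f32 elements, before storing the result. A minimal sketch of why the wider accumulator matters; the magnitudes are chosen only to make f32 accumulation visibly lossy:

    #include <stdio.h>

    int main(void) {
        // Summing many small values into a large one: the f32 accumulator stalls,
        // a double accumulator keeps the contributions.
        float  acc_f32 = 1e8f;
        double acc_f64 = 1e8;
        for (int i = 0; i < 1000; ++i) {
            acc_f32 += 1.0f;     // each 1.0 is below f32 resolution at 1e8, so it is lost
            acc_f64 += 1.0;      // double keeps it
        }
        printf("f32 accumulator: %.1f\n", (double) acc_f32);   // 100000000.0
        printf("f64 accumulator: %.1f\n", acc_f64);            // 100001000.0
        return 0;
    }
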
} + ((float *) dst->data)[0] = sum; +} + +static void ggml_compute_forward_sum_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + if (params->ith != 0) { + return; + } + + assert(ggml_is_scalar(dst)); + + assert(src0->nb[0] == sizeof(ggml_fp16_t)); + + GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) + GGML_TENSOR_LOCALS(size_t, nb0, src0, nb) + + float sum = 0; + float row_sum = 0; + + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = 0; i01 < ne01; i01++) { + ggml_vec_sum_f16_ggf(ne00, &row_sum, + (ggml_fp16_t *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03)); + sum += row_sum; + } + } + } + ((ggml_fp16_t *) dst->data)[0] = GGML_CPU_FP32_TO_FP16(sum); +} + +static void ggml_compute_forward_sum_bf16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + if (params->ith != 0) { + return; + } + + assert(ggml_is_scalar(dst)); + + assert(src0->nb[0] == sizeof(ggml_bf16_t)); + + GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) + GGML_TENSOR_LOCALS(size_t, nb0, src0, nb) + + float sum = 0; + float row_sum = 0; + + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = 0; i01 < ne01; i01++) { + ggml_vec_sum_bf16_ggf(ne00, &row_sum, + (ggml_bf16_t *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03)); + sum += row_sum; + } + } + } + ((ggml_bf16_t *) dst->data)[0] = GGML_FP32_TO_BF16(sum); +} + +void ggml_compute_forward_sum(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_sum_f32(params, dst); + } + break; + case GGML_TYPE_F16: + { + ggml_compute_forward_sum_f16(params, dst); + } + break; + case GGML_TYPE_BF16: + { + ggml_compute_forward_sum_bf16(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_cumsum + +static void ggml_compute_forward_cumsum_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + GGML_ASSERT(src0->nb[0] == sizeof(float)); + GGML_ASSERT(dst->nb[0] == sizeof(float)); + + GGML_TENSOR_UNARY_OP_LOCALS + + GGML_ASSERT(ne0 == ne00); + GGML_ASSERT(ne1 == ne01); + GGML_ASSERT(ne2 == ne02); + GGML_ASSERT(ne3 == ne03); + + const auto [ir0, ir1] = get_thread_range(params, src0); + + for (int64_t ir = ir0; ir < ir1; ++ir) { + const int64_t i03 = ir / (ne02 * ne01); + const int64_t i02 = (ir - i03 * ne02 * ne01) / ne01; + const int64_t i01 = (ir - i03 * ne02 * ne01 - i02 * ne01); + + float * src_row = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); + float * dst_row = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); + + ggml_vec_cumsum_f32(ne00, dst_row, src_row); + } +} + +void ggml_compute_forward_cumsum(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_cumsum_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_sum_rows + +static void ggml_compute_forward_sum_rows_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + if (params->ith != 0) { + return; + } + + GGML_ASSERT(src0->nb[0] == sizeof(float)); + GGML_ASSERT(dst->nb[0] == sizeof(float)); + + 
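
[Editor's note] cumsum above, like most row-parallel ops in this file, walks a flat row index ir and rebuilds (i1, i2, i3) with a divide/subtract chain. A tiny round-trip check of that decomposition; the extents stand in for ne01/ne02/ne03 and are arbitrary example values:

    #include <assert.h>
    #include <stdio.h>

    int main(void) {
        const int ne1 = 3, ne2 = 4, ne3 = 2;   // example row extents
        for (int ir = 0; ir < ne1 * ne2 * ne3; ++ir) {
            const int i3 = ir / (ne2 * ne1);
            const int i2 = (ir - i3 * ne2 * ne1) / ne1;
            const int i1 = ir - i3 * ne2 * ne1 - i2 * ne1;
            assert(ir == i3 * ne2 * ne1 + i2 * ne1 + i1);   // decomposition inverts the flattening
            assert(i1 < ne1 && i2 < ne2 && i3 < ne3);
        }
        printf("flat row index <-> (i1, i2, i3) round-trips for %d rows\n", ne1 * ne2 * ne3);
        return 0;
    }
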
GGML_TENSOR_UNARY_OP_LOCALS + + GGML_ASSERT(ne0 == 1); + GGML_ASSERT(ne1 == ne01); + GGML_ASSERT(ne2 == ne02); + GGML_ASSERT(ne3 == ne03); + + for (int64_t i3 = 0; i3 < ne03; i3++) { + for (int64_t i2 = 0; i2 < ne02; i2++) { + for (int64_t i1 = 0; i1 < ne01; i1++) { + float * src_row = (float *) ((char *) src0->data + i1 * nb01 + i2 * nb02 + i3 * nb03); + float * dst_row = (float *) ((char *) dst->data + i1 * nb1 + i2 * nb2 + i3 * nb3); + float row_sum = 0; + ggml_vec_sum_f32(ne00, &row_sum, src_row); + dst_row[0] = row_sum; + } + } + } +} + +void ggml_compute_forward_sum_rows(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_sum_rows_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_mean + +static void ggml_compute_forward_mean_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + if (params->ith != 0) { + return; + } + + assert(src0->nb[0] == sizeof(float)); + + GGML_TENSOR_UNARY_OP_LOCALS + + assert(ne0 == 1); + assert(ne1 == ne01); + assert(ne2 == ne02); + assert(ne3 == ne03); + + GGML_UNUSED(ne0); + GGML_UNUSED(ne1); + GGML_UNUSED(ne2); + GGML_UNUSED(ne3); + + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = 0; i01 < ne01; i01++) { + ggml_vec_sum_f32(ne00, (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3), + (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03)); + + *(float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3) /= (float) ne00; + } + } + } +} + +void ggml_compute_forward_mean(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_mean_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_argmax + +static void ggml_compute_forward_argmax_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + if (params->ith != 0) { + return; + } + + assert(src0->nb[0] == sizeof(float)); + assert(dst->nb[0] == sizeof(float)); + + const int64_t ne00 = src0->ne[0]; + const int64_t ne01 = src0->ne[1]; + + const size_t nb01 = src0->nb[1]; + const size_t nb0 = dst->nb[0]; + + for (int64_t i1 = 0; i1 < ne01; i1++) { + float * src = (float *) ((char *) src0->data + i1 * nb01); + int32_t * dst_ = (int32_t *) ((char *) dst->data + i1 * nb0); + int v = 0; + ggml_vec_argmax_f32(ne00, &v, src); + dst_[0] = v; + } +} + +void ggml_compute_forward_argmax(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_argmax_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_count_equal + +static void ggml_compute_forward_count_equal_i32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_TENSOR_BINARY_OP_LOCALS; + + GGML_ASSERT(src0->type == GGML_TYPE_I32); + GGML_ASSERT(src1->type == GGML_TYPE_I32); + GGML_ASSERT(ggml_are_same_shape(src0, src1)); + GGML_ASSERT(ggml_is_scalar(dst)); + GGML_ASSERT(dst->type == GGML_TYPE_I64); + + const int64_t nr = ggml_nrows(src0); + + const 
int ith = params->ith; + const int nth = params->nth; + + int64_t * sums = (int64_t *) params->wdata; + int64_t sum_thread = 0; + + // rows per thread + const int64_t dr = (nr + nth - 1) / nth; + + // row range for this thread + const int64_t ir0 = dr * ith; + const int64_t ir1 = MIN(ir0 + dr, nr); + + for (int64_t ir = ir0; ir < ir1; ++ir) { + const int64_t i03 = ir / (ne02 * ne01); + const int64_t i02 = (ir - i03 * ne03) / ne01; + const int64_t i01 = ir - i03 * ne03 - i02 * ne02; + + const char * data0 = (const char *) src0->data + i03 * nb03 + i02 * nb02 + i01 * nb01; + const char * data1 = (const char *) src1->data + i03 * nb13 + i02 * nb12 + i01 * nb11; + + for (int64_t i00 = 0; i00 < ne00; ++i00) { + const int32_t val0 = *((const int32_t *) (data0 + i00 * nb00)); + const int32_t val1 = *((const int32_t *) (data1 + i00 * nb10)); + + sum_thread += val0 == val1; + } + } + if (ith != 0) { + sums[ith] = sum_thread; + } + ggml_barrier(params->threadpool); + + if (ith != 0) { + return; + } + + for (int ith_other = 1; ith_other < nth; ++ith_other) { + sum_thread += sums[ith_other]; + } + *((int64_t *) dst->data) = sum_thread; +} + +void ggml_compute_forward_count_equal(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_I32: + { + ggml_compute_forward_count_equal_i32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_repeat + +static void ggml_compute_forward_repeat_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + if (params->ith != 0) { + return; + } + + GGML_ASSERT(ggml_can_repeat(src0, dst)); + + GGML_TENSOR_UNARY_OP_LOCALS + + // guaranteed to be an integer due to the check in ggml_can_repeat + const int nr0 = (int) (ne0 / ne00); + const int nr1 = (int) (ne1 / ne01); + const int nr2 = (int) (ne2 / ne02); + const int nr3 = (int) (ne3 / ne03); + + // TODO: support for transposed / permuted tensors + GGML_ASSERT(nb0 == sizeof(float)); + GGML_ASSERT(nb00 == sizeof(float)); + + // TODO: maybe this is not optimal? + for (int i3 = 0; i3 < nr3; i3++) { + for (int k3 = 0; k3 < ne03; k3++) { + for (int i2 = 0; i2 < nr2; i2++) { + for (int k2 = 0; k2 < ne02; k2++) { + for (int i1 = 0; i1 < nr1; i1++) { + for (int k1 = 0; k1 < ne01; k1++) { + for (int i0 = 0; i0 < nr0; i0++) { + ggml_vec_cpy_f32( + ne00, + (float *) ((char *) dst->data + (i3 * ne03 + k3) * nb3 + (i2 * ne02 + k2) * nb2 + + (i1 * ne01 + k1) * nb1 + (i0 * ne00) * nb0), + (float *) ((char *) src0->data + (k3) *nb03 + (k2) *nb02 + (k1) *nb01)); + } + } + } + } + } + } + } +} + +static void ggml_compute_forward_repeat_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + if (params->ith != 0) { + return; + } + + GGML_ASSERT(ggml_can_repeat(src0, dst)); + + GGML_TENSOR_UNARY_OP_LOCALS + + // guaranteed to be an integer due to the check in ggml_can_repeat + const int nr0 = (int) (ne0 / ne00); + const int nr1 = (int) (ne1 / ne01); + const int nr2 = (int) (ne2 / ne02); + const int nr3 = (int) (ne3 / ne03); + + // TODO: support for transposed / permuted tensors + GGML_ASSERT(nb0 == sizeof(ggml_fp16_t)); + GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); + + // TODO: maybe this is not optimal? 
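
[Editor's note] ggml_repeat above tiles the source nr0*nr1*nr2*nr3 times; along a single dimension the copy amounts to dst[j] = src[j % ne00]. A one-dimensional sketch of that correspondence; the sizes and values are made up for illustration:

    #include <assert.h>
    #include <stdio.h>

    int main(void) {
        const int   ne00 = 3;                  // source width
        const int   nr0  = 4;                  // repeat factor, so dst width is nr0 * ne00
        const float src[3] = { 1.0f, 2.0f, 3.0f };
        float       dst[12];

        // i0 picks the tile, the inner loop fills it (the real code copies the whole row at once)
        for (int i0 = 0; i0 < nr0; ++i0) {
            for (int k = 0; k < ne00; ++k) {
                dst[i0 * ne00 + k] = src[k];
            }
        }
        for (int j = 0; j < nr0 * ne00; ++j) {
            assert(dst[j] == src[j % ne00]);   // equivalent modulo formulation
        }
        printf("repeated row: ");
        for (int j = 0; j < nr0 * ne00; ++j) printf("%g ", dst[j]);
        printf("\n");
        return 0;
    }
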
+ for (int i3 = 0; i3 < nr3; i3++) { + for (int k3 = 0; k3 < ne03; k3++) { + for (int i2 = 0; i2 < nr2; i2++) { + for (int k2 = 0; k2 < ne02; k2++) { + for (int i1 = 0; i1 < nr1; i1++) { + for (int k1 = 0; k1 < ne01; k1++) { + for (int i0 = 0; i0 < nr0; i0++) { + ggml_fp16_t * y = (ggml_fp16_t *) ((char *) dst->data + (i3 * ne03 + k3) * nb3 + + (i2 * ne02 + k2) * nb2 + (i1 * ne01 + k1) * nb1 + + (i0 * ne00) * nb0); + ggml_fp16_t * x = + (ggml_fp16_t *) ((char *) src0->data + (k3) *nb03 + (k2) *nb02 + (k1) *nb01); + // ggml_vec_cpy_f16(ne00, y, x) + for (int i = 0; i < ne00; ++i) { + y[i] = x[i]; + } + } + } + } + } + } + } + } +} + +void ggml_compute_forward_repeat(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F16: + case GGML_TYPE_BF16: + case GGML_TYPE_I16: + { + ggml_compute_forward_repeat_f16(params, dst); + } + break; + case GGML_TYPE_F32: + case GGML_TYPE_I32: + { + ggml_compute_forward_repeat_f32(params, dst); + } + break; + // TODO: templateify the implemenation and support for I64 + // ref https://github.com/ggml-org/llama.cpp/pull/14274#discussion_r2169492225 + //case GGML_TYPE_I64: + // { + // ggml_compute_forward_repeat_i64(params, dst); + // } break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_repeat_back + +static void ggml_compute_forward_repeat_back_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + if (params->ith != 0) { + return; + } + + GGML_ASSERT(ggml_can_repeat(dst, src0)); + + GGML_TENSOR_UNARY_OP_LOCALS + + // guaranteed to be an integer due to the check in ggml_can_repeat + const int nr0 = (int) (ne00 / ne0); + const int nr1 = (int) (ne01 / ne1); + const int nr2 = (int) (ne02 / ne2); + const int nr3 = (int) (ne03 / ne3); + + // TODO: support for transposed / permuted tensors + GGML_ASSERT(nb0 == sizeof(float)); + GGML_ASSERT(nb00 == sizeof(float)); + + if (ggml_is_contiguous(dst)) { + ggml_vec_set_f32(ne0 * ne1 * ne2 * ne3, (float *) dst->data, 0); + } else { + for (int k3 = 0; k3 < ne3; k3++) { + for (int k2 = 0; k2 < ne2; k2++) { + for (int k1 = 0; k1 < ne1; k1++) { + ggml_vec_set_f32(ne0, (float *) ((char *) dst->data + k1 * nb1 + k2 * nb2 + k3 * nb3), 0); + } + } + } + } + + // TODO: maybe this is not optimal? 
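
[Editor's note] repeat_back above runs the tiling in reverse: dst is zeroed first and then every tile of the larger source is accumulated into it, which is the gradient of repeat. A one-dimensional sketch of that reduction; sizes and values are again arbitrary:

    #include <stdio.h>

    int main(void) {
        const int   ne0 = 3;                   // destination (gradient) width
        const int   nr0 = 4;                   // number of tiles in the source
        const float src[12] = { 1, 1, 1,  2, 2, 2,  3, 3, 3,  4, 4, 4 };
        float       dst[3]  = { 0.0f, 0.0f, 0.0f };   // zeroed first, as in the code above

        for (int i0 = 0; i0 < nr0; ++i0) {     // accumulate every tile into dst (ggml_vec_acc_f32)
            for (int k = 0; k < ne0; ++k) {
                dst[k] += src[i0 * ne0 + k];
            }
        }
        printf("%g %g %g\n", dst[0], dst[1], dst[2]);   // 10 10 10
        return 0;
    }
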
+ for (int i3 = 0; i3 < nr3; i3++) { + for (int k3 = 0; k3 < ne3; k3++) { + for (int i2 = 0; i2 < nr2; i2++) { + for (int k2 = 0; k2 < ne2; k2++) { + for (int i1 = 0; i1 < nr1; i1++) { + for (int k1 = 0; k1 < ne1; k1++) { + for (int i0 = 0; i0 < nr0; i0++) { + ggml_vec_acc_f32( + ne0, (float *) ((char *) dst->data + (k3) *nb3 + (k2) *nb2 + (k1) *nb1), + (float *) ((char *) src0->data + (i3 * ne3 + k3) * nb03 + (i2 * ne2 + k2) * nb02 + + (i1 * ne1 + k1) * nb01 + (i0 * ne0) * nb00)); + } + } + } + } + } + } + } +} + +void ggml_compute_forward_repeat_back(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_repeat_back_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_concat + +static void ggml_compute_forward_concat_any(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + const size_t len = ggml_type_size(src0->type); + + const int ith = params->ith; + const int nth = params->nth; + + GGML_TENSOR_BINARY_OP_LOCALS + + const int32_t dim = ggml_get_op_params_i32(dst, 0); + + GGML_ASSERT(dim >= 0 && dim < 4); + + int64_t o[4] = { 0, 0, 0, 0 }; + o[dim] = src0->ne[dim]; + + const char * x; + + // TODO: smarter multi-theading + for (int i3 = 0; i3 < ne3; i3++) { + for (int i2 = ith; i2 < ne2; i2 += nth) { + for (int i1 = 0; i1 < ne1; i1++) { + for (int i0 = 0; i0 < ne0; i0++) { + if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 < ne03) { + x = (const char *) src0->data + (i0) *nb00 + (i1) *nb01 + (i2) *nb02 + (i3) *nb03; + } else { + x = (const char *) src1->data + (i0 - o[0]) * nb10 + (i1 - o[1]) * nb11 + (i2 - o[2]) * nb12 + + (i3 - o[3]) * nb13; + } + + char * y = (char *) dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3; + + memcpy(y, x, len); + } + } + } + } +} + +static void ggml_compute_forward_concat_i8(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_type_size(src0->type) == sizeof(int8_t)); + + const int ith = params->ith; + const int nth = params->nth; + + GGML_TENSOR_BINARY_OP_LOCALS + + const int32_t dim = ggml_get_op_params_i32(dst, 0); + + GGML_ASSERT(dim >= 0 && dim < 4); + + int64_t o[4] = { 0, 0, 0, 0 }; + o[dim] = src0->ne[dim]; + + const int8_t * x; + + // TODO: smarter multi-theading + for (int i3 = 0; i3 < ne3; i3++) { + for (int i2 = ith; i2 < ne2; i2 += nth) { + for (int i1 = 0; i1 < ne1; i1++) { + for (int i0 = 0; i0 < ne0; i0++) { + if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 < ne03) { + x = (const int8_t *) ((const char *) src0->data + (i0) *nb00 + (i1) *nb01 + (i2) *nb02 + + (i3) *nb03); + } else { + x = (const int8_t *) ((const char *) src1->data + (i0 - o[0]) * nb10 + (i1 - o[1]) * nb11 + + (i2 - o[2]) * nb12 + (i3 - o[3]) * nb13); + } + + int8_t * y = (int8_t *) ((char *) dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3); + + *y = *x; + } + } + } + } +} + +static void ggml_compute_forward_concat_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_type_size(src0->type) == sizeof(ggml_fp16_t)); + + const int ith = params->ith; + const int nth = params->nth; + + GGML_TENSOR_BINARY_OP_LOCALS + + const int32_t dim = ggml_get_op_params_i32(dst, 0); + + GGML_ASSERT(dim >= 
0 && dim < 4); + + int64_t o[4] = { 0, 0, 0, 0 }; + o[dim] = src0->ne[dim]; + + const ggml_fp16_t * x; + + // TODO: smarter multi-theading + for (int i3 = 0; i3 < ne3; i3++) { + for (int i2 = ith; i2 < ne2; i2 += nth) { + for (int i1 = 0; i1 < ne1; i1++) { + for (int i0 = 0; i0 < ne0; i0++) { + if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 < ne03) { + x = (const ggml_fp16_t *) ((const char *) src0->data + (i0) *nb00 + (i1) *nb01 + (i2) *nb02 + + (i3) *nb03); + } else { + x = (const ggml_fp16_t *) ((const char *) src1->data + (i0 - o[0]) * nb10 + (i1 - o[1]) * nb11 + + (i2 - o[2]) * nb12 + (i3 - o[3]) * nb13); + } + + ggml_fp16_t * y = (ggml_fp16_t *) ((char *) dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3); + + *y = *x; + } + } + } + } +} + +static void ggml_compute_forward_concat_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_type_size(src0->type) == sizeof(float)); + + const int ith = params->ith; + const int nth = params->nth; + + GGML_TENSOR_BINARY_OP_LOCALS + + const int32_t dim = ggml_get_op_params_i32(dst, 0); + + GGML_ASSERT(dim >= 0 && dim < 4); + + int64_t o[4] = { 0, 0, 0, 0 }; + o[dim] = src0->ne[dim]; + + const float * x; + + // TODO: smarter multi-theading + for (int i3 = 0; i3 < ne3; i3++) { + for (int i2 = ith; i2 < ne2; i2 += nth) { + for (int i1 = 0; i1 < ne1; i1++) { + for (int i0 = 0; i0 < ne0; i0++) { + if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 < ne03) { + x = (const float *) ((const char *) src0->data + (i0) *nb00 + (i1) *nb01 + (i2) *nb02 + + (i3) *nb03); + } else { + x = (const float *) ((const char *) src1->data + (i0 - o[0]) * nb10 + (i1 - o[1]) * nb11 + + (i2 - o[2]) * nb12 + (i3 - o[3]) * nb13); + } + + float * y = (float *) ((char *) dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3); + + *y = *x; + } + } + } + } +} + +void ggml_compute_forward_concat(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F16: + case GGML_TYPE_BF16: + case GGML_TYPE_I16: + { + ggml_compute_forward_concat_f16(params, dst); + } + break; + case GGML_TYPE_I8: + { + ggml_compute_forward_concat_i8(params, dst); + } + break; + case GGML_TYPE_F32: + case GGML_TYPE_I32: + { + ggml_compute_forward_concat_f32(params, dst); + } + break; + default: + { + ggml_compute_forward_concat_any(params, dst); + } + } +} + +// ggml_compute_forward_gelu + +static void ggml_compute_forward_gelu_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + assert(ggml_is_contiguous_1(src0)); + assert(ggml_is_contiguous_1(dst)); + assert(ggml_are_same_shape(src0, dst)); + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src0->ne[0]; + const int nr = ggml_nrows(src0); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_vec_gelu_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), + (float *) ((char *) src0->data + i1 * (src0->nb[1]))); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + GGML_UNUSED(x); + assert(!isnan(x)); + assert(!isinf(x)); + } +#endif + } +} + +static void ggml_compute_forward_gelu_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const 
ggml_tensor * src0 = dst->src[0]; + + assert(ggml_is_contiguous_1(src0)); + assert(ggml_is_contiguous_1(dst)); + assert(ggml_are_same_shape(src0, dst)); + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src0->ne[0]; + const int nr = ggml_nrows(src0); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_vec_gelu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), + (ggml_fp16_t *) ((char *) src0->data + i1 * (src0->nb[1]))); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + const float v = GGML_CPU_FP16_TO_FP32(x); + GGML_UNUSED(v); + assert(!isnan(v)); + assert(!isinf(v)); + } +#endif + } +} + +static void ggml_compute_forward_gelu(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_gelu_f32(params, dst); + } + break; + case GGML_TYPE_F16: + { + ggml_compute_forward_gelu_f16(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_fill + +static void ggml_compute_forward_fill_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const float c = ggml_get_op_params_f32(dst, 0); + + GGML_TENSOR_LOCALS(int64_t, ne, dst, ne); + GGML_TENSOR_LOCALS(size_t, nb, dst, nb); + + const auto [ir0, ir1] = get_thread_range(params, dst); + + for (int64_t ir = ir0; ir < ir1; ++ir) { + const int64_t i03 = ir / (ne2 * ne1); + const int64_t i02 = (ir - i03 * ne2 * ne1) / ne1; + const int64_t i01 = (ir - i03 * ne2 * ne1 - i02 * ne1); + + float * dst_ptr = (float *) ((char *) dst->data + i03 * nb3 + i02 * nb2 + i01 * nb1); + + ggml_vec_set_f32(ne0, dst_ptr, c); + } +} + +void ggml_compute_forward_fill(const ggml_compute_params * params, ggml_tensor * dst) { + ggml_compute_forward_fill_f32(params, dst); +} + +// ggml_compute_tri + +static void ggml_compute_forward_tri_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + const ggml_tri_type ttype = (ggml_tri_type) ggml_get_op_params_i32(dst, 0); + + GGML_ASSERT(ggml_is_contiguous(src0)); + + GGML_TENSOR_UNARY_OP_LOCALS + + const auto [ir0, ir1] = get_thread_range(params, src0); + + bool (*bipred)(int, int); + + switch (ttype) { + case GGML_TRI_TYPE_LOWER: + bipred = [](int i, int r) { + return i < r; + }; + break; + case GGML_TRI_TYPE_LOWER_DIAG: + bipred = [](int i, int r) { + return i <= r; + }; + break; + case GGML_TRI_TYPE_UPPER: + bipred = [](int i, int r) { + return i > r; + }; + break; + case GGML_TRI_TYPE_UPPER_DIAG: + bipred = [](int i, int r) { + return i >= r; + }; + break; + default: + GGML_ABORT("invalid tri type"); + } + + for (int64_t ir = ir0; ir < ir1; ++ir) { + const int64_t i03 = ir / (ne02 * ne01); + const int64_t i02 = (ir - i03 * ne02 * ne01) / ne01; + const int64_t i01 = (ir - i03 * ne02 * ne01 - i02 * ne01); + + const float * src_ptr = (const float *) ((const char *) src0->data + i03 * nb03 + i02 * nb02 + i01 * nb01); + float * dst_ptr = (float *) ((char *) dst->data + i03 * nb3 + i02 * nb2 + i01 * nb1); + + for (int i0 = 0; i0 < ne0; ++i0) { + dst_ptr[i0] = bipred(i0, i01) ? 
src_ptr[i0] : 0.0f; + } + } +} + +void ggml_compute_forward_tri(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_tri_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_gelu_erf + +static void ggml_compute_forward_gelu_erf_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + assert(ggml_is_contiguous_1(src0)); + assert(ggml_is_contiguous_1(dst)); + assert(ggml_are_same_shape(src0, dst)); + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src0->ne[0]; + const int nr = ggml_nrows(src0); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_vec_gelu_erf_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), + (float *) ((char *) src0->data + i1 * (src0->nb[1]))); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + GGML_UNUSED(x); + assert(!isnan(x)); + assert(!isinf(x)); + } +#endif + } +} + +static void ggml_compute_forward_gelu_erf_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + assert(ggml_is_contiguous_1(src0)); + assert(ggml_is_contiguous_1(dst)); + assert(ggml_are_same_shape(src0, dst)); + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src0->ne[0]; + const int nr = ggml_nrows(src0); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_vec_gelu_erf_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), + (ggml_fp16_t *) ((char *) src0->data + i1 * (src0->nb[1]))); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + const float v = GGML_CPU_FP16_TO_FP32(x); + GGML_UNUSED(v); + assert(!isnan(v)); + assert(!isinf(v)); + } +#endif + } +} + +static void ggml_compute_forward_gelu_erf(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_gelu_erf_f32(params, dst); + } + break; + case GGML_TYPE_F16: + { + ggml_compute_forward_gelu_erf_f16(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_gelu_quick + +static void ggml_compute_forward_gelu_quick_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + assert(ggml_is_contiguous_1(src0)); + assert(ggml_is_contiguous_1(dst)); + assert(ggml_are_same_shape(src0, dst)); + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src0->ne[0]; + const int nr = ggml_nrows(src0); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_vec_gelu_quick_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), + (float *) ((char *) src0->data + i1 * (src0->nb[1]))); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const 
float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + GGML_UNUSED(x); + assert(!isnan(x)); + assert(!isinf(x)); + } +#endif + } +} + +static void ggml_compute_forward_gelu_quick_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + assert(ggml_is_contiguous_1(src0)); + assert(ggml_is_contiguous_1(dst)); + assert(ggml_are_same_shape(src0, dst)); + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src0->ne[0]; + const int nr = ggml_nrows(src0); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_vec_gelu_quick_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), + (ggml_fp16_t *) ((char *) src0->data + i1 * (src0->nb[1]))); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + const float v = GGML_CPU_FP16_TO_FP32(x); + GGML_UNUSED(v); + assert(!isnan(v)); + assert(!isinf(v)); + } +#endif + } +} + +static void ggml_compute_forward_gelu_quick(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_gelu_quick_f32(params, dst); + } + break; + case GGML_TYPE_F16: + { + ggml_compute_forward_gelu_quick_f16(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_silu + +static void ggml_compute_forward_silu_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + assert(ggml_is_contiguous_1(src0)); + assert(ggml_is_contiguous_1(dst)); + assert(ggml_are_same_shape(src0, dst)); + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src0->ne[0]; + const int nr = ggml_nrows(src0); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_vec_silu_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), + (float *) ((char *) src0->data + i1 * (src0->nb[1]))); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + GGML_UNUSED(x); + assert(!isnan(x)); + assert(!isinf(x)); + } +#endif + } +} + +static void ggml_compute_forward_silu_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + assert(ggml_is_contiguous_1(src0)); + assert(ggml_is_contiguous_1(dst)); + assert(ggml_are_same_shape(src0, dst)); + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src0->ne[0]; + const int nr = ggml_nrows(src0); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_vec_silu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), + (ggml_fp16_t *) ((char *) src0->data + i1 * (src0->nb[1]))); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + const float v = GGML_CPU_FP16_TO_FP32(x); + GGML_UNUSED(v); + assert(!isnan(v)); + assert(!isinf(v)); + } +#endif + } +} + +static void 
ggml_compute_forward_silu(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_silu_f32(params, dst); + } + break; + case GGML_TYPE_F16: + { + ggml_compute_forward_silu_f16(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_leaky_relu + +static void ggml_compute_forward_leaky_relu_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + if (params->ith != 0) { + return; + } + + assert(ggml_is_contiguous_1(src0)); + assert(ggml_is_contiguous_1(dst)); + assert(ggml_are_same_shape(src0, dst)); + + const int n = ggml_nrows(src0); + const int nc = src0->ne[0]; + + float negative_slope; + memcpy(&negative_slope, dst->op_params, sizeof(float)); + + assert(dst->nb[0] == sizeof(float)); + assert(src0->nb[0] == sizeof(float)); + + for (int i = 0; i < n; i++) { + ggml_vec_leaky_relu_f32(nc, (float *) ((char *) dst->data + i * (dst->nb[1])), + (float *) ((char *) src0->data + i * (src0->nb[1])), negative_slope); + } +} + +static void ggml_compute_forward_leaky_relu_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + if (params->ith != 0) { + return; + } + + assert(ggml_is_contiguous_1(src0)); + assert(ggml_is_contiguous_1(dst)); + assert(ggml_are_same_shape(src0, dst)); + + const int n = ggml_nrows(src0); + const int nc = src0->ne[0]; + + float negative_slope; + memcpy(&negative_slope, dst->op_params, sizeof(float)); + + assert(dst->nb[0] == sizeof(ggml_fp16_t)); + assert(src0->nb[0] == sizeof(ggml_fp16_t)); + + for (int i = 0; i < n; i++) { + ggml_vec_leaky_relu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i * (dst->nb[1])), + (ggml_fp16_t *) ((char *) src0->data + i * (src0->nb[1])), negative_slope); + } +} + +void ggml_compute_forward_leaky_relu(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_leaky_relu_f32(params, dst); + } + break; + case GGML_TYPE_F16: + { + ggml_compute_forward_leaky_relu_f16(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_silu_back + +static void ggml_compute_forward_silu_back_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * grad = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + assert(ggml_is_contiguous_1(grad)); + assert(ggml_is_contiguous_1(src1)); + assert(ggml_is_contiguous_1(dst)); + assert(ggml_are_same_shape(src1, dst)); + assert(ggml_are_same_shape(src1, grad)); + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1->ne[0]; + const int nr = ggml_nrows(src1); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_vec_silu_backward_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), + (float *) ((char *) src1->data + i1 * (src1->nb[1])), + (float *) ((char *) grad->data + i1 * (grad->nb[1]))); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + GGML_UNUSED(x); + assert(!isnan(x)); + assert(!isinf(x)); + } +#endif + } +} + +static void ggml_compute_forward_silu_back_f16(const ggml_compute_params * 
params, ggml_tensor * dst) { + const ggml_tensor * grad = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + assert(ggml_is_contiguous_1(grad)); + assert(ggml_is_contiguous_1(src1)); + assert(ggml_is_contiguous_1(dst)); + assert(ggml_are_same_shape(src1, dst)); + assert(ggml_are_same_shape(src1, grad)); + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1->ne[0]; + const int nr = ggml_nrows(src1); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_vec_silu_backward_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), + (ggml_fp16_t *) ((char *) src1->data + i1 * (src1->nb[1])), + (ggml_fp16_t *) ((char *) grad->data + i1 * (grad->nb[1]))); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const float x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + const float v = GGML_CPU_FP16_TO_FP32(x); + GGML_UNUSED(v); + assert(!isnan(v)); + assert(!isinf(v)); + } +#endif + } +} + +void ggml_compute_forward_silu_back(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_silu_back_f32(params, dst); + } + break; + case GGML_TYPE_F16: + { + ggml_compute_forward_silu_back_f16(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_reglu + +static void ggml_compute_forward_reglu_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + float * src0_p = (float *) (src0_d + i1 * src0_o); + float * src1_p = (float *) (src1_d + i1 * src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + ggml_vec_reglu_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + GGML_UNUSED(x); + assert(!isnan(x)); + assert(!isinf(x)); + } +#endif + } +} + +static void ggml_compute_forward_reglu_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? 
src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_fp16_t * src0_p = (ggml_fp16_t *) (src0_d + i1 * src0_o); + ggml_fp16_t * src1_p = (ggml_fp16_t *) (src1_d + i1 * src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + ggml_vec_reglu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + const float v = GGML_FP16_TO_FP32(x); + GGML_UNUSED(v); + assert(!isnan(v)); + assert(!isinf(v)); + } +#endif + } +} + +static void ggml_compute_forward_reglu(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_reglu_f32(params, dst); + } + break; + case GGML_TYPE_F16: + { + ggml_compute_forward_reglu_f16(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_geglu + +static void ggml_compute_forward_geglu_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + float * src0_p = (float *) (src0_d + i1 * src0_o); + float * src1_p = (float *) (src1_d + i1 * src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + ggml_vec_geglu_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + GGML_UNUSED(x); + assert(!isnan(x)); + assert(!isinf(x)); + } +#endif + } +} + +static void ggml_compute_forward_geglu_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? 
src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_fp16_t * src0_p = (ggml_fp16_t *) (src0_d + i1 * src0_o); + ggml_fp16_t * src1_p = (ggml_fp16_t *) (src1_d + i1 * src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + ggml_vec_geglu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + const float v = GGML_FP16_TO_FP32(x); + GGML_UNUSED(v); + assert(!isnan(v)); + assert(!isinf(v)); + } +#endif + } +} + +static void ggml_compute_forward_geglu(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_geglu_f32(params, dst); + } + break; + case GGML_TYPE_F16: + { + ggml_compute_forward_geglu_f16(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_swiglu + +static void ggml_compute_forward_swiglu_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + float * src0_p = (float *) (src0_d + i1 * src0_o); + float * src1_p = (float *) (src1_d + i1 * src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 
0 : nc; + } + + ggml_vec_swiglu_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + GGML_UNUSED(x); + assert(!isnan(x)); + assert(!isinf(x)); + } +#endif + } +} + +static void ggml_compute_forward_swiglu_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_fp16_t * src0_p = (ggml_fp16_t *) (src0_d + i1 * src0_o); + ggml_fp16_t * src1_p = (ggml_fp16_t *) (src1_d + i1 * src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + ggml_vec_swiglu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + const float v = GGML_FP16_TO_FP32(x); + GGML_UNUSED(v); + assert(!isnan(v)); + assert(!isinf(v)); + } +#endif + } +} + +static void ggml_compute_forward_swiglu(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_swiglu_f32(params, dst); + } + break; + case GGML_TYPE_F16: + { + ggml_compute_forward_swiglu_f16(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_swiglu_oai + +static void ggml_compute_forward_swiglu_oai_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? 
src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + const float alpha = ggml_get_op_params_f32(dst, 2); + const float limit = ggml_get_op_params_f32(dst, 3); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + float * src0_p = (float *) (src0_d + i1 * src0_o); + float * src1_p = (float *) (src1_d + i1 * src1_o); + float * dst_p = (float *) ((char *) dst->data + i1 * (dst->nb[1])); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + for (int k = 0; k < nc; k++) { + const float x = std::min(src0_p[k], limit); + const float y = std::clamp(src1_p[k], -limit, limit); + const float out_glu = x / (1.f + expf(alpha * (-x))); + dst_p[k] = out_glu * (y + 1.f); + } + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const float x = dst_p[k]; + GGML_UNUSED(x); + assert(!isnan(x)); + assert(!isinf(x)); + } +#endif + } +} + +static void ggml_compute_forward_swiglu_oai(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_swiglu_oai_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_geglu_erf + +static void ggml_compute_forward_geglu_erf_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + float * src0_p = (float *) (src0_d + i1 * src0_o); + float * src1_p = (float *) (src1_d + i1 * src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + ggml_vec_geglu_erf_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + GGML_UNUSED(x); + assert(!isnan(x)); + assert(!isinf(x)); + } +#endif + } +} + +static void ggml_compute_forward_geglu_erf_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? 
src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_fp16_t * src0_p = (ggml_fp16_t *) (src0_d + i1 * src0_o); + ggml_fp16_t * src1_p = (ggml_fp16_t *) (src1_d + i1 * src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + ggml_vec_geglu_erf_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + const float v = GGML_FP16_TO_FP32(x); + GGML_UNUSED(v); + assert(!isnan(v)); + assert(!isinf(v)); + } +#endif + } +} + +static void ggml_compute_forward_geglu_erf(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_geglu_erf_f32(params, dst); + } + break; + case GGML_TYPE_F16: + { + ggml_compute_forward_geglu_erf_f16(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_geglu_quick + +static void ggml_compute_forward_geglu_quick_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + float * src0_p = (float *) (src0_d + i1 * src0_o); + float * src1_p = (float *) (src1_d + i1 * src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 
0 : nc; + } + + ggml_vec_geglu_quick_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + GGML_UNUSED(x); + assert(!isnan(x)); + assert(!isinf(x)); + } +#endif + } +} + +static void ggml_compute_forward_geglu_quick_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_fp16_t * src0_p = (ggml_fp16_t *) (src0_d + i1 * src0_o); + ggml_fp16_t * src1_p = (ggml_fp16_t *) (src1_d + i1 * src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + ggml_vec_geglu_quick_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + const float v = GGML_FP16_TO_FP32(x); + GGML_UNUSED(v); + assert(!isnan(v)); + assert(!isinf(v)); + } +#endif + } +} + +static void ggml_compute_forward_geglu_quick(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_geglu_quick_f32(params, dst); + } + break; + case GGML_TYPE_F16: + { + ggml_compute_forward_geglu_quick_f16(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_norm + +static void ggml_compute_forward_norm_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + + GGML_ASSERT(src0->nb[0] == sizeof(float)); + + const int ith = params->ith; + const int nth = params->nth; + + GGML_TENSOR_UNARY_OP_LOCALS + + float eps; + memcpy(&eps, dst->op_params, sizeof(float)); + + GGML_ASSERT(eps >= 0.0f); + + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = ith; i01 < ne01; i01 += nth) { + const float * x = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); + + float sum = 0.0; + ggml_vec_sum_f32(ne00, &sum, x); + float mean = sum / ne00; + + float * y = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); + float variance = 0; + +#ifdef GGML_USE_ACCELERATE + mean = -mean; + vDSP_vsadd(x, 1, &mean, y, 1, ne00); + vDSP_measqv(y, 1, &variance, ne00); +#else + variance = ggml_vec_cvar_f32(ne00, y, x, mean); +#endif //GGML_USE_ACCELERATE + + const float scale = 1.0f / sqrtf(variance + eps); + ggml_vec_scale_f32(ne00, y, 
scale); + } + } + } +} + +void ggml_compute_forward_norm(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_norm_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_group_rms_norm + +static void ggml_compute_forward_rms_norm_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + + GGML_ASSERT(src0->nb[0] == sizeof(float)); + + const int ith = params->ith; + const int nth = params->nth; + + GGML_TENSOR_UNARY_OP_LOCALS + + float eps; + memcpy(&eps, dst->op_params, sizeof(float)); + + GGML_ASSERT(eps >= 0.0f); + + // TODO: optimize + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = ith; i01 < ne01; i01 += nth) { + const float * x = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); + + ggml_float sum = 0.0; + for (int64_t i00 = 0; i00 < ne00; i00++) { + sum += (ggml_float) (x[i00] * x[i00]); + } + + const float mean = sum / ne00; + + float * y = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); + + memcpy(y, x, ne00 * sizeof(float)); + // for (int i00 = 0; i00 < ne00; i00++) { + // y[i00] = x[i00]; + // } + + const float scale = 1.0f / sqrtf(mean + eps); + + // if you hit this, likely you got an inf somewhere earlier + assert(scale > 0.0f); + + ggml_vec_scale_f32(ne00, y, scale); + } + } + } +} + +void ggml_compute_forward_rms_norm(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_rms_norm_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +static void ggml_compute_forward_rms_norm_back_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; // gradients from forward pass output + const ggml_tensor * src1 = dst->src[1]; // src1 from forward pass + + GGML_ASSERT(ggml_are_same_shape(src0, dst) && ggml_are_same_shape(src0, src1)); + + GGML_ASSERT(src0->nb[0] == sizeof(float)); + GGML_ASSERT(src1->nb[0] == sizeof(float)); + + const int ith = params->ith; + const int nth = params->nth; + + GGML_TENSOR_BINARY_OP_LOCALS + + float eps; + memcpy(&eps, dst->op_params, sizeof(float)); + + // TODO: optimize + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = ith; i01 < ne01; i01 += nth) { + // src1 is same shape as src0 => same indices + const int64_t i11 = i01; + const int64_t i12 = i02; + const int64_t i13 = i03; + + const float * dz = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); + const float * x = (float *) ((char *) src1->data + i11 * nb11 + i12 * nb12 + i13 * nb13); + + ggml_float sum_xx = 0.0; + ggml_float sum_xdz = 0.0; + + for (int64_t i00 = 0; i00 < ne00; i00++) { + sum_xx += (ggml_float) (x[i00] * x[i00]); + sum_xdz += (ggml_float) (x[i00] * dz[i00]); + } + + //const float mean = (float)(sum_xx)/ne00; + const float mean_eps = (float) (sum_xx) / ne00 + eps; + const float sum_eps = (float) (sum_xx) + eps * ne00; + //const float mean_xdz = (float)(sum_xdz)/ne00; + // we could cache rms from forward pass to improve performance. + // to do this implement ggml_rms and compose ggml_rms_norm using ggml_rms. 
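+            // In short (restating the result the derivation below arrives at), with
+            // N = ne00, sum_xx = sum(x[i]*x[i]) and sum_xdz = sum(x[i]*dz[i]):
+            //   dx = (dz - x * (sum_xdz/N) / (sum_xx/N + eps)) / sqrt(sum_xx/N + eps)
+            // i.e. dx = (dz + x * (-mean_xdz/mean_eps)) * rrms, which is what the
+            // ggml_vec_cpy/scale/acc/scale calls at the end of this loop body compute
+            // (using -sum_xdz/sum_eps == -mean_xdz/mean_eps, since sum_eps == N*mean_eps).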
+ //const float rms = sqrtf(mean_eps); + const float rrms = 1.0f / sqrtf(mean_eps); + //const float scale = -rrms/(ne00 * mean_eps); // -1/(n*rms**3) + + { + // z = rms_norm(x) + // + // rms_norm(src1) = + // scale( + // src1, + // div( + // 1, + // sqrt( + // add( + // scale( + // sum( + // sqr( + // src1)), + // (1.0/N)), + // eps)))); + + // postorder: + // ## op args grad + // 00 param src1 grad[#00] + // 01 const 1 + // 02 sqr (#00) grad[#02] + // 03 sum (#02) grad[#03] + // 04 const 1/N + // 05 scale (#03, #04) grad[#05] + // 06 const eps + // 07 add (#05, #06) grad[#07] + // 08 sqrt (#07) grad[#08] + // 09 div (#01,#08) grad[#09] + // 10 scale (#00,#09) grad[#10] + // + // backward pass, given grad[#10] + // #10: scale + // grad[#00] += scale(grad[#10],#09) + // grad[#09] += sum(mul(grad[#10],#00)) + // #09: div + // grad[#08] += neg(mul(grad[#09], div(#09,#08))) + // #08: sqrt + // grad[#07] += mul(grad[#08], div(0.5, #08)) + // #07: add + // grad[#05] += grad[#07] + // #05: scale + // grad[#03] += scale(grad[#05],#04) + // #03: sum + // grad[#02] += repeat(grad[#03], #02) + // #02: + // grad[#00] += scale(mul(#00, grad[#02]), 2.0) + // + // substitute and simplify: + // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, grad[#02]), 2.0) + // grad[#02] = repeat(grad[#03], #02) + // grad[#02] = repeat(scale(grad[#05],#04), #02) + // grad[#02] = repeat(scale(grad[#07],#04), #02) + // grad[#02] = repeat(scale(mul(grad[#08], div(0.5, #08)),#04), #02) + // grad[#02] = repeat(scale(mul(neg(mul(grad[#09], div(#09,#08))), div(0.5, #08)),#04), #02) + // grad[#02] = repeat(scale(mul(neg(mul(sum(mul(grad[#10],#00)), div(#09,#08))), div(0.5, #08)),#04), #02) + // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(#09,#08) * div(0.5, #08) * (1/N)), #02) + // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(div(#01,#08),#08) * div(0.5, #08) * (1/N)), #02) + // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(1,#08*#08) * div(0.5, #08) * (1/N)), #02) + // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N)), #02) + // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, grad[#02]), 2.0) + // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, repeat(-(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N)), #02)), 2.0) + // grad[#00] = scale(grad(#10), #09) + scale(scale(#00, -(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N))), 2.0) + // grad[#00] = scale(grad(#10), #09) + scale(#00, -(sum(mul(grad[#10],#00)) * div(1,#07) * div(1,#08) * (1/N))) + // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,#07*#08) * (-1/N)) + // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,#07*#08) * (-1/N)) + // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,mean_eps*rms) * (-1/N)) + // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*mean_eps)) + // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*(sum_xx/N+eps))) + // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*sum_xx+rms*N*eps)) + // grad[#00] = scale(dz, rrms) + scale(x, sum(mul(dz,x)) * div(-1,rms*N*mean_eps)) + // grad[#00] = scale(dz, rrms) + scale(x, sum_xdz * div(-1,rms*N*mean_eps)) + // a = b*c + d*e + // a = b*c*f/f + d*e*f/f + // a = (b*c*f + d*e*f)*(1/f) + // a = (b*c*(1/c) + d*e*(1/c))*(1/(1/c)) + // a = (b + d*e/c)*c + // b = dz, c = rrms, d = x, e = sum_xdz * div(-1,rms*N*mean_eps) + // a = (dz + 
x*sum_xdz * div(-1,rms*N*mean_eps)/rrms)*rrms + // a = (dz + x*sum_xdz * div(-1,rms*N*mean_eps)*rms)*rrms + // a = (dz + x*sum_xdz * div(-rms,rms*N*mean_eps))*rrms + // a = (dz + x*sum_xdz * div(-1,N*mean_eps))*rrms + // a = (dz + x*div(-sum_xdz,N*mean_eps))*rrms + // a = (dz + x*div(-mean_xdz,mean_eps))*rrms + // grad[#00] = scale(dz + scale(x, div(-mean_xdz,mean_eps)),rrms) + // grad[#00] = scale(dz + scale(x, -mean_xdz/mean_eps),rrms) + // dx = scale(dz + scale(x, -mean_xdz/mean_eps),rrms) + } + // dx = scale(dz + scale(x, -mean_xdz/mean_eps),rrms) + // post-order: + // dx := x + // dx := scale(dx,-mean_xdz/mean_eps) + // dx := add(dx, dz) + // dx := scale(dx, rrms) + float * dx = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); + + // dx[i00] = (x*(-sum_xdz/sum_eps) + dz) / sqrtf(mean_eps) + ggml_vec_cpy_f32(ne00, dx, x); + // ggml_vec_scale_f32(ne00, dx, -mean_xdz/mean_eps); + ggml_vec_scale_f32(ne00, dx, (float) (-sum_xdz) / sum_eps); + ggml_vec_acc_f32(ne00, dx, dz); + ggml_vec_scale_f32(ne00, dx, rrms); + } + } + } +} + +void ggml_compute_forward_rms_norm_back(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_rms_norm_back_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_group_norm + +static void ggml_compute_forward_group_norm_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + + GGML_ASSERT(src0->nb[0] == sizeof(float)); + + const int ith = params->ith; + const int nth = params->nth; + + GGML_TENSOR_UNARY_OP_LOCALS + + // TODO: optimize + + float eps; + memcpy(&eps, dst->op_params + 1, sizeof(float)); + + int n_channels = src0->ne[2]; + int n_groups = dst->op_params[0]; + int n_channels_per_group = (n_channels + n_groups - 1) / n_groups; + for (int i = ith; i < n_groups; i += nth) { + int start = i * n_channels_per_group; + int end = start + n_channels_per_group; + if (end > n_channels) { + end = n_channels; + } + int step = end - start; + + for (int64_t i03 = 0; i03 < ne03; i03++) { + ggml_float sum = 0.0; + for (int64_t i02 = start; i02 < end; i02++) { + for (int64_t i01 = 0; i01 < ne01; i01++) { + const float * x = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); + + ggml_float sumr = 0.0; + for (int64_t i00 = 0; i00 < ne00; i00++) { + sumr += (ggml_float) x[i00]; + } + sum += sumr; + } + } + const float mean = sum / (ne00 * ne01 * step); + + ggml_float sum2 = 0.0; + for (int64_t i02 = start; i02 < end; i02++) { + for (int64_t i01 = 0; i01 < ne01; i01++) { + const float * x = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); + + float * y = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); + + ggml_float sumr = 0.0; + for (int64_t i00 = 0; i00 < ne00; i00++) { + float v = x[i00] - mean; + y[i00] = v; + sumr += (ggml_float) (v * v); + } + sum2 += sumr; + } + } + const float variance = sum2 / (ne00 * ne01 * step); + const float scale = 1.0f / sqrtf(variance + eps); + + for (int64_t i02 = start; i02 < end; i02++) { + for (int64_t i01 = 0; i01 < ne01; i01++) { + float * y = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); + ggml_vec_scale_f32(ne00, y, scale); + } + } + } + } +} + +void ggml_compute_forward_group_norm(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * 
src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_group_norm_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_l2_norm + +static void ggml_compute_forward_l2_norm_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + + GGML_ASSERT(src0->nb[0] == sizeof(float)); + + const int ith = params->ith; + const int nth = params->nth; + + GGML_TENSOR_UNARY_OP_LOCALS + + float eps; + memcpy(&eps, dst->op_params, sizeof(float)); + + GGML_ASSERT(eps >= 0.0f); + + // TODO: optimize + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = ith; i01 < ne01; i01 += nth) { + const float * x = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); + + ggml_float sum = 0.0; + for (int64_t i00 = 0; i00 < ne00; i00++) { + sum += (ggml_float) (x[i00] * x[i00]); + } + + float * y = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); + + memcpy(y, x, ne00 * sizeof(float)); + + const float scale = 1.0f / fmaxf(sqrtf(sum), eps); + + ggml_vec_scale_f32(ne00, y, scale); + } + } + } +} + +void ggml_compute_forward_l2_norm(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_l2_norm_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_out_prod + +static void ggml_compute_forward_out_prod_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_TENSOR_BINARY_OP_LOCALS + + GGML_ASSERT(dst->type == GGML_TYPE_F32); + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + + const int ith = params->ith; + const int nth = params->nth; + + GGML_ASSERT(ne0 == ne00); + GGML_ASSERT(ne1 == ne10); + GGML_ASSERT(ne2 == ne12); + GGML_ASSERT(ne3 == ne13); + + GGML_ASSERT(ne2 % ne02 == 0); + GGML_ASSERT(ne3 % ne03 == 0); + + // we don't support permuted src0 or src1 + GGML_ASSERT(nb00 == sizeof(float)); + + // dst cannot be transposed or permuted + GGML_ASSERT(nb0 == sizeof(float)); + // GGML_ASSERT(nb0 <= nb1); + // GGML_ASSERT(nb1 <= nb2); + // GGML_ASSERT(nb2 <= nb3); + + // nb01 >= nb00 - src0 is not transposed + // compute by src0 rows + + if (ith == 0) { + ggml_vec_set_f32(ne0 * ne1 * ne2 * ne3, (float *) dst->data, 0); + } + ggml_barrier(params->threadpool); + + // dst[:,:,:,:] = 0 + // for i2,i3: + // for i1: + // for i01: + // for i0: + // dst[i0,i1,i2,i3] += src0[i0,i01,i2,i3] * src1[i1,i01,i2,i3] + + // parallelize by last three dimensions + + // total rows in dst + const int64_t nr = ne1 * ne2 * ne3; + + // rows per thread + const int64_t dr = (nr + nth - 1) / nth; + + // row range for this thread + const int64_t ir0 = dr * ith; + const int64_t ir1 = MIN(ir0 + dr, nr); + + // block-tiling attempt + const int64_t blck_0 = MAX(GGML_VEC_MAD_UNROLL, 32); + const int64_t blck_1 = 16; + + // dps == dst per src0, used for group query attention + const int64_t dps2 = ne2 / ne02; + const int64_t dps3 = ne3 / ne03; + + for (int64_t bir = ir0; bir < ir1; bir += blck_1) { + const int64_t bir1 = MIN(bir + blck_1, ir1); + for (int64_t bi01 = 0; bi01 < ne01; bi01 += blck_0) { + const int64_t bne01 = MIN(bi01 + blck_0, ne01); + for (int64_t ir = 
bir; ir < bir1; ++ir) { + // dst indices + const int64_t i3 = ir / (ne2 * ne1); + const int64_t i2 = (ir - i3 * ne2 * ne1) / ne1; + const int64_t i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); + + const int64_t i02 = i2 / dps2; + const int64_t i03 = i3 / dps3; + + //const int64_t i10 = i1; + const int64_t i12 = i2; + const int64_t i13 = i3; + +#if GGML_VEC_MAD_UNROLL > 2 + const int64_t bne01_unroll = bne01 - (bne01 % GGML_VEC_MAD_UNROLL); + for (int64_t i01 = bi01; i01 < bne01_unroll; i01 += GGML_VEC_MAD_UNROLL) { + const int64_t i11 = i01; + + float * s0 = (float *) ((char *) src0->data + (i01 * nb01 + i02 * nb02 + i03 * nb03)); + float * s1 = (float *) ((char *) src1->data + (i1 * nb10 + i11 * nb11 + i12 * nb12 + i13 * nb13)); + float * d = (float *) ((char *) dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb3)); + + ggml_vec_mad_f32_unroll(ne0, nb01, nb11, d, s0, s1); + } + for (int64_t i01 = bne01_unroll; i01 < bne01; ++i01) { + const int64_t i11 = i01; + + float * s0 = (float *) ((char *) src0->data + (i01 * nb01 + i02 * nb02 + i03 * nb03)); + float * s1 = (float *) ((char *) src1->data + (i1 * nb10 + i11 * nb11 + i12 * nb12 + i13 * nb13)); + float * d = (float *) ((char *) dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb3)); + + ggml_vec_mad_f32(ne0, d, s0, *s1); + } +#else + for (int64_t i01 = bi01; i01 < bne01; ++i01) { + const int64_t i11 = i01; + + float * s0 = (float *) ((char *) src0->data + (i01 * nb01 + i02 * nb02 + i03 * nb03)); + float * s1 = (float *) ((char *) src1->data + (i1 * nb10 + i11 * nb11 + i12 * nb12 + i13 * nb13)); + float * d = (float *) ((char *) dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb3)); + + ggml_vec_mad_f32(ne0, d, s0, *s1); + } +#endif + } + } + } +} + +static void ggml_compute_forward_out_prod_q_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_TENSOR_BINARY_OP_LOCALS; + + const int ith = params->ith; + const int nth = params->nth; + + const ggml_type type = src0->type; + const ggml_to_float_t dequantize_row_q = ggml_get_type_traits(type)->to_float; + + GGML_ASSERT(ne02 == ne12); + GGML_ASSERT(ne03 == ne13); + GGML_ASSERT(ne2 == ne12); + GGML_ASSERT(ne3 == ne13); + + // we don't support permuted src0 dim0 + GGML_ASSERT(nb00 == ggml_type_size(type)); + + // dst dim0 cannot be transposed or permuted + GGML_ASSERT(nb0 == sizeof(float)); + // GGML_ASSERT(nb0 <= nb1); + // GGML_ASSERT(nb1 <= nb2); + // GGML_ASSERT(nb2 <= nb3); + + GGML_ASSERT(ne0 == ne00); + GGML_ASSERT(ne1 == ne10); + GGML_ASSERT(ne2 == ne02); + GGML_ASSERT(ne3 == ne03); + + // nb01 >= nb00 - src0 is not transposed + // compute by src0 rows + + if (ith == 0) { + ggml_vec_set_f32(ne0 * ne1 * ne2 * ne3, (float *) dst->data, 0); + } + ggml_barrier(params->threadpool); + + // parallelize by last three dimensions + + // total rows in dst + const int64_t nr = ne1 * ne2 * ne3; + + // rows per thread + const int64_t dr = (nr + nth - 1) / nth; + + // row range for this thread + const int64_t ir0 = dr * ith; + const int64_t ir1 = MIN(ir0 + dr, nr); + + // dst[:,:,:,:] = 0 + // for i2,i3: + // for i1: + // for i01: + // for i0: + // dst[i0,i1,i2,i3] += src0[i0,i01,i2,i3] * src1[i1,i01,i2,i3] + + float * wdata = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32) * ith; + + for (int64_t ir = ir0; ir < ir1; ++ir) { + // dst indices + const int64_t i3 = ir / (ne2 * ne1); + const int64_t i2 = (ir - i3 * ne2 * ne1) / ne1; + const int64_t i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); + + const int64_t i02 = i2; + const int64_t 
i03 = i3; + + //const int64_t i10 = i1; + const int64_t i12 = i2; + const int64_t i13 = i3; + + for (int64_t i01 = 0; i01 < ne01; ++i01) { + const int64_t i11 = i01; + + float * s0 = (float *) ((char *) src0->data + (i01 * nb01 + i02 * nb02 + i03 * nb03)); + float * s1 = (float *) ((char *) src1->data + (i1 * nb10 + i11 * nb11 + i12 * nb12 + i13 * nb13)); + float * d = (float *) ((char *) dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb3)); + + dequantize_row_q(s0, wdata, ne0); + ggml_vec_mad_f32(ne0, d, wdata, *s1); + } + } +} + +void ggml_compute_forward_out_prod(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: + case GGML_TYPE_Q5_0: + case GGML_TYPE_Q5_1: + case GGML_TYPE_Q8_0: + case GGML_TYPE_MXFP4: + case GGML_TYPE_Q2_K: + case GGML_TYPE_Q3_K: + case GGML_TYPE_Q4_K: + case GGML_TYPE_Q5_K: + case GGML_TYPE_Q6_K: + case GGML_TYPE_TQ1_0: + case GGML_TYPE_TQ2_0: + case GGML_TYPE_IQ2_XXS: + case GGML_TYPE_IQ2_XS: + case GGML_TYPE_IQ3_XXS: + case GGML_TYPE_IQ1_S: + case GGML_TYPE_IQ1_M: + case GGML_TYPE_IQ4_NL: + case GGML_TYPE_IQ4_XS: + case GGML_TYPE_IQ3_S: + case GGML_TYPE_IQ2_S: + { + ggml_compute_forward_out_prod_q_f32(params, dst); + } + break; + case GGML_TYPE_F16: + { + GGML_ABORT("fatal error"); // todo + // ggml_compute_forward_out_prod_f16_f32(params, dst); + } + case GGML_TYPE_F32: + { + ggml_compute_forward_out_prod_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_scale + +static void ggml_compute_forward_scale_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + GGML_ASSERT(ggml_is_contiguous(src0)); + GGML_ASSERT(ggml_is_contiguous(dst)); + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + + float s; // scale factor + float b; // bias + + memcpy(&s, (float *) dst->op_params + 0, sizeof(float)); + memcpy(&b, (float *) dst->op_params + 1, sizeof(float)); + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src0->ne[0]; + const int nr = ggml_nrows(src0); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + const size_t nb01 = src0->nb[1]; + + const size_t nb1 = dst->nb[1]; + + if (b == 0.0f) { + for (int i1 = ir0; i1 < ir1; i1++) { + if (dst->data != src0->data) { + // src0 is same shape as dst => same indices + // TODO: add x parameter to ggml_vec_scale_f32 and remove this memcpy + memcpy((char *) dst->data + i1 * nb1, (char *) src0->data + i1 * nb01, nc * sizeof(float)); + } + ggml_vec_scale_f32(nc, (float *) ((char *) dst->data + i1 * nb1), s); + } + } else { + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_vec_mad1_f32(nc, (float *) ((char *) dst->data + i1 * nb1), (float *) ((char *) src0->data + i1 * nb1), + s, b); + } + } +} + +void ggml_compute_forward_scale(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_scale_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_set + +static void ggml_compute_forward_set_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + 
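+    // A rough sketch of what the op_params read below encode (illustrative,
+    // hypothetical numbers): src1 is copied into a strided view of dst, where
+    // view element (i0, i1, i2, i3) lives at byte
+    //     offset + i0*nb0 + i1*nb1 + i2*nb2 + i3*nb3
+    // with nb0 equal to the element size. For example, placing a [3, 2] f32
+    // block at element (2, 1) of a contiguous [8, 8] f32 dst would use
+    //     nb1 = 8 * sizeof(float) and offset = (1*8 + 2) * sizeof(float).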
GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0)); + + // view src0 and dst with these strides and data offset inbytes during set + // nb0 is implicitly element_size because src0 and dst are contiguous + size_t nb1 = ((int32_t *) dst->op_params)[0]; + size_t nb2 = ((int32_t *) dst->op_params)[1]; + size_t nb3 = ((int32_t *) dst->op_params)[2]; + size_t offset = ((int32_t *) dst->op_params)[3]; + bool inplace = (bool) ((int32_t *) dst->op_params)[4]; + + if (!inplace) { + if (params->ith == 0) { + // memcpy needs to be synchronized across threads to avoid race conditions. + // => do it in INIT phase + memcpy(((char *) dst->data), ((char *) src0->data), ggml_nbytes(dst)); + } + ggml_barrier(params->threadpool); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nr = ggml_nrows(src1); + const int nc = src1->ne[0]; + + GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) + GGML_TENSOR_LOCALS(size_t, nb1, src1, nb) + + // src0 and dst as viewed during set + const size_t nb0 = ggml_element_size(src0); + + const int im0 = (ne10 == 0 ? 0 : ne10 - 1); + const int im1 = (ne11 == 0 ? 0 : ne11 - 1); + const int im2 = (ne12 == 0 ? 0 : ne12 - 1); + const int im3 = (ne13 == 0 ? 0 : ne13 - 1); + + GGML_ASSERT(offset + im0 * nb0 + im1 * nb1 + im2 * nb2 + im3 * nb3 <= ggml_nbytes(dst)); + + GGML_ASSERT(nb10 == sizeof(float)); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int ir = ir0; ir < ir1; ++ir) { + // src0 and dst are viewed with shape of src1 and offset + // => same indices + const int i3 = ir / (ne12 * ne11); + const int i2 = (ir - i3 * ne12 * ne11) / ne11; + const int i1 = (ir - i3 * ne12 * ne11 - i2 * ne11); + + ggml_vec_cpy_f32(nc, (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1 + offset), + (float *) ((char *) src1->data + i3 * nb13 + i2 * nb12 + i1 * nb11)); + } +} + +static void ggml_compute_forward_set_i32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0)); + + // view src0 and dst with these strides and data offset inbytes during set + // nb0 is implicitly element_size because src0 and dst are contiguous + size_t nb1 = ((int32_t *) dst->op_params)[0]; + size_t nb2 = ((int32_t *) dst->op_params)[1]; + size_t nb3 = ((int32_t *) dst->op_params)[2]; + size_t offset = ((int32_t *) dst->op_params)[3]; + bool inplace = (bool) ((int32_t *) dst->op_params)[4]; + + if (!inplace) { + if (params->ith == 0) { + // memcpy needs to be synchronized across threads to avoid race conditions. + // => do it in INIT phase + memcpy(((char *) dst->data), ((char *) src0->data), ggml_nbytes(dst)); + } + ggml_barrier(params->threadpool); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nr = ggml_nrows(src1); + const int nc = src1->ne[0]; + + GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) + GGML_TENSOR_LOCALS(size_t, nb1, src1, nb) + + // src0 and dst as viewed during set + const size_t nb0 = ggml_element_size(src0); + + const int im0 = (ne10 == 0 ? 0 : ne10 - 1); + const int im1 = (ne11 == 0 ? 0 : ne11 - 1); + const int im2 = (ne12 == 0 ? 0 : ne12 - 1); + const int im3 = (ne13 == 0 ? 
0 : ne13 - 1); + + GGML_ASSERT(offset + im0 * nb0 + im1 * nb1 + im2 * nb2 + im3 * nb3 <= ggml_nbytes(dst)); + + GGML_ASSERT(nb10 == sizeof(int32_t)); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int ir = ir0; ir < ir1; ++ir) { + // src0 and dst are viewed with shape of src1 and offset + // => same indices + const int i3 = ir / (ne12 * ne11); + const int i2 = (ir - i3 * ne12 * ne11) / ne11; + const int i1 = (ir - i3 * ne12 * ne11 - i2 * ne11); + + ggml_vec_cpy_i32(nc, (int32_t *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1 + offset), + (int32_t *) ((char *) src1->data + i3 * nb13 + i2 * nb12 + i1 * nb11)); + } +} + +void ggml_compute_forward_set(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_set_f32(params, dst); + } + break; + case GGML_TYPE_I32: + { + ggml_compute_forward_set_i32(params, dst); + } + break; + case GGML_TYPE_F16: + case GGML_TYPE_BF16: + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: + case GGML_TYPE_Q5_0: + case GGML_TYPE_Q5_1: + case GGML_TYPE_Q8_0: + case GGML_TYPE_Q8_1: + case GGML_TYPE_MXFP4: + case GGML_TYPE_Q2_K: + case GGML_TYPE_Q3_K: + case GGML_TYPE_Q4_K: + case GGML_TYPE_Q5_K: + case GGML_TYPE_Q6_K: + case GGML_TYPE_TQ1_0: + case GGML_TYPE_TQ2_0: + case GGML_TYPE_IQ2_XXS: + case GGML_TYPE_IQ2_XS: + case GGML_TYPE_IQ3_XXS: + case GGML_TYPE_IQ1_S: + case GGML_TYPE_IQ1_M: + case GGML_TYPE_IQ4_NL: + case GGML_TYPE_IQ4_XS: + case GGML_TYPE_IQ3_S: + case GGML_TYPE_IQ2_S: + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_cpy + +void ggml_compute_forward_cpy(const ggml_compute_params * params, ggml_tensor * dst) { + ggml_compute_forward_dup(params, dst); +} + +// ggml_compute_forward_cont + +void ggml_compute_forward_cont(const ggml_compute_params * params, ggml_tensor * dst) { + ggml_compute_forward_dup(params, dst); +} + +// ggml_compute_forward_get_rows + +static void ggml_compute_forward_get_rows_q(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_TENSOR_BINARY_OP_LOCALS + + const int64_t nc = ne00; + const int64_t nr = ggml_nelements(src1); + + const ggml_type type = src0->type; + const ggml_to_float_t dequantize_row_q = ggml_get_type_traits(type)->to_float; + + assert(ne0 == nc); + assert(ne02 == ne11); + assert(nb00 == ggml_type_size(type)); + assert(ggml_nrows(dst) == nr); + + const int ith = params->ith; + const int nth = params->nth; + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int64_t i = ir0; i < ir1; ++i) { + const int64_t i12 = i / (ne11 * ne10); + const int64_t i11 = (i - i12 * ne11 * ne10) / ne10; + const int64_t i10 = (i - i12 * ne11 * ne10 - i11 * ne10); + const int64_t i01 = *(int32_t *) ((char *) src1->data + i10 * nb10 + i11 * nb11 + i12 * nb12); + + GGML_ASSERT(i01 >= 0 && i01 < ne01); + + dequantize_row_q((const void *) ((char *) src0->data + i01 * nb01 + i11 * nb02 + i12 * nb03), + (float *) ((char *) dst->data + i10 * nb1 + i11 * nb2 + i12 * nb3), nc); + } +} + +static void ggml_compute_forward_get_rows_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = 
dst->src[1]; + + GGML_TENSOR_BINARY_OP_LOCALS + + const int64_t nc = ne00; + const int64_t nr = ggml_nelements(src1); + + assert(ne0 == nc); + assert(ne02 == ne11); + assert(nb00 == sizeof(ggml_fp16_t)); + assert(ggml_nrows(dst) == nr); + + const int ith = params->ith; + const int nth = params->nth; + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int64_t i = ir0; i < ir1; ++i) { + const int64_t i12 = i / (ne11 * ne10); + const int64_t i11 = (i - i12 * ne11 * ne10) / ne10; + const int64_t i10 = (i - i12 * ne11 * ne10 - i11 * ne10); + const int64_t i01 = *(int32_t *) ((char *) src1->data + i10 * nb10 + i11 * nb11 + i12 * nb12); + + GGML_ASSERT(i01 >= 0 && i01 < ne01); + + ggml_cpu_fp16_to_fp32((const ggml_fp16_t *) ((char *) src0->data + i01 * nb01 + i11 * nb02 + i12 * nb03), + (float *) ((char *) dst->data + i10 * nb1 + i11 * nb2 + i12 * nb3), nc); + } +} + +static void ggml_compute_forward_get_rows_bf16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_TENSOR_BINARY_OP_LOCALS + + const int64_t nc = ne00; + const int64_t nr = ggml_nelements(src1); + + assert(ne0 == nc); + assert(ne02 == ne11); + assert(nb00 == sizeof(ggml_bf16_t)); + assert(ggml_nrows(dst) == nr); + + const int ith = params->ith; + const int nth = params->nth; + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int64_t i = ir0; i < ir1; ++i) { + const int64_t i12 = i / (ne11 * ne10); + const int64_t i11 = (i - i12 * ne11 * ne10) / ne10; + const int64_t i10 = (i - i12 * ne11 * ne10 - i11 * ne10); + const int64_t i01 = *(int32_t *) ((char *) src1->data + i10 * nb10 + i11 * nb11 + i12 * nb12); + + GGML_ASSERT(i01 >= 0 && i01 < ne01); + + ggml_cpu_bf16_to_fp32((const ggml_bf16_t *) ((char *) src0->data + i01 * nb01 + i11 * nb02 + i12 * nb03), + (float *) ((char *) dst->data + i10 * nb1 + i11 * nb2 + i12 * nb3), nc); + } +} + +static void ggml_compute_forward_get_rows_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_TENSOR_BINARY_OP_LOCALS + + const int64_t nc = ne00; + const int64_t nr = ggml_nelements(src1); + + assert(ne0 == nc); + assert(ne02 == ne11); + assert(nb00 == sizeof(float)); + assert(ggml_nrows(dst) == nr); + + const int ith = params->ith; + const int nth = params->nth; + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int64_t i = ir0; i < ir1; ++i) { + const int64_t i12 = i / (ne11 * ne10); + const int64_t i11 = (i - i12 * ne11 * ne10) / ne10; + const int64_t i10 = (i - i12 * ne11 * ne10 - i11 * ne10); + const int64_t i01 = *(int32_t *) ((char *) src1->data + i10 * nb10 + i11 * nb11 + i12 * nb12); + + GGML_ASSERT(i01 >= 0 && i01 < ne01); + + ggml_vec_cpy_f32(nc, (float *) ((char *) dst->data + i10 * nb1 + i11 * nb2 + i12 * nb3), + (float *) ((char *) src0->data + i01 * nb01 + i11 * nb02 + i12 * nb03)); + } +} + +void ggml_compute_forward_get_rows(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: + case GGML_TYPE_Q5_0: + case GGML_TYPE_Q5_1: + 
case GGML_TYPE_Q8_0: + case GGML_TYPE_Q8_1: + case GGML_TYPE_MXFP4: + case GGML_TYPE_Q2_K: + case GGML_TYPE_Q3_K: + case GGML_TYPE_Q4_K: + case GGML_TYPE_Q5_K: + case GGML_TYPE_Q6_K: + case GGML_TYPE_TQ1_0: + case GGML_TYPE_TQ2_0: + case GGML_TYPE_IQ2_XXS: + case GGML_TYPE_IQ2_XS: + case GGML_TYPE_IQ3_XXS: + case GGML_TYPE_IQ1_S: + case GGML_TYPE_IQ1_M: + case GGML_TYPE_IQ4_NL: + case GGML_TYPE_IQ4_XS: + case GGML_TYPE_IQ3_S: + case GGML_TYPE_IQ2_S: + { + ggml_compute_forward_get_rows_q(params, dst); + } + break; + case GGML_TYPE_F16: + { + ggml_compute_forward_get_rows_f16(params, dst); + } + break; + case GGML_TYPE_BF16: + { + ggml_compute_forward_get_rows_bf16(params, dst); + } + break; + case GGML_TYPE_F32: + case GGML_TYPE_I32: + { + ggml_compute_forward_get_rows_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } + + //static bool first = true; + //printf("ne0 = %d, ne1 = %d, ne2 = %d\n", dst->ne[0], dst->ne[1], dst->ne[2]); + //if (first) { + // first = false; + //} else { + // for (int k = 0; k < dst->ne[1]; ++k) { + // for (int j = 0; j < dst->ne[0]/16; ++j) { + // for (int i = 0; i < 16; ++i) { + // printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]); + // } + // printf("\n"); + // } + // printf("\n"); + // } + // printf("\n"); + // exit(0); + //} +} + +template +static void ggml_compute_forward_set_rows_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_TENSOR_BINARY_OP_LOCALS + + const int64_t nc = ne00; + const int64_t nr = ne01; + + assert(ne0 == nc); + assert(ne2 == ne02); + assert(ne3 == ne03); + assert(src0->type == GGML_TYPE_F32); + assert(ne02 % ne11 == 0); + assert(ne03 % ne12 == 0); + + const int ith = params->ith; + const int nth = params->nth; + + // rows per thread + const int64_t dr = (nr + nth - 1) / nth; + + // row range for this thread + const int64_t ir0 = dr * ith; + const int64_t ir1 = std::min(ir0 + dr, nr); + + const ggml_from_float_t from_float = ggml_get_type_traits_cpu(dst->type)->from_float; + + for (int64_t i03 = 0; i03 < ne03; ++i03) { + for (int64_t i02 = 0; i02 < ne02; ++i02) { + for (int64_t i = ir0; i < ir1; ++i) { + const int64_t i12 = i03 % ne12; + const int64_t i11 = i02 % ne11; + const int64_t i10 = i; + + const int64_t i1 = *(idx_t *) ((char *) src1->data + i10 * nb10 + i11 * nb11 + i12 * nb12); + + GGML_ASSERT(i1 >= 0 && i1 < ne1); + + from_float((const float *) ((char *) src0->data + i * nb01 + i02 * nb02 + i03 * nb03), + ((char *) dst->data + i1 * nb1 + i02 * nb2 + i03 * nb3), nc); + } + } + } +} + +void ggml_compute_forward_set_rows(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + if (src1->type == GGML_TYPE_I64) { + ggml_compute_forward_set_rows_f32(params, dst); + } else if (src1->type == GGML_TYPE_I32) { + ggml_compute_forward_set_rows_f32(params, dst); + } else { + GGML_ABORT("src1->type = %d (%s) not supported", src1->type, ggml_type_name(src1->type)); + } + } + break; + default: + { + GGML_ABORT("src0->type = %d (%s) not supported", src0->type, ggml_type_name(src0->type)); + } + } +} + +// ggml_compute_forward_get_rows_back + +static void ggml_compute_forward_get_rows_back_f32_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + if (params->ith != 0) 
{ + return; + } + + GGML_ASSERT(ggml_is_contiguous(dst)); + + // ggml_compute_forward_dup_same_cont(params, opt0, dst); + + memset(dst->data, 0, ggml_nbytes(dst)); + + const int nc = src0->ne[0]; + const int nr = ggml_nelements(src1); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(src0->nb[0] == sizeof(ggml_fp16_t)); + + for (int i = 0; i < nr; ++i) { + const int r = ((int32_t *) src1->data)[i]; + + for (int j = 0; j < nc; ++j) { + ggml_fp16_t v = ((ggml_fp16_t *) ((char *) src0->data + i * src0->nb[1]))[j]; + ((float *) ((char *) dst->data + r * dst->nb[1]))[j] += GGML_CPU_FP16_TO_FP32(v); + } + } +} + +static void ggml_compute_forward_get_rows_back_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + if (params->ith != 0) { + return; + } + + GGML_ASSERT(ggml_is_contiguous(dst)); + + // ggml_compute_forward_dup_same_cont(params, opt0, dst); + + memset(dst->data, 0, ggml_nbytes(dst)); + + const int nc = src0->ne[0]; + const int nr = ggml_nelements(src1); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(src0->nb[0] == sizeof(float)); + + for (int i = 0; i < nr; ++i) { + const int r = ((int32_t *) src1->data)[i]; + + ggml_vec_add_f32(nc, (float *) ((char *) dst->data + r * dst->nb[1]), + (float *) ((char *) dst->data + r * dst->nb[1]), + (float *) ((char *) src0->data + i * src0->nb[1])); + } +} + +void ggml_compute_forward_get_rows_back(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F16: + { + ggml_compute_forward_get_rows_back_f32_f16(params, dst); + } + break; + case GGML_TYPE_F32: + { + ggml_compute_forward_get_rows_back_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } + + //static bool first = true; + //printf("ne0 = %d, ne1 = %d, ne2 = %d\n", dst->ne[0], dst->ne[1], dst->ne[2]); + //if (first) { + // first = false; + //} else { + // for (int k = 0; k < dst->ne[1]; ++k) { + // for (int j = 0; j < dst->ne[0]/16; ++j) { + // for (int i = 0; i < 16; ++i) { + // printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]); + // } + // printf("\n"); + // } + // printf("\n"); + // } + // printf("\n"); + // exit(0); + //} +} + +// ggml_compute_forward_diag + +static void ggml_compute_forward_diag_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + if (params->ith != 0) { + return; + } + + // TODO: handle transposed/permuted matrices + + GGML_TENSOR_UNARY_OP_LOCALS + + GGML_ASSERT(ne00 == ne0); + GGML_ASSERT(ne00 == ne1); + GGML_ASSERT(ne01 == 1); + GGML_ASSERT(ne02 == ne2); + GGML_ASSERT(ne03 == ne3); + + GGML_ASSERT(nb00 == sizeof(float)); + GGML_ASSERT(nb0 == sizeof(float)); + + for (int i3 = 0; i3 < ne3; i3++) { + for (int i2 = 0; i2 < ne2; i2++) { + for (int i1 = 0; i1 < ne1; i1++) { + float * d = (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1); + float * s = (float *) ((char *) src0->data + i3 * nb03 + i2 * nb02); + for (int i0 = 0; i0 < i1; i0++) { + d[i0] = 0; + } + d[i1] = s[i1]; + for (int i0 = i1 + 1; i0 < ne0; i0++) { + d[i0] = 0; + } + } + } + } +} + +void ggml_compute_forward_diag(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_diag_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_diag_mask_inf + 
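+// What the helper below computes, as a rough sketch with made-up sizes: every
+// element whose column index i satisfies i > n_past + j (j = row index) is
+// overwritten with `value` (-INF for diag_mask_inf, 0 for diag_mask_zero).
+// For a hypothetical 3x5 row-major input with n_past = 1 ('x' = overwritten):
+//     row 0:  . . x x x
+//     row 1:  . . . x x
+//     row 2:  . . . . x
+// i.e. the usual causal-attention mask, applied in place when src0 == dst.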
+static void ggml_compute_forward_diag_mask_f32(const ggml_compute_params * params, + ggml_tensor * dst, + const float value) { + const ggml_tensor * src0 = dst->src[0]; + + const int ith = params->ith; + const int nth = params->nth; + + const int n_past = ((int32_t *) dst->op_params)[0]; + const bool inplace = src0->data == dst->data; + + GGML_ASSERT(n_past >= 0); + + if (!inplace) { + if (ith == 0) { + // memcpy needs to be synchronized across threads to avoid race conditions. + // => do it in INIT phase + GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); + GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0)); + memcpy(((char *) dst->data), ((char *) src0->data), ggml_nbytes(dst)); + } + ggml_barrier(params->threadpool); + } + + // TODO: handle transposed/permuted matrices + + const int n = ggml_nrows(src0); + const int nc = src0->ne[0]; + const int nr = src0->ne[1]; + const int nz = n / nr; + + GGML_ASSERT(dst->nb[0] == sizeof(float)); + GGML_ASSERT(src0->nb[0] == sizeof(float)); + + for (int k = 0; k < nz; k++) { + for (int j = ith; j < nr; j += nth) { + for (int i = n_past; i < nc; i++) { + if (i > n_past + j) { + *(float *) ((char *) dst->data + k * dst->nb[2] + j * dst->nb[1] + i * dst->nb[0]) = value; + } + } + } + } +} + +void ggml_compute_forward_diag_mask_inf(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_diag_mask_f32(params, dst, -INFINITY); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +void ggml_compute_forward_diag_mask_zero(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_diag_mask_f32(params, dst, 0); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_soft_max + +static void ggml_compute_forward_soft_max_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + const ggml_tensor * src2 = dst->src[2]; + + assert(ggml_is_contiguous(dst)); + assert(ggml_are_same_shape(src0, dst)); + + float scale = 1.0f; + float max_bias = 0.0f; + + memcpy(&scale, (float *) dst->op_params + 0, sizeof(float)); + memcpy(&max_bias, (float *) dst->op_params + 1, sizeof(float)); + + const int ith = params->ith; + const int nth = params->nth; + + GGML_TENSOR_UNARY_OP_LOCALS + + const int64_t nb11 = src1 ? src1->nb[1] : 1; + const int64_t nb12 = src1 ? src1->nb[2] : 1; + const int64_t nb13 = src1 ? src1->nb[3] : 1; + + const int64_t ne12 = src1 ? src1->ne[2] : 1; + const int64_t ne13 = src1 ? src1->ne[3] : 1; + + // TODO: is this supposed to be ceil instead of floor? + // https://huggingface.co/mosaicml/mpt-7b/blob/main/attention.py#L370 + const uint32_t n_head = ne02; + const uint32_t n_head_log2 = 1u << (uint32_t) floor(log2(n_head)); + + const float m0 = powf(2.0f, -(max_bias) / n_head_log2); + const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2); + + float * wp = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith; + + const bool use_f16 = (src1 && src1->type == GGML_TYPE_F16); + + // sinks + const float * sk = src2 ? 
(float *) ((char *) src2->data) : nullptr; + + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = ith; i01 < ne01; i01 += nth) { + const int64_t i11 = i01; + const int64_t i12 = i02 % ne12; + const int64_t i13 = i03 % ne13; + + // ALiBi + const uint32_t h = i02; // head + const float slope = + (max_bias > 0.0f) ? h < n_head_log2 ? powf(m0, h + 1) : powf(m1, 2 * (h - n_head_log2) + 1) : 1.0f; + + float * sp = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); + float * dp = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); + + // broadcast the mask across rows + ggml_fp16_t * mp_f16 = + src1 ? (ggml_fp16_t *) ((char *) src1->data + i11 * nb11 + i12 * nb12 + i13 * nb13) : NULL; + float * mp_f32 = src1 ? (float *) ((char *) src1->data + i11 * nb11 + i12 * nb12 + i13 * nb13) : NULL; + + ggml_vec_cpy_f32(ne00, wp, sp); + ggml_vec_scale_f32(ne00, wp, scale); + if (mp_f32) { + if (use_f16) { + for (int i = 0; i < ne00; ++i) { + wp[i] += slope * GGML_CPU_FP16_TO_FP32(mp_f16[i]); + } + } else { + for (int i = 0; i < ne00; ++i) { + wp[i] += slope * mp_f32[i]; + } + } + } + +#ifndef NDEBUG + for (int i = 0; i < ne00; ++i) { + //printf("p[%d] = %f\n", i, p[i]); + assert(!isnan(wp[i])); + } +#endif + + float max = -INFINITY; + ggml_vec_max_f32(ne00, &max, wp); + + // if we have sinks, make a correction as if they were included in the softmax + if (sk) { + max = MAX(max, sk[i02]); + } + + ggml_float sum = ggml_vec_soft_max_f32(ne00, dp, wp, max); + assert(sum > 0.0); + + if (sk) { + sum += (ggml_float) expf(sk[i02] - max); + } + + sum = 1.0 / sum; + ggml_vec_scale_f32(ne00, dp, sum); + +#ifndef NDEBUG + for (int i = 0; i < ne00; ++i) { + assert(!isnan(dp[i])); + assert(!isinf(dp[i])); + } +#endif + } + } + } +} + +void ggml_compute_forward_soft_max(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_soft_max_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_soft_max_ext_back + +static void ggml_compute_forward_soft_max_ext_back_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_is_contiguous(src0)); + GGML_ASSERT(ggml_is_contiguous(src1)); + GGML_ASSERT(ggml_is_contiguous(dst)); + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + GGML_ASSERT(ggml_are_same_shape(src1, dst)); + + float scale = 1.0f; + float max_bias = 0.0f; + + memcpy(&scale, (const float *) dst->op_params + 0, sizeof(float)); + memcpy(&max_bias, (const float *) dst->op_params + 1, sizeof(float)); + + GGML_ASSERT(max_bias == 0.0f); + + // TODO: handle transposed/permuted matrices + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src0->ne[0]; + const int nr = ggml_nrows(src0); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + float * dy = (float *) ((char *) src0->data + i1 * src0->nb[1]); + float * y = (float *) ((char *) src1->data + i1 * src1->nb[1]); + float * dx = (float *) ((char *) dst->data + i1 * dst->nb[1]); + +#ifndef NDEBUG + for (int i = 0; i < nc; ++i) { + //printf("p[%d] = %f\n", i, p[i]); + assert(!isnan(dy[i])); + assert(!isnan(y[i])); + } +#endif + // 
Jii = yi - yi*yi + // Jij = -yi*yj + // J = diag(y)-y.T*y + // dx = J * dy + // dxk = sum_i(Jki * dyi) + // dxk = sum_i(-yk*yi * dyi) - (-yk*yk)*dyk + (yk - yk*yk)*dyk + // dxk = sum_i(-yk*yi * dyi) + yk*yk*dyk + yk*dyk - yk*yk*dyk + // dxk = sum_i(-yk*yi * dyi) + yk*dyk + // dxk = -yk * sum_i(yi * dyi) + yk*dyk + // dxk = -yk * dot(y, dy) + yk*dyk + // dxk = yk * (- dot(y, dy) + dyk) + // dxk = yk * (dyk - dot(y, dy)) + // + // post-order: + // dot_y_dy := dot(y, dy) + // dx := dy + // dx := dx - dot_y_dy + // dx := dx * y + + // linear runtime, no additional memory + float dot_y_dy = 0; + ggml_vec_dot_f32(nc, &dot_y_dy, 0, y, 0, dy, 0, 1); + ggml_vec_cpy_f32(nc, dx, dy); + ggml_vec_acc1_f32(nc, dx, -dot_y_dy); + ggml_vec_mul_f32(nc, dx, dx, y); + ggml_vec_scale_f32(nc, dx, scale); + +#ifndef NDEBUG + for (int i = 0; i < nc; ++i) { + assert(!isnan(dx[i])); + assert(!isinf(dx[i])); + } +#endif + } +} + +void ggml_compute_forward_soft_max_ext_back(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_soft_max_ext_back_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_clamp + +static void ggml_compute_forward_clamp_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + float min; + float max; + memcpy(&min, (float *) dst->op_params + 0, sizeof(float)); + memcpy(&max, (float *) dst->op_params + 1, sizeof(float)); + + const int ith = params->ith; + const int nth = params->nth; + + const int n = ggml_nrows(src0); + const int nc = src0->ne[0]; + + const size_t nb00 = src0->nb[0]; + const size_t nb01 = src0->nb[1]; + + const size_t nb0 = dst->nb[0]; + const size_t nb1 = dst->nb[1]; + + GGML_ASSERT(nb0 == sizeof(float)); + GGML_ASSERT(nb00 == sizeof(float)); + + for (int j = ith; j < n; j += nth) { + float * dst_ptr = (float *) ((char *) dst->data + j * nb1); + float * src0_ptr = (float *) ((char *) src0->data + j * nb01); + + for (int i = 0; i < nc; i++) { + dst_ptr[i] = MAX(MIN(src0_ptr[i], max), min); + } + } +} + +static void ggml_compute_forward_clamp_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + float min; + float max; + memcpy(&min, (float *) dst->op_params + 0, sizeof(float)); + memcpy(&max, (float *) dst->op_params + 1, sizeof(float)); + + const int ith = params->ith; + const int nth = params->nth; + + const int n = ggml_nrows(src0); + const int nc = src0->ne[0]; + + const size_t nb00 = src0->nb[0]; + const size_t nb01 = src0->nb[1]; + + const size_t nb0 = dst->nb[0]; + const size_t nb1 = dst->nb[1]; + + GGML_ASSERT(nb0 == sizeof(ggml_fp16_t)); + GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); + + for (int j = ith; j < n; j += nth) { + ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + j * nb1); + ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + j * nb01); + + for (int i = 0; i < nc; i++) { + float v = GGML_CPU_FP16_TO_FP32(src0_ptr[i]); + dst_ptr[i] = GGML_CPU_FP32_TO_FP16(MAX(MIN(v, max), min)); + } + } +} + +void ggml_compute_forward_clamp(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_clamp_f32(params, dst); + } + break; + case GGML_TYPE_F16: + { + ggml_compute_forward_clamp_f16(params, dst); + } + break; + case GGML_TYPE_BF16: + case GGML_TYPE_Q4_0: 
+ case GGML_TYPE_Q4_1: + case GGML_TYPE_Q5_0: + case GGML_TYPE_Q5_1: + case GGML_TYPE_Q8_0: + case GGML_TYPE_Q8_1: + case GGML_TYPE_MXFP4: + case GGML_TYPE_Q2_K: + case GGML_TYPE_Q3_K: + case GGML_TYPE_Q4_K: + case GGML_TYPE_Q5_K: + case GGML_TYPE_Q6_K: + case GGML_TYPE_TQ1_0: + case GGML_TYPE_TQ2_0: + case GGML_TYPE_IQ2_XXS: + case GGML_TYPE_IQ2_XS: + case GGML_TYPE_IQ3_XXS: + case GGML_TYPE_IQ1_S: + case GGML_TYPE_IQ1_M: + case GGML_TYPE_IQ4_NL: + case GGML_TYPE_IQ4_XS: + case GGML_TYPE_IQ3_S: + case GGML_TYPE_IQ2_S: + case GGML_TYPE_Q8_K: + case GGML_TYPE_I8: + case GGML_TYPE_I16: + case GGML_TYPE_I32: + case GGML_TYPE_I64: + case GGML_TYPE_F64: + case GGML_TYPE_COUNT: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_rope + +static float rope_yarn_ramp(const float low, const float high, const int i0) { + const float y = (i0 / 2 - low) / MAX(0.001f, high - low); + return 1 - MIN(1, MAX(0, y)); +} + +// YaRN algorithm based on LlamaYaRNScaledRotaryEmbedding.py from https://github.com/jquesnelle/yarn +// MIT licensed. Copyright (c) 2023 Jeffrey Quesnelle and Bowen Peng. +static void rope_yarn(float theta_extrap, + float freq_scale, + float corr_dims[2], + int64_t i0, + float ext_factor, + float mscale, + float * cos_theta, + float * sin_theta) { + // Get n-d rotational scaling corrected for extrapolation + float theta_interp = freq_scale * theta_extrap; + float theta = theta_interp; + if (ext_factor != 0.0f) { + float ramp_mix = rope_yarn_ramp(corr_dims[0], corr_dims[1], i0) * ext_factor; + theta = theta_interp * (1 - ramp_mix) + theta_extrap * ramp_mix; + + // Get n-d magnitude scaling corrected for interpolation + mscale *= 1.0f + 0.1f * logf(1.0f / freq_scale); + } + *cos_theta = cosf(theta) * mscale; + *sin_theta = sinf(theta) * mscale; +} + +static void ggml_rope_cache_init(float theta_base, + float freq_scale, + const float * freq_factors, + float corr_dims[2], + int64_t ne0, + float ext_factor, + float mscale, + float * cache, + float sin_sign, + float theta_scale) { + // ref: https://github.com/jquesnelle/yarn/blob/master/scaled_rope/LlamaYaRNScaledRotaryEmbedding.py + float theta = theta_base; + for (int64_t i0 = 0; i0 < ne0; i0 += 2) { + const float ff = freq_factors ? freq_factors[i0 / 2] : 1.0f; + rope_yarn(theta / ff, freq_scale, corr_dims, i0, ext_factor, mscale, &cache[i0 + 0], &cache[i0 + 1]); + cache[i0 + 1] *= sin_sign; + + theta *= theta_scale; + } +} + +static void ggml_mrope_cache_init(float theta_base_t, + float theta_base_h, + float theta_base_w, + float theta_base_e, + int sections[4], + bool is_imrope, + bool indep_sects, + float freq_scale, + const float * freq_factors, + float corr_dims[2], + int64_t ne0, + float ext_factor, + float mscale, + float * cache, + float sin_sign, + float theta_scale) { + // ref: https://github.com/jquesnelle/yarn/blob/master/scaled_rope/LlamaYaRNScaledRotaryEmbedding.py + float theta_t = theta_base_t; + float theta_h = theta_base_h; + float theta_w = theta_base_w; + float theta_e = theta_base_e; // extra position id for vision encoder + int sect_dims = sections[0] + sections[1] + sections[2] + sections[3]; + int sec_w = sections[1] + sections[0]; + int sec_e = sections[2] + sec_w; + GGML_ASSERT(sect_dims <= ne0); + + for (int64_t i0 = 0; i0 < ne0; i0 += 2) { + const float ff = freq_factors ? freq_factors[i0 / 2] : 1.0f; + + int sector = (i0 / 2) % sect_dims; + if (indep_sects) { + // compute theta independently for each dim sections + // (i.e. 
reset corresponding theta when `i0` goes from one section to another)
+            if (sector == 0) {
+                theta_t = theta_base_t;
+            } else if (sector == sections[0]) {
+                theta_h = theta_base_h;
+            } else if (sector == sec_w) {
+                theta_w = theta_base_w;
+            } else if (sector == sec_e) {
+                theta_e = theta_base_e;
+            }
+        }
+
+        float theta = theta_t;
+        if (is_imrope) { // qwen3vl apply interleaved mrope
+            if (sector % 3 == 1 && sector < 3 * sections[1]) {
+                theta = theta_h;
+            } else if (sector % 3 == 2 && sector < 3 * sections[2]) {
+                theta = theta_w;
+            } else if (sector % 3 == 0 && sector < 3 * sections[0]) {
+                theta = theta_t;
+            } else {
+                theta = theta_e;
+            }
+        } else {
+            if (sector >= sections[0] && sector < sec_w) {
+                theta = theta_h;
+            } else if (sector >= sec_w && sector < sec_w + sections[2]) {
+                theta = theta_w;
+            } else if (sector >= sec_w + sections[2]) {
+                theta = theta_e;
+            }
+        }
+
+        rope_yarn(theta / ff, freq_scale, corr_dims, i0, ext_factor, mscale, &cache[i0 + 0], &cache[i0 + 1]);
+        cache[i0 + 1] *= sin_sign;
+
+        theta_t *= theta_scale;
+        theta_w *= theta_scale;
+        theta_h *= theta_scale;
+        theta_e *= theta_scale;
+    }
+}
+
+template <typename T>
+static void rotate_pairs(const int64_t n,
+                         const int64_t n_offset,
+                         const float * cache,
+                         const T * src_data,
+                         T * dst_data,
+                         const int scale = 2) {
+    for (int64_t i0 = 0; i0 < n; i0 += 2) {
+        const int64_t ic = i0 / scale; // hack for GGML_ROPE_TYPE_NORMAL, where we need ic = i0; for all other cases, ic = i0/2
+
+        const float cos_theta = cache[i0 + 0];
+        const float sin_theta = cache[i0 + 1];
+
+        const T * const src = src_data + ic;
+        T * dst = dst_data + ic;
+
+        const float x0 = type_conversion_table<T>::to_f32(src[0]);
+        const float x1 = type_conversion_table<T>::to_f32(src[n_offset]);
+
+        dst[0] = type_conversion_table<T>::from_f32(x0 * cos_theta - x1 * sin_theta);
+        dst[n_offset] = type_conversion_table<T>::from_f32(x0 * sin_theta + x1 * cos_theta);
+    }
+}
+
+template <typename T> //float or ggml_fp16_t
+static void ggml_compute_forward_rope_flt(const ggml_compute_params * params, ggml_tensor * dst, const bool forward) {
+    const ggml_tensor * src0 = dst->src[0];
+    const ggml_tensor * src1 = dst->src[1];
+    const ggml_tensor * src2 = dst->src[2];
+
+    GGML_ASSERT(src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16);
+    GGML_ASSERT(src1->type == GGML_TYPE_I32);
+
+    float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow;
+    int sections[4];
+
+    //const int n_past = ((int32_t *) dst->op_params)[0];
+    const int n_dims = ((int32_t *) dst->op_params)[1];
+    const int mode = ((int32_t *) dst->op_params)[2];
+    //const int n_ctx = ((int32_t *) dst->op_params)[3];
+    const int n_ctx_orig = ((int32_t *) dst->op_params)[4];
+
+    memcpy(&freq_base, (int32_t *) dst->op_params + 5, sizeof(float));
+    memcpy(&freq_scale, (int32_t *) dst->op_params + 6, sizeof(float));
+    memcpy(&ext_factor, (int32_t *) dst->op_params + 7, sizeof(float));
+    memcpy(&attn_factor, (int32_t *) dst->op_params + 8, sizeof(float));
+    memcpy(&beta_fast, (int32_t *) dst->op_params + 9, sizeof(float));
+    memcpy(&beta_slow, (int32_t *) dst->op_params + 10, sizeof(float));
+    memcpy(&sections, (int32_t *) dst->op_params + 11, sizeof(int) * 4);
+
+    GGML_TENSOR_UNARY_OP_LOCALS
+
+    //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
+    //printf("n_past = %d, ne2 = %d\n", n_past, ne2);
+
+    GGML_ASSERT(nb0 == nb00);
+    GGML_ASSERT(nb0 == sizeof(T));
+
+    const int ith = params->ith;
+    const int nth = params->nth;
+
+    const int nr = ggml_nrows(dst);
+
+    GGML_ASSERT(n_dims <= ne0);
+    GGML_ASSERT(n_dims
% 2 == 0); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + // row index used to determine which thread to use + int ir = 0; + + const float theta_scale = powf(freq_base, -2.0f / n_dims); + + float corr_dims[2]; + ggml_rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow, corr_dims); + + const bool is_imrope = mode == GGML_ROPE_TYPE_IMROPE; // qwen3vl apply interleaved mrope + const bool mrope_used = + mode & GGML_ROPE_TYPE_MROPE; // ggml_rope_multi, note: also true for vision (24 & 8 == true) and for imrope + const bool is_vision = mode == GGML_ROPE_TYPE_VISION; + + if (mrope_used) { + GGML_ASSERT(sections[0] > 0 || sections[1] > 0 || sections[2] > 0); + } + + if (is_vision) { + GGML_ASSERT(n_dims == ne0 / 2); + } + + const float * freq_factors = NULL; + if (src2 != NULL) { + GGML_ASSERT(src2->type == GGML_TYPE_F32); + GGML_ASSERT(src2->ne[0] >= n_dims / 2); + freq_factors = (const float *) src2->data; + } + + // backward process uses inverse rotation by cos and sin. + // cos and sin build a rotation matrix, where the inverse is the transpose. + // this essentially just switches the sign of sin. + const float sin_sign = forward ? 1.0f : -1.0f; + + const int32_t * pos = (const int32_t *) src1->data; + + for (int64_t i3 = 0; i3 < ne3; i3++) { // batch + for (int64_t i2 = 0; i2 < ne2; i2++) { // seq-len + + float * cache = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32) * ith; + if (!mrope_used) { + const int64_t p = pos[i2]; + ggml_rope_cache_init(p, freq_scale, freq_factors, corr_dims, ne0, ext_factor, attn_factor, cache, + sin_sign, theta_scale); + } else { + const int64_t p_t = pos[i2]; + const int64_t p_h = pos[i2 + ne2]; + const int64_t p_w = pos[i2 + ne2 * 2]; + const int64_t p_e = pos[i2 + ne2 * 3]; + ggml_mrope_cache_init(p_t, p_h, p_w, p_e, sections, is_imrope, is_vision, freq_scale, freq_factors, + corr_dims, ne0, ext_factor, attn_factor, cache, sin_sign, theta_scale); + } + + for (int64_t i1 = 0; i1 < ne1; i1++) { // attn-heads + if (ir++ < ir0) { + continue; + } + if (ir > ir1) { + break; + } + + T * src = (T *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01); + T * dst_data = (T *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1); + + switch (mode) { + case GGML_ROPE_TYPE_NORMAL: + rotate_pairs(n_dims, 1, cache, src, dst_data, 1); + break; + case GGML_ROPE_TYPE_NEOX: + case GGML_ROPE_TYPE_MROPE: + case GGML_ROPE_TYPE_IMROPE: + rotate_pairs(n_dims, n_dims / 2, cache, src, dst_data); + break; + case GGML_ROPE_TYPE_VISION: + rotate_pairs(ne0, n_dims, cache, src, dst_data); + break; + default: + GGML_ABORT("rope type not supported"); + } + + if (!is_vision) { + // fill the remain channels with data from src tensor + for (int64_t i0 = n_dims; i0 < ne0; i0 += 2) { + const T * const src = + (T *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01 + i0 * nb00); + T * dst_data = (T *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1 + i0 * nb0); + + dst_data[0] = src[0]; + dst_data[1] = src[1]; + } + } + } //attn-heads + } + } +} + +void ggml_compute_forward_rope(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F16: + { + ggml_compute_forward_rope_flt(params, dst, true); + } + break; + case GGML_TYPE_F32: + { + ggml_compute_forward_rope_flt(params, dst, true); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// 
ggml_compute_forward_rope_back + +void ggml_compute_forward_rope_back(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F16: + { + ggml_compute_forward_rope_flt(params, dst, false); + } + break; + case GGML_TYPE_F32: + { + ggml_compute_forward_rope_flt(params, dst, false); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_conv_transpose_1d + +static void ggml_compute_forward_conv_transpose_1d_f16_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(src0->type == GGML_TYPE_F16); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_F32); + + GGML_TENSOR_BINARY_OP_LOCALS + + const int ith = params->ith; + const int nth = params->nth; + + const int nk = ne00 * ne01 * ne02; + + GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); + GGML_ASSERT(nb10 == sizeof(float)); + + if (ith == 0) { + memset(params->wdata, 0, params->wsize); + + // permute kernel data (src0) from (K x Cout x Cin) to (Cin x K x Cout) + { + ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; + + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = 0; i01 < ne01; i01++) { + const ggml_fp16_t * const src = (ggml_fp16_t *) ((char *) src0->data + i02 * nb02 + i01 * nb01); + ggml_fp16_t * dst_data = wdata + i01 * ne00 * ne02; + for (int64_t i00 = 0; i00 < ne00; i00++) { + dst_data[i00 * ne02 + i02] = src[i00]; + } + } + } + } + + // permute source data (src1) from (L x Cin) to (Cin x L) + { + ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + nk; + ggml_fp16_t * dst_data = wdata; + + for (int64_t i11 = 0; i11 < ne11; i11++) { + const float * const src = (float *) ((char *) src1->data + i11 * nb11); + for (int64_t i10 = 0; i10 < ne10; i10++) { + dst_data[i10 * ne11 + i11] = GGML_CPU_FP32_TO_FP16(src[i10]); + } + } + } + + // need to zero dst since we are accumulating into it + memset(dst->data, 0, ggml_nbytes(dst)); + } + ggml_barrier(params->threadpool); + + const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; + + // total rows in dst + const int nr = ne1; + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; + ggml_fp16_t * const wdata_src = wdata + nk; + + for (int i1 = ir0; i1 < ir1; i1++) { + float * dst_data = (float *) ((char *) dst->data + i1 * nb1); + ggml_fp16_t * wdata_kernel = wdata + i1 * ne02 * ne00; + for (int i10 = 0; i10 < ne10; i10++) { + const int i1n = i10 * ne11; + for (int i00 = 0; i00 < ne00; i00++) { + float v = 0; + ggml_vec_dot_f16(ne02, &v, 0, (ggml_fp16_t *) wdata_src + i1n, 0, + (ggml_fp16_t *) wdata_kernel + i00 * ne02, 0, 1); + dst_data[i10 * s0 + i00] += v; + } + } + } +} + +static void ggml_compute_forward_conv_transpose_1d_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_F32); + + GGML_TENSOR_BINARY_OP_LOCALS + + const int ith = params->ith; + const int nth = params->nth; + + const int nk = ne00 * ne01 * ne02; + + GGML_ASSERT(nb00 == sizeof(float)); + GGML_ASSERT(nb10 == sizeof(float)); + + if (ith == 0) { + 
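+        // Rough sketch of the scratch-buffer layout built in this init block
+        // (thread 0 only; the other threads wait at ggml_barrier() below).
+        // Using the dimension names from the comments that follow:
+        //   wdata[0 .. nk)            kernel rearranged to (Cin x K x Cout),
+        //                             element (i00, i01, i02) at i01*K*Cin + i00*Cin + i02
+        //   wdata[nk .. nk + L*Cin)   src1 rearranged to (Cin x L),
+        //                             element (i10, i11) at nk + i10*Cin + i11
+        // With that layout the accumulation loop further down reduces to a
+        // contiguous dot product over Cin per (output row i1, input position i10,
+        // kernel tap i00):
+        //   dst[i1][i10*s0 + i00] += dot_Cin(src1[:, i10], kernel[i1][i00][:])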
memset(params->wdata, 0, params->wsize); + + // prepare kernel data (src0) from (K x Cout x Cin) to (Cin x K x Cout) + { + float * const wdata = (float *) params->wdata + 0; + + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = 0; i01 < ne01; i01++) { + const float * const src = (float *) ((char *) src0->data + i02 * nb02 + i01 * nb01); + float * dst_data = wdata + i01 * ne00 * ne02; + for (int64_t i00 = 0; i00 < ne00; i00++) { + dst_data[i00 * ne02 + i02] = src[i00]; + } + } + } + } + + // prepare source data (src1) + { + float * const wdata = (float *) params->wdata + nk; + float * dst_data = wdata; + + for (int64_t i11 = 0; i11 < ne11; i11++) { + const float * const src = (float *) ((char *) src1->data + i11 * nb11); + for (int64_t i10 = 0; i10 < ne10; i10++) { + dst_data[i10 * ne11 + i11] = src[i10]; + } + } + } + + // need to zero dst since we are accumulating into it + memset(dst->data, 0, ggml_nbytes(dst)); + } + ggml_barrier(params->threadpool); + + const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; + + // total rows in dst + const int nr = ne1; + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + float * const wdata = (float *) params->wdata + 0; + float * const wdata_src = wdata + nk; + + for (int i1 = ir0; i1 < ir1; i1++) { + float * dst_data = (float *) ((char *) dst->data + i1 * nb1); + float * wdata_kernel = wdata + i1 * ne02 * ne00; + for (int i10 = 0; i10 < ne10; i10++) { + const int i1n = i10 * ne11; + for (int i00 = 0; i00 < ne00; i00++) { + float v = 0; + ggml_vec_dot_f32(ne02, &v, 0, wdata_src + i1n, 0, wdata_kernel + i00 * ne02, 0, 1); + dst_data[i10 * s0 + i00] += v; + } + } + } +} + +void ggml_compute_forward_conv_transpose_1d(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F16: + { + ggml_compute_forward_conv_transpose_1d_f16_f32(params, dst); + } + break; + case GGML_TYPE_F32: + { + ggml_compute_forward_conv_transpose_1d_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_im2col_f32 +// src0: kernel [OC, IC, KH, KW] +// src1: image [N, IC, IH, IW] +// dst: result [N, OH, OW, IC*KH*KW] +static void ggml_compute_forward_im2col_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_F32); + + GGML_TENSOR_BINARY_OP_LOCALS; + + const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; + const int32_t s1 = ((const int32_t *) (dst->op_params))[1]; + const int32_t p0 = ((const int32_t *) (dst->op_params))[2]; + const int32_t p1 = ((const int32_t *) (dst->op_params))[3]; + const int32_t d0 = ((const int32_t *) (dst->op_params))[4]; + const int32_t d1 = ((const int32_t *) (dst->op_params))[5]; + const bool is_2D = ((const int32_t *) (dst->op_params))[6] == 1; + + const int ith = params->ith; + const int nth = params->nth; + + const int64_t N = is_2D ? ne13 : ne12; + const int64_t IC = is_2D ? ne12 : ne11; + const int64_t IH = is_2D ? ne11 : 1; + const int64_t IW = ne10; + + const int64_t KH = is_2D ? ne01 : 1; + const int64_t KW = ne00; + + const int64_t OH = is_2D ? ne2 : 1; + const int64_t OW = ne1; + + int ofs0 = is_2D ? nb13 : nb12; + int ofs1 = is_2D ? 
nb12 : nb11; + + GGML_ASSERT(nb10 == sizeof(float)); + + // im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW] + { + float * const wdata = (float *) dst->data; + + for (int64_t in = 0; in < N; in++) { + for (int64_t ioh = 0; ioh < OH; ioh++) { // 1 + for (int64_t iow = 0; iow < OW; iow++) { + for (int64_t iic = ith; iic < IC; iic += nth) { + // micro kernel + float * dst_data = wdata + (in * OH * OW + ioh * OW + iow) * (IC * KH * KW); // [IC, KH, KW] + const float * const src_data = + (float *) ((char *) src1->data + in * ofs0 + iic * ofs1); // [IH, IW] + + for (int64_t ikh = 0; ikh < KH; ikh++) { // 1 + for (int64_t ikw = 0; ikw < KW; ikw++) { + const int64_t iiw = iow * s0 + ikw * d0 - p0; + const int64_t iih = ioh * s1 + ikh * d1 - p1; + + if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) { + dst_data[iic * (KH * KW) + ikh * KW + ikw] = 0; + } else { + dst_data[iic * (KH * KW) + ikh * KW + ikw] = (src_data[iih * IW + iiw]); + } + } + } + } + } + } + } + } +} + +// ggml_compute_forward_im2col_f16 +// src0: kernel [OC, IC, KH, KW] +// src1: image [N, IC, IH, IW] +// dst: result [N, OH, OW, IC*KH*KW] +static void ggml_compute_forward_im2col_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(src0->type == GGML_TYPE_F16); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_F16); + + GGML_TENSOR_BINARY_OP_LOCALS; + + const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; + const int32_t s1 = ((const int32_t *) (dst->op_params))[1]; + const int32_t p0 = ((const int32_t *) (dst->op_params))[2]; + const int32_t p1 = ((const int32_t *) (dst->op_params))[3]; + const int32_t d0 = ((const int32_t *) (dst->op_params))[4]; + const int32_t d1 = ((const int32_t *) (dst->op_params))[5]; + const bool is_2D = ((const int32_t *) (dst->op_params))[6] == 1; + + const int ith = params->ith; + const int nth = params->nth; + + const int64_t N = is_2D ? ne13 : ne12; + const int64_t IC = is_2D ? ne12 : ne11; + const int64_t IH = is_2D ? ne11 : 1; + const int64_t IW = ne10; + + const int64_t KH = is_2D ? ne01 : 1; + const int64_t KW = ne00; + + const int64_t OH = is_2D ? ne2 : 1; + const int64_t OW = ne1; + + int ofs0 = is_2D ? nb13 : nb12; + int ofs1 = is_2D ? 
nb12 : nb11; + + GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); + GGML_ASSERT(nb10 == sizeof(float)); + + // im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW] + { + ggml_fp16_t * const wdata = (ggml_fp16_t *) dst->data; + + for (int64_t in = 0; in < N; in++) { + for (int64_t ioh = 0; ioh < OH; ioh++) { // 1 + for (int64_t iow = 0; iow < OW; iow++) { + for (int64_t iic = ith; iic < IC; iic += nth) { + // micro kernel + ggml_fp16_t * dst_data = + wdata + (in * OH * OW + ioh * OW + iow) * (IC * KH * KW); // [IC, KH, KW] + const float * const src_data = + (float *) ((char *) src1->data + in * ofs0 + iic * ofs1); // [IH, IW] + + for (int64_t ikh = 0; ikh < KH; ikh++) { // 1 + for (int64_t ikw = 0; ikw < KW; ikw++) { + const int64_t iiw = iow * s0 + ikw * d0 - p0; + const int64_t iih = ioh * s1 + ikh * d1 - p1; + + if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) { + dst_data[iic * (KH * KW) + ikh * KW + ikw] = 0; + } else { + dst_data[iic * (KH * KW) + ikh * KW + ikw] = + GGML_CPU_FP32_TO_FP16(src_data[iih * IW + iiw]); + } + } + } + } + } + } + } + } +} + +void ggml_compute_forward_im2col(const ggml_compute_params * params, ggml_tensor * dst) { + switch (dst->type) { + case GGML_TYPE_F16: + { + ggml_compute_forward_im2col_f16(params, dst); + } + break; + case GGML_TYPE_F32: + { + ggml_compute_forward_im2col_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_im2col_back_f32 + +void ggml_compute_forward_im2col_back_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; // gradients of forward pass output + const ggml_tensor * src1 = dst->src[1]; // convolution kernel + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_F32); + + GGML_TENSOR_BINARY_OP_LOCALS; + + const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; + const int32_t s1 = ((const int32_t *) (dst->op_params))[1]; + const int32_t p0 = ((const int32_t *) (dst->op_params))[2]; + const int32_t p1 = ((const int32_t *) (dst->op_params))[3]; + const int32_t d0 = ((const int32_t *) (dst->op_params))[4]; + const int32_t d1 = ((const int32_t *) (dst->op_params))[5]; + const bool is_2D = ((const int32_t *) (dst->op_params))[6] == 1; + + const int ith = params->ith; + const int nth = params->nth; + + const int64_t N = is_2D ? ne3 : ne2; + const int64_t IC = is_2D ? ne2 : ne1; + const int64_t IH = is_2D ? ne1 : 1; + const int64_t IW = ne0; + + const int64_t KH = is_2D ? ne11 : 1; + const int64_t KW = ne10; + + const int64_t OH = is_2D ? ne02 : 1; + const int64_t OW = ne01; + + int ofs0 = is_2D ? nb3 : nb2; + int ofs1 = is_2D ? nb2 : nb1; + + GGML_ASSERT(nb0 == sizeof(float)); + + // im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW] + { + float * const wdata = (float *) dst->data; + + for (int64_t in = 0; in < N; in++) { + for (int64_t iic = ith; iic < IC; iic += nth) { + for (int64_t iih = 0; iih < IH; iih++) { + for (int64_t iiw = 0; iiw < IW; iiw++) { + // micro kernel + float grad = 0.0f; + for (int64_t ikh = 0; ikh < KH; ikh++) { + for (int64_t ikw = 0; ikw < KW; ikw++) { + // For s0 > 1 some values were skipped over in the forward pass. + // These values have tmpw % s0 != 0 and need to be skipped in the backwards pass as well. + const int64_t tmpw = (iiw + p0 - ikw * d0); + if (tmpw % s0 != 0) { + continue; + } + const int64_t iow = tmpw / s0; + + // Equivalent logic as above except for s1. 
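+                            // Same skip rule as the s0/tmpw check above, with made-up
+                            // numbers for illustration: s1 = 2, p1 = 1, d1 = 1, ikh = 0.
+                            // Input row iih = 3 gives tmph = 4 -> ioh = 2, so gradient is
+                            // gathered from output row 2; iih = 2 gives tmph = 3, which is
+                            // not divisible by s1 and is skipped, because the forward pass
+                            // never sampled that row for this kernel tap.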
+ int64_t ioh; + if (is_2D) { + const int64_t tmph = iih + p1 - ikh * d1; + + if (tmph % s1 != 0) { + continue; + } + + ioh = tmph / s1; + } else { + ioh = 0; + } + + if (iow < 0 || iow >= OW || ioh < 0 || ioh >= OH) { + continue; + } + + const float * const grad_in = + (const float *) src0->data + + (in * OH * OW + ioh * OW + iow) * (IC * KH * KW); // [IC, KH, KW] + grad += grad_in[iic * (KH * KW) + ikh * KW + ikw]; + } + } + float * dst_data = (float *) ((char *) wdata + (in * ofs0 + iic * ofs1)); // [IH, IW] + dst_data[iih * IW + iiw] = grad; + } + } + } + } + } +} + +// ggml_compute_forward_im2col_3d_f16 +// src0: kernel [OC*IC, KD, KH, KW] +// src1: image [N*IC, ID, IH, IW] +// dst: result [N*OD, OH, OW, IC * KD * KH * KW] +static void ggml_compute_forward_im2col_3d_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(src0->type == GGML_TYPE_F16); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_F16); + + GGML_TENSOR_BINARY_OP_LOCALS; + + const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; + const int32_t s1 = ((const int32_t *) (dst->op_params))[1]; + const int32_t s2 = ((const int32_t *) (dst->op_params))[2]; + const int32_t p0 = ((const int32_t *) (dst->op_params))[3]; + const int32_t p1 = ((const int32_t *) (dst->op_params))[4]; + const int32_t p2 = ((const int32_t *) (dst->op_params))[5]; + const int32_t d0 = ((const int32_t *) (dst->op_params))[6]; + const int32_t d1 = ((const int32_t *) (dst->op_params))[7]; + const int32_t d2 = ((const int32_t *) (dst->op_params))[8]; + const int32_t IC = ((const int32_t *) (dst->op_params))[9]; + + const int ith = params->ith; + const int nth = params->nth; + + const int64_t N = ne13 / IC; + const int64_t ID = ne12; + const int64_t IH = ne11; + const int64_t IW = ne10; + + const int64_t OC = ne03 / IC; + GGML_UNUSED(OC); + const int64_t KD = ne02; + const int64_t KH = ne01; + const int64_t KW = ne00; + + const int64_t OD = ne3 / N; + const int64_t OH = ne2; + const int64_t OW = ne1; + const int64_t OH_OW = OH * OW; + const int64_t KD_KH_KW = KD * KH * KW; + const int64_t KH_KW = KH * KW; + const int64_t IC_KD_KH_KW = IC * KD * KH * KW; + + GGML_ASSERT(nb10 == sizeof(float)); + + // im2col: [N*IC, ID, IH, IW] => [N*OD, OH, OW, IC * KD * KH * KW] + { + ggml_fp16_t * const wdata = (ggml_fp16_t *) dst->data; + + for (int64_t in = 0; in < N; in++) { + for (int64_t iod = 0; iod < OD; iod++) { + for (int64_t ioh = 0; ioh < OH; ioh++) { + for (int64_t iow = 0; iow < OW; iow++) { + for (int64_t iic = ith; iic < IC; iic += nth) { + // micro kernel + ggml_fp16_t * dst_data = wdata + (in * OD * OH_OW + iod * OH_OW + ioh * OW + iow) * + IC_KD_KH_KW; // [IC, KD, KH, KW] + const float * const src_data = + (const float *) ((const char *) src1->data + (in * IC + iic) * nb13); // [ID, IH, IW] + + for (int64_t ikd = 0; ikd < KD; ikd++) { + for (int64_t ikh = 0; ikh < KH; ikh++) { + for (int64_t ikw = 0; ikw < KW; ikw++) { + const int64_t iiw = iow * s0 + ikw * d0 - p0; + const int64_t iih = ioh * s1 + ikh * d1 - p1; + const int64_t iid = iod * s2 + ikd * d2 - p2; + + if (iid < 0 || iid >= ID || iih < 0 || iih >= IH || iiw < 0 || iiw >= IW || + iid < 0 || iid >= ID) { + dst_data[iic * KD_KH_KW + ikd * KH_KW + ikh * KW + ikw] = 0; + } else { + const float * const s = + (const float *) ((const char *) src_data + iid * nb12 + iih * nb11 + + iiw * nb10); // [ID, IH, IW] + dst_data[iic * KD_KH_KW + ikd * KH_KW + ikh * KW 
+ ikw] = + GGML_CPU_FP32_TO_FP16(*s); + } + } + } + } + } + } + } + } + } + } +} + +// ggml_compute_forward_im2col_3d_f32 +// src0: kernel [OC*IC, KD, KH, KW] +// src1: image [N*IC, ID, IH, IW] +// dst: result [N*OD, OH, OW, IC * KD * KH * KW] +static void ggml_compute_forward_im2col_3d_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_F32); + + GGML_TENSOR_BINARY_OP_LOCALS; + + const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; + const int32_t s1 = ((const int32_t *) (dst->op_params))[1]; + const int32_t s2 = ((const int32_t *) (dst->op_params))[2]; + const int32_t p0 = ((const int32_t *) (dst->op_params))[3]; + const int32_t p1 = ((const int32_t *) (dst->op_params))[4]; + const int32_t p2 = ((const int32_t *) (dst->op_params))[5]; + const int32_t d0 = ((const int32_t *) (dst->op_params))[6]; + const int32_t d1 = ((const int32_t *) (dst->op_params))[7]; + const int32_t d2 = ((const int32_t *) (dst->op_params))[8]; + const int32_t IC = ((const int32_t *) (dst->op_params))[9]; + + const int ith = params->ith; + const int nth = params->nth; + + const int64_t N = ne13 / IC; + const int64_t ID = ne12; + const int64_t IH = ne11; + const int64_t IW = ne10; + + const int64_t OC = ne03 / IC; + GGML_UNUSED(OC); + const int64_t KD = ne02; + const int64_t KH = ne01; + const int64_t KW = ne00; + + const int64_t OD = ne3 / N; + const int64_t OH = ne2; + const int64_t OW = ne1; + + const int64_t OH_OW = OH * OW; + const int64_t KD_KH_KW = KD * KH * KW; + const int64_t KH_KW = KH * KW; + const int64_t IC_KD_KH_KW = IC * KD * KH * KW; + + GGML_ASSERT(nb10 == sizeof(float)); + + // im2col: [N*IC, ID, IH, IW] => [N*OD, OH, OW, IC * KD * KH * KW] + { + float * const wdata = (float *) dst->data; + + for (int64_t in = 0; in < N; in++) { + for (int64_t iod = 0; iod < OD; iod++) { + for (int64_t ioh = 0; ioh < OH; ioh++) { + for (int64_t iow = 0; iow < OW; iow++) { + for (int64_t iic = ith; iic < IC; iic += nth) { + // micro kernel + float * dst_data = wdata + (in * OD * OH_OW + iod * OH_OW + ioh * OW + iow) * + IC_KD_KH_KW; // [IC, KD, KH, KW] + const float * const src_data = + (const float *) ((const char *) src1->data + (in * IC + iic) * nb13); // [ID, IH, IW] + + for (int64_t ikd = 0; ikd < KD; ikd++) { + for (int64_t ikh = 0; ikh < KH; ikh++) { + for (int64_t ikw = 0; ikw < KW; ikw++) { + const int64_t iiw = iow * s0 + ikw * d0 - p0; + const int64_t iih = ioh * s1 + ikh * d1 - p1; + const int64_t iid = iod * s2 + ikd * d2 - p2; + + if (iid < 0 || iid >= ID || iih < 0 || iih >= IH || iiw < 0 || iiw >= IW || + iid < 0 || iid >= ID) { + dst_data[iic * KD_KH_KW + ikd * KH_KW + ikh * KW + ikw] = 0; + } else { + const float * const s = + (const float *) ((const char *) src_data + iid * nb12 + iih * nb11 + + iiw * nb10); // [ID, IH, IW] + dst_data[iic * KD_KH_KW + ikd * KH_KW + ikh * KW + ikw] = *s; + } + } + } + } + } + } + } + } + } + } +} + +void ggml_compute_forward_im2col_3d(const ggml_compute_params * params, ggml_tensor * dst) { + switch (dst->type) { + case GGML_TYPE_F16: + { + ggml_compute_forward_im2col_3d_f16(params, dst); + } + break; + case GGML_TYPE_F32: + { + ggml_compute_forward_im2col_3d_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +static void ggml_call_mul_mat(ggml_type type, + const ggml_compute_params * params, + int64_t m, + int64_t n, + int64_t k, + void * a, + void 
* b, + float * c) { + const ggml_type_traits * traits = ggml_get_type_traits(type); + struct ggml_tensor src1 = {}; + src1.type = type; + src1.ne[0] = k; + src1.ne[1] = m; + src1.ne[2] = 1; + src1.ne[3] = 1; + src1.nb[0] = traits->type_size; + src1.nb[1] = k * traits->type_size; + src1.nb[2] = src1.nb[1]; + src1.nb[3] = src1.nb[2]; + src1.data = a; + + struct ggml_tensor src0 = {}; + src0.type = type; + src0.ne[0] = k; + src0.ne[1] = n; + src0.ne[2] = 1; + src0.ne[3] = 1; + src0.nb[0] = traits->type_size; + src0.nb[1] = k * traits->type_size; + src0.nb[2] = src0.nb[1]; + src0.nb[3] = src0.nb[2]; + src0.data = b; + + struct ggml_tensor dst = {}; + dst.ne[0] = n; + dst.ne[1] = m; + dst.ne[2] = 1; + dst.ne[3] = 1; + dst.nb[0] = sizeof(float); + dst.nb[1] = n * sizeof(float); + dst.nb[2] = dst.nb[1]; + dst.nb[3] = dst.nb[2]; + dst.data = c; + dst.src[0] = &src0; + dst.src[1] = &src1; + + ggml_compute_forward_mul_mat(params, &dst); +} + +static inline int64_t ggml_wrap_around(int64_t coord, int64_t size) { + return (coord + size) % size; // adding size avoids negative number weirdness +} + +// ggml_compute_forward_conv_2d + +static void ggml_compute_forward_conv_2d_impl(const ggml_compute_params * params, + const ggml_tensor * kernel, // [KW, KH, IC, OC] + const ggml_tensor * src, // [W, H, C, N] + ggml_tensor * dst, // [OW, OH, OC, N] + ggml_type kernel_type) { + GGML_ASSERT(ggml_is_contiguous(kernel)); + GGML_ASSERT(kernel_type == GGML_TYPE_F16 || kernel_type == GGML_TYPE_F32); + GGML_ASSERT(kernel->type == kernel_type); + + const ggml_type_traits * traits = ggml_get_type_traits(kernel_type); + + const int32_t stride_x = dst->op_params[0]; + const int32_t stride_y = dst->op_params[1]; + const int32_t pad_x = dst->op_params[2]; + const int32_t pad_y = dst->op_params[3]; + const int32_t dilation_x = dst->op_params[4]; + const int32_t dilation_y = dst->op_params[5]; + + const int64_t c_in = src->ne[2]; + const int64_t c_out = kernel->ne[3]; + GGML_ASSERT(c_in == kernel->ne[2]); + + const int64_t src_w = src->ne[0]; + const int64_t src_h = src->ne[1]; + const int64_t knl_w = kernel->ne[0]; + const int64_t knl_h = kernel->ne[1]; + const int64_t dst_w = dst->ne[0]; + const int64_t dst_h = dst->ne[1]; + + const float * src_data = (float *) src->data; + void * knl_data = kernel->data; + float * dst_data = (float *) dst->data; + + const int64_t knl_n = knl_w * knl_h * c_in; + const int64_t patch_total = dst->ne[3] * dst_w * dst_h; + + const int64_t space_per_patch = knl_n * traits->type_size + c_out * sizeof(float); + const int64_t batch_size = params->wsize / space_per_patch; + const int64_t patches_per_batch = batch_size > 8 ? 
(batch_size / 8) * 8 : batch_size; + const int64_t batch_n = (patch_total + patches_per_batch - 1) / patches_per_batch; + + GGML_ASSERT(patches_per_batch > 0 && batch_size >= 1); + + void * tmp = params->wdata; + + for (int64_t batch_i = 0; batch_i < batch_n; ++batch_i) { + const int64_t patch_start_batch = batch_i * patches_per_batch; + const int64_t patch_end_batch = std::min(patch_start_batch + patches_per_batch, patch_total); + const int64_t patch_n = patch_end_batch - patch_start_batch; + + const int64_t patch_per_thread = (patch_n + params->nth - 1) / params->nth; + const int64_t patch_start = patch_start_batch + params->ith * patch_per_thread; + const int64_t patch_end = std::min(patch_start + patch_per_thread, patch_end_batch); + + //im2col for a patch + for (int64_t p = patch_start; p < patch_end; ++p) { + const int64_t batch_n = p / (dst_w * dst_h); + const int64_t src_x = (p / dst_w) % dst_h; + const int64_t src_y = p % dst_w; + + const float * src_base = (const float *) ((const char *) src_data + batch_n * src->nb[3]); + char * dst_row = (char *) tmp + (p % patches_per_batch) * knl_n * traits->type_size; + + for (int64_t ic = 0; ic < c_in; ++ic) { + for (int64_t ky = 0; ky < knl_h; ++ky) { + for (int64_t kx = 0; kx < knl_w; ++kx) { + const int64_t sy = src_x * stride_y + ky * dilation_y - pad_y; + const int64_t sx = src_y * stride_x + kx * dilation_x - pad_x; + + int64_t dst_idx = ic * (knl_h * knl_w) + ky * knl_w + kx; + + float src_val; + if (sy < 0 || sy >= src_h || sx < 0 || sx >= src_w) { + src_val = 0.0f; + } else { + const float * src_ptr = (const float *) ((const char *) src_base + sx * src->nb[0] + + sy * src->nb[1] + ic * src->nb[2]); + src_val = *src_ptr; + } + + char * element_ptr = dst_row + dst_idx * traits->type_size; + if (kernel_type == GGML_TYPE_F32) { + *(float *) element_ptr = src_val; + } else if (kernel_type == GGML_TYPE_F16) { + *(ggml_fp16_t *) element_ptr = GGML_CPU_FP32_TO_FP16(src_val); + } + } + } + } + } // patches handled by this thread + + ggml_barrier(params->threadpool); + + float * gemm_output = (float *) ((char *) tmp + patches_per_batch * knl_n * traits->type_size); + + GGML_ASSERT(gemm_output + patch_n * c_out <= (float *) tmp + params->wsize); + + // GEMM: patches[patch_n, knl_n] × kernel[knl_n, c_out] = output[patch_n, c_out] + ggml_call_mul_mat(kernel_type, params, patch_n, c_out, knl_n, tmp, knl_data, gemm_output); + + ggml_barrier(params->threadpool); + + //permute back [OC, N, OH, OW] to [N, OC, OH, OW] + const int64_t permute_per_thread = (patch_n + params->nth - 1) / params->nth; + const int64_t permute_start = params->ith * permute_per_thread; + const int64_t permute_end = std::min(permute_start + permute_per_thread, patch_n); + + for (int64_t i = permute_start; i < permute_end; ++i) { + const int64_t p = patch_start_batch + i; + const int64_t batch_n = p / (dst_w * dst_h); + const int64_t dst_y = (p / dst_w) % dst_h; + const int64_t dst_x = p % dst_w; + + for (int64_t oc = 0; oc < c_out; ++oc) { + const float value = gemm_output[i * c_out + oc]; + float * dst_ptr = (float *) ((char *) dst_data + dst_x * dst->nb[0] + dst_y * dst->nb[1] + + oc * dst->nb[2] + batch_n * dst->nb[3]); + *dst_ptr = value; + } + } + } +} + +void ggml_compute_forward_conv_2d(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + ggml_compute_forward_conv_2d_impl(params, src0, src1, dst, src0->type); +} + +// ggml_compute_forward_conv_3d + +static void 
ggml_compute_forward_conv_3d_impl(const ggml_compute_params * params, + const ggml_tensor * kernel, + const ggml_tensor * src, + ggml_tensor * dst, + ggml_type kernel_type) { + GGML_ASSERT(ggml_is_contiguous(kernel)); + GGML_ASSERT(kernel_type == GGML_TYPE_F16 || kernel_type == GGML_TYPE_F32); + GGML_ASSERT(kernel->type == kernel_type); + + const ggml_type_traits * traits = ggml_get_type_traits(kernel_type); + + const int32_t s0 = dst->op_params[0]; + const int32_t s1 = dst->op_params[1]; + const int32_t s2 = dst->op_params[2]; + const int32_t p0 = dst->op_params[3]; + const int32_t p1 = dst->op_params[4]; + const int32_t p2 = dst->op_params[5]; + const int32_t d0 = dst->op_params[6]; + const int32_t d1 = dst->op_params[7]; + const int32_t d2 = dst->op_params[8]; + const int32_t c = dst->op_params[9]; + const int32_t n = dst->op_params[10]; + const int32_t oc = dst->op_params[11]; + + const int64_t src_w = src->ne[0]; + const int64_t src_h = src->ne[1]; + const int64_t src_d = src->ne[2]; + const int64_t knl_w = kernel->ne[0]; + const int64_t knl_h = kernel->ne[1]; + const int64_t knl_d = kernel->ne[2]; + const int64_t dst_w = dst->ne[0]; + const int64_t dst_h = dst->ne[1]; + const int64_t dst_d = dst->ne[2]; + + const float * src_data = (float *) src->data; + void * knl_data = kernel->data; + float * dst_data = (float *) dst->data; + + const int64_t knl_n_per_channel = knl_w * knl_h * knl_d; + const int64_t knl_n_total = knl_n_per_channel * c; + const int64_t patch_total = n * dst_w * dst_h * dst_d; + + const int64_t space_per_patch = knl_n_total * traits->type_size + oc * sizeof(float); + const int64_t batch_size = params->wsize / space_per_patch; + const int64_t patches_per_batch = batch_size > 8 ? (batch_size / 8) * 8 : batch_size; + const int64_t batch_n = (patch_total + patches_per_batch - 1) / patches_per_batch; + + GGML_ASSERT(patches_per_batch > 0 && batch_size >= 1); + + void * tmp = params->wdata; + + for (int64_t batch_i = 0; batch_i < batch_n; ++batch_i) { + const int64_t patch_start_batch = batch_i * patches_per_batch; + const int64_t patch_end_batch = std::min(patch_start_batch + patches_per_batch, patch_total); + const int64_t patch_n_in_batch = patch_end_batch - patch_start_batch; + + const int64_t patch_per_thread = (patch_n_in_batch + params->nth - 1) / params->nth; + const int64_t patch_start = patch_start_batch + params->ith * patch_per_thread; + const int64_t patch_end = std::min(patch_start + patch_per_thread, patch_end_batch); + + for (int64_t p = patch_start; p < patch_end; ++p) { + const int64_t p_in_batch = p % (dst_w * dst_h * dst_d); + const int64_t p_in_depth = p_in_batch % (dst_w * dst_h); + const int64_t batch_idx = p / (dst_w * dst_h * dst_d); + const int64_t dst_z = p_in_batch / (dst_w * dst_h); + const int64_t dst_y = p_in_depth / dst_w; + const int64_t dst_x = p_in_depth % dst_w; + + char * dst_row = (char *) tmp + (p % patches_per_batch) * knl_n_total * traits->type_size; + + for (int64_t ic = 0; ic < c; ++ic) { + for (int64_t kz = 0; kz < knl_d; ++kz) { + for (int64_t ky = 0; ky < knl_h; ++ky) { + for (int64_t kx = 0; kx < knl_w; ++kx) { + const int64_t sz = dst_z * s2 + kz * d2 - p2; + const int64_t sy = dst_y * s1 + ky * d1 - p1; + const int64_t sx = dst_x * s0 + kx * d0 - p0; + + int64_t dst_idx = ic * knl_n_per_channel + kz * (knl_h * knl_w) + ky * knl_w + kx; + + float src_val; + if (sz < 0 || sz >= src_d || sy < 0 || sy >= src_h || sx < 0 || sx >= src_w) { + src_val = 0.0f; + } else { + const int64_t cn_idx = batch_idx * c + ic; + const float * 
src_ptr = + (const float *) ((const char *) src_data + sx * src->nb[0] + sy * src->nb[1] + + sz * src->nb[2] + cn_idx * src->nb[3]); + src_val = *src_ptr; + } + + char * element_ptr = dst_row + dst_idx * traits->type_size; + if (kernel_type == GGML_TYPE_F32) { + *(float *) element_ptr = src_val; + } else if (kernel_type == GGML_TYPE_F16) { + *(ggml_fp16_t *) element_ptr = GGML_CPU_FP32_TO_FP16(src_val); + } + } + } + } + } + } + + ggml_barrier(params->threadpool); + + float * gemm_output = (float *) ((char *) tmp + patches_per_batch * knl_n_total * traits->type_size); + ggml_call_mul_mat(kernel_type, params, patch_n_in_batch, oc, knl_n_total, tmp, knl_data, gemm_output); + + ggml_barrier(params->threadpool); + + const int64_t permute_per_thread = (patch_n_in_batch + params->nth - 1) / params->nth; + const int64_t permute_start = params->ith * permute_per_thread; + const int64_t permute_end = std::min(permute_start + permute_per_thread, patch_n_in_batch); + + for (int64_t i = permute_start; i < permute_end; ++i) { + const int64_t p = patch_start_batch + i; + const int64_t p_in_batch = p % (dst_w * dst_h * dst_d); + const int64_t p_in_depth = p_in_batch % (dst_w * dst_h); + const int64_t batch_idx = p / (dst_w * dst_h * dst_d); + const int64_t dst_z = p_in_batch / (dst_w * dst_h); + const int64_t dst_y = p_in_depth / dst_w; + const int64_t dst_x = p_in_depth % dst_w; + + for (int64_t ioc = 0; ioc < oc; ++ioc) { + const float value = gemm_output[i * oc + ioc]; + const int64_t ocn_idx = batch_idx * oc + ioc; + float * dst_ptr = (float *) ((char *) dst_data + dst_x * dst->nb[0] + dst_y * dst->nb[1] + + dst_z * dst->nb[2] + ocn_idx * dst->nb[3]); + *dst_ptr = value; + } + } + } +} + +void ggml_compute_forward_conv_3d(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + ggml_compute_forward_conv_3d_impl(params, src0, src1, dst, src0->type); +} + +// ggml_compute_forward_conv_transpose_2d + +void ggml_compute_forward_conv_transpose_2d(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(src0->type == GGML_TYPE_F16); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_F32); + + GGML_TENSOR_BINARY_OP_LOCALS + + const int ith = params->ith; + const int nth = params->nth; + + const int nk = ne00 * ne01 * ne02 * ne03; + + GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); + GGML_ASSERT(nb10 == sizeof(float)); + + if (ith == 0) { + memset(params->wdata, 0, params->wsize); + + // permute kernel data (src0) from (Kw x Kh x Cout x Cin) to (Cin x Kw x Kh x Cout) + { + ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; + + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + const ggml_fp16_t * const src = (ggml_fp16_t *) ((char *) src0->data + i03 * nb03 + i02 * nb02); + ggml_fp16_t * dst_data = wdata + i02 * ne01 * ne00 * ne03; + for (int64_t i01 = 0; i01 < ne01; i01++) { + for (int64_t i00 = 0; i00 < ne00; i00++) { + dst_data[i01 * ne00 * ne03 + i00 * ne03 + i03] = src[i01 * ne00 + i00]; + } + } + } + } + } + + // permute source data (src1) from (Sw x Sh x Cin) to (Cin x Sw x Sh) + { + ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + nk; + for (int i12 = 0; i12 < ne12; i12++) { + for (int i11 = 0; i11 < ne11; i11++) { + const float * const src = (float *) ((char *) src1->data + i12 * nb12 + i11 * nb11); + ggml_fp16_t * dst_data = wdata 
+ i11 * ne10 * ne12; + for (int i10 = 0; i10 < ne10; i10++) { + dst_data[i10 * ne12 + i12] = GGML_CPU_FP32_TO_FP16(src[i10]); + } + } + } + } + + memset(dst->data, 0, ggml_nbytes(dst)); + } + ggml_barrier(params->threadpool); + + const int32_t stride = ggml_get_op_params_i32(dst, 0); + + // total patches in dst + const int np = ne2; + + // patches per thread + const int dp = (np + nth - 1) / nth; + + // patch range for this thread + const int ip0 = dp * ith; + const int ip1 = MIN(ip0 + dp, np); + + ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; + ggml_fp16_t * const wdata_src = wdata + nk; + + for (int i2 = ip0; i2 < ip1; i2++) { // Cout + float * dst_data = (float *) ((char *) dst->data + i2 * nb2); + ggml_fp16_t * wdata_kernel = wdata + i2 * ne01 * ne00 * ne03; + for (int i11 = 0; i11 < ne11; i11++) { + for (int i10 = 0; i10 < ne10; i10++) { + const int i1n = i11 * ne10 * ne12 + i10 * ne12; + for (int i01 = 0; i01 < ne01; i01++) { + for (int i00 = 0; i00 < ne00; i00++) { + float v = 0; + ggml_vec_dot_f16(ne03, &v, 0, wdata_src + i1n, 0, wdata_kernel + i01 * ne00 * ne03 + i00 * ne03, + 0, 1); + dst_data[(i11 * stride + i01) * ne0 + i10 * stride + i00] += v; + } + } + } + } + } +} + +// ggml_compute_forward_conv_2d_dw + +struct ggml_conv_2d_dw_params { + int64_t channels; + int64_t batch; + int64_t src_w; + int64_t src_h; + int64_t dst_w; + int64_t dst_h; + int64_t knl_w; + int64_t knl_h; + int stride_x; + int stride_y; + int pad_x; + int pad_y; + int dilation_x; + int dilation_y; +}; + +static void ggml_compute_forward_conv_2d_dw_cwhn(const ggml_compute_params * params, + const ggml_tensor * src, + const ggml_tensor * kernel, + ggml_tensor * dst, + const ggml_conv_2d_dw_params & p) { + const int64_t c = p.channels; + const float * knl_data = (const float *) kernel->data; + + const int64_t rows_total = p.dst_h * p.batch; + const int64_t rows_per_thread = (rows_total + params->nth - 1) / params->nth; + const int64_t row_start = params->ith * rows_per_thread; + const int64_t row_end = MIN(row_start + rows_per_thread, rows_total); + +#ifdef GGML_SIMD +# if defined(__ARM_FEATURE_SVE) + const int64_t pkg_size = svcntw(); +# else + const int64_t pkg_size = GGML_F32_EPR; +# endif + const int64_t pkg_count = c / pkg_size; + const int64_t c_pkg_end = pkg_count * pkg_size; +#else + const int64_t c_pkg_end = 0; +#endif + + for (int64_t row = row_start; row < row_end; ++row) { + const int64_t dst_y = row % p.dst_h; + const float * src_data = (const float *) src->data + (row / p.dst_h) * p.src_w * p.src_h * c; + for (int64_t dst_x = 0; dst_x < p.dst_w; ++dst_x) { + float * dst_data = (float *) dst->data + (row * p.dst_w + dst_x) * c; + const int64_t src_y_base = dst_y * p.stride_y - p.pad_y; + const int64_t src_x_base = dst_x * p.stride_x - p.pad_x; + +#ifdef GGML_SIMD + // Vectorized loop + for (int64_t c_i = 0; c_i < c_pkg_end; c_i += pkg_size) { + GGML_F32_VEC sum = GGML_F32_VEC_ZERO; + for (int64_t knl_y = 0; knl_y < p.knl_h; ++knl_y) { + const int64_t src_y = src_y_base + knl_y * p.dilation_y; + if (src_y < 0 || src_y >= p.src_h) { + continue; + } + for (int64_t knl_x = 0; knl_x < p.knl_w; ++knl_x) { + const int64_t src_x = src_x_base + knl_x * p.dilation_x; + if (src_x < 0 || src_x >= p.src_w) { + continue; + } + GGML_F32_VEC k = GGML_F32_VEC_LOAD(knl_data + (knl_y * p.knl_w + knl_x) * c + c_i); + GGML_F32_VEC s = GGML_F32_VEC_LOAD(src_data + (src_y * p.src_w + src_x) * c + c_i); + sum = GGML_F32_VEC_FMA(sum, k, s); + } + } + GGML_F32_VEC_STORE(dst_data + c_i, sum); + } +#endif + 
// Scalar loop + for (int64_t c_i = c_pkg_end; c_i < c; ++c_i) { + float sum = 0.0f; + for (int64_t knl_y = 0; knl_y < p.knl_h; ++knl_y) { + const int64_t src_y = src_y_base + knl_y * p.dilation_y; + if (src_y < 0 || src_y >= p.src_h) { + continue; + } + for (int64_t knl_x = 0; knl_x < p.knl_w; ++knl_x) { + const int64_t src_x = src_x_base + knl_x * p.dilation_x; + if (src_x < 0 || src_x >= p.src_w) { + continue; + } + sum += knl_data[(knl_y * p.knl_w + knl_x) * c + c_i] * + src_data[(src_y * p.src_w + src_x) * c + c_i]; + } + } + dst_data[c_i] = sum; + } + } + } +} + +static void ggml_compute_forward_conv_2d_dw_whcn(const ggml_compute_params * params, + const ggml_tensor * src, + const ggml_tensor * kernel, + ggml_tensor * dst, + const ggml_conv_2d_dw_params & p) { + const int64_t n = p.channels * p.batch; + const int64_t per_thread = (n + params->nth - 1) / params->nth; + const int64_t start = params->ith * per_thread; + const int64_t end = MIN(start + per_thread, n); + + for (int64_t i = start; i < end; ++i) { + const float * knl_data = (const float *) kernel->data + (i % p.channels) * p.knl_w * p.knl_h; + const float * src_data = (const float *) src->data + i * p.src_w * p.src_h; + float * dst_data = (float *) dst->data + i * p.dst_w * p.dst_h; + + for (int64_t dst_y = 0; dst_y < p.dst_h; ++dst_y) { + for (int64_t dst_x = 0; dst_x < p.dst_w; ++dst_x) { + float sum = 0.0f; + for (int64_t knl_y = 0; knl_y < p.knl_h; ++knl_y) { + const int64_t src_y = dst_y * p.stride_y + knl_y * p.dilation_y - p.pad_y; + if (src_y < 0 || src_y >= p.src_h) { + continue; + } + for (int64_t knl_x = 0; knl_x < p.knl_w; ++knl_x) { + const int64_t src_x = dst_x * p.stride_x + knl_x * p.dilation_x - p.pad_x; + if (src_x < 0 || src_x >= p.src_w) { + continue; + } + sum += knl_data[knl_y * p.knl_w + knl_x] * src_data[src_y * p.src_w + src_x]; + } + } + dst_data[dst_y * p.dst_w + dst_x] = sum; + } + } + } +} + +void ggml_compute_forward_conv_2d_dw(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * kernel = dst->src[0]; + const ggml_tensor * src = dst->src[1]; + ggml_conv_2d_dw_params p; + p.channels = src->ne[2]; + p.batch = src->ne[3]; + p.src_w = src->ne[0]; + p.src_h = src->ne[1]; + p.dst_w = dst->ne[0]; + p.dst_h = dst->ne[1]; + p.knl_w = kernel->ne[0]; + p.knl_h = kernel->ne[1]; + p.stride_x = dst->op_params[0]; + p.stride_y = dst->op_params[1]; + p.pad_x = dst->op_params[2]; + p.pad_y = dst->op_params[3]; + p.dilation_x = dst->op_params[4]; + p.dilation_y = dst->op_params[5]; + + GGML_ASSERT(kernel->ne[3] == p.channels); + GGML_ASSERT(dst->ne[3] == p.batch); + + if (ggml_is_contiguous(src)) { + ggml_compute_forward_conv_2d_dw_whcn(params, src, kernel, dst, p); + } else if (ggml_is_contiguous_channels(src)) { + // kernel should also have channels most contiguous in memory + GGML_ASSERT(kernel->nb[0] >= kernel->nb[2] && kernel->nb[1] >= kernel->nb[0]); + ggml_compute_forward_conv_2d_dw_cwhn(params, src, kernel, dst, p); + } else { + GGML_ABORT("non-contiguous memory layout not supported"); + } +} + +// ggml_compute_forward_pool_1d_sk_p0 + +static void ggml_compute_forward_pool_1d_sk_p0(const ggml_compute_params * params, + const ggml_op_pool op, + const int k, + ggml_tensor * dst) { + const ggml_tensor * src = dst->src[0]; + + assert(src->type == GGML_TYPE_F32 || src->type == GGML_TYPE_F16); + + if (params->ith != 0) { + return; + } + + const char * cdata = (const char *) src->data; + const char * const data_end = cdata + ggml_nbytes(src); + float * drow = (float *) dst->data; + + 
const int64_t rs = dst->ne[0];
+
+    while (cdata < data_end) {
+        const void * srow = (const void *) cdata;
+        int j = 0;
+        for (int64_t i = 0; i < rs; ++i) {
+            switch (op) {
+                case GGML_OP_POOL_AVG:
+                    drow[i] = 0;
+                    break;
+                case GGML_OP_POOL_MAX:
+                    drow[i] = -FLT_MAX;
+                    break;
+                case GGML_OP_POOL_COUNT:
+                    GGML_ABORT("fatal error");
+            }
+            for (int ki = 0; ki < k; ++ki) {
+                const float srow_j = (src->type == GGML_TYPE_F32) ?
+                                         ((const float *) srow)[j] :
+                                         GGML_CPU_FP16_TO_FP32(((const ggml_fp16_t *) srow)[j]);
+                switch (op) {
+                    case GGML_OP_POOL_AVG:
+                        drow[i] += srow_j;
+                        break;
+                    case GGML_OP_POOL_MAX:
+                        if (srow_j > drow[i]) {
+                            drow[i] = srow_j;
+                        }
+                        break;
+                    case GGML_OP_POOL_COUNT:
+                        GGML_ABORT("fatal error");
+                }
+                ++j;
+            }
+            switch (op) {
+                case GGML_OP_POOL_AVG:
+                    drow[i] /= k;
+                    break;
+                case GGML_OP_POOL_MAX:
+                    break;
+                case GGML_OP_POOL_COUNT:
+                    GGML_ABORT("fatal error");
+            }
+        }
+
+        cdata += src->nb[1];
+        drow += rs;
+    }
+}
+
+// ggml_compute_forward_pool_1d
+
+void ggml_compute_forward_pool_1d(const ggml_compute_params * params, ggml_tensor * dst) {
+    const int32_t * opts = (const int32_t *) dst->op_params;
+    ggml_op_pool op = static_cast<ggml_op_pool>(opts[0]);
+    const int k0 = opts[1];
+    const int s0 = opts[2];
+    const int p0 = opts[3];
+    GGML_ASSERT(p0 == 0); // padding not supported
+    GGML_ASSERT(k0 == s0); // only s = k supported
+
+    ggml_compute_forward_pool_1d_sk_p0(params, op, k0, dst);
+}
+
+// ggml_compute_forward_pool_2d
+
+void ggml_compute_forward_pool_2d(const ggml_compute_params * params, ggml_tensor * dst) {
+    const ggml_tensor * src = dst->src[0];
+
+    assert(src->type == GGML_TYPE_F32 || src->type == GGML_TYPE_F16);
+
+    if (params->ith != 0) {
+        return;
+    }
+
+    const int32_t * opts = (const int32_t *) dst->op_params;
+    ggml_op_pool op = static_cast<ggml_op_pool>(opts[0]);
+    const int k0 = opts[1];
+    const int k1 = opts[2];
+    const int s0 = opts[3];
+    const int s1 = opts[4];
+    const int p0 = opts[5];
+    const int p1 = opts[6];
+    const char * cdata = (const char *) src->data;
+    const char * const data_end = cdata + ggml_nbytes(src);
+
+    const int64_t px = dst->ne[0];
+    const int64_t py = dst->ne[1];
+    const int64_t pa = px * py;
+
+    float * dplane = (float *) dst->data;
+
+    const int ka = k0 * k1;
+    const int offset0 = -p0;
+    const int offset1 = -p1;
+
+    while (cdata < data_end) {
+        for (int oy = 0; oy < py; ++oy) {
+            float * const drow = dplane + oy * px;
+            for (int ox = 0; ox < px; ++ox) {
+                float * const out = drow + ox;
+                switch (op) {
+                    case GGML_OP_POOL_AVG:
+                        *out = 0;
+                        break;
+                    case GGML_OP_POOL_MAX:
+                        *out = -FLT_MAX;
+                        break;
+                    case GGML_OP_POOL_COUNT:
+                        GGML_ABORT("fatal error");
+                }
+
+                const int ix = offset0 + ox * s0;
+                const int iy = offset1 + oy * s1;
+
+                for (int ky = 0; ky < k1; ++ky) {
+                    if (iy + ky < 0 || iy + ky >= src->ne[1]) {
+                        continue;
+                    }
+                    const void * srow = (const void *) (cdata + src->nb[1] * (iy + ky));
+                    for (int kx = 0; kx < k0; ++kx) {
+                        int j = ix + kx;
+                        if (j < 0 || j >= src->ne[0]) {
+                            continue;
+                        }
+                        const float srow_j = (src->type == GGML_TYPE_F32) ?
+                                                 ((const float *) srow)[j] :
+                                                 GGML_CPU_FP16_TO_FP32(((const ggml_fp16_t *) srow)[j]);
+                        switch (op) {
+                            case GGML_OP_POOL_AVG:
+                                *out += srow_j;
+                                break;
+                            case GGML_OP_POOL_MAX:
+                                if (srow_j > *out) {
+                                    *out = srow_j;
+                                }
+                                break;
+                            case GGML_OP_POOL_COUNT:
+                                GGML_ABORT("fatal error");
+                        }
+                    }
+                }
+                switch (op) {
+                    case GGML_OP_POOL_AVG:
+                        *out /= ka;
+                        break;
+                    case GGML_OP_POOL_MAX:
+                        break;
+                    case GGML_OP_POOL_COUNT:
+                        GGML_ABORT("fatal error");
+                }
+            }
+        }
+
+        cdata += src->nb[2];
+        dplane += pa;
+    }
+}
+
+// ggml_compute_forward_pool_2d_back
+
+void ggml_compute_forward_pool_2d_back(const ggml_compute_params * params, ggml_tensor * dst) {
+    const ggml_tensor * src = dst->src[0];
+    const ggml_tensor * dstf = dst->src[1]; // forward tensor of dst
+
+    assert(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
+
+    if (params->ith != 0) {
+        return;
+    }
+
+    const int32_t * opts = (const int32_t *) dst->op_params;
+    ggml_op_pool op = static_cast<ggml_op_pool>(opts[0]);
+    const int k0 = opts[1];
+    const int k1 = opts[2];
+    const int s0 = opts[3];
+    const int s1 = opts[4];
+    const int p0 = opts[5];
+    const int p1 = opts[6];
+
+    char * cdata = (char *) dst->data;
+    const char * cdataf = (const char *) dstf->data;
+    const char * const data_end = cdata + ggml_nbytes(dst);
+
+    GGML_ASSERT(params->ith == 0);
+    memset(cdata, 0, ggml_nbytes(dst));
+
+    const int64_t px = src->ne[0];
+    const int64_t py = src->ne[1];
+    const int64_t pa = px * py;
+
+    const float * splane = (const float *) src->data;
+
+    const int ka = k0 * k1;
+    const int offset0 = -p0;
+    const int offset1 = -p1;
+
+    while (cdata < data_end) {
+        for (int oy = 0; oy < py; ++oy) {
+            const float * const srow = splane + oy * px;
+            for (int ox = 0; ox < px; ++ox) {
+                const float grad0 = srow[ox];
+
+                const int ix = offset0 + ox * s0;
+                const int iy = offset1 + oy * s1;
+
+                if (op == GGML_OP_POOL_MAX) {
+                    float maxval = -FLT_MAX;
+                    int kxmax = -1;
+                    int kymax = -1;
+
+                    for (int ky = 0; ky < k1; ++ky) {
+                        if (iy + ky < 0 || iy + ky >= dst->ne[1]) {
+                            continue;
+                        }
+                        const void * drowf = (const void *) (cdataf + dst->nb[1] * (iy + ky));
+                        for (int kx = 0; kx < k0; ++kx) {
+                            int j = ix + kx;
+                            if (j < 0 || j >= dst->ne[0]) {
+                                continue;
+                            }
+
+                            const float val = dst->type == GGML_TYPE_F32 ?
+ ((const float *) drowf)[j] : + GGML_CPU_FP16_TO_FP32(((const ggml_fp16_t *) drowf)[j]); + if (val <= maxval) { + continue; + } + + maxval = val; + kxmax = kx; + kymax = ky; + } + } + + if (kxmax == -1 || kymax == -1) { + continue; + } + + void * drow = (void *) (cdata + dst->nb[1] * (iy + kymax)); + const int j = ix + kxmax; + if (dst->type == GGML_TYPE_F32) { + ((float *) drow)[j] += grad0; + } else { + ((ggml_fp16_t *) drow)[j] = + GGML_CPU_FP32_TO_FP16(grad0 + GGML_CPU_FP16_TO_FP32(((const ggml_fp16_t *) drow)[j])); + } + } else if (op == GGML_OP_POOL_AVG) { + const float grad = grad0 / ka; + + for (int ky = 0; ky < k1; ++ky) { + if (iy + ky < 0 || iy + ky >= dst->ne[1]) { + continue; + } + void * drow = (void *) (cdata + dst->nb[1] * (iy + ky)); + for (int kx = 0; kx < k0; ++kx) { + int j = ix + kx; + if (j < 0 || j >= dst->ne[0]) { + continue; + } + + if (dst->type == GGML_TYPE_F32) { + ((float *) drow)[j] += grad; + } else { + ((ggml_fp16_t *) drow)[j] += GGML_CPU_FP32_TO_FP16(grad); + } + } + } + } else { + GGML_ASSERT(false); + } + } + } + + cdata += dst->nb[2]; + cdataf += dst->nb[2]; + splane += pa; + } +} + +// ggml_compute_forward_upscale + +static void ggml_compute_forward_upscale_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + + const int ith = params->ith; + const int nth = params->nth; + + GGML_TENSOR_UNARY_OP_LOCALS + + float sf0 = (float) ne0 / src0->ne[0]; + float sf1 = (float) ne1 / src0->ne[1]; + float sf2 = (float) ne2 / src0->ne[2]; + float sf3 = (float) ne3 / src0->ne[3]; + float pixel_offset = 0.5f; + + const int32_t mode_flags = ggml_get_op_params_i32(dst, 0); + const ggml_scale_mode mode = (ggml_scale_mode) (mode_flags & 0xFF); + + if (mode_flags & GGML_SCALE_FLAG_ALIGN_CORNERS) { + pixel_offset = 0.0f; + sf0 = ne0 > 1 && ne00 > 1 ? (float) (ne0 - 1) / (ne00 - 1) : sf0; + sf1 = ne1 > 1 && ne01 > 1 ? 
(float) (ne1 - 1) / (ne01 - 1) : sf1; + } + + if (mode == GGML_SCALE_MODE_NEAREST) { + for (int64_t i3 = 0; i3 < ne3; i3++) { + const int64_t i03 = i3 / sf3; + for (int64_t i2 = ith; i2 < ne2; i2 += nth) { + const int64_t i02 = i2 / sf2; + for (int64_t i1 = 0; i1 < ne1; i1++) { + const int64_t i01 = i1 / sf1; + for (int64_t i0 = 0; i0 < ne0; i0++) { + const int64_t i00 = i0 / sf0; + + const float * x = + (float *) ((char *) src0->data + i00 * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03); + float * y = (float *) ((char *) dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3); + + *y = *x; + } + } + } + } + } else if (mode == GGML_SCALE_MODE_BILINEAR) { + for (int64_t i3 = 0; i3 < ne3; i3++) { + const int64_t i03 = i3 / sf3; + for (int64_t i2 = ith; i2 < ne2; i2 += nth) { + const int64_t i02 = i2 / sf2; + for (int64_t i1 = 0; i1 < ne1; i1++) { + const float y = ((float) i1 + pixel_offset) / sf1 - pixel_offset; + int64_t y0 = (int64_t) floorf(y); + int64_t y1 = y0 + 1; + + y0 = std::max(int64_t(0), std::min(y0, ne01 - 1)); + y1 = std::max(int64_t(0), std::min(y1, ne01 - 1)); + + float dy = y - (float) y0; + dy = std::max(0.0f, std::min(dy, 1.0f)); + + for (int64_t i0 = 0; i0 < ne0; i0++) { + const float x = ((float) i0 + pixel_offset) / sf0 - pixel_offset; + int64_t x0 = (int64_t) floorf(x); + int64_t x1 = x0 + 1; + + x0 = std::max(int64_t(0), std::min(x0, ne00 - 1)); + x1 = std::max(int64_t(0), std::min(x1, ne00 - 1)); + + float dx = x - (float) x0; + dx = std::max(0.0f, std::min(dx, 1.0f)); + + // fetch the four surrounding pixel values and interpolate + const float a = *(const float *) ((const char *) src0->data + x0 * nb00 + y0 * nb01 + + i02 * nb02 + i03 * nb03); + const float b = *(const float *) ((const char *) src0->data + x1 * nb00 + y0 * nb01 + + i02 * nb02 + i03 * nb03); + const float c = *(const float *) ((const char *) src0->data + x0 * nb00 + y1 * nb01 + + i02 * nb02 + i03 * nb03); + const float d = *(const float *) ((const char *) src0->data + x1 * nb00 + y1 * nb01 + + i02 * nb02 + i03 * nb03); + + const float val = a * (1 - dx) * (1 - dy) + b * dx * (1 - dy) + c * (1 - dx) * dy + d * dx * dy; + + float * y_dst = (float *) ((char *) dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3); + *y_dst = val; + } + } + } + } + } else if (mode == GGML_SCALE_MODE_BICUBIC) { + // https://en.wikipedia.org/wiki/Bicubic_interpolation#Bicubic_convolution_algorithm + const float a = -0.75f; // use alpha = -0.75 (same as PyTorch) + auto weight1 = [a](float x) { + return ((a + 2) * x - (a + 3)) * x * x + 1; + }; + auto weight2 = [a](float x) { + return ((a * x - 5 * a) * x + 8 * a) * x - 4 * a; + }; + auto bicubic = [=](float p0, float p1, float p2, float p3, float x) { + const float w0 = weight2(x + 1); + const float w1 = weight1(x + 0); + const float w2 = weight1(1 - x); + const float w3 = weight2(2 - x); + return p0 * w0 + p1 * w1 + p2 * w2 + p3 * w3; + }; + + for (int64_t i3 = 0; i3 < ne3; i3++) { + const int64_t i03 = i3 / sf3; + for (int64_t i2 = ith; i2 < ne2; i2 += nth) { + const int64_t i02 = i2 / sf2; + for (int64_t i1 = 0; i1 < ne1; i1++) { + const float y = ((float) i1 + pixel_offset) / sf1 - pixel_offset; + const int64_t y0 = (int64_t) floorf(y); + const float dy = y - (float) y0; + + for (int64_t i0 = 0; i0 < ne0; i0++) { + const float x = ((float) i0 + pixel_offset) / sf0 - pixel_offset; + const int64_t x0 = (int64_t) floorf(x); + const float dx = x - (float) x0; + + auto p = [=](int64_t x_off, int64_t y_off) -> float { + int64_t i00 = std::max(int64_t(0), std::min(x0 + x_off, 
ne00 - 1)); + int64_t i01 = std::max(int64_t(0), std::min(y0 + y_off, ne01 - 1)); + return *(const float *) ((const char *) src0->data + i00 * nb00 + i01 * nb01 + i02 * nb02 + + i03 * nb03); + }; + + const float val = bicubic(bicubic(p(-1, -1), p(0, -1), p(1, -1), p(2, -1), dx), + bicubic(p(-1, 0), p(0, 0), p(1, 0), p(2, 0), dx), + bicubic(p(-1, 1), p(0, 1), p(1, 1), p(2, 1), dx), + bicubic(p(-1, 2), p(0, 2), p(1, 2), p(2, 2), dx), dy); + + float * y_dst = (float *) ((char *) dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3); + *y_dst = val; + } + } + } + } + } else { + GGML_ABORT("unsupported upscale mode"); + } +} + +void ggml_compute_forward_upscale(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_upscale_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_pad + +static void ggml_compute_forward_pad_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + GGML_ASSERT(src0->nb[0] == sizeof(float)); + GGML_ASSERT(dst->nb[0] == sizeof(float)); + + const int ith = params->ith; + const int nth = params->nth; + + GGML_TENSOR_UNARY_OP_LOCALS + + float * dst_ptr = (float *) dst->data; + const int32_t lp0 = ggml_get_op_params_i32(dst, 0); + const int32_t rp0 = ggml_get_op_params_i32(dst, 1); + const int32_t lp1 = ggml_get_op_params_i32(dst, 2); + const int32_t rp1 = ggml_get_op_params_i32(dst, 3); + const int32_t lp2 = ggml_get_op_params_i32(dst, 4); + const int32_t rp2 = ggml_get_op_params_i32(dst, 5); + const int32_t lp3 = ggml_get_op_params_i32(dst, 6); + const int32_t rp3 = ggml_get_op_params_i32(dst, 7); + const int32_t circular = ggml_get_op_params_i32(dst, 8); + + // TODO: optimize + + if (circular == 0) { + for (int64_t i2 = 0; i2 < ne2; ++i2) { + for (int64_t i1 = ith; i1 < ne1; i1 += nth) { + for (int64_t i0 = 0; i0 < ne0; ++i0) { + for (int64_t i3 = 0; i3 < ne3; ++i3) { + const int64_t dst_idx = i3 * (ne0 * ne1 * ne2) + i2 * (ne0 * ne1) + i1 * ne0 + i0; + if ((i0 >= lp0 && i0 < ne0 - rp0) && (i1 >= lp1 && i1 < ne1 - rp1) && + (i2 >= lp2 && i2 < ne2 - rp2) && (i3 >= lp3 && i3 < ne3 - rp3)) { + const int64_t src_idx = + (i3 - lp3) * nb03 + (i2 - lp2) * nb02 + (i1 - lp1) * nb01 + (i0 - lp0) * nb00; + const float * src_ptr = (const float *) ((char *) src0->data + src_idx); + dst_ptr[dst_idx] = *src_ptr; + } else { + dst_ptr[dst_idx] = 0; + } + } + } + } + } + } else { + for (int64_t i2 = 0; i2 < ne2; ++i2) { + for (int64_t i1 = ith; i1 < ne1; i1 += nth) { + for (int64_t i0 = 0; i0 < ne0; ++i0) { + for (int64_t i3 = 0; i3 < ne3; ++i3) { + const int64_t dst_idx = i3 * (ne0 * ne1 * ne2) + i2 * (ne0 * ne1) + i1 * ne0 + i0; + const int64_t src_i0 = ggml_wrap_around(i0 - lp0, ne00); + const int64_t src_i1 = ggml_wrap_around(i1 - lp1, ne01); + const int64_t src_i2 = ggml_wrap_around(i2 - lp2, ne02); + const int64_t src_i3 = ggml_wrap_around(i3 - lp3, ne03); + + const int64_t src_idx = src_i3 * nb03 + src_i2 * nb02 + src_i1 * nb01 + src_i0 * nb00; + + const float * src_ptr = (const float *) ((char *) src0->data + src_idx); + dst_ptr[dst_idx] = *src_ptr; + } + } + } + } + } +} + +void ggml_compute_forward_pad(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_pad_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } 
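+        // only F32 tensors are handled here; the zero-fill and circular (wrap-around) paths above
+        // are both selected via op_params[8] inside ggml_compute_forward_pad_f32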
+ } +} + +// ggml_compute_forward_pad_reflect_1d + +void ggml_compute_forward_pad_reflect_1d(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_F32); + + const int ith = params->ith; + const int nth = params->nth; + + const int32_t * opts = (const int32_t *) dst->op_params; + const int p0 = opts[0]; + const int p1 = opts[1]; + + GGML_TENSOR_UNARY_OP_LOCALS + + for (int64_t i3 = 0; i3 < ne3; i3++) { + for (int64_t i2 = 0; i2 < ne2; i2++) { + for (int64_t i1 = ith; i1 < ne1; i1 += nth) { + float * left = (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1 + p0 * nb0); + float * right = (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1 + (ne0 - p1 - 1) * nb0); + + ggml_vec_cpy_f32(ne00, left, (float *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01)); + + for (int i0 = 1; i0 <= p0; i0++) { + left[-i0] = left[i0]; + } + for (int i0 = 1; i0 <= p1; i0++) { + right[i0] = right[-i0]; + } + } + } + } +} + +// ggml_compute_forward_roll + +static int64_t ggml_wrap_index(int64_t i, int64_t ne) { + if (i < 0) { + return i + ne; + } else if (i >= ne) { + return i - ne; + } + return i; +} + +static void ggml_compute_forward_roll_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const float * src_data = (const float *) src0->data; + float * dst_data = (float *) dst->data; + + GGML_TENSOR_UNARY_OP_LOCALS + + const int s0 = ggml_get_op_params_i32(dst, 0); + const int s1 = ggml_get_op_params_i32(dst, 1); + const int s2 = ggml_get_op_params_i32(dst, 2); + const int s3 = ggml_get_op_params_i32(dst, 3); + + const int64_t total = ne1 * ne2 * ne3; + const int64_t per_thread = (total + params->nth) / params->nth; + const int64_t start = params->ith * per_thread; + const int64_t end = std::min(start + per_thread, total); + + for (int64_t i = start; i < end; ++i) { + const int64_t i1 = i % ne1; + const int64_t i2 = (i / ne1) % ne2; + const int64_t i3 = i / (ne2 * ne1); + float * dst_row = dst_data + (i3 * nb3 + i2 * nb2 + i1 * nb1) / sizeof(float); + + const int64_t i01 = ggml_wrap_index(i1 - s1, ne01); + const int64_t i02 = ggml_wrap_index(i2 - s2, ne02); + const int64_t i03 = ggml_wrap_index(i3 - s3, ne03); + const float * src_row = src_data + (i03 * nb03 + i02 * nb02 + i01 * nb01) / sizeof(float); + + const int64_t s = ggml_wrap_index(-s0, ne00); + const int64_t n = ne00 - s; + ggml_vec_cpy_f32(n, dst_row, src_row + s); + ggml_vec_cpy_f32(s, dst_row + n, src_row); + } +} + +void ggml_compute_forward_roll(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_roll_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_arange + +static void ggml_compute_forward_arange_f32(const ggml_compute_params * params, ggml_tensor * dst) { + GGML_ASSERT(dst->nb[0] == sizeof(float)); + + const int ith = params->ith; + const int nth = params->nth; + + const float start = ggml_get_op_params_f32(dst, 0); + const float stop = ggml_get_op_params_f32(dst, 1); + const float step = ggml_get_op_params_f32(dst, 2); + + const int64_t steps = (int64_t) ceilf((stop - start) / step); + + GGML_ASSERT(ggml_nelements(dst) == steps); + + for (int64_t i = ith; i < steps; i += nth) { + float value = start + step * i; + ((float *) dst->data)[i] = value; + } 
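+    // e.g. start = 0.0f, stop = 5.0f, step = 2.0f gives steps = ceilf(2.5f) = 3 and the values
+    // {0.0f, 2.0f, 4.0f}; each thread writes every nth element of that range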
+}
+
+void ggml_compute_forward_arange(const ggml_compute_params * params, ggml_tensor * dst) {
+    switch (dst->type) {
+        case GGML_TYPE_F32:
+            {
+                ggml_compute_forward_arange_f32(params, dst);
+            }
+            break;
+        default:
+            {
+                GGML_ABORT("fatal error");
+            }
+    }
+}
+
+static void ggml_compute_forward_timestep_embedding_f32(const ggml_compute_params * params, ggml_tensor * dst) {
+    const ggml_tensor * src0 = dst->src[0];
+
+    GGML_ASSERT(src0->nb[0] == sizeof(float));
+
+    const int ith = params->ith;
+    const int nth = params->nth;
+
+    GGML_TENSOR_UNARY_OP_LOCALS
+
+    const int dim = ggml_get_op_params_i32(dst, 0);
+    const int max_period = ggml_get_op_params_i32(dst, 1);
+
+    int half = dim / 2;
+
+    for (int64_t i = 0; i < ne00; i++) {
+        float * embed_data = (float *) ((char *) dst->data + i * nb1);
+        for (int64_t j = ith; j < half; j += nth) {
+            float timestep = ((float *) src0->data)[i];
+            float freq = (float) expf(-logf(max_period) * j / half);
+            float arg = timestep * freq;
+            embed_data[j] = cosf(arg);
+            embed_data[j + half] = sinf(arg);
+        }
+        if (dim % 2 != 0 && ith == 0) {
+            embed_data[2 * half] = 0.f;
+        }
+    }
+}
+
+void ggml_compute_forward_timestep_embedding(const ggml_compute_params * params, ggml_tensor * dst) {
+    const ggml_tensor * src0 = dst->src[0];
+
+    switch (src0->type) {
+        case GGML_TYPE_F32:
+            {
+                ggml_compute_forward_timestep_embedding_f32(params, dst);
+            }
+            break;
+        default:
+            {
+                GGML_ABORT("fatal error");
+            }
+    }
+}
+
+// ggml_compute_forward_argsort
+
+template <ggml_sort_order order> struct cmp_argsort {
+    const float * data;
+
+    bool operator()(int32_t a, int32_t b) const {
+        if constexpr (order == GGML_SORT_ORDER_ASC) {
+            return data[a] < data[b];
+        } else {
+            return data[a] > data[b];
+        }
+    }
+};
+
+static void ggml_compute_forward_argsort_f32(const ggml_compute_params * params, ggml_tensor * dst) {
+    const ggml_tensor * src0 = dst->src[0];
+
+    GGML_TENSOR_UNARY_OP_LOCALS
+
+    GGML_ASSERT(nb0 == sizeof(float));
+
+    const int ith = params->ith;
+    const int nth = params->nth;
+
+    const int64_t nr = ggml_nrows(src0);
+
+    ggml_sort_order order = (ggml_sort_order) ggml_get_op_params_i32(dst, 0);
+
+    for (int64_t i = ith; i < nr; i += nth) {
+        const float * src_data = (float *) ((char *) src0->data + i * nb01);
+
+        int32_t * dst_data = (int32_t *) ((char *) dst->data + i * nb1);
+
+        for (int64_t j = 0; j < ne0; j++) {
+            dst_data[j] = j;
+        }
+
+        switch (order) {
+            case GGML_SORT_ORDER_ASC:
+                std::sort(dst_data, dst_data + ne0, cmp_argsort<GGML_SORT_ORDER_ASC>{ src_data });
+                break;
+
+            case GGML_SORT_ORDER_DESC:
+                std::sort(dst_data, dst_data + ne0, cmp_argsort<GGML_SORT_ORDER_DESC>{ src_data });
+                break;
+
+            default:
+                GGML_ABORT("invalid sort order");
+        }
+    }
+}
+
+void ggml_compute_forward_argsort(const ggml_compute_params * params, ggml_tensor * dst) {
+    const ggml_tensor * src0 = dst->src[0];
+
+    switch (src0->type) {
+        case GGML_TYPE_F32:
+            {
+                ggml_compute_forward_argsort_f32(params, dst);
+            }
+            break;
+        default:
+            {
+                GGML_ABORT("fatal error");
+            }
+    }
+}
+
+// ggml_compute_forward_top_k
+
+struct cmp_top_k {
+    const float * data;
+
+    bool operator()(int32_t a, int32_t b) const { return data[a] > data[b]; }
+};
+
+static void ggml_compute_forward_top_k_f32(const ggml_compute_params * params, ggml_tensor * dst) {
+    const ggml_tensor * src0 = dst->src[0];
+
+    GGML_TENSOR_UNARY_OP_LOCALS
+
+    GGML_ASSERT(nb0 == sizeof(float));
+
+    const int ith = params->ith;
+    const int nth = params->nth;
+
+    const int64_t nr = ggml_nrows(src0);
+
+    const int top_k = ne0;
+
+    int32_t * tmp = (int32_t *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith;
+
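+    // per-thread scratch: ne00 candidate indices plus one cache line (CACHE_LINE_SIZE_F32) of padding
+    // so neighbouring threads do not false-share; the partial sort below only orders the first top_k
+    // entries, and the final swap makes explicit that callers must not rely on their order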
+ for (int64_t i = ith; i < nr; i += nth) { + const float * src_data = (float *) ((char *) src0->data + i * nb01); + + for (int64_t j = 0; j < ne00; j++) { + tmp[j] = j; + } + + std::partial_sort(tmp, tmp + top_k, tmp + ne00, cmp_top_k{ src_data }); + + int32_t * dst_data = (int32_t *) ((char *) dst->data + i * nb1); + + std::copy(tmp, tmp + top_k, dst_data); + + // emphasize that the order is not important + if (top_k > 1) { + std::swap(dst_data[0], dst_data[1]); + } + } +} + +void ggml_compute_forward_top_k(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_top_k_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_flash_attn_ext + +static void ggml_compute_forward_flash_attn_ext_f16_one_chunk(const ggml_compute_params * params, + ggml_tensor * dst, + int ir0, + int ir1) { + const ggml_tensor * q = dst->src[0]; + const ggml_tensor * k = dst->src[1]; + const ggml_tensor * v = dst->src[2]; + const ggml_tensor * mask = dst->src[3]; + const ggml_tensor * sinks = dst->src[4]; + + GGML_TENSOR_LOCALS(int64_t, neq, q, ne) + GGML_TENSOR_LOCALS(size_t, nbq, q, nb) + GGML_TENSOR_LOCALS(int64_t, nek, k, ne) + GGML_TENSOR_LOCALS(size_t, nbk, k, nb) + GGML_TENSOR_LOCALS(int64_t, nev, v, ne) + GGML_TENSOR_LOCALS(size_t, nbv, v, nb) + GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) + GGML_TENSOR_LOCALS(size_t, nb, dst, nb) + + const int64_t DK = nek0; + const int64_t DV = nev0; + const int64_t N = neq1; + + GGML_ASSERT(ne0 == DV); + GGML_ASSERT(ne2 == N); + + // input tensor rows must be contiguous + GGML_ASSERT(nbq0 == ggml_type_size(q->type)); + GGML_ASSERT(nbk0 == ggml_type_size(k->type)); + GGML_ASSERT(nbv0 == ggml_type_size(v->type)); + + GGML_ASSERT(neq0 == DK); + GGML_ASSERT(nek0 == DK); + GGML_ASSERT(nev0 == DV); + + GGML_ASSERT(neq1 == N); + + // dst cannot be transposed or permuted + GGML_ASSERT(nb0 == sizeof(float)); + GGML_ASSERT(nb0 <= nb1); + GGML_ASSERT(nb1 <= nb2); + GGML_ASSERT(nb2 <= nb3); + + // broadcast factors + const int64_t rk2 = neq2 / nek2; + const int64_t rk3 = neq3 / nek3; + + const int64_t rv2 = neq2 / nev2; + const int64_t rv3 = neq3 / nev3; + + // parallelize by q rows using ggml_vec_dot_f32 + + float scale = 1.0f; + float max_bias = 0.0f; + float logit_softcap = 0.0f; + + memcpy(&scale, (float *) dst->op_params + 0, sizeof(float)); + memcpy(&max_bias, (float *) dst->op_params + 1, sizeof(float)); + memcpy(&logit_softcap, (float *) dst->op_params + 2, sizeof(float)); + + if (logit_softcap != 0) { + scale /= logit_softcap; + } + + const uint32_t n_head = neq2; + const uint32_t n_head_log2 = 1u << (uint32_t) floor(log2(n_head)); + + const float m0 = powf(2.0f, -(max_bias) / n_head_log2); + const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2); + + const ggml_type k_vec_dot_type = ggml_get_type_traits_cpu(k->type)->vec_dot_type; + const ggml_from_float_t q_to_vec_dot = ggml_get_type_traits_cpu(k_vec_dot_type)->from_float; + const ggml_vec_dot_t kq_vec_dot = ggml_get_type_traits_cpu(k->type)->vec_dot; + const ggml_to_float_t v_to_float = ggml_get_type_traits(v->type)->to_float; + + GGML_ASSERT((q_to_vec_dot) && "fattn: unsupported K-type"); + GGML_ASSERT((v->type == GGML_TYPE_F32 || v_to_float) && "fattn: unsupported V-type"); + + int ith = params->ith; + + // loop over n_batch and n_head + for (int ir = ir0; ir < ir1; ++ir) { + // q indices + const int iq3 = ir / (neq2 * neq1); + const int iq2 = 
(ir - iq3 * neq2 * neq1) / neq1; + const int iq1 = (ir - iq3 * neq2 * neq1 - iq2 * neq1); + + const uint32_t h = iq2; // head index + const float slope = + (max_bias > 0.0f) ? h < n_head_log2 ? powf(m0, h + 1) : powf(m1, 2 * (h - n_head_log2) + 1) : 1.0f; + + float S = 0.0f; // sum + float M = -INFINITY; // maximum KQ value + + float * VKQ32 = + (float *) params->wdata + ith * (1 * DK + 2 * DV + CACHE_LINE_SIZE_F32); // FP32 VKQ accumulator + float * V32 = (VKQ32 + 1 * DV); // (temporary) FP32 V buffer + ggml_fp16_t * VKQ16 = (ggml_fp16_t *) (VKQ32 + 1 * DV); // (temporary) FP16 VKQ accumulator + ggml_fp16_t * Q_q = (ggml_fp16_t *) (VKQ32 + 2 * DV); // (temporary) buffer for Q converted to quantized/FP16 + + if (v->type == GGML_TYPE_F16) { + memset(VKQ16, 0, DV * sizeof(ggml_fp16_t)); + } else { + memset(VKQ32, 0, DV * sizeof(float)); + } + + const ggml_fp16_t * mp = + mask ? (ggml_fp16_t *) ((char *) mask->data + iq1 * mask->nb[1] + (iq2 % mask->ne[2]) * mask->nb[2] + + (iq3 % mask->ne[3]) * mask->nb[3]) : + NULL; + + // k indices + const int ik3 = iq3 / rk3; + const int ik2 = iq2 / rk2; + + // v indices + const int iv3 = iq3 / rv3; + const int iv2 = iq2 / rv2; + + const float * pq = (const float *) ((char *) q->data + (iq1 * nbq1 + iq2 * nbq2 + iq3 * nbq3)); + q_to_vec_dot(pq, Q_q, DK); + + // online softmax / attention + // loop over n_kv and n_head_kv + // ref: https://arxiv.org/pdf/2112.05682.pdf + for (int64_t ic = 0; ic < nek1; ++ic) { + const float mv = mp ? slope * GGML_CPU_FP16_TO_FP32(mp[ic]) : 0.0f; + if (mv == -INFINITY) { + continue; + } + + float s; // KQ value + + const char * k_data = (const char *) k->data + (ic * nbk1 + ik2 * nbk2 + ik3 * nbk3); + kq_vec_dot(DK, &s, 0, k_data, 0, Q_q, 0, 1); + + s = s * scale; // scale KQ value + + if (logit_softcap != 0.0f) { + s = logit_softcap * tanhf(s); + } + + s += mv; // apply mask + + const float Mold = M; + + float ms = 1.0f; // upon new higher max val, scale VKQ and KQ sum with this value + float vs = 1.0f; // post-softmax KQ value, expf(s - M) + + const char * v_data = ((const char *) v->data + (ic * nbv1 + iv2 * nbv2 + iv3 * nbv3)); + + if (v->type == GGML_TYPE_F16) { + if (s > M) { + // s is new maximum, ms < 1.0f, vs == expf(s - s) == 1.0f + M = s; + ms = expf(Mold - M); + + // V = V*expf(Mold - M) + ggml_vec_scale_f16(DV, VKQ16, ms); + } else { + // no new maximum, ms == 1.0f, vs != 1.0f + vs = expf(s - M); + } + + // V += v*expf(s - M) + ggml_vec_mad_f16(DV, VKQ16, (const ggml_fp16_t *) v_data, vs); + } else { + if (s > M) { + // s is new maximum, ms < 1.0f, vs == expf(s - s) == 1.0f + M = s; + ms = expf(Mold - M); + + // V = V*expf(Mold - M) + ggml_vec_scale_f32(DV, VKQ32, ms); + } else { + // no new maximum, ms == 1.0f, vs != 1.0f + vs = expf(s - M); + } + + // V += v*expf(s - M) + if (v_to_float) { + v_to_float(v_data, V32, DV); + ggml_vec_mad_f32(DV, VKQ32, V32, vs); + } else { + // V is F32 + ggml_vec_mad_f32(DV, VKQ32, (const float *) v_data, vs); + } + } + + S = S * ms + vs; // scale and increment sum with partial sum + } + + if (v->type == GGML_TYPE_F16) { + for (int64_t d = 0; d < DV; ++d) { + VKQ32[d] = GGML_CPU_FP16_TO_FP32(VKQ16[d]); + } + } + + // sinks + if (sinks) { + const float s = ((float *) ((char *) sinks->data))[h]; + + float ms = 1.0f; + float vs = 1.0f; + + if (s > M) { + ms = expf(M - s); + ggml_vec_scale_f32(DV, VKQ32, ms); + } else { + vs = expf(s - M); + } + + S = S * ms + vs; + } + + // V /= S + const float S_inv = S == 0.0f ? 
0.0f : 1.0f / S; + ggml_vec_scale_f32(DV, VKQ32, S_inv); + + // dst indices + const int i1 = iq1; + const int i2 = iq2; + const int i3 = iq3; + + // original + //memcpy((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3), V, nev0*sizeof(float)); + + // permute(0, 2, 1, 3) + memcpy((char *) dst->data + (i3 * ne2 * ne1 + i2 + i1 * ne1) * nb1, VKQ32, nb1); + } +} + +static void ggml_compute_forward_flash_attn_ext_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * q = dst->src[0]; + const ggml_tensor * k = dst->src[1]; + const ggml_tensor * v = dst->src[2]; + + GGML_TENSOR_LOCALS(int64_t, neq, q, ne) + GGML_TENSOR_LOCALS(size_t, nbq, q, nb) + GGML_TENSOR_LOCALS(int64_t, nek, k, ne) + GGML_TENSOR_LOCALS(size_t, nbk, k, nb) + GGML_TENSOR_LOCALS(int64_t, nev, v, ne) + GGML_TENSOR_LOCALS(size_t, nbv, v, nb) + GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) + GGML_TENSOR_LOCALS(size_t, nb, dst, nb) + + const int64_t DK = nek0; + const int64_t DV = nev0; + const int64_t N = neq1; + + GGML_ASSERT(ne0 == DV); + GGML_ASSERT(ne2 == N); + + // input tensor rows must be contiguous + GGML_ASSERT(nbq0 == ggml_type_size(q->type)); + GGML_ASSERT(nbk0 == ggml_type_size(k->type)); + GGML_ASSERT(nbv0 == ggml_type_size(v->type)); + + GGML_ASSERT(neq0 == DK); + GGML_ASSERT(nek0 == DK); + GGML_ASSERT(nev0 == DV); + + GGML_ASSERT(neq1 == N); + + // dst cannot be transposed or permuted + GGML_ASSERT(nb0 == sizeof(float)); + GGML_ASSERT(nb0 <= nb1); + GGML_ASSERT(nb1 <= nb2); + GGML_ASSERT(nb2 <= nb3); + + // parallelize by q rows using ggml_vec_dot_f32 + + // total rows in q + const int64_t nr = neq1 * neq2 * neq3; + + // rows per thread + const int ith = params->ith; + const int nth = params->nth; + + // disable for NUMA + const bool disable_chunking = ggml_is_numa(); + + // 4x chunks per thread + int nth_scaled = nth * 4; + int64_t chunk_size = (nr + nth_scaled - 1) / nth_scaled; + int64_t nchunk = (nr + chunk_size - 1) / chunk_size; + + if (nth == 1 || nchunk < nth || disable_chunking) { + nchunk = nth; + } + + if (ith == 0) { + // Every thread starts at ith, so the first unprocessed chunk is nth. This save a bit of coordination right at the start. + ggml_threadpool_chunk_set(params->threadpool, nth); + } + + ggml_barrier(params->threadpool); + + // The number of elements in each chunk + const int64_t dr = (nr + nchunk - 1) / nchunk; + + // The first chunk comes from our thread_id, the rest will get auto-assigned. 
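+    // work-stealing loop: each thread processes its own chunk first, then atomically fetches the
+    // index of the next unprocessed chunk via ggml_threadpool_chunk_add until all nchunk chunks are done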
+ int current_chunk = ith; + + while (current_chunk < nchunk) { + const int64_t ir0 = dr * current_chunk; + const int64_t ir1 = MIN(ir0 + dr, nr); + + ggml_compute_forward_flash_attn_ext_f16_one_chunk(params, dst, ir0, ir1); + + current_chunk = ggml_threadpool_chunk_add(params->threadpool, 1); + } +} + +void ggml_compute_forward_flash_attn_ext(const ggml_compute_params * params, ggml_tensor * dst) { + switch (dst->op_params[3]) { + case GGML_PREC_DEFAULT: + case GGML_PREC_F32: + { + // uses F32 accumulators + ggml_compute_forward_flash_attn_ext_f16(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_flash_attn_back + +static void ggml_compute_forward_flash_attn_back_f32(const ggml_compute_params * params, + const bool masked, + ggml_tensor * dst) { + const ggml_tensor * q = dst->src[0]; + const ggml_tensor * k = dst->src[1]; + const ggml_tensor * v = dst->src[2]; + const ggml_tensor * d = dst->src[3]; + + GGML_TENSOR_LOCALS(int64_t, neq, q, ne) + GGML_TENSOR_LOCALS(size_t, nbq, q, nb) + GGML_TENSOR_LOCALS(int64_t, nek, k, ne) + GGML_TENSOR_LOCALS(size_t, nbk, k, nb) + GGML_TENSOR_LOCALS(int64_t, nev, v, ne) + GGML_TENSOR_LOCALS(size_t, nbv, v, nb) + GGML_TENSOR_LOCALS(int64_t, ned, d, ne) + GGML_TENSOR_LOCALS(size_t, nbd, d, nb) + GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) + GGML_TENSOR_LOCALS(size_t, nb, dst, nb) + + const int ith = params->ith; + const int nth = params->nth; + + const int64_t D = neq0; + const int64_t N = neq1; + const int64_t P = nek1 - N; + const int64_t M = P + N; + + const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL); + const int mxDM = MAX(D, Mup); + + // GGML_ASSERT(ne0 == D); + // GGML_ASSERT(ne1 == N); + GGML_ASSERT(P >= 0); + + GGML_ASSERT(nbq0 == sizeof(float)); + GGML_ASSERT(nbk0 == sizeof(float)); + GGML_ASSERT(nbv0 == sizeof(float)); + + GGML_ASSERT(neq0 == D); + GGML_ASSERT(nek0 == D); + GGML_ASSERT(nev1 == D); + GGML_ASSERT(ned0 == D); + + GGML_ASSERT(neq1 == N); + GGML_ASSERT(nek1 == N + P); + GGML_ASSERT(nev1 == D); + GGML_ASSERT(ned1 == N); + + // dst cannot be transposed or permuted + GGML_ASSERT(nb0 == sizeof(float)); + GGML_ASSERT(nb0 <= nb1); + GGML_ASSERT(nb1 <= nb2); + GGML_ASSERT(nb2 <= nb3); + + if (ith == 0) { + memset(dst->data, 0, nb0 * ne0 * ne1 * ne2 * ne3); + } + ggml_barrier(params->threadpool); + + const int64_t elem_q = ggml_nelements(q); + const int64_t elem_k = ggml_nelements(k); + + ggml_type result_type = dst->type; + GGML_ASSERT(ggml_blck_size(result_type) == 1); + const size_t tsize = ggml_type_size(result_type); + + const size_t offs_q = 0; + const size_t offs_k = offs_q + GGML_PAD(elem_q * tsize, GGML_MEM_ALIGN); + const size_t offs_v = offs_k + GGML_PAD(elem_k * tsize, GGML_MEM_ALIGN); + + void * grad_q = (char *) dst->data; + void * grad_k = (char *) dst->data + offs_k; + void * grad_v = (char *) dst->data + offs_v; + + const size_t nbgq1 = nb0 * neq0; + const size_t nbgq2 = nb0 * neq0 * neq1; + const size_t nbgq3 = nb0 * neq0 * neq1 * neq2; + + const size_t nbgk1 = nb0 * nek0; + const size_t nbgk2 = nb0 * nek0 * nek1; + const size_t nbgk3 = nb0 * nek0 * nek1 * neq2; + + const size_t nbgv1 = nb0 * nev0; + const size_t nbgv2 = nb0 * nev0 * nev1; + const size_t nbgv3 = nb0 * nev0 * nev1 * neq2; + + // parallelize by k rows using ggml_vec_dot_f32 + + // total rows in k + const int nr = nek2 * nek3; + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + const float scale 
= 1.0f / sqrtf(D); + + //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale); + + // how often k2 (and v2) is repeated in q2 + int nrep = neq2 / nek2; + + for (int ir = ir0; ir < ir1; ++ir) { + // q indices + const int ik3 = ir / (nek2); + const int ik2 = ir - ik3 * nek2; + + const int iq3 = ik3; + const int id3 = ik3; + const int iv3 = ik3; + const int iv2 = ik2; + + for (int irep = 0; irep < nrep; ++irep) { + const int iq2 = ik2 + irep * nek2; + const int id2 = iq2; + + // (ik2 + irep*nek2) % nek2 == ik2 + for (int iq1 = 0; iq1 < neq1; ++iq1) { + const int id1 = iq1; + + // not sure about CACHE_LINE_SIZE_F32.. + // - maybe it must not be multiplied by 2 and excluded from .. in SM 1*(..) offset? + float * S = + (float *) params->wdata + ith * 2 * (mxDM + CACHE_LINE_SIZE_F32) + 0 * (mxDM + CACHE_LINE_SIZE_F32); + float * SM = + (float *) params->wdata + ith * 2 * (mxDM + CACHE_LINE_SIZE_F32) + 1 * (mxDM + CACHE_LINE_SIZE_F32); + + for (int i = M; i < Mup; ++i) { + S[i] = -INFINITY; + } + + const int64_t masked_begin = masked ? (P + iq1 + 1) : M; + for (int64_t ic = 0; ic < masked_begin; ++ic) { + // k indices + const int ik1 = ic; + + // S indices + const int i1 = ik1; + + ggml_vec_dot_f32(neq0, S + i1, 0, + (float *) ((char *) k->data + (ik1 * nbk1 + ik2 * nbk2 + ik3 * nbk3)), 0, + (float *) ((char *) q->data + (iq1 * nbq1 + iq2 * nbq2 + iq3 * nbq3)), 0, 1); + } + + // scale + ggml_vec_scale_f32(masked_begin, S, scale); + + for (int64_t i = masked_begin; i < M; i++) { + S[i] = -INFINITY; + } + + // softmax + // exclude known -INF S[..] values from max and loop + // dont forget to set their SM values to zero + { + float max = -INFINITY; + ggml_vec_max_f32(masked_begin, &max, S); + + ggml_float sum = 0.0; + { +#ifdef GGML_SOFT_MAX_ACCELERATE + max = -max; + vDSP_vsadd(SM, 1, &max, SM, 1, Mup); + vvexpf(SM, SM, &Mup); + ggml_vec_sum_f32(Mup, &sum, SM); +#else + sum = ggml_vec_soft_max_f32(Mup, SM, S, max); +#endif + } + + assert(sum > 0.0); + + sum = 1.0 / sum; + ggml_vec_scale_f32(masked_begin, SM, sum); + } + + // step-by-step explanation + { + // forward-process shape grads from backward process + // parallel_for ik2,ik3: + // for irep: + // iq2 = ik2 + irep*nek2 + // k[:D,:M,:,:] [D,M,:,:] grad[k][:D,:M,ik2,ik3] += grad[kcur] + // q[:D,:N,:,:] [D,N,:,:] grad[q][:D,iq1,iq2,iq3] += grad[qcur] + // v[:M,:D,:,:] [M,D,:,:] grad[v][:M,:D,iv2,iv3] += grad[vcur] + // for iq1: + // kcur = k[:D,:M,ik2,ik3] [D,M,1,1] grad[kcur] = grad[S1].T @ qcur + // qcur = q[:D,iq1,iq2,iq3] [D,1,1,1] grad[qcur] = grad[S1] @ kcur + // vcur = v[:M,:D,iv2,iv3] [M,D,1,1] grad[vcur] = grad[S5].T @ S4 + // S0 = -Inf [D,1,1,1] + // ~S1[i] = dot(kcur[:D,i], qcur) + // S1 = qcur @ kcur.T [M,1,1,1] grad[S1] = grad[S2] * scale + // S2 = S1 * scale [M,1,1,1] grad[S2] = diag_mask_zero(grad[S3], P) + // S3 = diag_mask_inf(S2, P) [M,1,1,1] grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4])) + // S4 = softmax(S3) [M,1,1,1] grad[S4] = grad[S5] @ vcur + // ~S5[i] = dot(vcur[:,i], S4) + // S5 = S4 @ vcur.T [D,1,1,1] grad[S5] = d[:D,id1,id2,id3] + // ~dst[i,iq1,iq2,iq3] = S5[i] ^ + // dst[:D,iq1,iq2,iq3] = S5 | grad[dst[:D,iq1,iq2,iq3]] = d[:D,id1,id2,id3] + // dst backward-/ grad[dst] = d + // + // output gradients with their dependencies: + // + // grad[kcur] = grad[S1].T @ qcur + // grad[S1] = diag_mask_zero(grad[S3], P) * scale + // grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4])) + // grad[S4] = grad[S5] @ vcur + // grad[S4] = d[:D,id1,id2,id3] @ vcur + // grad[qcur] = grad[S1] @ kcur + // grad[vcur] = 
grad[S5].T @ S4 + // grad[vcur] = d[:D,id1,id2,id3].T @ S4 + // + // in post-order: + // + // S1 = qcur @ kcur.T + // S2 = S1 * scale + // S3 = diag_mask_inf(S2, P) + // S4 = softmax(S3) + // grad[S4] = d[:D,id1,id2,id3] @ vcur + // grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4])) + // grad[S1] = diag_mask_zero(grad[S3], P) * scale + // grad[qcur] = grad[S1] @ kcur + // grad[kcur] = grad[S1].T @ qcur + // grad[vcur] = d[:D,id1,id2,id3].T @ S4 + // + // using less variables (SM=S4): + // + // S = diag_mask_inf(qcur @ kcur.T * scale, P) + // SM = softmax(S) + // S = d[:D,iq1,iq2,iq3] @ vcur + // dot_SM_gradSM = dot(SM, S) + // S = SM * (S - dot(SM, S)) + // S = diag_mask_zero(S, P) * scale + // + // grad[q][:D,iq1,iq2,iq3] += S @ kcur + // grad[k][:D,:M,ik2,ik3] += S.T @ qcur + // grad[v][:M,:D,iv2,iv3] += d[:D,id1,id2,id3].T @ SM + } + + // S = gradSM = d[:D,id1,id2,id3] @ vcur[:,:,iv2,iv3] + // S = d[:D,id1,id2,id3] @ vcur[:,:,iv2,iv3] + // for ic: + // S[:M] += vcur[:M,ic,iv2,iv3] * d[ic,id1,id2,id3] + // exclude known future zero S[..] values from operation + ggml_vec_set_f32(masked_begin, S, 0); + for (int64_t ic = 0; ic < D; ++ic) { + ggml_vec_mad_f32( + masked_begin, S, (float *) ((char *) v->data + (ic * nbv1 + iv2 * nbv2 + iv3 * nbv3)), + *(float *) ((char *) d->data + (ic * nbd0 + id1 * nbd1 + id2 * nbd2 + id3 * nbd3))); + } + + // S = SM * (S - dot(SM, S)) + float dot_SM_gradSM = 0; + ggml_vec_dot_f32(masked_begin, &dot_SM_gradSM, 0, SM, 0, S, 0, 1); + ggml_vec_acc1_f32(M, S, -dot_SM_gradSM); + ggml_vec_mul_f32(masked_begin, S, S, SM); + + // S = diag_mask_zero(S, P) * scale + // already done by above ggml_vec_set_f32 + + // exclude known zero S[..] values from operation + ggml_vec_scale_f32(masked_begin, S, scale); + + // S shape [M,1] + // SM shape [M,1] + // kcur shape [D,M] + // qcur shape [D,1] + // vcur shape [M,D] + + // grad[q][:D,iq1,iq2,iq3] += S @ kcur + // grad[q][:D,iq1,iq2,iq3] += shape[M,1] @ shape[D,M] + // for ic: + // grad[q][:D,iq1,iq2,iq3] += S[ic] * kcur[:D,ic,ik2,ik3] + // exclude known zero S[..] values from loop + for (int64_t ic = 0; ic < masked_begin; ++ic) { + ggml_vec_mad_f32(D, (float *) ((char *) grad_q + (iq1 * nbgq1 + iq2 * nbgq2 + iq3 * nbgq3)), + (float *) ((char *) k->data + (ic * nbk1 + ik2 * nbk2 + ik3 * nbk3)), S[ic]); + } + + // grad[k][:D,:M,iq2,iq3] += S.T @ qcur + // for ic: + // grad[k][:D,ic,iq2,iq3] += S.T[0,ic] * qcur[:D,0] + // grad[k][:D,ic,iq2,iq3] += S[ic] * qcur[:D,0] + // exclude known zero S[..] values from loop + for (int64_t ic = 0; ic < masked_begin; ++ic) { + ggml_vec_mad_f32(D, (float *) ((char *) grad_k + (ic * nbgk1 + ik2 * nbgk2 + ik3 * nbgk3)), + (float *) ((char *) q->data + (iq1 * nbq1 + iq2 * nbq2 + iq3 * nbq3)), S[ic]); + } + + // grad[v][:M,:D,iv2,iv3] += d[:D,id1,id2,id3].T @ SM + // for ic: + // grad[v][:M,ic,iv2,iv3] += d[:D,id1,id2,id3].T[0,ic] * SM[:M] + // grad[v][:M,ic,iv2,iv3] += d[ic,id1,id2,id3] * SM[:M] + // exclude known zero SM[..] 
values from mad + for (int64_t ic = 0; ic < D; ++ic) { + ggml_vec_mad_f32( + masked_begin, (float *) ((char *) grad_v + (ic * nbgv1 + iv2 * nbgv2 + iv3 * nbgv3)), SM, + *(float *) ((char *) d->data + (ic * nbd0 + id1 * nbd1 + id2 * nbd2 + id3 * nbd3))); + } + } + } + } +} + +void ggml_compute_forward_flash_attn_back(const ggml_compute_params * params, const bool masked, ggml_tensor * dst) { + const ggml_tensor * q = dst->src[0]; + + switch (q->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_flash_attn_back_f32(params, masked, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_ssm_conv + +static void ggml_compute_forward_ssm_conv_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; // conv_x + const ggml_tensor * src1 = dst->src[1]; // conv1d.weight + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1->ne[0]; // d_conv + const int ncs = src0->ne[0]; // d_conv - 1 + n_t + const int nr = src0->ne[1]; // d_inner + const int n_t = dst->ne[1]; // tokens per sequence + const int n_s = dst->ne[2]; // number of sequences in the batch + + GGML_ASSERT(dst->ne[0] == nr); + GGML_ASSERT(src0->nb[0] == sizeof(float)); + GGML_ASSERT(src1->nb[0] == sizeof(float)); + GGML_ASSERT(src0->nb[1] == src0->ne[0] * sizeof(float)); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + const int ir = ir1 - ir0; + + for (int i3 = 0; i3 < n_s; ++i3) { + for (int i2 = 0; i2 < n_t; ++i2) { + // {d_conv - 1 + n_t, d_inner, n_seqs} + // sliding window + const float * s = (const float *) ((const char *) src0->data + ir0 * (src0->nb[1]) + i2 * (src0->nb[0]) + + i3 * (src0->nb[2])); // {d_conv, d_inner, n_s} + const float * c = (const float *) ((const char *) src1->data + ir0 * (src1->nb[1])); // {d_conv, d_inner} + float * x = (float *) ((char *) dst->data + ir0 * (dst->nb[0]) + i2 * (dst->nb[1]) + + i3 * (dst->nb[2])); // {d_inner, n_t, n_s} + + // TODO: transpose the output for smaller strides for big batches? 
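+            // Each output element is a dot product over a length-nc (d_conv) window of conv_x
+            // with that row's filter; the window for token i2 starts i2 floats into the row,
+            // so consecutive tokens reuse overlapping input (the sliding window noted above).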
+ // d_inner + for (int i1 = 0; i1 < ir; ++i1) { + // rowwise dot product + // NOTE: not using ggml_vec_dot_f32, because its sum is in double precision + float sumf = 0.0f; + + // d_conv + for (int i0 = 0; i0 < nc; ++i0) { + sumf += s[i0 + i1 * ncs] * c[i0 + i1 * nc]; + } + x[i1] = sumf; + } + } + } +} + +void ggml_compute_forward_ssm_conv(const ggml_compute_params * params, ggml_tensor * dst) { + switch (dst->src[0]->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_ssm_conv_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_ssm_scan + +static void ggml_compute_forward_ssm_scan_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; // s {d_state, dim, n_head, n_seqs+} + const ggml_tensor * src1 = dst->src[1]; // x {dim, n_head, n_seq_tokens, n_seqs} + const ggml_tensor * src2 = dst->src[2]; // dt {n_head, n_seq_tokens, n_seqs} + const ggml_tensor * src3 = dst->src[3]; // A {d_state, n_head} or {1, n_head} + const ggml_tensor * src4 = dst->src[4]; // B {d_state, n_group, n_seq_tokens, n_seqs} + const ggml_tensor * src5 = dst->src[5]; // C {d_state, n_group, n_seq_tokens, n_seqs} + const ggml_tensor * src6 = dst->src[6]; // ids {n_seqs} + + const int ith = params->ith; + const int nth = params->nth; + + const int64_t nc = src0->ne[0]; // d_state + const int64_t nr = src0->ne[1]; // dim + const int64_t nh = src1->ne[1]; // n_head + const int64_t ng = src4->ne[1]; + const int64_t nt = src1->ne[2]; // number of tokens per sequence + const int64_t ns = src1->ne[3]; // number of sequences in the batch + + // can't use ggml_nbytes because src1 is not necessarily contiguous + const int64_t s_off = ggml_nelements(src1) * ggml_element_size(src1); + + GGML_ASSERT(ggml_nelements(src1) + nc * nr * nh * ns == ggml_nelements(dst)); + GGML_ASSERT(src0->nb[0] == sizeof(float)); + GGML_ASSERT(src1->nb[0] == sizeof(float)); + GGML_ASSERT(src2->nb[0] == sizeof(float)); + GGML_ASSERT(src3->nb[0] == sizeof(float)); + GGML_ASSERT(src4->nb[0] == sizeof(float)); + GGML_ASSERT(src5->nb[0] == sizeof(float)); + GGML_ASSERT(src6->nb[0] == sizeof(int32_t)); + GGML_ASSERT(nh % ng == 0); + + // heads per thread + const int dh = (nh + nth - 1) / nth; + + // head range for this thread + const int ih0 = dh * ith; + const int ih1 = MIN(ih0 + dh, nh); + + const int32_t * ids = (const int32_t *) src6->data; + + for (int i3 = 0; i3 < ns; ++i3) { + const float * s0 = + (const float *) ((const char *) src0->data + ids[i3] * (src0->nb[3])); // {d_state, dim, nh, ns} + float * s = (float *) ((char *) dst->data + i3 * (src0->nb[3]) + s_off); // {d_state, dim, nh, ns} + + for (int i2 = 0; i2 < nt; ++i2) { + const float * x = (const float *) ((const char *) src1->data + i2 * (src1->nb[2]) + + i3 * (src1->nb[3])); // {dim, nh, nt, ns} + const float * dt = + (const float *) ((const char *) src2->data + i2 * (src2->nb[1]) + i3 * (src2->nb[2])); // {nh, nt, ns} + const float * A = (const float *) ((const char *) src3->data); // {d_state, nh} or {1, nh} + const float * B = (const float *) ((const char *) src4->data + i2 * (src4->nb[2]) + + i3 * (src4->nb[3])); // {d_state, ng, nt, ns} + const float * C = (const float *) ((const char *) src5->data + i2 * (src5->nb[2]) + + i3 * (src5->nb[3])); // {d_state, ng, nt, ns} + float * y = (float *) ((char *) dst->data + i2 * (nh * nr * sizeof(float)) + + i3 * (nt * nh * nr * sizeof(float))); // {dim, nh, nt, ns} + + if (src3->ne[0] == 1) { + // Mamba-2 has a scalar decay factor per 
head; dA can be outside the state-wise loop + + // n_head + for (int h = ih0; h < ih1; ++h) { + // ref: https://github.com/state-spaces/mamba/blob/62db608da60f6fc790b8ed9f4b3225e95ca15fde/mamba_ssm/ops/triton/softplus.py#L16 + const float dt_soft_plus = ggml_compute_softplus_f32(dt[h]); + const float dA = expf(dt_soft_plus * A[h]); + const int g = h / (nh / ng); // repeat_interleave + + // dim + for (int i1 = 0; i1 < nr; ++i1) { + const int ii = i1 + h * nr; + const float x_dt = x[ii] * dt_soft_plus; + float sumf = 0.0f; +#if defined(GGML_SIMD) +# if defined(__ARM_FEATURE_SVE) + const int ggml_f32_epr = svcntw(); + const int ggml_f32_step = 1 * ggml_f32_epr; + + const int np = (nc & ~(ggml_f32_step - 1)); + + GGML_F32_VEC sum = GGML_F32_VEC_ZERO; + + GGML_F32_VEC adA = GGML_F32_VEC_SET1(dA); + GGML_F32_VEC axdt = GGML_F32_VEC_SET1(x_dt); + + for (int i = 0; i < np; i += ggml_f32_step) { + // TODO: maybe unroll more? + for (int j = 0; j < 1; j++) { + GGML_F32_VEC t0 = GGML_F32_VEC_LOAD(s0 + i + j * ggml_f32_epr + ii * nc); + GGML_F32_VEC t1 = GGML_F32_VEC_LOAD(B + i + j * ggml_f32_epr + g * nc); + GGML_F32_VEC t2 = GGML_F32_VEC_LOAD(C + i + j * ggml_f32_epr + g * nc); + + t0 = GGML_F32_VEC_MUL(t0, adA); + t1 = GGML_F32_VEC_MUL(t1, axdt); + + t0 = GGML_F32_VEC_ADD(t0, t1); + + sum = GGML_F32_VEC_FMA(sum, t0, t2); + + GGML_F32_VEC_STORE(s + i + j * ggml_f32_epr + ii * nc, t0); + } + } + + sumf = GGML_F32xt_REDUCE_ONE(sum); +# elif defined(__riscv_v_intrinsic) + // todo: RVV implementation + const int np = 0; +# else + const int np = (nc & ~(GGML_F32_STEP - 1)); + + GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; + + GGML_F32_VEC adA = GGML_F32_VEC_SET1(dA); + GGML_F32_VEC axdt = GGML_F32_VEC_SET1(x_dt); + + GGML_F32_VEC ax[GGML_F32_ARR]; + GGML_F32_VEC ay[GGML_F32_ARR]; + GGML_F32_VEC az[GGML_F32_ARR]; + + for (int i = 0; i < np; i += GGML_F32_STEP) { + for (int j = 0; j < GGML_F32_ARR; j++) { + ax[j] = GGML_F32_VEC_LOAD(s0 + i + j * GGML_F32_EPR + ii * nc); + ay[j] = GGML_F32_VEC_LOAD(B + i + j * GGML_F32_EPR + g * nc); + az[j] = GGML_F32_VEC_LOAD(C + i + j * GGML_F32_EPR + g * nc); + + ax[j] = GGML_F32_VEC_MUL(ax[j], adA); + ay[j] = GGML_F32_VEC_MUL(ay[j], axdt); + + ax[j] = GGML_F32_VEC_ADD(ax[j], ay[j]); + + sum[j] = GGML_F32_VEC_FMA(sum[j], ax[j], az[j]); + + GGML_F32_VEC_STORE(s + i + j * GGML_F32_EPR + ii * nc, ax[j]); + } + } + + // reduce sum0..sum3 to sum0 + GGML_F32_VEC_REDUCE(sumf, sum); +# endif +#else + const int np = 0; +#endif + // d_state + for (int i0 = np; i0 < nc; ++i0) { + const int i = i0 + ii * nc; + const int ig = i0 + g * nc; + // state = prev_state * dA + dB * x + const float state = (s0[i] * dA) + (B[ig] * x_dt); + // y = rowwise_dotprod(state, C) + sumf += state * C[ig]; + s[i] = state; + } + y[ii] = sumf; + } + } + } else { + // Mamba-1 has an element-wise decay factor for the states + + // n_head + for (int h = ih0; h < ih1; ++h) { + // ref: https://github.com/state-spaces/mamba/blob/62db608da60f6fc790b8ed9f4b3225e95ca15fde/mamba_ssm/ops/triton/softplus.py#L16 + const float dt_soft_plus = ggml_compute_softplus_f32(dt[h]); + const int g = h / (nh / ng); // repeat_interleave + + // dim + for (int i1 = 0; i1 < nr; ++i1) { + const int ii = i1 + h * nr; + const float x_dt = x[ii] * dt_soft_plus; +#if defined(__ARM_FEATURE_SVE) + svfloat32_t vx_dt = GGML_F32_VEC_SET1(x_dt); + svfloat32_t vdt_soft_plus = GGML_F32_VEC_SET1(dt_soft_plus); + svfloat32_t r1_vector = GGML_F32_VEC_ZERO; + + // d_state + // TODO: what happens when (d_state % svcntw()) != 0? 
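+                        // Assumption: d_state (nc) is a multiple of the SVE vector length here;
+                        // the full-width (svptrue) loads/stores below would run past the row
+                        // otherwise, so a predicated tail loop would be needed in that case.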
+ for (int64_t k = 0; k < nc; k += svcntw()) { + svfloat32_t vA = GGML_F32_VEC_LOAD(&A[h * nc + k]); + svfloat32_t vB = GGML_F32_VEC_LOAD(&B[k + g * nc]); + svfloat32_t vC = GGML_F32_VEC_LOAD(&C[k + g * nc]); + svfloat32_t vs0 = GGML_F32_VEC_LOAD(&s0[ii * nc + k]); + + svfloat32_t t1 = GGML_F32_VEC_MUL(vdt_soft_plus, vA); + t1 = exp_ps_sve(svptrue_b32(), t1); + svfloat32_t t2 = GGML_F32_VEC_MUL(vx_dt, vB); + + vs0 = GGML_F32_VEC_FMA(t2, vs0, t1); + r1_vector = GGML_F32_VEC_ADD(GGML_F32_VEC_MUL(vs0, vC), r1_vector); + + GGML_F32_VEC_STORE(&s[ii * nc + k], vs0); + } + y[ii] = GGML_F32xt_REDUCE_ONE(r1_vector); +#else + float sumf = 0.0f; + // NOTE: can't really use GGML_SIMD here because d_state is usually 16 + // and also because expf is used within the loop. + // d_state + for (int i0 = 0; i0 < nc; ++i0) { + const int i = i0 + ii * nc; + const int ig = i0 + g * nc; + // state = prev_state * dA + dB * x + const float state = (s0[i] * expf(dt_soft_plus * A[i0 + h * nc])) + (B[ig] * x_dt); + // y = rowwise_dotprod(state, C) + sumf += state * C[ig]; + s[i] = state; + } + y[ii] = sumf; +#endif + } + } + } + // use the output as the source when it's not the first token-wise iteration + s0 = s; + } + } +} + +void ggml_compute_forward_ssm_scan(const ggml_compute_params * params, ggml_tensor * dst) { + switch (dst->src[0]->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_ssm_scan_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_win_part + +static void ggml_compute_forward_win_part_f32(const ggml_compute_params * params, ggml_tensor * dst) { + GGML_UNUSED(params); + + const ggml_tensor * src0 = dst->src[0]; + + GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) + GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) + + const int32_t nep0 = ((const int32_t *) (dst->op_params))[0]; + const int32_t nep1 = ((const int32_t *) (dst->op_params))[1]; + const int32_t w = ((const int32_t *) (dst->op_params))[2]; + + assert(ne00 == ne0); + assert(ne3 == nep0 * nep1); + + // TODO: optimize / multi-thread + for (int py = 0; py < nep1; ++py) { + for (int px = 0; px < nep0; ++px) { + const int64_t i3 = py * nep0 + px; + for (int64_t i2 = 0; i2 < ne2; ++i2) { + for (int64_t i1 = 0; i1 < ne1; ++i1) { + for (int64_t i0 = 0; i0 < ne0; ++i0) { + const int64_t i02 = py * w + i2; + const int64_t i01 = px * w + i1; + const int64_t i00 = i0; + + const int64_t i = i3 * ne2 * ne1 * ne0 + i2 * ne1 * ne0 + i1 * ne0 + i0; + const int64_t j = i02 * ne01 * ne00 + i01 * ne00 + i00; + + if (py * w + i2 >= ne02 || px * w + i1 >= ne01) { + ((float *) dst->data)[i] = 0.0f; + } else { + ((float *) dst->data)[i] = ((float *) src0->data)[j]; + } + } + } + } + } + } +} + +void ggml_compute_forward_win_part(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_win_part_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_win_unpart + +static void ggml_compute_forward_win_unpart_f32(const ggml_compute_params * params, ggml_tensor * dst) { + GGML_UNUSED(params); + + const ggml_tensor * src0 = dst->src[0]; + + GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) + GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) + + const int32_t w = ((const int32_t *) (dst->op_params))[0]; + + // padding + const int px = (w - ne1 % w) % w; + //const int py = (w - ne2%w)%w; + + const int npx = (px + ne1) / w; + //const int npy = (py + ne2)/w; + + 
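+    // Reverse of win_part: each dst element (i0, i1, i2) is read back from window
+    // (i1 / w, i2 / w) of the partitioned src0, at in-window offset (i1 % w, i2 % w);
+    // the zero padding added by win_part is never referenced here.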
+    assert(ne0 == ne00);
+
+    // TODO: optimize / multi-thread
+    for (int64_t i2 = 0; i2 < ne2; ++i2) {
+        for (int64_t i1 = 0; i1 < ne1; ++i1) {
+            for (int64_t i0 = 0; i0 < ne0; ++i0) {
+                const int ip2 = i2 / w;
+                const int ip1 = i1 / w;
+
+                const int64_t i02 = i2 % w;
+                const int64_t i01 = i1 % w;
+                const int64_t i00 = i0;
+
+                const int64_t i = (ip2 * npx + ip1) * ne02 * ne01 * ne00 + i02 * ne01 * ne00 + i01 * ne00 + i00;
+                const int64_t j = i2 * ne1 * ne0 + i1 * ne0 + i0;
+
+                ((float *) dst->data)[j] = ((float *) src0->data)[i];
+            }
+        }
+    }
+}
+
+void ggml_compute_forward_win_unpart(const ggml_compute_params * params, ggml_tensor * dst) {
+    const ggml_tensor * src0 = dst->src[0];
+
+    switch (src0->type) {
+        case GGML_TYPE_F32:
+            {
+                ggml_compute_forward_win_unpart_f32(params, dst);
+            }
+            break;
+        default:
+            {
+                GGML_ABORT("fatal error");
+            }
+    }
+}
+
+// ggml_compute_forward_unary
+
+void ggml_compute_forward_unary(const ggml_compute_params * params, ggml_tensor * dst) {
+    const ggml_unary_op op = ggml_get_unary_op(dst);
+
+    switch (op) {
+        case GGML_UNARY_OP_ABS:
+            {
+                ggml_compute_forward_abs(params, dst);
+            }
+            break;
+        case GGML_UNARY_OP_SGN:
+            {
+                ggml_compute_forward_sgn(params, dst);
+            }
+            break;
+        case GGML_UNARY_OP_NEG:
+            {
+                ggml_compute_forward_neg(params, dst);
+            }
+            break;
+        case GGML_UNARY_OP_STEP:
+            {
+                ggml_compute_forward_step(params, dst);
+            }
+            break;
+        case GGML_UNARY_OP_TANH:
+            {
+                ggml_compute_forward_tanh(params, dst);
+            }
+            break;
+        case GGML_UNARY_OP_ELU:
+            {
+                ggml_compute_forward_elu(params, dst);
+            }
+            break;
+        case GGML_UNARY_OP_RELU:
+            {
+                ggml_compute_forward_relu(params, dst);
+            }
+            break;
+        case GGML_UNARY_OP_SIGMOID:
+            {
+                ggml_compute_forward_sigmoid(params, dst);
+            }
+            break;
+        case GGML_UNARY_OP_GELU:
+            {
+                ggml_compute_forward_gelu(params, dst);
+            }
+            break;
+        case GGML_UNARY_OP_GELU_ERF:
+            {
+                ggml_compute_forward_gelu_erf(params, dst);
+            }
+            break;
+        case GGML_UNARY_OP_GELU_QUICK:
+            {
+                ggml_compute_forward_gelu_quick(params, dst);
+            }
+            break;
+        case GGML_UNARY_OP_SILU:
+            {
+                ggml_compute_forward_silu(params, dst);
+            }
+            break;
+        case GGML_UNARY_OP_HARDSWISH:
+            {
+                ggml_compute_forward_hardswish(params, dst);
+            }
+            break;
+        case GGML_UNARY_OP_HARDSIGMOID:
+            {
+                ggml_compute_forward_hardsigmoid(params, dst);
+            }
+            break;
+        case GGML_UNARY_OP_EXP:
+            {
+                ggml_compute_forward_exp(params, dst);
+            }
+            break;
+        case GGML_UNARY_OP_FLOOR:
+            {
+                ggml_compute_forward_floor(params, dst);
+            }
+            break;
+        case GGML_UNARY_OP_CEIL:
+            {
+                ggml_compute_forward_ceil(params, dst);
+            }
+            break;
+        case GGML_UNARY_OP_ROUND:
+            {
+                ggml_compute_forward_round(params, dst);
+            }
+            break;
+        case GGML_UNARY_OP_TRUNC:
+            {
+                ggml_compute_forward_trunc(params, dst);
+            }
+            break;
+        case GGML_UNARY_OP_XIELU:
+            {
+                ggml_compute_forward_xielu(params, dst);
+            }
+            break;
+        case GGML_UNARY_OP_EXPM1:
+            {
+                ggml_compute_forward_expm1(params, dst);
+            }
+            break;
+        case GGML_UNARY_OP_SOFTPLUS:
+            {
+                ggml_compute_forward_softplus(params, dst);
+            }
+            break;
+        default:
+            {
+                GGML_ABORT("fatal error");
+            }
+    }
+}
+
+// ggml_compute_forward_glu
+
+void ggml_compute_forward_glu(const ggml_compute_params * params, ggml_tensor * dst) {
+    const ggml_glu_op op = ggml_get_glu_op(dst);
+
+    switch (op) {
+        case GGML_GLU_OP_REGLU:
+            {
+                ggml_compute_forward_reglu(params, dst);
+            }
+            break;
+        case GGML_GLU_OP_GEGLU:
+            {
+                ggml_compute_forward_geglu(params, dst);
+            }
+            break;
+        case GGML_GLU_OP_SWIGLU:
+            {
+                ggml_compute_forward_swiglu(params, dst);
+            }
+            break;
+        case GGML_GLU_OP_SWIGLU_OAI:
+            {
ggml_compute_forward_swiglu_oai(params, dst); + } + break; + case GGML_GLU_OP_GEGLU_ERF: + { + ggml_compute_forward_geglu_erf(params, dst); + } + break; + case GGML_GLU_OP_GEGLU_QUICK: + { + ggml_compute_forward_geglu_quick(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_get_rel_pos + +static void ggml_compute_forward_get_rel_pos_f16(const ggml_compute_params * params, ggml_tensor * dst) { + GGML_UNUSED(params); + + const ggml_tensor * src0 = dst->src[0]; + + // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/image_encoder.py#L292-L322 + + GGML_TENSOR_UNARY_OP_LOCALS + + const int64_t w = ne1; + + ggml_fp16_t * src0_data = (ggml_fp16_t *) src0->data; + ggml_fp16_t * dst_data = (ggml_fp16_t *) dst->data; + + for (int64_t i2 = 0; i2 < ne2; ++i2) { + for (int64_t i1 = 0; i1 < ne1; ++i1) { + const int64_t pos = (w - i1 - 1) + i2; + for (int64_t i0 = 0; i0 < ne0; ++i0) { + dst_data[i2 * ne1 * ne0 + i1 * ne0 + i0] = src0_data[pos * ne00 + i0]; + } + } + } +} + +void ggml_compute_forward_get_rel_pos(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F16: + case GGML_TYPE_BF16: + { + ggml_compute_forward_get_rel_pos_f16(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_add_rel_pos + +static void ggml_compute_forward_add_rel_pos_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + const ggml_tensor * src2 = dst->src[2]; + + const bool inplace = (bool) ((int32_t *) dst->op_params)[0]; + if (!inplace) { + if (params->ith == 0) { + memcpy((char *) dst->data, (char *) src0->data, ggml_nbytes(dst)); + } + ggml_barrier(params->threadpool); + } + // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/image_encoder.py#L357-L359 + + float * src1_data = (float *) src1->data; + float * src2_data = (float *) src2->data; + float * dst_data = (float *) dst->data; + + const int64_t ne10 = src1->ne[0]; + const int64_t ne11 = src1->ne[1]; + const int64_t ne12 = src1->ne[2]; + const int64_t ne13 = src1->ne[3]; + + const int ith = params->ith; + const int nth = params->nth; + + // total patches in dst + const int np = ne13; + + // patches per thread + const int dp = (np + nth - 1) / nth; + + // patch range for this thread + const int ip0 = dp * ith; + const int ip1 = MIN(ip0 + dp, np); + + for (int64_t i13 = ip0; i13 < ip1; ++i13) { + for (int64_t i12 = 0; i12 < ne12; ++i12) { + for (int64_t i11 = 0; i11 < ne11; ++i11) { + const int64_t jp1 = i13 * ne12 * ne11 * ne10 + i12 * ne11 * ne10 + i11 * ne10; + for (int64_t i10 = 0; i10 < ne10; ++i10) { + const int64_t jp0 = jp1 + i10; + const float src1_e = src1_data[jp0]; + const float src2_e = src2_data[jp0]; + + const int64_t jdh = jp0 * ne10; + const int64_t jdw = jdh - (ne10 - 1) * i10; + + for (int64_t j = 0; j < ne10; ++j) { + dst_data[jdh + j] += src2_e; + dst_data[jdw + j * ne10] += src1_e; + } + } + } + } + } +} + +void ggml_compute_forward_add_rel_pos(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_add_rel_pos_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_rwkv_wkv6 + +static void 
ggml_compute_forward_rwkv_wkv6_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const int64_t T = dst->src[1]->ne[2]; + const int64_t C = dst->ne[0]; + const int64_t HEADS = dst->src[1]->ne[1]; + const int64_t n_seqs = dst->src[5]->ne[1]; + const int64_t head_size = C / HEADS; + + float * dst_data = (float *) dst->data; + float * state = ((float *) dst->data) + C * T; + + const int ith = params->ith; + const int nth = params->nth; + + if (ith >= HEADS) { + return; + } + + const int h_start = (HEADS * ith) / nth; + const int h_end = ((HEADS * (ith + 1)) / nth < HEADS) ? (HEADS * (ith + 1)) / nth : HEADS; + + float * k = (float *) dst->src[0]->data; + float * v = (float *) dst->src[1]->data; + float * r = (float *) dst->src[2]->data; + float * time_faaaa = (float *) dst->src[3]->data; + float * time_decay = (float *) dst->src[4]->data; + + size_t t_stride = HEADS * head_size; // Same to C + + size_t h_stride = C / HEADS; + GGML_ASSERT(C % HEADS == 0); // C must be divisible by HEADS + size_t h_stride_2d = head_size * head_size; + + if (ith == 0) { + memset(dst_data, 0, T * C * sizeof(float)); + } + ggml_barrier(params->threadpool); + +#if defined(__AVX__) && !defined(__AVX512F__) +# define GGML_F32X GGML_F32x8 +# define GGML_F32X_SET1 GGML_F32x8_SET1 +# define GGML_F32X_LOAD GGML_F32x8_LOAD +# define GGML_F32X_STORE GGML_F32x8_STORE +# define GGML_F32X_MUL GGML_F32x8_MUL +# define GGML_F32X_FMA GGML_F32x8_FMA +# define WKV_VECTOR_SIZE 8 +#elif defined(__AVX512F__) +# define GGML_F32X GGML_F32x16 +# define GGML_F32X_SET1 GGML_F32x16_SET1 +# define GGML_F32X_LOAD GGML_F32x16_LOAD +# define GGML_F32X_STORE GGML_F32x16_STORE +# define GGML_F32X_MUL GGML_F32x16_MUL +# define GGML_F32X_FMA GGML_F32x16_FMA +# define WKV_VECTOR_SIZE 16 +#elif defined(__ARM_FEATURE_SVE) && defined(__aarch64__) +# define GGML_F32X GGML_F32xt +# define GGML_F32X_SET1 GGML_F32xt_SET1 +# define GGML_F32X_LOAD GGML_F32xt_LOAD +# define GGML_F32X_STORE GGML_F32xt_STORE +# define GGML_F32X_MUL GGML_F32xt_MUL +# define GGML_F32X_FMA GGML_F32xt_FMA +# define WKV_VECTOR_SIZE 8 +#elif defined(__ARM_NEON) && defined(__aarch64__) +# define GGML_F32X GGML_F32x4 +# define GGML_F32X_SET1 GGML_F32x4_SET1 +# define GGML_F32X_LOAD GGML_F32x4_LOAD +# define GGML_F32X_STORE GGML_F32x4_STORE +# define GGML_F32X_MUL GGML_F32x4_MUL +# define GGML_F32X_FMA GGML_F32x4_FMA +# define WKV_VECTOR_SIZE 4 +#endif + +#ifdef WKV_VECTOR_SIZE + int wkv_vector_size; +# if defined(__ARM_FEATURE_SVE) + wkv_vector_size = svcntw(); +# else + wkv_vector_size = WKV_VECTOR_SIZE; +# endif + const int64_t vec_count = head_size / wkv_vector_size; + + for (int64_t t = 0; t < T; t++) { + size_t t_offset = t * t_stride; + size_t state_offset = head_size * C * (t / (T / n_seqs)); + float * state_cur = state + state_offset; + float * state_prev = t % (T / n_seqs) ? 
state_cur : (float *) dst->src[5]->data + state_offset; + + for (int64_t h = h_start; h < h_end; h++) { + size_t h_offset = h * h_stride; + size_t t_h_offset = t_offset + h_offset; + size_t h_2d_offset = h * h_stride_2d; + + for (int64_t i = 0; i < head_size; i++) { + size_t t_h_i_offset = t_h_offset + i; + size_t h_i_offset = h_offset + i; + size_t h_2d_i_offset = h_2d_offset + i * h_stride; + + float k_val = k[t_h_i_offset]; + float r_val = r[t_h_i_offset]; + float time_faaaa_val = time_faaaa[h_i_offset]; + float time_decay_val = time_decay[t_h_i_offset]; + + // Broadcast scalar values to vectors + GGML_F32X k_vec = GGML_F32X_SET1(k_val); + GGML_F32X r_vec = GGML_F32X_SET1(r_val); + GGML_F32X time_faaaa_vec = GGML_F32X_SET1(time_faaaa_val); + GGML_F32X time_decay_vec = GGML_F32X_SET1(time_decay_val); + + for (int64_t j = 0; j < vec_count; j++) { + size_t base_j = j * wkv_vector_size; + size_t t_h_j_offset = t_h_offset + base_j; + size_t h_2d_i_j_offset = h_2d_i_offset + base_j; + + // Load x elements at once + GGML_F32X v_vec = GGML_F32X_LOAD(&v[t_h_j_offset]); + GGML_F32X prev_state_vec = GGML_F32X_LOAD(&state_prev[h_2d_i_j_offset]); + GGML_F32X dst_vec = GGML_F32X_LOAD(&dst_data[t_h_j_offset]); + + // Compute kv = v * k + GGML_F32X kv_vec = GGML_F32X_MUL(v_vec, k_vec); + + // Compute temp = kv * time_faaaa + prev_state + GGML_F32X temp_vec = GGML_F32X_FMA(prev_state_vec, kv_vec, time_faaaa_vec); + + // Update dst: dst += temp * r + dst_vec = GGML_F32X_FMA(dst_vec, temp_vec, r_vec); + GGML_F32X_STORE(&dst_data[t_h_j_offset], dst_vec); + + // Update state: state = prev_state * time_decay + kv + GGML_F32X new_state_vec = GGML_F32X_FMA(kv_vec, prev_state_vec, time_decay_vec); + GGML_F32X_STORE(&state_cur[h_2d_i_j_offset], new_state_vec); + } + + // Handle remaining elements, this will not be used. + for (int64_t j = vec_count * wkv_vector_size; j < head_size; j++) { + size_t t_h_j_offset = t_h_offset + j; + size_t h_2d_i_j_offset = h_2d_i_offset + j; + float v_val = v[t_h_j_offset]; + float kv_val = v_val * k_val; + float prev_state_val = state_prev[h_2d_i_j_offset]; + float temp_val = kv_val * time_faaaa_val + prev_state_val; + dst_data[t_h_j_offset] += temp_val * r_val; + state_cur[h_2d_i_j_offset] = prev_state_val * time_decay_val + kv_val; + } + } + } + } + +#else + // basically fused operations: + // dst = r @ (time_faaaa * (k @ v) + state), + // state = time_decay * state + (k @ v), + // recursive through each token + for (int64_t t = 0; t < T; t++) { + size_t t_offset = t * t_stride; + size_t state_offset = head_size * C * (t / (T / n_seqs)); + float * state_cur = state + state_offset; + float * state_prev = t % (T / n_seqs) ? state_cur : (float *) dst->src[5]->data + state_offset; + + for (int64_t h = h_start; h < h_end; h++) { + size_t h_offset = h * h_stride; + size_t t_h_offset = t_offset + h_offset; + size_t h_2d_offset = h * h_stride_2d; + + for (int64_t i = 0; i < head_size; i++) { + size_t t_h_i_offset = t_h_offset + i; + size_t h_i_offset = h_offset + i; + size_t h_2d_i_offset = h_2d_offset + i * h_stride; + + float k_val = k[t_h_i_offset]; + float r_val = r[t_h_i_offset]; + float time_faaaa_val = time_faaaa[h_i_offset]; + // RWKV v6: different time_decay for each token. 
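+                // Note: unlike time_faaaa above, which is shared across tokens and indexed per
+                // head/channel only (h_i_offset), time_decay is read per token (t_h_i_offset).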
+ float time_decay_val = time_decay[t_h_i_offset]; + + for (int64_t j = 0; j < head_size; j++) { + size_t t_h_j_offset = t_h_offset + j; + size_t h_2d_i_j_offset = h_2d_i_offset + j; + + float v_val = v[t_h_j_offset]; + float kv_val = v_val * k_val; + float prev_state_val = state_prev[h_2d_i_j_offset]; + float temp_val = kv_val * time_faaaa_val + prev_state_val; + dst_data[t_h_j_offset] += temp_val * r_val; + state_cur[h_2d_i_j_offset] = prev_state_val * time_decay_val + kv_val; + } + } + } + } +#endif +} + +void ggml_compute_forward_rwkv_wkv6(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_rwkv_wkv6_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_gla + +static void ggml_compute_forward_gla_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const int64_t T = dst->src[1]->ne[2]; + const int64_t C = dst->ne[0]; + const int64_t HEADS = dst->src[1]->ne[1]; + const int64_t n_seqs = dst->src[4]->ne[1]; + const int64_t head_size = C / HEADS; + const float scale = ggml_get_op_params_f32(dst, 0); + + float * dst_data = (float *) dst->data; + float * state = ((float *) dst->data) + C * T; + + const int ith = params->ith; + const int nth = params->nth; + + if (ith >= HEADS) { + return; + } + + const int h_start = (HEADS * ith) / nth; + const int h_end = ((HEADS * (ith + 1)) / nth < HEADS) ? (HEADS * (ith + 1)) / nth : HEADS; + + float * k = (float *) dst->src[0]->data; + float * v = (float *) dst->src[1]->data; + float * q = (float *) dst->src[2]->data; + float * g = (float *) dst->src[3]->data; + + size_t t_stride = HEADS * head_size; // Same to C + + size_t h_stride = C / HEADS; + GGML_ASSERT(C % HEADS == 0); // C must be divisible by HEADS + size_t h_stride_2d = head_size * head_size; + + if (ith == 0) { + memset(dst_data, 0, T * C * sizeof(float)); + } + ggml_barrier(params->threadpool); + +#if defined(__AVX__) && !defined(__AVX512F__) +# define GGML_F32X GGML_F32x8 +# define GGML_F32X_SET1 GGML_F32x8_SET1 +# define GGML_F32X_LOAD GGML_F32x8_LOAD +# define GGML_F32X_STORE GGML_F32x8_STORE +# define GGML_F32X_MUL GGML_F32x8_MUL +# define GGML_F32X_FMA GGML_F32x8_FMA +# define GLA_VECTOR_SIZE 8 +#elif defined(__AVX512F__) +# define GGML_F32X GGML_F32x16 +# define GGML_F32X_SET1 GGML_F32x16_SET1 +# define GGML_F32X_LOAD GGML_F32x16_LOAD +# define GGML_F32X_STORE GGML_F32x16_STORE +# define GGML_F32X_MUL GGML_F32x16_MUL +# define GGML_F32X_FMA GGML_F32x16_FMA +# define GLA_VECTOR_SIZE 16 +#elif defined(__ARM_FEATURE_SVE) && defined(__aarch64__) +# define GGML_F32X GGML_F32xt +# define GGML_F32X_SET1 GGML_F32xt_SET1 +# define GGML_F32X_LOAD GGML_F32xt_LOAD +# define GGML_F32X_STORE GGML_F32xt_STORE +# define GGML_F32X_MUL GGML_F32xt_MUL +# define GGML_F32X_FMA GGML_F32xt_FMA +# define GLA_VECTOR_SIZE 8 +#elif defined(__ARM_NEON) && defined(__aarch64__) +# define GGML_F32X GGML_F32x4 +# define GGML_F32X_SET1 GGML_F32x4_SET1 +# define GGML_F32X_LOAD GGML_F32x4_LOAD +# define GGML_F32X_STORE GGML_F32x4_STORE +# define GGML_F32X_MUL GGML_F32x4_MUL +# define GGML_F32X_FMA GGML_F32x4_FMA +# define GLA_VECTOR_SIZE 4 +#endif + +#ifdef GLA_VECTOR_SIZE + int gla_vector_size; +# if defined(__ARM_FEATURE_SVE) + gla_vector_size = svcntw(); +# else + gla_vector_size = GLA_VECTOR_SIZE; +# endif + const int64_t vec_count = head_size / gla_vector_size; + + for (int64_t t = 0; t < T; t++) { + size_t t_offset = t * 
t_stride; + size_t state_offset = head_size * C * (t / (T / n_seqs)); + float * state_cur = state + state_offset; + float * state_prev = t % (T / n_seqs) ? state_cur : (float *) dst->src[4]->data + state_offset; + + for (int64_t h = h_start; h < h_end; h++) { + size_t h_offset = h * h_stride; + size_t t_h_offset = t_offset + h_offset; + size_t h_2d_offset = h * h_stride_2d; + + for (int64_t i = 0; i < head_size; i++) { + size_t t_h_i_offset = t_h_offset + i; + size_t h_2d_i_offset = h_2d_offset + i * h_stride; + + float k_val = k[t_h_i_offset]; + float q_val = q[t_h_i_offset] * scale; + float g_val = g[t_h_i_offset]; + + // Broadcast scalar values to vectors + GGML_F32X k_vec = GGML_F32X_SET1(k_val); + GGML_F32X q_vec = GGML_F32X_SET1(q_val); + GGML_F32X g_vec = GGML_F32X_SET1(g_val); + + for (int64_t j = 0; j < vec_count; j++) { + size_t base_j = j * gla_vector_size; + size_t t_h_j_offset = t_h_offset + base_j; + size_t h_2d_i_j_offset = h_2d_i_offset + base_j; + + // Load x elements at once + GGML_F32X v_vec = GGML_F32X_LOAD(&v[t_h_j_offset]); + GGML_F32X prev_state_vec = GGML_F32X_LOAD(&state_prev[h_2d_i_j_offset]); + GGML_F32X dst_vec = GGML_F32X_LOAD(&dst_data[t_h_j_offset]); + + // Compute kv = v * k + GGML_F32X kv_vec = GGML_F32X_MUL(v_vec, k_vec); + + // Compute temp = prev_state * g + kv + GGML_F32X temp_vec = GGML_F32X_FMA(kv_vec, prev_state_vec, g_vec); + + // Update dst: dst += temp * q + dst_vec = GGML_F32X_FMA(dst_vec, temp_vec, q_vec); + GGML_F32X_STORE(&dst_data[t_h_j_offset], dst_vec); + + // Update state + GGML_F32X_STORE(&state_cur[h_2d_i_j_offset], temp_vec); + } + + // Handle remaining elements, this will not be used. + for (int64_t j = vec_count * gla_vector_size; j < head_size; j++) { + size_t t_h_j_offset = t_h_offset + j; + size_t h_2d_i_j_offset = h_2d_i_offset + j; + float v_val = v[t_h_j_offset]; + float kv_val = v_val * k_val; + float prev_state_val = state_prev[h_2d_i_j_offset]; + float temp_val = kv_val + prev_state_val * g_val; + dst_data[t_h_j_offset] += temp_val * q_val; + state_cur[h_2d_i_j_offset] = temp_val; + } + } + } + } + +#else + for (int64_t t = 0; t < T; t++) { + size_t t_offset = t * t_stride; + size_t state_offset = head_size * C * (t / (T / n_seqs)); + float * state_cur = state + state_offset; + float * state_prev = t % (T / n_seqs) ? 
state_cur : (float *) dst->src[4]->data + state_offset; + + for (int64_t h = h_start; h < h_end; h++) { + size_t h_offset = h * h_stride; + size_t t_h_offset = t_offset + h_offset; + size_t h_2d_offset = h * h_stride_2d; + + for (int64_t i = 0; i < head_size; i++) { + size_t t_h_i_offset = t_h_offset + i; + size_t h_2d_i_offset = h_2d_offset + i * h_stride; + + float k_val = k[t_h_i_offset]; + float q_val = q[t_h_i_offset] * scale; + float g_val = g[t_h_i_offset]; + + for (int64_t j = 0; j < head_size; j++) { + size_t t_h_j_offset = t_h_offset + j; + size_t h_2d_i_j_offset = h_2d_i_offset + j; + + float v_val = v[t_h_j_offset]; + float kv_val = v_val * k_val; + float prev_state_val = state_prev[h_2d_i_j_offset]; + float temp_val = prev_state_val * g_val + kv_val; + dst_data[t_h_j_offset] += temp_val * q_val; + state_cur[h_2d_i_j_offset] = temp_val; + } + } + } + } +#endif +} + +void ggml_compute_forward_gla(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_gla_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +static void ggml_compute_forward_solve_tri_f32(const struct ggml_compute_params * params, struct ggml_tensor * dst) { + const struct ggml_tensor * src0 = dst->src[0]; // A (lower triangular) + const struct ggml_tensor * src1 = dst->src[1]; // B (RHS) + + GGML_TENSOR_BINARY_OP_LOCALS; + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_F32); + + GGML_ASSERT(ne00 == ne01); // A must be square + GGML_ASSERT(ne0 == ne10); // solution cols == B cols + GGML_ASSERT(ne1 == ne11); // solution rows == B rows + + GGML_ASSERT(ne02 == ne12 && ne12 == ne2); + GGML_ASSERT(ne03 == ne13 && ne13 == ne3); + + const int ith = params->ith; + const int nth = params->nth; + + const int64_t k = ne10; // number of RHS columns + const int64_t n = ne11; // A is n×n + const int64_t nr = + ne02 * ne03 * k; // we're parallelizing on columns here, so seq x token x column will be the unit + + // chunks per thread + const int64_t dr = (nr + nth - 1) / nth; + + // chunk range for this thread + const int64_t ir0 = dr * ith; + const int64_t ir1 = MIN(ir0 + dr, nr); + + const float * A = (const float *) src0->data; // [n, n, B1, B2] + const float * B = (const float *) src1->data; // [n, k, B1, B2] + float * X = (float *) dst->data; // [n, k, B1, B2] + + for (int64_t ir = ir0; ir < ir1; ++ir) { + const int64_t i03 = ir / (ne02 * k); + const int64_t i02 = (ir - i03 * ne02 * k) / k; + const int64_t i01 = (ir - i03 * ne02 * k - i02 * k); + + const float * A_batch = A + i02 * nb02 / sizeof(float) + i03 * nb03 / sizeof(float); + const float * B_batch = B + i02 * nb12 / sizeof(float) + i03 * nb13 / sizeof(float); + + float * X_batch = X + i02 * nb2 / sizeof(float) + i03 * nb3 / sizeof(float); + + for (int64_t i00 = 0; i00 < n; ++i00) { + float sum = 0.0f; + for (int64_t t = 0; t < i00; ++t) { + sum += A_batch[i00 * n + t] * X_batch[t * k + i01]; + } + + const float diag = A_batch[i00 * n + i00]; + GGML_ASSERT(diag != 0.0f && "Zero diagonal in triangular matrix"); + X_batch[i00 * k + i01] = (B_batch[i00 * k + i01] - sum) / diag; + } + } +} + +void ggml_compute_forward_solve_tri(const struct ggml_compute_params * params, struct ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + if (src0->type == GGML_TYPE_F32 && src1->type == 
GGML_TYPE_F32) { + ggml_compute_forward_solve_tri_f32(params, dst); + } else { + GGML_ABORT("fatal error"); + } +} + +// ggml_compute_forward_rwkv_wkv7 + +static void ggml_compute_forward_rwkv_wkv7_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const int64_t T = dst->src[1]->ne[2]; + const int64_t C = dst->ne[0]; + const int64_t HEADS = dst->src[1]->ne[1]; + const int64_t n_seqs = dst->src[6]->ne[1]; + const int64_t head_size = C / HEADS; + + float * dst_data = (float *) dst->data; + float * state = ((float *) dst->data) + C * T; + + const int ith = params->ith; + const int nth = params->nth; + + if (ith >= HEADS) { + return; + } + + const int h_start = (HEADS * ith) / nth; + const int h_end = ((HEADS * (ith + 1)) / nth < HEADS) ? (HEADS * (ith + 1)) / nth : HEADS; + + float * r = (float *) dst->src[0]->data; + float * w = (float *) dst->src[1]->data; + float * k = (float *) dst->src[2]->data; + float * v = (float *) dst->src[3]->data; + float * a = (float *) dst->src[4]->data; + float * b = (float *) dst->src[5]->data; + + int64_t t_stride = HEADS * head_size; // Same to C + + int64_t h_stride = C / HEADS; + GGML_ASSERT(C % HEADS == 0); // C must be divisible by HEADS + int64_t h_stride_2d = head_size * head_size; + +#if defined(GGML_SIMD) +# if defined(__ARM_FEATURE_SVE) || defined(__riscv_v_intrinsic) + // scalar Route to scalar implementation //TODO: Write SVE code and RVV code + for (int64_t t = 0; t < T; t++) { + int64_t t_offset = t * t_stride; + int64_t state_offset = head_size * C * (t / (T / n_seqs)); + float * state_cur = state + state_offset; + float * state_prev = t % (T / n_seqs) ? state_cur : (float *) dst->src[6]->data + state_offset; + + for (int64_t h = h_start; h < h_end; h++) { + int64_t h_offset = h * h_stride; + int64_t t_h_offset = t_offset + h_offset; + int64_t h_2d_offset = h * h_stride_2d; + + for (int64_t i = 0; i < head_size; i++) { + int64_t t_h_i_offset = t_h_offset + i; + int64_t h_2d_i_offset = h_2d_offset + i * h_stride; + + float v_val = v[t_h_i_offset]; + + float sa = 0, result = 0; + for (int64_t j = 0; j < head_size; j++) { + sa += a[t_h_offset + j] * state_prev[h_2d_i_offset + j]; + } + + for (int64_t j = 0; j < head_size; j++) { + int64_t t_h_j_offset = t_h_offset + j; + int64_t h_2d_i_j_offset = h_2d_i_offset + j; + + float r_val = r[t_h_j_offset]; + float w_val = w[t_h_j_offset]; + float k_val = k[t_h_j_offset]; + float b_val = b[t_h_j_offset]; + float kv_val = v_val * k_val; + float prev_state_val = state_prev[h_2d_i_j_offset]; + state_cur[h_2d_i_j_offset] = prev_state_val * w_val + kv_val + sa * b_val; + result += state_cur[h_2d_i_j_offset] * r_val; + } + dst_data[t_h_i_offset] = result; + } + } + } +# else + for (int64_t t = 0; t < T; t++) { + int64_t t_offset = t * t_stride; + int64_t state_offset = head_size * C * (t / (T / n_seqs)); + float * state_cur = state + state_offset; + float * state_prev = t % (T / n_seqs) ? 
state_cur : (float *) dst->src[6]->data + state_offset; + + for (int64_t h = h_start; h < h_end; h++) { + int64_t h_offset = h * h_stride; + int64_t t_h_offset = t_offset + h_offset; + int64_t h_2d_offset = h * h_stride_2d; + + for (int64_t ii = 0; ii < head_size; ii++) { + int64_t t_h_i_offset = t_h_offset + ii; + int64_t h_2d_i_offset = h_2d_offset + ii * h_stride; + + GGML_F32_VEC v_vec = GGML_F32_VEC_SET1(v[t_h_i_offset]); + + float sa = 0; + { + GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; + GGML_F32_VEC ax[GGML_F32_ARR]; + GGML_F32_VEC ay[GGML_F32_ARR]; + for (int64_t j = 0; j < head_size; j += GGML_F32_STEP) { + for (int64_t kk = 0; kk < GGML_F32_ARR; kk++) { + ax[kk] = GGML_F32_VEC_LOAD(&a[t_h_offset + j + kk * GGML_F32_EPR]); + ay[kk] = GGML_F32_VEC_LOAD(&state_prev[h_2d_i_offset + j + kk * GGML_F32_EPR]); + sum[kk] = GGML_F32_VEC_FMA(sum[kk], ax[kk], ay[kk]); + } + } + GGML_F32_VEC_REDUCE(sa, sum); + } + + GGML_F32_VEC sa_vec = GGML_F32_VEC_SET1(sa); + + int64_t j = 0; + GGML_F32_VEC result_vec[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; + for (; j < head_size; j += GGML_F32_STEP) { + for (int64_t kk = 0; kk < GGML_F32_ARR; kk++) { + int64_t t_h_j_offset = t_h_offset + j + kk * GGML_F32_EPR; + int64_t h_2d_i_j_offset = h_2d_i_offset + j + kk * GGML_F32_EPR; + + GGML_F32_VEC r_vec = GGML_F32_VEC_LOAD(&r[t_h_j_offset]); + GGML_F32_VEC w_vec = GGML_F32_VEC_LOAD(&w[t_h_j_offset]); + GGML_F32_VEC k_vec = GGML_F32_VEC_LOAD(&k[t_h_j_offset]); + GGML_F32_VEC b_vec = GGML_F32_VEC_LOAD(&b[t_h_j_offset]); + + k_vec = GGML_F32_VEC_MUL(v_vec, k_vec); + + GGML_F32_VEC state_vec = GGML_F32_VEC_LOAD(&state_prev[h_2d_i_j_offset]); + // kv + s * decay + sa * b + state_vec = GGML_F32_VEC_FMA(k_vec, state_vec, w_vec); + state_vec = GGML_F32_VEC_FMA(state_vec, sa_vec, b_vec); + GGML_F32_VEC_STORE(&state_cur[h_2d_i_j_offset], state_vec); + + result_vec[kk] = GGML_F32_VEC_FMA(result_vec[kk], state_vec, r_vec); + } + } + GGML_F32_VEC_REDUCE(dst_data[t_h_i_offset], result_vec); + + // There shouldn't be left-overs though. + for (; j < head_size; j++) { + int64_t t_h_j_offset = t_h_offset + j; + int64_t h_2d_i_j_offset = h_2d_i_offset + j; + + float r_val = r[t_h_j_offset]; + float w_val = w[t_h_j_offset]; + float k_val = k[t_h_j_offset]; + float b_val = b[t_h_j_offset]; + float kv_val = v[t_h_i_offset] * k_val; + + float prev_state_val = state_prev[h_2d_i_j_offset]; + state_cur[h_2d_i_j_offset] = prev_state_val * w_val + kv_val + sa * b_val; + dst_data[t_h_i_offset] += state_cur[h_2d_i_j_offset] * r_val; + } + } + } + } +# endif +#else + for (int64_t t = 0; t < T; t++) { + int64_t t_offset = t * t_stride; + int64_t state_offset = head_size * C * (t / (T / n_seqs)); + float * state_cur = state + state_offset; + float * state_prev = t % (T / n_seqs) ? 
state_cur : (float *) dst->src[6]->data + state_offset; + + for (int64_t h = h_start; h < h_end; h++) { + int64_t h_offset = h * h_stride; + int64_t t_h_offset = t_offset + h_offset; + int64_t h_2d_offset = h * h_stride_2d; + + for (int64_t i = 0; i < head_size; i++) { + int64_t t_h_i_offset = t_h_offset + i; + int64_t h_2d_i_offset = h_2d_offset + i * h_stride; + + float v_val = v[t_h_i_offset]; + + float sa = 0, result = 0; + for (int64_t j = 0; j < head_size; j++) { + sa += a[t_h_offset + j] * state_prev[h_2d_i_offset + j]; + } + + for (int64_t j = 0; j < head_size; j++) { + int64_t t_h_j_offset = t_h_offset + j; + int64_t h_2d_i_j_offset = h_2d_i_offset + j; + + float r_val = r[t_h_j_offset]; + float w_val = w[t_h_j_offset]; + float k_val = k[t_h_j_offset]; + float b_val = b[t_h_j_offset]; + float kv_val = v_val * k_val; + float prev_state_val = state_prev[h_2d_i_j_offset]; + state_cur[h_2d_i_j_offset] = prev_state_val * w_val + kv_val + sa * b_val; + result += state_cur[h_2d_i_j_offset] * r_val; + } + dst_data[t_h_i_offset] = result; + } + } + } +#endif +} + +void ggml_compute_forward_rwkv_wkv7(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_rwkv_wkv7_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_map_custom1 + +void ggml_compute_forward_map_custom1(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * a = dst->src[0]; + + struct ggml_map_custom1_op_params p; + memcpy(&p, dst->op_params, sizeof(p)); + + p.fun(dst, a, params->ith, params->nth, p.userdata); +} + +// ggml_compute_forward_map_custom2 + +void ggml_compute_forward_map_custom2(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * a = dst->src[0]; + const ggml_tensor * b = dst->src[1]; + + struct ggml_map_custom2_op_params p; + memcpy(&p, dst->op_params, sizeof(p)); + + p.fun(dst, a, b, params->ith, params->nth, p.userdata); +} + +// ggml_compute_forward_map_custom3 + +void ggml_compute_forward_map_custom3(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * a = dst->src[0]; + const ggml_tensor * b = dst->src[1]; + const ggml_tensor * c = dst->src[2]; + + struct ggml_map_custom3_op_params p; + memcpy(&p, dst->op_params, sizeof(p)); + + p.fun(dst, a, b, c, params->ith, params->nth, p.userdata); +} + +// ggml_compute_forward_custom + +void ggml_compute_forward_custom(const struct ggml_compute_params * params, struct ggml_tensor * dst) { + struct ggml_custom_op_params p; + memcpy(&p, dst->op_params, sizeof(p)); + + p.fun(dst, params->ith, params->nth, p.userdata); +} + +// ggml_compute_forward_cross_entropy_loss + +static void ggml_compute_forward_cross_entropy_loss_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT(src0->nb[0] == ggml_type_size(src0->type)); + GGML_ASSERT(src1->nb[0] == ggml_type_size(src1->type)); + GGML_ASSERT(ggml_are_same_shape(src0, src1)); + GGML_ASSERT(ggml_is_scalar(dst)); + GGML_ASSERT(dst->type == GGML_TYPE_F32); + + // TODO: handle transposed/permuted matrices + const int64_t nc = src0->ne[0]; + const int64_t nr = ggml_nrows(src0); + + const int ith = params->ith; + const int nth = params->nth; + + float * sums = (float *) params->wdata; + 
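+    // wdata layout: the first nth floats hold per-thread partial sums, followed by nth
+    // scratch rows of nc floats each (st points at this thread's row); this matches the
+    // wsize assertion below.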
float * st = ((float *) params->wdata) + nth + ith * nc; + float sum_thread = 0.0f; + + GGML_ASSERT(params->wsize >= sizeof(float) * (nth + nth * nc)); + + // rows per thread + const int64_t dr = (nr + nth - 1) / nth; + + // row range for this thread + const int64_t ir0 = dr * ith; + const int64_t ir1 = MIN(ir0 + dr, nr); + + for (int64_t i1 = ir0; i1 < ir1; ++i1) { + const float * s0 = (const float *) ((const char *) src0->data + i1 * src0->nb[1]); + const float * s1 = (const float *) ((const char *) src1->data + i1 * src1->nb[1]); + +#ifndef NDEBUG + for (int64_t i = 0; i < nc; ++i) { + //printf("p[%d] = %f\n", i, p[i]); + assert(!isnan(s0[i])); + assert(!isnan(s1[i])); + } +#endif + + float max = -INFINITY; + ggml_vec_max_f32(nc, &max, s0); + const ggml_float sum_softmax = ggml_vec_log_soft_max_f32(nc, st, s0, max); + assert(sum_softmax >= 0.0); + + ggml_vec_add1_f32(nc, st, st, -sum_softmax); + ggml_vec_mul_f32(nc, st, st, s1); + + float sum_st = 0.0f; + ggml_vec_sum_f32(nc, &sum_st, st); + sum_thread += sum_st; + +#ifndef NDEBUG + for (int64_t i = 0; i < nc; ++i) { + assert(!isnan(st[i])); + assert(!isinf(st[i])); + } +#endif + } + sums[ith] = sum_thread; + ggml_barrier(params->threadpool); + + if (ith == 0) { + float * dp = (float *) dst->data; + ggml_vec_sum_f32(nth, dp, sums); + dp[0] *= -1.0f / (float) nr; + } +} + +void ggml_compute_forward_cross_entropy_loss(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_cross_entropy_loss_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_cross_entropy_loss_back + +static void ggml_compute_forward_cross_entropy_loss_back_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * grad = dst->src[0]; // gradient of forward pass output + const ggml_tensor * src0f = dst->src[1]; // src0 of forward pass + const ggml_tensor * src1f = dst->src[2]; // src1 of forward pass + + GGML_ASSERT(ggml_is_contiguous(dst)); + GGML_ASSERT(ggml_is_contiguous(src0f)); + GGML_ASSERT(ggml_is_contiguous(src1f)); + GGML_ASSERT(ggml_is_contiguous(grad)); + GGML_ASSERT(ggml_are_same_shape(src0f, src1f) && ggml_are_same_shape(src0f, dst)); + + const int64_t ith = params->ith; + const int64_t nth = params->nth; + + // TODO: handle transposed/permuted matrices + const int64_t nc = src0f->ne[0]; + const int64_t nr = ggml_nrows(src0f); + + // rows per thread + const int64_t dr = (nr + nth - 1) / nth; + + // row range for this thread + const int64_t ir0 = dr * ith; + const int64_t ir1 = MIN(ir0 + dr, nr); + + const float d_by_nr = ((const float *) grad->data)[0] / (float) nr; + + for (int64_t i1 = ir0; i1 < ir1; i1++) { + float * ds0 = (float *) ((char *) dst->data + i1 * dst->nb[1]); + const float * s0 = (const float *) ((const char *) src0f->data + i1 * src0f->nb[1]); + const float * s1 = (const float *) ((const char *) src1f->data + i1 * src1f->nb[1]); + +#ifndef NDEBUG + for (int64_t i = 0; i < nc; ++i) { + //printf("p[%d] = %f\n", i, p[i]); + assert(!isnan(s0[i])); + assert(!isnan(s1[i])); + } +#endif + + // soft_max + float max = -INFINITY; + ggml_vec_max_f32(nc, &max, s0); + const ggml_float sum = ggml_vec_soft_max_f32(nc, ds0, s0, max); + assert(sum > 0.0); + ggml_vec_scale_f32(nc, ds0, 1.0 / sum); + + // grad(src0f) = (softmax(src0f) - src1f) * grad(cross_entropy_loss(src0f, src1f)) / nr + ggml_vec_sub_f32(nc, ds0, ds0, s1); + ggml_vec_scale_f32(nc, ds0, 
d_by_nr); + +#ifndef NDEBUG + for (int64_t i = 0; i < nc; ++i) { + assert(!isnan(ds0[i])); + assert(!isinf(ds0[i])); + } +#endif + } +} + +void ggml_compute_forward_cross_entropy_loss_back(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_cross_entropy_loss_back_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +static void ggml_compute_forward_opt_step_adamw_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src0_grad = dst->src[1]; + const ggml_tensor * src0_grad_m = dst->src[2]; + const ggml_tensor * src0_grad_v = dst->src[3]; + const ggml_tensor * adamw_params = dst->src[4]; + + GGML_ASSERT(ggml_are_same_shape(src0, src0_grad)); + GGML_ASSERT(ggml_are_same_shape(src0, src0_grad_m)); + GGML_ASSERT(ggml_are_same_shape(src0, src0_grad_v)); + GGML_ASSERT(ggml_nelements(adamw_params) == 7); + + const int ith = params->ith; + const int nth = params->nth; + + const int nr = ggml_nrows(src0); + + GGML_TENSOR_UNARY_OP_LOCALS + GGML_ASSERT(nb00 == sizeof(float)); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + const float * adamw_params_ptr = ggml_get_data_f32(adamw_params); + + const float alpha = adamw_params_ptr[0]; + const float beta1 = adamw_params_ptr[1]; + const float beta2 = adamw_params_ptr[2]; + const float eps = adamw_params_ptr[3]; + const float wd = adamw_params_ptr[4]; + const float beta1h = adamw_params_ptr[5]; + const float beta2h = adamw_params_ptr[6]; + const float keep = 1.f - alpha * wd; + for (int ir = ir0; ir < ir1; ++ir) { + const int64_t i03 = ir / (ne02 * ne01); + const int64_t i02 = (ir - i03 * ne02 * ne01) / ne01; + const int64_t i01 = (ir - i03 * ne02 * ne01 - i02 * ne01); + + const size_t offset = i03 * nb03 + i02 * nb02 + i01 * nb01; + + float * w = (float *) ((char *) src0->data + offset); // weight + const float * g = (const float *) ((const char *) src0_grad->data + offset); // grad + float * m = (float *) ((char *) src0_grad_m->data + offset); + float * v = (float *) ((char *) src0_grad_v->data + offset); + + for (int i00 = 0; i00 < ne00; ++i00) { + m[i00] = m[i00] * beta1 + g[i00] * (1.0f - beta1); + v[i00] = v[i00] * beta2 + g[i00] * g[i00] * (1.0f - beta2); + + const float mh = m[i00] * beta1h; + const float vh = sqrtf(v[i00] * beta2h) + eps; + + // The weight decay is applied independently of the Adam momenta m and v. + // This is NOT equivalent to l2 regularization that adds w[i00]*w[i00] to the loss. 
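+            // Decoupled update: w <- (1 - alpha*wd)*w - alpha*mh/vh, with mh = m[i00]*beta1h and
+            // vh = sqrtf(v[i00]*beta2h) + eps; beta1h/beta2h presumably carry the 1/(1 - beta^t)
+            // bias-correction factors precomputed by the caller in adamw_params.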
+ // See: https://arxiv.org/pdf/1711.05101v3.pdf + w[i00] = w[i00] * keep - alpha * mh / vh; + } + } +} + +void ggml_compute_forward_opt_step_adamw(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_opt_step_adamw_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +static void ggml_compute_forward_opt_step_sgd_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src0_grad = dst->src[1]; + const ggml_tensor * sgd_params = dst->src[2]; + + GGML_ASSERT(ggml_are_same_shape(src0, src0_grad)); + GGML_ASSERT(ggml_nelements(sgd_params) == 2); + + const int ith = params->ith; + const int nth = params->nth; + + const int nr = ggml_nrows(src0); + + GGML_TENSOR_UNARY_OP_LOCALS + GGML_ASSERT(nb00 == sizeof(float)); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + // using adamw param subset we care about - alpha, wd - could have a separate struct + const float * sgd_params_ptr = ggml_get_data_f32(sgd_params); + const float alpha = sgd_params_ptr[0]; + const float keep = 1.f - alpha * sgd_params_ptr[1]; + + for (int ir = ir0; ir < ir1; ++ir) { + const int64_t i03 = ir / (ne02 * ne01); + const int64_t i02 = (ir - i03 * ne02 * ne01) / ne01; + const int64_t i01 = (ir - i03 * ne02 * ne01 - i02 * ne01); + + const size_t offset = i03 * nb03 + i02 * nb02 + i01 * nb01; + + float * w = (float *) ((char *) src0->data + offset); // weight + const float * g = (const float *) ((const char *) src0_grad->data + offset); // grad + + for (int i00 = 0; i00 < ne00; ++i00) { + w[i00] = w[i00] * keep - alpha * g[i00]; + } + } +} + +void ggml_compute_forward_opt_step_sgd(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_opt_step_sgd_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error - sgd is F32 only"); + } + } +} From c3b3ed0f34ed8eabdbf0e87260234590bb801733 Mon Sep 17 00:00:00 2001 From: Phylliida Date: Sat, 29 Nov 2025 18:56:59 -0800 Subject: [PATCH 21/39] format --- ggml/src/ggml-cpu/ops.cpp | 20174 +++++++++++++++++++++++++++++++++++- 1 file changed, 20147 insertions(+), 27 deletions(-) diff --git a/ggml/src/ggml-cpu/ops.cpp b/ggml/src/ggml-cpu/ops.cpp index a3a217493fb..f9fbe0cb607 100644 --- a/ggml/src/ggml-cpu/ops.cpp +++ b/ggml/src/ggml-cpu/ops.cpp @@ -7560,37 +7560,17 @@ static void ggml_compute_forward_pad_f32( const int32_t rp2 = ggml_get_op_params_i32(dst, 5); const int32_t lp3 = ggml_get_op_params_i32(dst, 6); const int32_t rp3 = ggml_get_op_params_i32(dst, 7); - const int32_t circular = ggml_get_op_params_i32(dst, 8); + const bool circular = (bool)ggml_get_op_params_i32(dst, 8); // TODO: optimize - if (circular == 0) { - for (int64_t i2 = 0; i2 < ne2; ++i2) { - for (int64_t i1 = ith; i1 < ne1; i1 += nth) { - for (int64_t i0 = 0; i0 < ne0; ++i0) { - for (int64_t i3 = 0; i3 < ne3; ++i3) { - const int64_t dst_idx = i3*(ne0*ne1*ne2) + i2*(ne0*ne1) + i1*ne0 + i0; - if ((i0 >= lp0 && i0 < ne0 - rp0) \ - && (i1 >= lp1 && i1 < ne1 - rp1) \ - && (i2 >= lp2 && i2 < ne2 - rp2) \ - && (i3 >= lp3 && i3 < ne3 - rp3)) { - const int64_t src_idx = (i3 - lp3)*nb03 + (i2 - lp2)*nb02 + (i1 - lp1)*nb01 + (i0 - lp0)*nb00; - const float * src_ptr = 
(const float *)((char *) src0->data + src_idx); - dst_ptr[dst_idx] = *src_ptr; - } else { - dst_ptr[dst_idx] = 0; - } - } - } - } - } - } - else { - for (int64_t i2 = 0; i2 < ne2; ++i2) { - for (int64_t i1 = ith; i1 < ne1; i1 += nth) { - for (int64_t i0 = 0; i0 < ne0; ++i0) { - for (int64_t i3 = 0; i3 < ne3; ++i3) { + for (int64_t i2 = 0; i2 < ne2; ++i2) { + for (int64_t i1 = ith; i1 < ne1; i1 += nth) { + for (int64_t i0 = 0; i0 < ne0; ++i0) { + for (int64_t i3 = 0; i3 < ne3; ++i3) { + // circular means wrap around on a torus, so x and y loop around + if (circular) { const int64_t dst_idx = i3*(ne0*ne1*ne2) + i2*(ne0*ne1) + i1*ne0 + i0; const int64_t src_i0 = ggml_wrap_around(i0 - lp0, ne00); const int64_t src_i1 = ggml_wrap_around(i1 - lp1, ne01); @@ -7606,6 +7586,19 @@ static void ggml_compute_forward_pad_f32( const float * src_ptr = (const float *)((char *) src0->data + src_idx); dst_ptr[dst_idx] = *src_ptr; } + else { + const int64_t dst_idx = i3*(ne0*ne1*ne2) + i2*(ne0*ne1) + i1*ne0 + i0; + if ((i0 >= lp0 && i0 < ne0 - rp0) \ + && (i1 >= lp1 && i1 < ne1 - rp1) \ + && (i2 >= lp2 && i2 < ne2 - rp2) \ + && (i3 >= lp3 && i3 < ne3 - rp3)) { + const int64_t src_idx = (i3 - lp3)*nb03 + (i2 - lp2)*nb02 + (i1 - lp1)*nb01 + (i0 - lp0)*nb00; + const float * src_ptr = (const float *)((char *) src0->data + src_idx); + dst_ptr[dst_idx] = *src_ptr; + } else { + dst_ptr[dst_idx] = 0; + } + } } } } @@ -20483,3 +20476,20130 @@ void ggml_compute_forward_opt_step_sgd(const ggml_compute_params * params, ggml_ } } } +#include "ops.h" + +#include "binary-ops.h" +#include "ggml-cpu.h" +#include "ggml-impl.h" +#include "ggml.h" +#include "unary-ops.h" +#include "vec.h" + +#include +#include +#include +#include + +// ggml_compute_forward_dup + +static void ggml_compute_forward_dup_same_cont(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); + GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0)); + GGML_ASSERT(src0->type == dst->type); + + const size_t nb0 = ggml_type_size(src0->type); + + const int ith = params->ith; // thread index + const int nth = params->nth; // number of threads + + // parallelize by blocks + const int nk = ggml_nelements(src0) / ggml_blck_size(src0->type); + const int dr = (nk + nth - 1) / nth; + const int k0 = dr * ith; + const int k1 = MIN(k0 + dr, nk); + + if (k0 < k1) { + memcpy(((char *) dst->data + k0 * nb0), ((char *) src0->data + k0 * nb0), (k1 - k0) * nb0); + } +} + +template +static void ggml_compute_forward_dup_flt(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); + GGML_ASSERT(!ggml_is_quantized(src0->type) && !ggml_is_quantized(dst->type)); + + GGML_TENSOR_UNARY_OP_LOCALS + + const int ith = params->ith; // thread index + const int nth = params->nth; // number of threads + + // parallelize by rows + const int nr = ne01; + // number of rows per thread + const int dr = (nr + nth - 1) / nth; + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + // case: type & row size equal + if (src0->type == dst->type && ne00 == ne0 && nb00 == ggml_type_size(src0->type) && + nb0 == ggml_type_size(dst->type)) { + // copy by rows + const size_t rs = ne00 * nb00; + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = ir0; i01 < ir1; i01++) { + memcpy(((char *) 
dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3), + ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03), rs); + } + } + } + return; + } + + // case: dst tensor is contiguous + if (ggml_is_contiguous(dst)) { + if (nb00 == sizeof(src_t)) { + if constexpr (std::is_same_v) { + // same type + size_t id = 0; + const size_t rs = ne00 * nb00; + char * dst_ptr = (char *) dst->data; + + for (int i03 = 0; i03 < ne03; i03++) { + for (int i02 = 0; i02 < ne02; i02++) { + id += rs * ir0; + for (int i01 = ir0; i01 < ir1; i01++) { + const char * src0_ptr = (char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03; + memcpy(dst_ptr + id, src0_ptr, rs); + id += rs; + } + id += rs * (ne01 - ir1); + } + } + } else { + // casting between non-quantized types + size_t id = 0; + dst_t * dst_ptr = (dst_t *) dst->data; + + for (int i03 = 0; i03 < ne03; i03++) { + for (int i02 = 0; i02 < ne02; i02++) { + id += ne00 * ir0; + for (int i01 = ir0; i01 < ir1; i01++) { + const src_t * src0_ptr = + (src_t *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); + for (int i00 = 0; i00 < ne00; i00++) { + float tmp = type_conversion_table::to_f32(src0_ptr[i00]); + dst_ptr[id] = type_conversion_table::from_f32(tmp); + id++; + } + } + id += ne00 * (ne01 - ir1); + } + } + } + } else { + //printf("%s: this is not optimal - fix me\n", __func__); + + size_t id = 0; + dst_t * dst_ptr = (dst_t *) dst->data; + + for (int i03 = 0; i03 < ne03; i03++) { + for (int i02 = 0; i02 < ne02; i02++) { + id += ne00 * ir0; + for (int i01 = ir0; i01 < ir1; i01++) { + for (int i00 = 0; i00 < ne00; i00++) { + const src_t * src0_ptr = + (src_t *) ((char *) src0->data + i00 * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03); + + float tmp = type_conversion_table::to_f32(*src0_ptr); + dst_ptr[id] = type_conversion_table::from_f32(tmp); + id++; + } + } + id += ne00 * (ne01 - ir1); + } + } + } + return; + } + + // dst counters + int64_t i10 = 0; + int64_t i11 = 0; + int64_t i12 = 0; + int64_t i13 = 0; + + if constexpr (std::is_same_v) { + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + i10 += ne00 * ir0; + while (i10 >= ne0) { + i10 -= ne0; + if (++i11 == ne1) { + i11 = 0; + if (++i12 == ne2) { + i12 = 0; + if (++i13 == ne3) { + i13 = 0; + } + } + } + } + for (int64_t i01 = ir0; i01 < ir1; i01++) { + for (int64_t i00 = 0; i00 < ne00; i00++) { + const char * src0_ptr = + ((char *) src0->data + i00 * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03); + char * dst_ptr = ((char *) dst->data + i10 * nb0 + i11 * nb1 + i12 * nb2 + i13 * nb3); + + memcpy(dst_ptr, src0_ptr, sizeof(dst_t)); + + if (++i10 == ne00) { + i10 = 0; + if (++i11 == ne01) { + i11 = 0; + if (++i12 == ne02) { + i12 = 0; + if (++i13 == ne03) { + i13 = 0; + } + } + } + } + } + } + i10 += ne00 * (ne01 - ir1); + while (i10 >= ne0) { + i10 -= ne0; + if (++i11 == ne1) { + i11 = 0; + if (++i12 == ne2) { + i12 = 0; + if (++i13 == ne3) { + i13 = 0; + } + } + } + } + } + } + + } else { + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + i10 += ne00 * ir0; + while (i10 >= ne0) { + i10 -= ne0; + if (++i11 == ne1) { + i11 = 0; + if (++i12 == ne2) { + i12 = 0; + if (++i13 == ne3) { + i13 = 0; + } + } + } + } + for (int64_t i01 = ir0; i01 < ir1; i01++) { + for (int64_t i00 = 0; i00 < ne00; i00++) { + const char * src0_ptr = + ((char *) src0->data + i00 * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03); + char * dst_ptr = ((char *) dst->data + i10 * nb0 + i11 * nb1 + i12 * nb2 + i13 * nb3); + + float tmp = 
type_conversion_table::to_f32(*(const src_t *) src0_ptr); + *(dst_t *) dst_ptr = type_conversion_table::from_f32(tmp); + + if (++i10 == ne0) { + i10 = 0; + if (++i11 == ne1) { + i11 = 0; + if (++i12 == ne2) { + i12 = 0; + if (++i13 == ne3) { + i13 = 0; + } + } + } + } + } + } + i10 += ne00 * (ne01 - ir1); + while (i10 >= ne0) { + i10 -= ne0; + if (++i11 == ne1) { + i11 = 0; + if (++i12 == ne2) { + i12 = 0; + if (++i13 == ne3) { + i13 = 0; + } + } + } + } + } + } + } +} + +template +static void ggml_compute_forward_dup_to_q(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); + GGML_ASSERT(!ggml_is_quantized(src0->type)); + + GGML_TENSOR_UNARY_OP_LOCALS + + const int ith = params->ith; // thread index + const int nth = params->nth; // number of threads + + // parallelize by rows + const int nr = ne01; + // number of rows per thread + const int dr = (nr + nth - 1) / nth; + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + if (ggml_is_contiguous(dst) && nb00 == sizeof(src_t) && ggml_get_type_traits_cpu(dst->type)->from_float) { + // casting non-quantized types --> intermediate f32 --> quantized + const ggml_from_float_t quantize_row_q = ggml_get_type_traits_cpu(dst->type)->from_float; + float * src0_f32 = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith; + + size_t id = 0; + size_t rs = nb0 * (ne00 / ggml_blck_size(dst->type)); + char * dst_ptr = (char *) dst->data; + + for (int i03 = 0; i03 < ne03; i03++) { + for (int i02 = 0; i02 < ne02; i02++) { + id += rs * ir0; + for (int i01 = ir0; i01 < ir1; i01++) { + const src_t * src0_ptr = (src_t *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); + + for (int i00 = 0; i00 < ne00; i00++) { + src0_f32[i00] = type_conversion_table::to_f32(src0_ptr[i00]); + } + + quantize_row_q(src0_f32, dst_ptr + id, ne00); + id += rs; + } + id += rs * (ne01 - ir1); + } + } + } else { + // printf("%s %s\n", ggml_type_name(src0->type), ggml_type_name(dst->type)); + GGML_ABORT("not implemented"); + } +} + +// A simplified version of ggml_compute_forward_dup that doesn't do float upcasting, and just plain old memcpy. 
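+// It is only used when src0->type == dst->type (see ggml_compute_forward_dup below), so rows can be
+// copied byte-for-byte without converting any values.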
+static void ggml_compute_forward_dup_bytes(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); + GGML_ASSERT(src0->type == dst->type); + + GGML_TENSOR_UNARY_OP_LOCALS; + + if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst)) { + ggml_compute_forward_dup_same_cont(params, dst); + return; + } + + const size_t type_size = ggml_type_size(src0->type); + + const int ith = params->ith; // thread index + const int nth = params->nth; // number of threads + + // parallelize by rows + const int nr = ne01; + // number of rows per thread + const int dr = (nr + nth - 1) / nth; + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + if (src0->type == dst->type && ggml_are_same_shape(src0, dst) && nb00 == type_size && nb0 == type_size) { + // copy by rows + const size_t rs = ggml_row_size(src0->type, ne00); + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = ir0; i01 < ir1; i01++) { + memcpy(((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3), + ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03), rs); + } + } + } + return; + } + + if (ggml_is_contiguous(dst)) { + size_t id = 0; + char * dst_ptr = (char *) dst->data; + const size_t rs = ne00 * type_size; + + if (nb00 == type_size) { + // src0 is contigous on first dimension, copy by rows + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + id += rs * ir0; + for (int64_t i01 = ir0; i01 < ir1; i01++) { + const char * src0_ptr = (char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03; + memcpy(dst_ptr + id, src0_ptr, rs); + id += rs; + } + id += rs * (ne01 - ir1); + } + } + } else { + //printf("%s: this is not optimal - fix me\n", __func__); + + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + id += rs * ir0; + for (int64_t i01 = ir0; i01 < ir1; i01++) { + for (int64_t i00 = 0; i00 < ne00; i00++) { + const char * src0_ptr = + (char *) src0->data + i00 * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03; + memcpy(dst_ptr + id, src0_ptr, type_size); + + id += type_size; + } + } + id += rs * (ne01 - ir1); + } + } + } + + return; + } + + // dst counters + int64_t k10 = 0; + int64_t i11 = 0; + int64_t i12 = 0; + int64_t i13 = 0; + + // number of blocks in a row + const int64_t nk00 = ne00 / ggml_blck_size(src0->type); + const int64_t nk0 = ne0 / ggml_blck_size(dst->type); + + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + k10 += nk00 * ir0; + while (k10 >= nk0) { + k10 -= nk0; + if (++i11 == ne1) { + i11 = 0; + if (++i12 == ne2) { + i12 = 0; + if (++i13 == ne3) { + i13 = 0; + } + } + } + } + for (int64_t i01 = ir0; i01 < ir1; i01++) { + for (int64_t k00 = 0; k00 < nk00; k00++) { + const char * src0_ptr = ((char *) src0->data + k00 * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03); + char * dst_ptr = ((char *) dst->data + k10 * nb0 + i11 * nb1 + i12 * nb2 + i13 * nb3); + + memcpy(dst_ptr, src0_ptr, type_size); + + if (++k10 == nk0) { + k10 = 0; + if (++i11 == ne1) { + i11 = 0; + if (++i12 == ne2) { + i12 = 0; + if (++i13 == ne3) { + i13 = 0; + } + } + } + } + } + } + k10 += nk00 * (ne01 - ir1); + while (k10 >= nk0) { + k10 -= nk0; + if (++i11 == ne1) { + i11 = 0; + if (++i12 == ne2) { + i12 = 0; + if (++i13 == ne3) { + i13 = 0; + } + } + } + } + } + } +} + +static void ggml_compute_forward_dup_from_q(const 
ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_TENSOR_BINARY_OP_LOCALS + + const ggml_type type = src0->type; + const ggml_to_float_t dequantize_row_q = ggml_get_type_traits(type)->to_float; + + size_t qk = ggml_blck_size(type); + const int64_t nr = ggml_nelements(src1) / qk; + + // destination must be contiguous in the first dimension + GGML_ASSERT(nb10 == ggml_type_size(dst->type)); + // must either have first dimension large enough to hold a row, or fully contiguous + GGML_ASSERT((ne10 % qk) == 0 || ggml_is_contiguous(dst)); + + const int ith = params->ith; + const int nth = params->nth; + + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int64_t ir = ir0; ir < ir1; ++ir) { + uint32_t i = ir * qk; + + const int64_t i03 = i / (ne00 * ne01 * ne02); + const int64_t i02 = (i - i03 * ne00 * ne01 * ne02) / (ne00 * ne01); + const int64_t i01 = (i - i03 * ne00 * ne01 * ne02 - i02 * ne01 * ne00) / ne00; + const int64_t i00 = i - i03 * ne00 * ne01 * ne02 - i02 * ne01 * ne00 - i01 * ne00; + const int64_t x_offset = (i00 / qk) * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03; + + const int64_t i13 = i / (ne10 * ne11 * ne12); + const int64_t i12 = (i - i13 * ne10 * ne11 * ne12) / (ne10 * ne11); + const int64_t i11 = (i - i13 * ne10 * ne11 * ne12 - i12 * ne10 * ne11) / ne10; + const int64_t i10 = i - i13 * ne10 * ne11 * ne12 - i12 * ne10 * ne11 - i11 * ne10; + const int64_t dst_offset = i10 * nb10 + i11 * nb11 + i12 * nb12 + i13 * nb13; + + dequantize_row_q((const void *) ((char *) src0->data + x_offset), (float *) ((char *) dst->data + dst_offset), + qk); + } +} + +void ggml_compute_forward_dup(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + if (src0->type == dst->type) { + ggml_compute_forward_dup_bytes(params, dst); + return; + } + + switch (src0->type) { + case GGML_TYPE_F16: + { + /**/ if (dst->type == GGML_TYPE_F16) { + ggml_compute_forward_dup_flt(params, dst); + } else if (dst->type == GGML_TYPE_BF16) { + ggml_compute_forward_dup_flt(params, dst); + } else if (dst->type == GGML_TYPE_F32) { + ggml_compute_forward_dup_flt(params, dst); + } else { + ggml_compute_forward_dup_to_q(params, dst); + } + } + break; + case GGML_TYPE_BF16: + { + /**/ if (dst->type == GGML_TYPE_F16) { + ggml_compute_forward_dup_flt(params, dst); + } else if (dst->type == GGML_TYPE_BF16) { + ggml_compute_forward_dup_flt(params, dst); + } else if (dst->type == GGML_TYPE_F32) { + ggml_compute_forward_dup_flt(params, dst); + } else { + ggml_compute_forward_dup_to_q(params, dst); + } + } + break; + case GGML_TYPE_F32: + { + /**/ if (dst->type == GGML_TYPE_F16) { + ggml_compute_forward_dup_flt(params, dst); + } else if (dst->type == GGML_TYPE_BF16) { + ggml_compute_forward_dup_flt(params, dst); + } else if (dst->type == GGML_TYPE_F32) { + ggml_compute_forward_dup_flt(params, dst); + } else if (dst->type == GGML_TYPE_I32) { + ggml_compute_forward_dup_flt(params, dst); + } else { + ggml_compute_forward_dup_to_q(params, dst); + } + } + break; + case GGML_TYPE_I32: + { + if (dst->type == GGML_TYPE_F32) { + ggml_compute_forward_dup_flt(params, dst); + } else { + GGML_ABORT("not implemented"); + } + } + break; + default: + { + if (ggml_is_quantized(src0->type) && dst->type == GGML_TYPE_F32) { + ggml_compute_forward_dup_from_q(params, dst); + break; + } + GGML_ABORT("fatal error"); + } + } +} 
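+// Dispatch summary: same-type copies go through ggml_compute_forward_dup_bytes, casts between
+// non-quantized types through ggml_compute_forward_dup_flt<src_t, dst_t>, non-quantized -> quantized
+// through ggml_compute_forward_dup_to_q, and quantized -> F32 through ggml_compute_forward_dup_from_q.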
+ +// ggml_compute_forward_add + +static void ggml_compute_forward_add_q_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst)); + + const int nr = ggml_nrows(src0); + + GGML_TENSOR_BINARY_OP_LOCALS + + const int ith = params->ith; + const int nth = params->nth; + + const ggml_type type = src0->type; + const ggml_type dtype = dst->type; + const ggml_to_float_t dequantize_row_q = ggml_get_type_traits(type)->to_float; + const ggml_from_float_t quantize_row_q = ggml_get_type_traits_cpu(dtype)->from_float; + + // we don't support permuted src0 or src1 + GGML_ASSERT(nb00 == ggml_type_size(type)); + GGML_ASSERT(nb10 == sizeof(float)); + + // dst cannot be transposed or permuted + GGML_ASSERT(nb0 <= nb1); + GGML_ASSERT(nb1 <= nb2); + GGML_ASSERT(nb2 <= nb3); + + GGML_ASSERT(ggml_is_quantized(src0->type)); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + float * wdata = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith; + + for (int ir = ir0; ir < ir1; ++ir) { + // src0 indices + const int i03 = ir / (ne02 * ne01); + const int i02 = (ir - i03 * ne02 * ne01) / ne01; + const int i01 = (ir - i03 * ne02 * ne01 - i02 * ne01); + + // src1 and dst are same shape as src0 => same indices + const int i13 = i03; + const int i12 = i02; + const int i11 = i01; + + const int i3 = i03; + const int i2 = i02; + const int i1 = i01; + + void * src0_row = (void *) ((char *) src0->data + (i01 * nb01 + i02 * nb02 + i03 * nb03)); + float * src1_row = (float *) ((char *) src1->data + (i11 * nb11 + i12 * nb12 + i13 * nb13)); + void * dst_row = (void *) ((char *) dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb3)); + + assert(ne00 % 32 == 0); + + // unquantize row from src0 to temp buffer + dequantize_row_q(src0_row, wdata, ne00); + // add src1 + ggml_vec_acc_f32(ne00, wdata, src1_row); + // quantize row to dst + if (quantize_row_q != NULL) { + quantize_row_q(wdata, dst_row, ne00); + } else { + memcpy(dst_row, wdata, ne0 * nb0); + } + } +} + +void ggml_compute_forward_add(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + case GGML_TYPE_F16: + case GGML_TYPE_BF16: + { + ggml_compute_forward_add_non_quantized(params, dst); + } + break; + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: + case GGML_TYPE_Q5_0: + case GGML_TYPE_Q5_1: + case GGML_TYPE_Q8_0: + case GGML_TYPE_MXFP4: + case GGML_TYPE_Q2_K: + case GGML_TYPE_Q3_K: + case GGML_TYPE_Q4_K: + case GGML_TYPE_Q5_K: + case GGML_TYPE_Q6_K: + case GGML_TYPE_TQ1_0: + case GGML_TYPE_TQ2_0: + case GGML_TYPE_IQ2_XXS: + case GGML_TYPE_IQ2_XS: + case GGML_TYPE_IQ3_XXS: + case GGML_TYPE_IQ1_S: + case GGML_TYPE_IQ1_M: + case GGML_TYPE_IQ4_NL: + case GGML_TYPE_IQ4_XS: + case GGML_TYPE_IQ3_S: + case GGML_TYPE_IQ2_S: + { + ggml_compute_forward_add_q_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_add_id + +static void ggml_compute_forward_add_id_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + const ggml_tensor * src2 = dst->src[2]; + + GGML_ASSERT(dst->type == GGML_TYPE_F32); + GGML_ASSERT(src0->type == 
GGML_TYPE_F32); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT(src2->type == GGML_TYPE_I32); + + GGML_ASSERT(src0->nb[0] == sizeof(float)); + GGML_ASSERT(src1->nb[0] == sizeof(float)); + + const int ith = params->ith; + const int nth = params->nth; + + const int nr = ggml_nrows(src0); + + GGML_TENSOR_TERNARY_OP_LOCALS + + GGML_ASSERT(nb0 == sizeof(float)); + GGML_ASSERT(nb10 == sizeof(float)); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int ir = ir0; ir < ir1; ++ir) { + // src0 indices + const int i3 = ir / (ne2 * ne1); + const int i2 = (ir - i3 * ne2 * ne1) / ne1; + const int i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); + + // src1 indices + const int i11 = *(int32_t *) ((char *) src2->data + i1 * nb20 + i2 * nb21); + + GGML_ASSERT(i11 >= 0 && i11 < ne11); + + ggml_vec_add_f32(ne0, (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1), + (float *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01), + (float *) ((char *) src1->data + i11 * nb11)); + } +} + +void ggml_compute_forward_add_id(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_add_id_f32(params, dst); + } + break; + default: + { + GGML_ABORT("unsupported type for ggml_compute_forward_add_id: %s", ggml_type_name(src0->type)); + } + } +} + +// ggml_compute_forward_add1 + +static void ggml_compute_forward_add1_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + GGML_ASSERT(ggml_is_scalar(src1)); + + const int ith = params->ith; + const int nth = params->nth; + + const int nr = ggml_nrows(src0); + + GGML_TENSOR_UNARY_OP_LOCALS + + GGML_ASSERT(nb0 == sizeof(float)); + GGML_ASSERT(nb00 == sizeof(float)); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int ir = ir0; ir < ir1; ++ir) { + // src0 and dst are same shape => same indices + const int i3 = ir / (ne2 * ne1); + const int i2 = (ir - i3 * ne2 * ne1) / ne1; + const int i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); + +#ifdef GGML_USE_ACCELERATE + GGML_UNUSED(ggml_vec_add1_f32); + + vDSP_vadd((float *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01), 1, + (float *) ((char *) src1->data), 0, (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1), + 1, ne0); +#else + ggml_vec_add1_f32(ne0, (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1), + (float *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01), *(float *) src1->data); +#endif + } +} + +static void ggml_compute_forward_add1_f16_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + GGML_ASSERT(ggml_is_scalar(src1)); + + // scalar to add + const float v = *(float *) src1->data; + + const int ith = params->ith; + const int nth = params->nth; + + const int nr = ggml_nrows(src0); + + GGML_TENSOR_UNARY_OP_LOCALS + + GGML_ASSERT(src0->type == GGML_TYPE_F16); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_F16); + + GGML_ASSERT(nb0 == sizeof(ggml_fp16_t)); + GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); + + // 
rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int ir = ir0; ir < ir1; ++ir) { + // src0 and dst are same shape => same indices + const int i3 = ir / (ne2 * ne1); + const int i2 = (ir - i3 * ne2 * ne1) / ne1; + const int i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); + + ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1); + ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01); + for (int i = 0; i < ne0; i++) { + dst_ptr[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(src0_ptr[i]) + v); + } + } +} + +static void ggml_compute_forward_add1_f16_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + GGML_ASSERT(ggml_is_scalar(src1)); + + // scalar to add + const float v = GGML_CPU_FP16_TO_FP32(*(ggml_fp16_t *) src1->data); + + const int ith = params->ith; + const int nth = params->nth; + + const int nr = ggml_nrows(src0); + + GGML_TENSOR_UNARY_OP_LOCALS + + GGML_ASSERT(src0->type == GGML_TYPE_F16); + GGML_ASSERT(src1->type == GGML_TYPE_F16); + GGML_ASSERT(dst->type == GGML_TYPE_F16); + + GGML_ASSERT(nb0 == sizeof(ggml_fp16_t)); + GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int ir = ir0; ir < ir1; ++ir) { + // src0 and dst are same shape => same indices + const int i3 = ir / (ne2 * ne1); + const int i2 = (ir - i3 * ne2 * ne1) / ne1; + const int i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); + + ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1); + ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01); + for (int i = 0; i < ne0; i++) { + dst_ptr[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(src0_ptr[i]) + v); + } + } +} + +static void ggml_compute_forward_add1_q_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + GGML_ASSERT(ggml_is_scalar(src1)); + + // scalar to add + const float v = *(float *) src1->data; + + const int ith = params->ith; + const int nth = params->nth; + + const int nr = ggml_nrows(src0); + + GGML_TENSOR_UNARY_OP_LOCALS + + const ggml_type type = src0->type; + const ggml_to_float_t dequantize_row_q = ggml_get_type_traits(type)->to_float; + const ggml_from_float_t quantize_row_q = ggml_get_type_traits_cpu(type)->from_float; + + // we don't support permuted src0 + GGML_ASSERT(nb00 == ggml_type_size(type)); + + // dst cannot be transposed or permuted + GGML_ASSERT(nb0 <= nb1); + GGML_ASSERT(nb1 <= nb2); + GGML_ASSERT(nb2 <= nb3); + + GGML_ASSERT(ggml_is_quantized(src0->type)); + GGML_ASSERT(dst->type == src0->type); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + float * wdata = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32) * ith; + + for (int ir = ir0; ir < ir1; ++ir) { + // src0 and dst are same shape => same indices + const int i3 = ir / (ne2 * ne1); + const int i2 = (ir - i3 * ne2 * ne1) / ne1; 
+ const int i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); + + void * src0_row = (void *) ((char *) src0->data + (i1 * nb01 + i2 * nb02 + i3 * nb03)); + void * dst_row = (void *) ((char *) dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb0)); + + assert(ne0 % 32 == 0); + + // unquantize row from src0 to temp buffer + dequantize_row_q(src0_row, wdata, ne0); + // add src1 + ggml_vec_acc1_f32(ne0, wdata, v); + // quantize row to dst + quantize_row_q(wdata, dst_row, ne0); + } +} + +static void ggml_compute_forward_add1_bf16_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + GGML_ASSERT(ggml_is_scalar(src1)); + + // scalar to add + const float v = *(float *) src1->data; + + const int ith = params->ith; + const int nth = params->nth; + + const int nr = ggml_nrows(src0); + + GGML_TENSOR_UNARY_OP_LOCALS + + GGML_ASSERT(src0->type == GGML_TYPE_BF16); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_BF16); + + GGML_ASSERT(nb0 == sizeof(ggml_bf16_t)); + GGML_ASSERT(nb00 == sizeof(ggml_bf16_t)); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int ir = ir0; ir < ir1; ++ir) { + // src0 and dst are same shape => same indices + const int i3 = ir / (ne2 * ne1); + const int i2 = (ir - i3 * ne2 * ne1) / ne1; + const int i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); + + ggml_bf16_t * dst_ptr = (ggml_bf16_t *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1); + ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01); + for (int i = 0; i < ne0; i++) { + dst_ptr[i] = GGML_FP32_TO_BF16(GGML_BF16_TO_FP32(src0_ptr[i]) + v); + } + } +} + +static void ggml_compute_forward_add1_bf16_bf16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + GGML_ASSERT(ggml_is_scalar(src1)); + + // scalar to add + const float v = GGML_BF16_TO_FP32(*(ggml_bf16_t *) src1->data); + + const int ith = params->ith; + const int nth = params->nth; + + const int nr = ggml_nrows(src0); + + GGML_TENSOR_UNARY_OP_LOCALS + + GGML_ASSERT(src0->type == GGML_TYPE_BF16); + GGML_ASSERT(src1->type == GGML_TYPE_BF16); + GGML_ASSERT(dst->type == GGML_TYPE_BF16); + + GGML_ASSERT(nb0 == sizeof(ggml_bf16_t)); + GGML_ASSERT(nb00 == sizeof(ggml_bf16_t)); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int ir = ir0; ir < ir1; ++ir) { + // src0 and dst are same shape => same indices + const int i3 = ir / (ne2 * ne1); + const int i2 = (ir - i3 * ne2 * ne1) / ne1; + const int i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); + + ggml_bf16_t * dst_ptr = (ggml_bf16_t *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1); + ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01); + for (int i = 0; i < ne0; i++) { + dst_ptr[i] = GGML_FP32_TO_BF16(GGML_BF16_TO_FP32(src0_ptr[i]) + v); + } + } +} + +void ggml_compute_forward_add1(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_add1_f32(params, dst); + 
} + break; + case GGML_TYPE_F16: + { + if (src1->type == GGML_TYPE_F16) { + ggml_compute_forward_add1_f16_f16(params, dst); + } else if (src1->type == GGML_TYPE_F32) { + ggml_compute_forward_add1_f16_f32(params, dst); + } else { + GGML_ABORT("fatal error"); + } + } + break; + case GGML_TYPE_BF16: + { + if (src1->type == GGML_TYPE_BF16) { + ggml_compute_forward_add1_bf16_bf16(params, dst); + } else if (src1->type == GGML_TYPE_F32) { + ggml_compute_forward_add1_bf16_f32(params, dst); + } else { + GGML_ABORT("fatal error"); + } + } + break; + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: + case GGML_TYPE_Q5_0: + case GGML_TYPE_Q5_1: + case GGML_TYPE_Q8_0: + case GGML_TYPE_Q8_1: + case GGML_TYPE_MXFP4: + case GGML_TYPE_Q2_K: + case GGML_TYPE_Q3_K: + case GGML_TYPE_Q4_K: + case GGML_TYPE_Q5_K: + case GGML_TYPE_Q6_K: + case GGML_TYPE_TQ1_0: + case GGML_TYPE_TQ2_0: + case GGML_TYPE_IQ2_XXS: + case GGML_TYPE_IQ2_XS: + case GGML_TYPE_IQ3_XXS: + case GGML_TYPE_IQ1_S: + case GGML_TYPE_IQ1_M: + case GGML_TYPE_IQ4_NL: + case GGML_TYPE_IQ4_XS: + case GGML_TYPE_IQ3_S: + case GGML_TYPE_IQ2_S: + { + ggml_compute_forward_add1_q_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_acc + +static void ggml_compute_forward_acc_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0)); + + // view src0 and dst with these strides and data offset inbytes during acc + // nb0 is implicitly element_size because src0 and dst are contiguous + size_t nb1 = ((int32_t *) dst->op_params)[0]; + size_t nb2 = ((int32_t *) dst->op_params)[1]; + size_t nb3 = ((int32_t *) dst->op_params)[2]; + size_t offset = ((int32_t *) dst->op_params)[3]; + bool inplace = (bool) ((int32_t *) dst->op_params)[4]; + + if (!inplace) { + if (params->ith == 0) { + // memcpy needs to be synchronized across threads to avoid race conditions. + // => do it in INIT phase + memcpy(((char *) dst->data), ((char *) src0->data), ggml_nbytes(dst)); + } + ggml_barrier(params->threadpool); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nr = ggml_nrows(src1); + const int nc = src1->ne[0]; + + GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) + GGML_TENSOR_LOCALS(size_t, nb1, src1, nb) + + // src0 and dst as viewed during acc + const size_t nb0 = ggml_element_size(src0); + + const size_t nb00 = nb0; + const size_t nb01 = nb1; + const size_t nb02 = nb2; + const size_t nb03 = nb3; + + GGML_ASSERT(offset + (ne10 == 0 ? 0 : ne10 - 1) * nb0 + (ne11 == 0 ? 0 : ne11 - 1) * nb1 + + (ne12 == 0 ? 0 : ne12 - 1) * nb2 + (ne13 == 0 ? 0 : ne13 - 1) * nb3 < + ggml_nbytes(dst)); + GGML_ASSERT(offset + (ne10 == 0 ? 0 : ne10 - 1) * nb00 + (ne11 == 0 ? 0 : ne11 - 1) * nb01 + + (ne12 == 0 ? 0 : ne12 - 1) * nb02 + (ne13 == 0 ? 
0 : ne13 - 1) * nb03 < + ggml_nbytes(src0)); + + GGML_ASSERT(nb10 == sizeof(float)); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int ir = ir0; ir < ir1; ++ir) { + // src0 and dst are viewed with shape of src1 and offset + // => same indices + const int i3 = ir / (ne12 * ne11); + const int i2 = (ir - i3 * ne12 * ne11) / ne11; + const int i1 = (ir - i3 * ne12 * ne11 - i2 * ne11); + +#ifdef GGML_USE_ACCELERATE + vDSP_vadd((float *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01 + offset), 1, + (float *) ((char *) src1->data + i3 * nb13 + i2 * nb12 + i1 * nb11), 1, + (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1 + offset), 1, nc); +#else + ggml_vec_add_f32(nc, (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1 + offset), + (float *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01 + offset), + (float *) ((char *) src1->data + i3 * nb13 + i2 * nb12 + i1 * nb11)); +#endif + } +} + +void ggml_compute_forward_acc(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_acc_f32(params, dst); + } + break; + case GGML_TYPE_F16: + case GGML_TYPE_BF16: + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: + case GGML_TYPE_Q5_0: + case GGML_TYPE_Q5_1: + case GGML_TYPE_Q8_0: + case GGML_TYPE_Q8_1: + case GGML_TYPE_MXFP4: + case GGML_TYPE_Q2_K: + case GGML_TYPE_Q3_K: + case GGML_TYPE_Q4_K: + case GGML_TYPE_Q5_K: + case GGML_TYPE_Q6_K: + case GGML_TYPE_TQ1_0: + case GGML_TYPE_TQ2_0: + case GGML_TYPE_IQ2_XXS: + case GGML_TYPE_IQ2_XS: + case GGML_TYPE_IQ3_XXS: + case GGML_TYPE_IQ1_S: + case GGML_TYPE_IQ1_M: + case GGML_TYPE_IQ4_NL: + case GGML_TYPE_IQ4_XS: + case GGML_TYPE_IQ3_S: + case GGML_TYPE_IQ2_S: + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_sum + +static void ggml_compute_forward_sum_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + if (params->ith != 0) { + return; + } + + assert(ggml_is_scalar(dst)); + assert(src0->nb[0] == sizeof(float)); + + GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) + GGML_TENSOR_LOCALS(size_t, nb0, src0, nb) + + ggml_float sum = 0; + ggml_float row_sum = 0; + + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = 0; i01 < ne01; i01++) { + ggml_vec_sum_f32_ggf(ne00, &row_sum, + (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03)); + sum += row_sum; + } + } + } + ((float *) dst->data)[0] = sum; +} + +static void ggml_compute_forward_sum_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + if (params->ith != 0) { + return; + } + + assert(ggml_is_scalar(dst)); + + assert(src0->nb[0] == sizeof(ggml_fp16_t)); + + GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) + GGML_TENSOR_LOCALS(size_t, nb0, src0, nb) + + float sum = 0; + float row_sum = 0; + + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = 0; i01 < ne01; i01++) { + ggml_vec_sum_f16_ggf(ne00, &row_sum, + (ggml_fp16_t *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03)); + sum += row_sum; + } + } + } + ((ggml_fp16_t *) dst->data)[0] = GGML_CPU_FP32_TO_FP16(sum); +} + +static void ggml_compute_forward_sum_bf16(const ggml_compute_params * params, ggml_tensor * dst) { + 
const ggml_tensor * src0 = dst->src[0]; + + if (params->ith != 0) { + return; + } + + assert(ggml_is_scalar(dst)); + + assert(src0->nb[0] == sizeof(ggml_bf16_t)); + + GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) + GGML_TENSOR_LOCALS(size_t, nb0, src0, nb) + + float sum = 0; + float row_sum = 0; + + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = 0; i01 < ne01; i01++) { + ggml_vec_sum_bf16_ggf(ne00, &row_sum, + (ggml_bf16_t *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03)); + sum += row_sum; + } + } + } + ((ggml_bf16_t *) dst->data)[0] = GGML_FP32_TO_BF16(sum); +} + +void ggml_compute_forward_sum(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_sum_f32(params, dst); + } + break; + case GGML_TYPE_F16: + { + ggml_compute_forward_sum_f16(params, dst); + } + break; + case GGML_TYPE_BF16: + { + ggml_compute_forward_sum_bf16(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_cumsum + +static void ggml_compute_forward_cumsum_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + GGML_ASSERT(src0->nb[0] == sizeof(float)); + GGML_ASSERT(dst->nb[0] == sizeof(float)); + + GGML_TENSOR_UNARY_OP_LOCALS + + GGML_ASSERT(ne0 == ne00); + GGML_ASSERT(ne1 == ne01); + GGML_ASSERT(ne2 == ne02); + GGML_ASSERT(ne3 == ne03); + + const auto [ir0, ir1] = get_thread_range(params, src0); + + for (int64_t ir = ir0; ir < ir1; ++ir) { + const int64_t i03 = ir / (ne02 * ne01); + const int64_t i02 = (ir - i03 * ne02 * ne01) / ne01; + const int64_t i01 = (ir - i03 * ne02 * ne01 - i02 * ne01); + + float * src_row = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); + float * dst_row = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); + + ggml_vec_cumsum_f32(ne00, dst_row, src_row); + } +} + +void ggml_compute_forward_cumsum(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_cumsum_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_sum_rows + +static void ggml_compute_forward_sum_rows_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + if (params->ith != 0) { + return; + } + + GGML_ASSERT(src0->nb[0] == sizeof(float)); + GGML_ASSERT(dst->nb[0] == sizeof(float)); + + GGML_TENSOR_UNARY_OP_LOCALS + + GGML_ASSERT(ne0 == 1); + GGML_ASSERT(ne1 == ne01); + GGML_ASSERT(ne2 == ne02); + GGML_ASSERT(ne3 == ne03); + + for (int64_t i3 = 0; i3 < ne03; i3++) { + for (int64_t i2 = 0; i2 < ne02; i2++) { + for (int64_t i1 = 0; i1 < ne01; i1++) { + float * src_row = (float *) ((char *) src0->data + i1 * nb01 + i2 * nb02 + i3 * nb03); + float * dst_row = (float *) ((char *) dst->data + i1 * nb1 + i2 * nb2 + i3 * nb3); + float row_sum = 0; + ggml_vec_sum_f32(ne00, &row_sum, src_row); + dst_row[0] = row_sum; + } + } + } +} + +void ggml_compute_forward_sum_rows(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_sum_rows_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_mean + +static void 
ggml_compute_forward_mean_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + if (params->ith != 0) { + return; + } + + assert(src0->nb[0] == sizeof(float)); + + GGML_TENSOR_UNARY_OP_LOCALS + + assert(ne0 == 1); + assert(ne1 == ne01); + assert(ne2 == ne02); + assert(ne3 == ne03); + + GGML_UNUSED(ne0); + GGML_UNUSED(ne1); + GGML_UNUSED(ne2); + GGML_UNUSED(ne3); + + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = 0; i01 < ne01; i01++) { + ggml_vec_sum_f32(ne00, (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3), + (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03)); + + *(float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3) /= (float) ne00; + } + } + } +} + +void ggml_compute_forward_mean(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_mean_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_argmax + +static void ggml_compute_forward_argmax_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + if (params->ith != 0) { + return; + } + + assert(src0->nb[0] == sizeof(float)); + assert(dst->nb[0] == sizeof(float)); + + const int64_t ne00 = src0->ne[0]; + const int64_t ne01 = src0->ne[1]; + + const size_t nb01 = src0->nb[1]; + const size_t nb0 = dst->nb[0]; + + for (int64_t i1 = 0; i1 < ne01; i1++) { + float * src = (float *) ((char *) src0->data + i1 * nb01); + int32_t * dst_ = (int32_t *) ((char *) dst->data + i1 * nb0); + int v = 0; + ggml_vec_argmax_f32(ne00, &v, src); + dst_[0] = v; + } +} + +void ggml_compute_forward_argmax(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_argmax_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_count_equal + +static void ggml_compute_forward_count_equal_i32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_TENSOR_BINARY_OP_LOCALS; + + GGML_ASSERT(src0->type == GGML_TYPE_I32); + GGML_ASSERT(src1->type == GGML_TYPE_I32); + GGML_ASSERT(ggml_are_same_shape(src0, src1)); + GGML_ASSERT(ggml_is_scalar(dst)); + GGML_ASSERT(dst->type == GGML_TYPE_I64); + + const int64_t nr = ggml_nrows(src0); + + const int ith = params->ith; + const int nth = params->nth; + + int64_t * sums = (int64_t *) params->wdata; + int64_t sum_thread = 0; + + // rows per thread + const int64_t dr = (nr + nth - 1) / nth; + + // row range for this thread + const int64_t ir0 = dr * ith; + const int64_t ir1 = MIN(ir0 + dr, nr); + + for (int64_t ir = ir0; ir < ir1; ++ir) { + const int64_t i03 = ir / (ne02 * ne01); + const int64_t i02 = (ir - i03 * ne03) / ne01; + const int64_t i01 = ir - i03 * ne03 - i02 * ne02; + + const char * data0 = (const char *) src0->data + i03 * nb03 + i02 * nb02 + i01 * nb01; + const char * data1 = (const char *) src1->data + i03 * nb13 + i02 * nb12 + i01 * nb11; + + for (int64_t i00 = 0; i00 < ne00; ++i00) { + const int32_t val0 = *((const int32_t *) (data0 + i00 * nb00)); + const int32_t val1 = *((const int32_t *) (data1 + i00 * nb10)); + + sum_thread += val0 == val1; + } + } + if (ith != 0) 
{ + sums[ith] = sum_thread; + } + ggml_barrier(params->threadpool); + + if (ith != 0) { + return; + } + + for (int ith_other = 1; ith_other < nth; ++ith_other) { + sum_thread += sums[ith_other]; + } + *((int64_t *) dst->data) = sum_thread; +} + +void ggml_compute_forward_count_equal(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_I32: + { + ggml_compute_forward_count_equal_i32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_repeat + +static void ggml_compute_forward_repeat_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + if (params->ith != 0) { + return; + } + + GGML_ASSERT(ggml_can_repeat(src0, dst)); + + GGML_TENSOR_UNARY_OP_LOCALS + + // guaranteed to be an integer due to the check in ggml_can_repeat + const int nr0 = (int) (ne0 / ne00); + const int nr1 = (int) (ne1 / ne01); + const int nr2 = (int) (ne2 / ne02); + const int nr3 = (int) (ne3 / ne03); + + // TODO: support for transposed / permuted tensors + GGML_ASSERT(nb0 == sizeof(float)); + GGML_ASSERT(nb00 == sizeof(float)); + + // TODO: maybe this is not optimal? + for (int i3 = 0; i3 < nr3; i3++) { + for (int k3 = 0; k3 < ne03; k3++) { + for (int i2 = 0; i2 < nr2; i2++) { + for (int k2 = 0; k2 < ne02; k2++) { + for (int i1 = 0; i1 < nr1; i1++) { + for (int k1 = 0; k1 < ne01; k1++) { + for (int i0 = 0; i0 < nr0; i0++) { + ggml_vec_cpy_f32( + ne00, + (float *) ((char *) dst->data + (i3 * ne03 + k3) * nb3 + (i2 * ne02 + k2) * nb2 + + (i1 * ne01 + k1) * nb1 + (i0 * ne00) * nb0), + (float *) ((char *) src0->data + (k3) *nb03 + (k2) *nb02 + (k1) *nb01)); + } + } + } + } + } + } + } +} + +static void ggml_compute_forward_repeat_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + if (params->ith != 0) { + return; + } + + GGML_ASSERT(ggml_can_repeat(src0, dst)); + + GGML_TENSOR_UNARY_OP_LOCALS + + // guaranteed to be an integer due to the check in ggml_can_repeat + const int nr0 = (int) (ne0 / ne00); + const int nr1 = (int) (ne1 / ne01); + const int nr2 = (int) (ne2 / ne02); + const int nr3 = (int) (ne3 / ne03); + + // TODO: support for transposed / permuted tensors + GGML_ASSERT(nb0 == sizeof(ggml_fp16_t)); + GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); + + // TODO: maybe this is not optimal? 
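+    // nr0..nr3 are the tile counts per dimension (dst->ne / src0->ne); k1..k3 index the source
+    // rows, and each source row of ne00 values is copied into nr0 positions along dim 0.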
+ for (int i3 = 0; i3 < nr3; i3++) { + for (int k3 = 0; k3 < ne03; k3++) { + for (int i2 = 0; i2 < nr2; i2++) { + for (int k2 = 0; k2 < ne02; k2++) { + for (int i1 = 0; i1 < nr1; i1++) { + for (int k1 = 0; k1 < ne01; k1++) { + for (int i0 = 0; i0 < nr0; i0++) { + ggml_fp16_t * y = (ggml_fp16_t *) ((char *) dst->data + (i3 * ne03 + k3) * nb3 + + (i2 * ne02 + k2) * nb2 + (i1 * ne01 + k1) * nb1 + + (i0 * ne00) * nb0); + ggml_fp16_t * x = + (ggml_fp16_t *) ((char *) src0->data + (k3) *nb03 + (k2) *nb02 + (k1) *nb01); + // ggml_vec_cpy_f16(ne00, y, x) + for (int i = 0; i < ne00; ++i) { + y[i] = x[i]; + } + } + } + } + } + } + } + } +} + +void ggml_compute_forward_repeat(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F16: + case GGML_TYPE_BF16: + case GGML_TYPE_I16: + { + ggml_compute_forward_repeat_f16(params, dst); + } + break; + case GGML_TYPE_F32: + case GGML_TYPE_I32: + { + ggml_compute_forward_repeat_f32(params, dst); + } + break; + // TODO: templateify the implemenation and support for I64 + // ref https://github.com/ggml-org/llama.cpp/pull/14274#discussion_r2169492225 + //case GGML_TYPE_I64: + // { + // ggml_compute_forward_repeat_i64(params, dst); + // } break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_repeat_back + +static void ggml_compute_forward_repeat_back_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + if (params->ith != 0) { + return; + } + + GGML_ASSERT(ggml_can_repeat(dst, src0)); + + GGML_TENSOR_UNARY_OP_LOCALS + + // guaranteed to be an integer due to the check in ggml_can_repeat + const int nr0 = (int) (ne00 / ne0); + const int nr1 = (int) (ne01 / ne1); + const int nr2 = (int) (ne02 / ne2); + const int nr3 = (int) (ne03 / ne3); + + // TODO: support for transposed / permuted tensors + GGML_ASSERT(nb0 == sizeof(float)); + GGML_ASSERT(nb00 == sizeof(float)); + + if (ggml_is_contiguous(dst)) { + ggml_vec_set_f32(ne0 * ne1 * ne2 * ne3, (float *) dst->data, 0); + } else { + for (int k3 = 0; k3 < ne3; k3++) { + for (int k2 = 0; k2 < ne2; k2++) { + for (int k1 = 0; k1 < ne1; k1++) { + ggml_vec_set_f32(ne0, (float *) ((char *) dst->data + k1 * nb1 + k2 * nb2 + k3 * nb3), 0); + } + } + } + } + + // TODO: maybe this is not optimal? 
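+    // reverse of repeat: every repeated tile of src0 (the incoming gradient) is accumulated back
+    // into the corresponding row of the smaller dst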
+ for (int i3 = 0; i3 < nr3; i3++) { + for (int k3 = 0; k3 < ne3; k3++) { + for (int i2 = 0; i2 < nr2; i2++) { + for (int k2 = 0; k2 < ne2; k2++) { + for (int i1 = 0; i1 < nr1; i1++) { + for (int k1 = 0; k1 < ne1; k1++) { + for (int i0 = 0; i0 < nr0; i0++) { + ggml_vec_acc_f32( + ne0, (float *) ((char *) dst->data + (k3) *nb3 + (k2) *nb2 + (k1) *nb1), + (float *) ((char *) src0->data + (i3 * ne3 + k3) * nb03 + (i2 * ne2 + k2) * nb02 + + (i1 * ne1 + k1) * nb01 + (i0 * ne0) * nb00)); + } + } + } + } + } + } + } +} + +void ggml_compute_forward_repeat_back(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_repeat_back_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_concat + +static void ggml_compute_forward_concat_any(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + const size_t len = ggml_type_size(src0->type); + + const int ith = params->ith; + const int nth = params->nth; + + GGML_TENSOR_BINARY_OP_LOCALS + + const int32_t dim = ggml_get_op_params_i32(dst, 0); + + GGML_ASSERT(dim >= 0 && dim < 4); + + int64_t o[4] = { 0, 0, 0, 0 }; + o[dim] = src0->ne[dim]; + + const char * x; + + // TODO: smarter multi-theading + for (int i3 = 0; i3 < ne3; i3++) { + for (int i2 = ith; i2 < ne2; i2 += nth) { + for (int i1 = 0; i1 < ne1; i1++) { + for (int i0 = 0; i0 < ne0; i0++) { + if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 < ne03) { + x = (const char *) src0->data + (i0) *nb00 + (i1) *nb01 + (i2) *nb02 + (i3) *nb03; + } else { + x = (const char *) src1->data + (i0 - o[0]) * nb10 + (i1 - o[1]) * nb11 + (i2 - o[2]) * nb12 + + (i3 - o[3]) * nb13; + } + + char * y = (char *) dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3; + + memcpy(y, x, len); + } + } + } + } +} + +static void ggml_compute_forward_concat_i8(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_type_size(src0->type) == sizeof(int8_t)); + + const int ith = params->ith; + const int nth = params->nth; + + GGML_TENSOR_BINARY_OP_LOCALS + + const int32_t dim = ggml_get_op_params_i32(dst, 0); + + GGML_ASSERT(dim >= 0 && dim < 4); + + int64_t o[4] = { 0, 0, 0, 0 }; + o[dim] = src0->ne[dim]; + + const int8_t * x; + + // TODO: smarter multi-theading + for (int i3 = 0; i3 < ne3; i3++) { + for (int i2 = ith; i2 < ne2; i2 += nth) { + for (int i1 = 0; i1 < ne1; i1++) { + for (int i0 = 0; i0 < ne0; i0++) { + if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 < ne03) { + x = (const int8_t *) ((const char *) src0->data + (i0) *nb00 + (i1) *nb01 + (i2) *nb02 + + (i3) *nb03); + } else { + x = (const int8_t *) ((const char *) src1->data + (i0 - o[0]) * nb10 + (i1 - o[1]) * nb11 + + (i2 - o[2]) * nb12 + (i3 - o[3]) * nb13); + } + + int8_t * y = (int8_t *) ((char *) dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3); + + *y = *x; + } + } + } + } +} + +static void ggml_compute_forward_concat_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_type_size(src0->type) == sizeof(ggml_fp16_t)); + + const int ith = params->ith; + const int nth = params->nth; + + GGML_TENSOR_BINARY_OP_LOCALS + + const int32_t dim = ggml_get_op_params_i32(dst, 0); + + GGML_ASSERT(dim >= 
0 && dim < 4); + + int64_t o[4] = { 0, 0, 0, 0 }; + o[dim] = src0->ne[dim]; + + const ggml_fp16_t * x; + + // TODO: smarter multi-theading + for (int i3 = 0; i3 < ne3; i3++) { + for (int i2 = ith; i2 < ne2; i2 += nth) { + for (int i1 = 0; i1 < ne1; i1++) { + for (int i0 = 0; i0 < ne0; i0++) { + if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 < ne03) { + x = (const ggml_fp16_t *) ((const char *) src0->data + (i0) *nb00 + (i1) *nb01 + (i2) *nb02 + + (i3) *nb03); + } else { + x = (const ggml_fp16_t *) ((const char *) src1->data + (i0 - o[0]) * nb10 + (i1 - o[1]) * nb11 + + (i2 - o[2]) * nb12 + (i3 - o[3]) * nb13); + } + + ggml_fp16_t * y = (ggml_fp16_t *) ((char *) dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3); + + *y = *x; + } + } + } + } +} + +static void ggml_compute_forward_concat_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_type_size(src0->type) == sizeof(float)); + + const int ith = params->ith; + const int nth = params->nth; + + GGML_TENSOR_BINARY_OP_LOCALS + + const int32_t dim = ggml_get_op_params_i32(dst, 0); + + GGML_ASSERT(dim >= 0 && dim < 4); + + int64_t o[4] = { 0, 0, 0, 0 }; + o[dim] = src0->ne[dim]; + + const float * x; + + // TODO: smarter multi-theading + for (int i3 = 0; i3 < ne3; i3++) { + for (int i2 = ith; i2 < ne2; i2 += nth) { + for (int i1 = 0; i1 < ne1; i1++) { + for (int i0 = 0; i0 < ne0; i0++) { + if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 < ne03) { + x = (const float *) ((const char *) src0->data + (i0) *nb00 + (i1) *nb01 + (i2) *nb02 + + (i3) *nb03); + } else { + x = (const float *) ((const char *) src1->data + (i0 - o[0]) * nb10 + (i1 - o[1]) * nb11 + + (i2 - o[2]) * nb12 + (i3 - o[3]) * nb13); + } + + float * y = (float *) ((char *) dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3); + + *y = *x; + } + } + } + } +} + +void ggml_compute_forward_concat(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F16: + case GGML_TYPE_BF16: + case GGML_TYPE_I16: + { + ggml_compute_forward_concat_f16(params, dst); + } + break; + case GGML_TYPE_I8: + { + ggml_compute_forward_concat_i8(params, dst); + } + break; + case GGML_TYPE_F32: + case GGML_TYPE_I32: + { + ggml_compute_forward_concat_f32(params, dst); + } + break; + default: + { + ggml_compute_forward_concat_any(params, dst); + } + } +} + +// ggml_compute_forward_gelu + +static void ggml_compute_forward_gelu_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + assert(ggml_is_contiguous_1(src0)); + assert(ggml_is_contiguous_1(dst)); + assert(ggml_are_same_shape(src0, dst)); + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src0->ne[0]; + const int nr = ggml_nrows(src0); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_vec_gelu_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), + (float *) ((char *) src0->data + i1 * (src0->nb[1]))); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + GGML_UNUSED(x); + assert(!isnan(x)); + assert(!isinf(x)); + } +#endif + } +} + +static void ggml_compute_forward_gelu_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const 
ggml_tensor * src0 = dst->src[0]; + + assert(ggml_is_contiguous_1(src0)); + assert(ggml_is_contiguous_1(dst)); + assert(ggml_are_same_shape(src0, dst)); + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src0->ne[0]; + const int nr = ggml_nrows(src0); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_vec_gelu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), + (ggml_fp16_t *) ((char *) src0->data + i1 * (src0->nb[1]))); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + const float v = GGML_CPU_FP16_TO_FP32(x); + GGML_UNUSED(v); + assert(!isnan(v)); + assert(!isinf(v)); + } +#endif + } +} + +static void ggml_compute_forward_gelu(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_gelu_f32(params, dst); + } + break; + case GGML_TYPE_F16: + { + ggml_compute_forward_gelu_f16(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_fill + +static void ggml_compute_forward_fill_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const float c = ggml_get_op_params_f32(dst, 0); + + GGML_TENSOR_LOCALS(int64_t, ne, dst, ne); + GGML_TENSOR_LOCALS(size_t, nb, dst, nb); + + const auto [ir0, ir1] = get_thread_range(params, dst); + + for (int64_t ir = ir0; ir < ir1; ++ir) { + const int64_t i03 = ir / (ne2 * ne1); + const int64_t i02 = (ir - i03 * ne2 * ne1) / ne1; + const int64_t i01 = (ir - i03 * ne2 * ne1 - i02 * ne1); + + float * dst_ptr = (float *) ((char *) dst->data + i03 * nb3 + i02 * nb2 + i01 * nb1); + + ggml_vec_set_f32(ne0, dst_ptr, c); + } +} + +void ggml_compute_forward_fill(const ggml_compute_params * params, ggml_tensor * dst) { + ggml_compute_forward_fill_f32(params, dst); +} + +// ggml_compute_tri + +static void ggml_compute_forward_tri_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + const ggml_tri_type ttype = (ggml_tri_type) ggml_get_op_params_i32(dst, 0); + + GGML_ASSERT(ggml_is_contiguous(src0)); + + GGML_TENSOR_UNARY_OP_LOCALS + + const auto [ir0, ir1] = get_thread_range(params, src0); + + bool (*bipred)(int, int); + + switch (ttype) { + case GGML_TRI_TYPE_LOWER: + bipred = [](int i, int r) { + return i < r; + }; + break; + case GGML_TRI_TYPE_LOWER_DIAG: + bipred = [](int i, int r) { + return i <= r; + }; + break; + case GGML_TRI_TYPE_UPPER: + bipred = [](int i, int r) { + return i > r; + }; + break; + case GGML_TRI_TYPE_UPPER_DIAG: + bipred = [](int i, int r) { + return i >= r; + }; + break; + default: + GGML_ABORT("invalid tri type"); + } + + for (int64_t ir = ir0; ir < ir1; ++ir) { + const int64_t i03 = ir / (ne02 * ne01); + const int64_t i02 = (ir - i03 * ne02 * ne01) / ne01; + const int64_t i01 = (ir - i03 * ne02 * ne01 - i02 * ne01); + + const float * src_ptr = (const float *) ((const char *) src0->data + i03 * nb03 + i02 * nb02 + i01 * nb01); + float * dst_ptr = (float *) ((char *) dst->data + i03 * nb3 + i02 * nb2 + i01 * nb1); + + for (int i0 = 0; i0 < ne0; ++i0) { + dst_ptr[i0] = bipred(i0, i01) ? 
src_ptr[i0] : 0.0f; + } + } +} + +void ggml_compute_forward_tri(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_tri_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_gelu_erf + +static void ggml_compute_forward_gelu_erf_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + assert(ggml_is_contiguous_1(src0)); + assert(ggml_is_contiguous_1(dst)); + assert(ggml_are_same_shape(src0, dst)); + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src0->ne[0]; + const int nr = ggml_nrows(src0); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_vec_gelu_erf_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), + (float *) ((char *) src0->data + i1 * (src0->nb[1]))); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + GGML_UNUSED(x); + assert(!isnan(x)); + assert(!isinf(x)); + } +#endif + } +} + +static void ggml_compute_forward_gelu_erf_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + assert(ggml_is_contiguous_1(src0)); + assert(ggml_is_contiguous_1(dst)); + assert(ggml_are_same_shape(src0, dst)); + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src0->ne[0]; + const int nr = ggml_nrows(src0); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_vec_gelu_erf_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), + (ggml_fp16_t *) ((char *) src0->data + i1 * (src0->nb[1]))); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + const float v = GGML_CPU_FP16_TO_FP32(x); + GGML_UNUSED(v); + assert(!isnan(v)); + assert(!isinf(v)); + } +#endif + } +} + +static void ggml_compute_forward_gelu_erf(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_gelu_erf_f32(params, dst); + } + break; + case GGML_TYPE_F16: + { + ggml_compute_forward_gelu_erf_f16(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_gelu_quick + +static void ggml_compute_forward_gelu_quick_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + assert(ggml_is_contiguous_1(src0)); + assert(ggml_is_contiguous_1(dst)); + assert(ggml_are_same_shape(src0, dst)); + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src0->ne[0]; + const int nr = ggml_nrows(src0); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_vec_gelu_quick_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), + (float *) ((char *) src0->data + i1 * (src0->nb[1]))); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const 
float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + GGML_UNUSED(x); + assert(!isnan(x)); + assert(!isinf(x)); + } +#endif + } +} + +static void ggml_compute_forward_gelu_quick_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + assert(ggml_is_contiguous_1(src0)); + assert(ggml_is_contiguous_1(dst)); + assert(ggml_are_same_shape(src0, dst)); + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src0->ne[0]; + const int nr = ggml_nrows(src0); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_vec_gelu_quick_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), + (ggml_fp16_t *) ((char *) src0->data + i1 * (src0->nb[1]))); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + const float v = GGML_CPU_FP16_TO_FP32(x); + GGML_UNUSED(v); + assert(!isnan(v)); + assert(!isinf(v)); + } +#endif + } +} + +static void ggml_compute_forward_gelu_quick(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_gelu_quick_f32(params, dst); + } + break; + case GGML_TYPE_F16: + { + ggml_compute_forward_gelu_quick_f16(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_silu + +static void ggml_compute_forward_silu_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + assert(ggml_is_contiguous_1(src0)); + assert(ggml_is_contiguous_1(dst)); + assert(ggml_are_same_shape(src0, dst)); + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src0->ne[0]; + const int nr = ggml_nrows(src0); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_vec_silu_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), + (float *) ((char *) src0->data + i1 * (src0->nb[1]))); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + GGML_UNUSED(x); + assert(!isnan(x)); + assert(!isinf(x)); + } +#endif + } +} + +static void ggml_compute_forward_silu_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + assert(ggml_is_contiguous_1(src0)); + assert(ggml_is_contiguous_1(dst)); + assert(ggml_are_same_shape(src0, dst)); + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src0->ne[0]; + const int nr = ggml_nrows(src0); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_vec_silu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), + (ggml_fp16_t *) ((char *) src0->data + i1 * (src0->nb[1]))); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + const float v = GGML_CPU_FP16_TO_FP32(x); + GGML_UNUSED(v); + assert(!isnan(v)); + assert(!isinf(v)); + } +#endif + } +} + +static void 
ggml_compute_forward_silu(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_silu_f32(params, dst); + } + break; + case GGML_TYPE_F16: + { + ggml_compute_forward_silu_f16(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_leaky_relu + +static void ggml_compute_forward_leaky_relu_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + if (params->ith != 0) { + return; + } + + assert(ggml_is_contiguous_1(src0)); + assert(ggml_is_contiguous_1(dst)); + assert(ggml_are_same_shape(src0, dst)); + + const int n = ggml_nrows(src0); + const int nc = src0->ne[0]; + + float negative_slope; + memcpy(&negative_slope, dst->op_params, sizeof(float)); + + assert(dst->nb[0] == sizeof(float)); + assert(src0->nb[0] == sizeof(float)); + + for (int i = 0; i < n; i++) { + ggml_vec_leaky_relu_f32(nc, (float *) ((char *) dst->data + i * (dst->nb[1])), + (float *) ((char *) src0->data + i * (src0->nb[1])), negative_slope); + } +} + +static void ggml_compute_forward_leaky_relu_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + if (params->ith != 0) { + return; + } + + assert(ggml_is_contiguous_1(src0)); + assert(ggml_is_contiguous_1(dst)); + assert(ggml_are_same_shape(src0, dst)); + + const int n = ggml_nrows(src0); + const int nc = src0->ne[0]; + + float negative_slope; + memcpy(&negative_slope, dst->op_params, sizeof(float)); + + assert(dst->nb[0] == sizeof(ggml_fp16_t)); + assert(src0->nb[0] == sizeof(ggml_fp16_t)); + + for (int i = 0; i < n; i++) { + ggml_vec_leaky_relu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i * (dst->nb[1])), + (ggml_fp16_t *) ((char *) src0->data + i * (src0->nb[1])), negative_slope); + } +} + +void ggml_compute_forward_leaky_relu(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_leaky_relu_f32(params, dst); + } + break; + case GGML_TYPE_F16: + { + ggml_compute_forward_leaky_relu_f16(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_silu_back + +static void ggml_compute_forward_silu_back_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * grad = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + assert(ggml_is_contiguous_1(grad)); + assert(ggml_is_contiguous_1(src1)); + assert(ggml_is_contiguous_1(dst)); + assert(ggml_are_same_shape(src1, dst)); + assert(ggml_are_same_shape(src1, grad)); + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1->ne[0]; + const int nr = ggml_nrows(src1); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_vec_silu_backward_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), + (float *) ((char *) src1->data + i1 * (src1->nb[1])), + (float *) ((char *) grad->data + i1 * (grad->nb[1]))); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + GGML_UNUSED(x); + assert(!isnan(x)); + assert(!isinf(x)); + } +#endif + } +} + +static void ggml_compute_forward_silu_back_f16(const ggml_compute_params * 
params, ggml_tensor * dst) { + const ggml_tensor * grad = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + assert(ggml_is_contiguous_1(grad)); + assert(ggml_is_contiguous_1(src1)); + assert(ggml_is_contiguous_1(dst)); + assert(ggml_are_same_shape(src1, dst)); + assert(ggml_are_same_shape(src1, grad)); + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1->ne[0]; + const int nr = ggml_nrows(src1); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_vec_silu_backward_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), + (ggml_fp16_t *) ((char *) src1->data + i1 * (src1->nb[1])), + (ggml_fp16_t *) ((char *) grad->data + i1 * (grad->nb[1]))); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const float x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + const float v = GGML_CPU_FP16_TO_FP32(x); + GGML_UNUSED(v); + assert(!isnan(v)); + assert(!isinf(v)); + } +#endif + } +} + +void ggml_compute_forward_silu_back(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_silu_back_f32(params, dst); + } + break; + case GGML_TYPE_F16: + { + ggml_compute_forward_silu_back_f16(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_reglu + +static void ggml_compute_forward_reglu_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + float * src0_p = (float *) (src0_d + i1 * src0_o); + float * src1_p = (float *) (src1_d + i1 * src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + ggml_vec_reglu_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + GGML_UNUSED(x); + assert(!isnan(x)); + assert(!isinf(x)); + } +#endif + } +} + +static void ggml_compute_forward_reglu_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? 
src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_fp16_t * src0_p = (ggml_fp16_t *) (src0_d + i1 * src0_o); + ggml_fp16_t * src1_p = (ggml_fp16_t *) (src1_d + i1 * src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + ggml_vec_reglu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + const float v = GGML_FP16_TO_FP32(x); + GGML_UNUSED(v); + assert(!isnan(v)); + assert(!isinf(v)); + } +#endif + } +} + +static void ggml_compute_forward_reglu(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_reglu_f32(params, dst); + } + break; + case GGML_TYPE_F16: + { + ggml_compute_forward_reglu_f16(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_geglu + +static void ggml_compute_forward_geglu_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + float * src0_p = (float *) (src0_d + i1 * src0_o); + float * src1_p = (float *) (src1_d + i1 * src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + ggml_vec_geglu_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + GGML_UNUSED(x); + assert(!isnan(x)); + assert(!isinf(x)); + } +#endif + } +} + +static void ggml_compute_forward_geglu_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? 
src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_fp16_t * src0_p = (ggml_fp16_t *) (src0_d + i1 * src0_o); + ggml_fp16_t * src1_p = (ggml_fp16_t *) (src1_d + i1 * src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + ggml_vec_geglu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + const float v = GGML_FP16_TO_FP32(x); + GGML_UNUSED(v); + assert(!isnan(v)); + assert(!isinf(v)); + } +#endif + } +} + +static void ggml_compute_forward_geglu(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_geglu_f32(params, dst); + } + break; + case GGML_TYPE_F16: + { + ggml_compute_forward_geglu_f16(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_swiglu + +static void ggml_compute_forward_swiglu_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + float * src0_p = (float *) (src0_d + i1 * src0_o); + float * src1_p = (float *) (src1_d + i1 * src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 
0 : nc; + } + + ggml_vec_swiglu_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + GGML_UNUSED(x); + assert(!isnan(x)); + assert(!isinf(x)); + } +#endif + } +} + +static void ggml_compute_forward_swiglu_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_fp16_t * src0_p = (ggml_fp16_t *) (src0_d + i1 * src0_o); + ggml_fp16_t * src1_p = (ggml_fp16_t *) (src1_d + i1 * src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + ggml_vec_swiglu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + const float v = GGML_FP16_TO_FP32(x); + GGML_UNUSED(v); + assert(!isnan(v)); + assert(!isinf(v)); + } +#endif + } +} + +static void ggml_compute_forward_swiglu(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_swiglu_f32(params, dst); + } + break; + case GGML_TYPE_F16: + { + ggml_compute_forward_swiglu_f16(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_swiglu_oai + +static void ggml_compute_forward_swiglu_oai_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? 
src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + const float alpha = ggml_get_op_params_f32(dst, 2); + const float limit = ggml_get_op_params_f32(dst, 3); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + float * src0_p = (float *) (src0_d + i1 * src0_o); + float * src1_p = (float *) (src1_d + i1 * src1_o); + float * dst_p = (float *) ((char *) dst->data + i1 * (dst->nb[1])); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + for (int k = 0; k < nc; k++) { + const float x = std::min(src0_p[k], limit); + const float y = std::clamp(src1_p[k], -limit, limit); + const float out_glu = x / (1.f + expf(alpha * (-x))); + dst_p[k] = out_glu * (y + 1.f); + } + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const float x = dst_p[k]; + GGML_UNUSED(x); + assert(!isnan(x)); + assert(!isinf(x)); + } +#endif + } +} + +static void ggml_compute_forward_swiglu_oai(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_swiglu_oai_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_geglu_erf + +static void ggml_compute_forward_geglu_erf_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + float * src0_p = (float *) (src0_d + i1 * src0_o); + float * src1_p = (float *) (src1_d + i1 * src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + ggml_vec_geglu_erf_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + GGML_UNUSED(x); + assert(!isnan(x)); + assert(!isinf(x)); + } +#endif + } +} + +static void ggml_compute_forward_geglu_erf_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? 
src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_fp16_t * src0_p = (ggml_fp16_t *) (src0_d + i1 * src0_o); + ggml_fp16_t * src1_p = (ggml_fp16_t *) (src1_d + i1 * src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + ggml_vec_geglu_erf_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + const float v = GGML_FP16_TO_FP32(x); + GGML_UNUSED(v); + assert(!isnan(v)); + assert(!isinf(v)); + } +#endif + } +} + +static void ggml_compute_forward_geglu_erf(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_geglu_erf_f32(params, dst); + } + break; + case GGML_TYPE_F16: + { + ggml_compute_forward_geglu_erf_f16(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_geglu_quick + +static void ggml_compute_forward_geglu_quick_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + float * src0_p = (float *) (src0_d + i1 * src0_o); + float * src1_p = (float *) (src1_d + i1 * src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 
0 : nc; + } + + ggml_vec_geglu_quick_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + GGML_UNUSED(x); + assert(!isnan(x)); + assert(!isinf(x)); + } +#endif + } +} + +static void ggml_compute_forward_geglu_quick_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_fp16_t * src0_p = (ggml_fp16_t *) (src0_d + i1 * src0_o); + ggml_fp16_t * src1_p = (ggml_fp16_t *) (src1_d + i1 * src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + ggml_vec_geglu_quick_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + const float v = GGML_FP16_TO_FP32(x); + GGML_UNUSED(v); + assert(!isnan(v)); + assert(!isinf(v)); + } +#endif + } +} + +static void ggml_compute_forward_geglu_quick(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_geglu_quick_f32(params, dst); + } + break; + case GGML_TYPE_F16: + { + ggml_compute_forward_geglu_quick_f16(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_norm + +static void ggml_compute_forward_norm_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + + GGML_ASSERT(src0->nb[0] == sizeof(float)); + + const int ith = params->ith; + const int nth = params->nth; + + GGML_TENSOR_UNARY_OP_LOCALS + + float eps; + memcpy(&eps, dst->op_params, sizeof(float)); + + GGML_ASSERT(eps >= 0.0f); + + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = ith; i01 < ne01; i01 += nth) { + const float * x = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); + + float sum = 0.0; + ggml_vec_sum_f32(ne00, &sum, x); + float mean = sum / ne00; + + float * y = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); + float variance = 0; + +#ifdef GGML_USE_ACCELERATE + mean = -mean; + vDSP_vsadd(x, 1, &mean, y, 1, ne00); + vDSP_measqv(y, 1, &variance, ne00); +#else + variance = ggml_vec_cvar_f32(ne00, y, x, mean); +#endif //GGML_USE_ACCELERATE + + const float scale = 1.0f / sqrtf(variance + eps); + ggml_vec_scale_f32(ne00, y, 
scale); + } + } + } +} + +void ggml_compute_forward_norm(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_norm_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_group_rms_norm + +static void ggml_compute_forward_rms_norm_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + + GGML_ASSERT(src0->nb[0] == sizeof(float)); + + const int ith = params->ith; + const int nth = params->nth; + + GGML_TENSOR_UNARY_OP_LOCALS + + float eps; + memcpy(&eps, dst->op_params, sizeof(float)); + + GGML_ASSERT(eps >= 0.0f); + + // TODO: optimize + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = ith; i01 < ne01; i01 += nth) { + const float * x = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); + + ggml_float sum = 0.0; + for (int64_t i00 = 0; i00 < ne00; i00++) { + sum += (ggml_float) (x[i00] * x[i00]); + } + + const float mean = sum / ne00; + + float * y = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); + + memcpy(y, x, ne00 * sizeof(float)); + // for (int i00 = 0; i00 < ne00; i00++) { + // y[i00] = x[i00]; + // } + + const float scale = 1.0f / sqrtf(mean + eps); + + // if you hit this, likely you got an inf somewhere earlier + assert(scale > 0.0f); + + ggml_vec_scale_f32(ne00, y, scale); + } + } + } +} + +void ggml_compute_forward_rms_norm(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_rms_norm_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +static void ggml_compute_forward_rms_norm_back_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; // gradients from forward pass output + const ggml_tensor * src1 = dst->src[1]; // src1 from forward pass + + GGML_ASSERT(ggml_are_same_shape(src0, dst) && ggml_are_same_shape(src0, src1)); + + GGML_ASSERT(src0->nb[0] == sizeof(float)); + GGML_ASSERT(src1->nb[0] == sizeof(float)); + + const int ith = params->ith; + const int nth = params->nth; + + GGML_TENSOR_BINARY_OP_LOCALS + + float eps; + memcpy(&eps, dst->op_params, sizeof(float)); + + // TODO: optimize + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = ith; i01 < ne01; i01 += nth) { + // src1 is same shape as src0 => same indices + const int64_t i11 = i01; + const int64_t i12 = i02; + const int64_t i13 = i03; + + const float * dz = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); + const float * x = (float *) ((char *) src1->data + i11 * nb11 + i12 * nb12 + i13 * nb13); + + ggml_float sum_xx = 0.0; + ggml_float sum_xdz = 0.0; + + for (int64_t i00 = 0; i00 < ne00; i00++) { + sum_xx += (ggml_float) (x[i00] * x[i00]); + sum_xdz += (ggml_float) (x[i00] * dz[i00]); + } + + //const float mean = (float)(sum_xx)/ne00; + const float mean_eps = (float) (sum_xx) / ne00 + eps; + const float sum_eps = (float) (sum_xx) + eps * ne00; + //const float mean_xdz = (float)(sum_xdz)/ne00; + // we could cache rms from forward pass to improve performance. + // to do this implement ggml_rms and compose ggml_rms_norm using ggml_rms. 
+ //const float rms = sqrtf(mean_eps); + const float rrms = 1.0f / sqrtf(mean_eps); + //const float scale = -rrms/(ne00 * mean_eps); // -1/(n*rms**3) + + { + // z = rms_norm(x) + // + // rms_norm(src1) = + // scale( + // src1, + // div( + // 1, + // sqrt( + // add( + // scale( + // sum( + // sqr( + // src1)), + // (1.0/N)), + // eps)))); + + // postorder: + // ## op args grad + // 00 param src1 grad[#00] + // 01 const 1 + // 02 sqr (#00) grad[#02] + // 03 sum (#02) grad[#03] + // 04 const 1/N + // 05 scale (#03, #04) grad[#05] + // 06 const eps + // 07 add (#05, #06) grad[#07] + // 08 sqrt (#07) grad[#08] + // 09 div (#01,#08) grad[#09] + // 10 scale (#00,#09) grad[#10] + // + // backward pass, given grad[#10] + // #10: scale + // grad[#00] += scale(grad[#10],#09) + // grad[#09] += sum(mul(grad[#10],#00)) + // #09: div + // grad[#08] += neg(mul(grad[#09], div(#09,#08))) + // #08: sqrt + // grad[#07] += mul(grad[#08], div(0.5, #08)) + // #07: add + // grad[#05] += grad[#07] + // #05: scale + // grad[#03] += scale(grad[#05],#04) + // #03: sum + // grad[#02] += repeat(grad[#03], #02) + // #02: + // grad[#00] += scale(mul(#00, grad[#02]), 2.0) + // + // substitute and simplify: + // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, grad[#02]), 2.0) + // grad[#02] = repeat(grad[#03], #02) + // grad[#02] = repeat(scale(grad[#05],#04), #02) + // grad[#02] = repeat(scale(grad[#07],#04), #02) + // grad[#02] = repeat(scale(mul(grad[#08], div(0.5, #08)),#04), #02) + // grad[#02] = repeat(scale(mul(neg(mul(grad[#09], div(#09,#08))), div(0.5, #08)),#04), #02) + // grad[#02] = repeat(scale(mul(neg(mul(sum(mul(grad[#10],#00)), div(#09,#08))), div(0.5, #08)),#04), #02) + // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(#09,#08) * div(0.5, #08) * (1/N)), #02) + // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(div(#01,#08),#08) * div(0.5, #08) * (1/N)), #02) + // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(1,#08*#08) * div(0.5, #08) * (1/N)), #02) + // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N)), #02) + // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, grad[#02]), 2.0) + // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, repeat(-(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N)), #02)), 2.0) + // grad[#00] = scale(grad(#10), #09) + scale(scale(#00, -(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N))), 2.0) + // grad[#00] = scale(grad(#10), #09) + scale(#00, -(sum(mul(grad[#10],#00)) * div(1,#07) * div(1,#08) * (1/N))) + // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,#07*#08) * (-1/N)) + // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,#07*#08) * (-1/N)) + // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,mean_eps*rms) * (-1/N)) + // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*mean_eps)) + // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*(sum_xx/N+eps))) + // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*sum_xx+rms*N*eps)) + // grad[#00] = scale(dz, rrms) + scale(x, sum(mul(dz,x)) * div(-1,rms*N*mean_eps)) + // grad[#00] = scale(dz, rrms) + scale(x, sum_xdz * div(-1,rms*N*mean_eps)) + // a = b*c + d*e + // a = b*c*f/f + d*e*f/f + // a = (b*c*f + d*e*f)*(1/f) + // a = (b*c*(1/c) + d*e*(1/c))*(1/(1/c)) + // a = (b + d*e/c)*c + // b = dz, c = rrms, d = x, e = sum_xdz * div(-1,rms*N*mean_eps) + // a = (dz + 
x*sum_xdz * div(-1,rms*N*mean_eps)/rrms)*rrms + // a = (dz + x*sum_xdz * div(-1,rms*N*mean_eps)*rms)*rrms + // a = (dz + x*sum_xdz * div(-rms,rms*N*mean_eps))*rrms + // a = (dz + x*sum_xdz * div(-1,N*mean_eps))*rrms + // a = (dz + x*div(-sum_xdz,N*mean_eps))*rrms + // a = (dz + x*div(-mean_xdz,mean_eps))*rrms + // grad[#00] = scale(dz + scale(x, div(-mean_xdz,mean_eps)),rrms) + // grad[#00] = scale(dz + scale(x, -mean_xdz/mean_eps),rrms) + // dx = scale(dz + scale(x, -mean_xdz/mean_eps),rrms) + } + // dx = scale(dz + scale(x, -mean_xdz/mean_eps),rrms) + // post-order: + // dx := x + // dx := scale(dx,-mean_xdz/mean_eps) + // dx := add(dx, dz) + // dx := scale(dx, rrms) + float * dx = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); + + // dx[i00] = (x*(-sum_xdz/sum_eps) + dz) / sqrtf(mean_eps) + ggml_vec_cpy_f32(ne00, dx, x); + // ggml_vec_scale_f32(ne00, dx, -mean_xdz/mean_eps); + ggml_vec_scale_f32(ne00, dx, (float) (-sum_xdz) / sum_eps); + ggml_vec_acc_f32(ne00, dx, dz); + ggml_vec_scale_f32(ne00, dx, rrms); + } + } + } +} + +void ggml_compute_forward_rms_norm_back(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_rms_norm_back_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_group_norm + +static void ggml_compute_forward_group_norm_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + + GGML_ASSERT(src0->nb[0] == sizeof(float)); + + const int ith = params->ith; + const int nth = params->nth; + + GGML_TENSOR_UNARY_OP_LOCALS + + // TODO: optimize + + float eps; + memcpy(&eps, dst->op_params + 1, sizeof(float)); + + int n_channels = src0->ne[2]; + int n_groups = dst->op_params[0]; + int n_channels_per_group = (n_channels + n_groups - 1) / n_groups; + for (int i = ith; i < n_groups; i += nth) { + int start = i * n_channels_per_group; + int end = start + n_channels_per_group; + if (end > n_channels) { + end = n_channels; + } + int step = end - start; + + for (int64_t i03 = 0; i03 < ne03; i03++) { + ggml_float sum = 0.0; + for (int64_t i02 = start; i02 < end; i02++) { + for (int64_t i01 = 0; i01 < ne01; i01++) { + const float * x = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); + + ggml_float sumr = 0.0; + for (int64_t i00 = 0; i00 < ne00; i00++) { + sumr += (ggml_float) x[i00]; + } + sum += sumr; + } + } + const float mean = sum / (ne00 * ne01 * step); + + ggml_float sum2 = 0.0; + for (int64_t i02 = start; i02 < end; i02++) { + for (int64_t i01 = 0; i01 < ne01; i01++) { + const float * x = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); + + float * y = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); + + ggml_float sumr = 0.0; + for (int64_t i00 = 0; i00 < ne00; i00++) { + float v = x[i00] - mean; + y[i00] = v; + sumr += (ggml_float) (v * v); + } + sum2 += sumr; + } + } + const float variance = sum2 / (ne00 * ne01 * step); + const float scale = 1.0f / sqrtf(variance + eps); + + for (int64_t i02 = start; i02 < end; i02++) { + for (int64_t i01 = 0; i01 < ne01; i01++) { + float * y = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); + ggml_vec_scale_f32(ne00, y, scale); + } + } + } + } +} + +void ggml_compute_forward_group_norm(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * 
src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_group_norm_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_l2_norm + +static void ggml_compute_forward_l2_norm_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + + GGML_ASSERT(src0->nb[0] == sizeof(float)); + + const int ith = params->ith; + const int nth = params->nth; + + GGML_TENSOR_UNARY_OP_LOCALS + + float eps; + memcpy(&eps, dst->op_params, sizeof(float)); + + GGML_ASSERT(eps >= 0.0f); + + // TODO: optimize + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = ith; i01 < ne01; i01 += nth) { + const float * x = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); + + ggml_float sum = 0.0; + for (int64_t i00 = 0; i00 < ne00; i00++) { + sum += (ggml_float) (x[i00] * x[i00]); + } + + float * y = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); + + memcpy(y, x, ne00 * sizeof(float)); + + const float scale = 1.0f / fmaxf(sqrtf(sum), eps); + + ggml_vec_scale_f32(ne00, y, scale); + } + } + } +} + +void ggml_compute_forward_l2_norm(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_l2_norm_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_out_prod + +static void ggml_compute_forward_out_prod_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_TENSOR_BINARY_OP_LOCALS + + GGML_ASSERT(dst->type == GGML_TYPE_F32); + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + + const int ith = params->ith; + const int nth = params->nth; + + GGML_ASSERT(ne0 == ne00); + GGML_ASSERT(ne1 == ne10); + GGML_ASSERT(ne2 == ne12); + GGML_ASSERT(ne3 == ne13); + + GGML_ASSERT(ne2 % ne02 == 0); + GGML_ASSERT(ne3 % ne03 == 0); + + // we don't support permuted src0 or src1 + GGML_ASSERT(nb00 == sizeof(float)); + + // dst cannot be transposed or permuted + GGML_ASSERT(nb0 == sizeof(float)); + // GGML_ASSERT(nb0 <= nb1); + // GGML_ASSERT(nb1 <= nb2); + // GGML_ASSERT(nb2 <= nb3); + + // nb01 >= nb00 - src0 is not transposed + // compute by src0 rows + + if (ith == 0) { + ggml_vec_set_f32(ne0 * ne1 * ne2 * ne3, (float *) dst->data, 0); + } + ggml_barrier(params->threadpool); + + // dst[:,:,:,:] = 0 + // for i2,i3: + // for i1: + // for i01: + // for i0: + // dst[i0,i1,i2,i3] += src0[i0,i01,i2,i3] * src1[i1,i01,i2,i3] + + // parallelize by last three dimensions + + // total rows in dst + const int64_t nr = ne1 * ne2 * ne3; + + // rows per thread + const int64_t dr = (nr + nth - 1) / nth; + + // row range for this thread + const int64_t ir0 = dr * ith; + const int64_t ir1 = MIN(ir0 + dr, nr); + + // block-tiling attempt + const int64_t blck_0 = MAX(GGML_VEC_MAD_UNROLL, 32); + const int64_t blck_1 = 16; + + // dps == dst per src0, used for group query attention + const int64_t dps2 = ne2 / ne02; + const int64_t dps3 = ne3 / ne03; + + for (int64_t bir = ir0; bir < ir1; bir += blck_1) { + const int64_t bir1 = MIN(bir + blck_1, ir1); + for (int64_t bi01 = 0; bi01 < ne01; bi01 += blck_0) { + const int64_t bne01 = MIN(bi01 + blck_0, ne01); + for (int64_t ir = 
bir; ir < bir1; ++ir) { + // dst indices + const int64_t i3 = ir / (ne2 * ne1); + const int64_t i2 = (ir - i3 * ne2 * ne1) / ne1; + const int64_t i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); + + const int64_t i02 = i2 / dps2; + const int64_t i03 = i3 / dps3; + + //const int64_t i10 = i1; + const int64_t i12 = i2; + const int64_t i13 = i3; + +#if GGML_VEC_MAD_UNROLL > 2 + const int64_t bne01_unroll = bne01 - (bne01 % GGML_VEC_MAD_UNROLL); + for (int64_t i01 = bi01; i01 < bne01_unroll; i01 += GGML_VEC_MAD_UNROLL) { + const int64_t i11 = i01; + + float * s0 = (float *) ((char *) src0->data + (i01 * nb01 + i02 * nb02 + i03 * nb03)); + float * s1 = (float *) ((char *) src1->data + (i1 * nb10 + i11 * nb11 + i12 * nb12 + i13 * nb13)); + float * d = (float *) ((char *) dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb3)); + + ggml_vec_mad_f32_unroll(ne0, nb01, nb11, d, s0, s1); + } + for (int64_t i01 = bne01_unroll; i01 < bne01; ++i01) { + const int64_t i11 = i01; + + float * s0 = (float *) ((char *) src0->data + (i01 * nb01 + i02 * nb02 + i03 * nb03)); + float * s1 = (float *) ((char *) src1->data + (i1 * nb10 + i11 * nb11 + i12 * nb12 + i13 * nb13)); + float * d = (float *) ((char *) dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb3)); + + ggml_vec_mad_f32(ne0, d, s0, *s1); + } +#else + for (int64_t i01 = bi01; i01 < bne01; ++i01) { + const int64_t i11 = i01; + + float * s0 = (float *) ((char *) src0->data + (i01 * nb01 + i02 * nb02 + i03 * nb03)); + float * s1 = (float *) ((char *) src1->data + (i1 * nb10 + i11 * nb11 + i12 * nb12 + i13 * nb13)); + float * d = (float *) ((char *) dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb3)); + + ggml_vec_mad_f32(ne0, d, s0, *s1); + } +#endif + } + } + } +} + +static void ggml_compute_forward_out_prod_q_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_TENSOR_BINARY_OP_LOCALS; + + const int ith = params->ith; + const int nth = params->nth; + + const ggml_type type = src0->type; + const ggml_to_float_t dequantize_row_q = ggml_get_type_traits(type)->to_float; + + GGML_ASSERT(ne02 == ne12); + GGML_ASSERT(ne03 == ne13); + GGML_ASSERT(ne2 == ne12); + GGML_ASSERT(ne3 == ne13); + + // we don't support permuted src0 dim0 + GGML_ASSERT(nb00 == ggml_type_size(type)); + + // dst dim0 cannot be transposed or permuted + GGML_ASSERT(nb0 == sizeof(float)); + // GGML_ASSERT(nb0 <= nb1); + // GGML_ASSERT(nb1 <= nb2); + // GGML_ASSERT(nb2 <= nb3); + + GGML_ASSERT(ne0 == ne00); + GGML_ASSERT(ne1 == ne10); + GGML_ASSERT(ne2 == ne02); + GGML_ASSERT(ne3 == ne03); + + // nb01 >= nb00 - src0 is not transposed + // compute by src0 rows + + if (ith == 0) { + ggml_vec_set_f32(ne0 * ne1 * ne2 * ne3, (float *) dst->data, 0); + } + ggml_barrier(params->threadpool); + + // parallelize by last three dimensions + + // total rows in dst + const int64_t nr = ne1 * ne2 * ne3; + + // rows per thread + const int64_t dr = (nr + nth - 1) / nth; + + // row range for this thread + const int64_t ir0 = dr * ith; + const int64_t ir1 = MIN(ir0 + dr, nr); + + // dst[:,:,:,:] = 0 + // for i2,i3: + // for i1: + // for i01: + // for i0: + // dst[i0,i1,i2,i3] += src0[i0,i01,i2,i3] * src1[i1,i01,i2,i3] + + float * wdata = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32) * ith; + + for (int64_t ir = ir0; ir < ir1; ++ir) { + // dst indices + const int64_t i3 = ir / (ne2 * ne1); + const int64_t i2 = (ir - i3 * ne2 * ne1) / ne1; + const int64_t i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); + + const int64_t i02 = i2; + const int64_t 
i03 = i3; + + //const int64_t i10 = i1; + const int64_t i12 = i2; + const int64_t i13 = i3; + + for (int64_t i01 = 0; i01 < ne01; ++i01) { + const int64_t i11 = i01; + + float * s0 = (float *) ((char *) src0->data + (i01 * nb01 + i02 * nb02 + i03 * nb03)); + float * s1 = (float *) ((char *) src1->data + (i1 * nb10 + i11 * nb11 + i12 * nb12 + i13 * nb13)); + float * d = (float *) ((char *) dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb3)); + + dequantize_row_q(s0, wdata, ne0); + ggml_vec_mad_f32(ne0, d, wdata, *s1); + } + } +} + +void ggml_compute_forward_out_prod(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: + case GGML_TYPE_Q5_0: + case GGML_TYPE_Q5_1: + case GGML_TYPE_Q8_0: + case GGML_TYPE_MXFP4: + case GGML_TYPE_Q2_K: + case GGML_TYPE_Q3_K: + case GGML_TYPE_Q4_K: + case GGML_TYPE_Q5_K: + case GGML_TYPE_Q6_K: + case GGML_TYPE_TQ1_0: + case GGML_TYPE_TQ2_0: + case GGML_TYPE_IQ2_XXS: + case GGML_TYPE_IQ2_XS: + case GGML_TYPE_IQ3_XXS: + case GGML_TYPE_IQ1_S: + case GGML_TYPE_IQ1_M: + case GGML_TYPE_IQ4_NL: + case GGML_TYPE_IQ4_XS: + case GGML_TYPE_IQ3_S: + case GGML_TYPE_IQ2_S: + { + ggml_compute_forward_out_prod_q_f32(params, dst); + } + break; + case GGML_TYPE_F16: + { + GGML_ABORT("fatal error"); // todo + // ggml_compute_forward_out_prod_f16_f32(params, dst); + } + case GGML_TYPE_F32: + { + ggml_compute_forward_out_prod_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_scale + +static void ggml_compute_forward_scale_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + GGML_ASSERT(ggml_is_contiguous(src0)); + GGML_ASSERT(ggml_is_contiguous(dst)); + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + + float s; // scale factor + float b; // bias + + memcpy(&s, (float *) dst->op_params + 0, sizeof(float)); + memcpy(&b, (float *) dst->op_params + 1, sizeof(float)); + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src0->ne[0]; + const int nr = ggml_nrows(src0); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + const size_t nb01 = src0->nb[1]; + + const size_t nb1 = dst->nb[1]; + + if (b == 0.0f) { + for (int i1 = ir0; i1 < ir1; i1++) { + if (dst->data != src0->data) { + // src0 is same shape as dst => same indices + // TODO: add x parameter to ggml_vec_scale_f32 and remove this memcpy + memcpy((char *) dst->data + i1 * nb1, (char *) src0->data + i1 * nb01, nc * sizeof(float)); + } + ggml_vec_scale_f32(nc, (float *) ((char *) dst->data + i1 * nb1), s); + } + } else { + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_vec_mad1_f32(nc, (float *) ((char *) dst->data + i1 * nb1), (float *) ((char *) src0->data + i1 * nb1), + s, b); + } + } +} + +void ggml_compute_forward_scale(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_scale_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_set + +static void ggml_compute_forward_set_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + 
GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0)); + + // view src0 and dst with these strides and data offset inbytes during set + // nb0 is implicitly element_size because src0 and dst are contiguous + size_t nb1 = ((int32_t *) dst->op_params)[0]; + size_t nb2 = ((int32_t *) dst->op_params)[1]; + size_t nb3 = ((int32_t *) dst->op_params)[2]; + size_t offset = ((int32_t *) dst->op_params)[3]; + bool inplace = (bool) ((int32_t *) dst->op_params)[4]; + + if (!inplace) { + if (params->ith == 0) { + // memcpy needs to be synchronized across threads to avoid race conditions. + // => do it in INIT phase + memcpy(((char *) dst->data), ((char *) src0->data), ggml_nbytes(dst)); + } + ggml_barrier(params->threadpool); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nr = ggml_nrows(src1); + const int nc = src1->ne[0]; + + GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) + GGML_TENSOR_LOCALS(size_t, nb1, src1, nb) + + // src0 and dst as viewed during set + const size_t nb0 = ggml_element_size(src0); + + const int im0 = (ne10 == 0 ? 0 : ne10 - 1); + const int im1 = (ne11 == 0 ? 0 : ne11 - 1); + const int im2 = (ne12 == 0 ? 0 : ne12 - 1); + const int im3 = (ne13 == 0 ? 0 : ne13 - 1); + + GGML_ASSERT(offset + im0 * nb0 + im1 * nb1 + im2 * nb2 + im3 * nb3 <= ggml_nbytes(dst)); + + GGML_ASSERT(nb10 == sizeof(float)); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int ir = ir0; ir < ir1; ++ir) { + // src0 and dst are viewed with shape of src1 and offset + // => same indices + const int i3 = ir / (ne12 * ne11); + const int i2 = (ir - i3 * ne12 * ne11) / ne11; + const int i1 = (ir - i3 * ne12 * ne11 - i2 * ne11); + + ggml_vec_cpy_f32(nc, (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1 + offset), + (float *) ((char *) src1->data + i3 * nb13 + i2 * nb12 + i1 * nb11)); + } +} + +static void ggml_compute_forward_set_i32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0)); + + // view src0 and dst with these strides and data offset inbytes during set + // nb0 is implicitly element_size because src0 and dst are contiguous + size_t nb1 = ((int32_t *) dst->op_params)[0]; + size_t nb2 = ((int32_t *) dst->op_params)[1]; + size_t nb3 = ((int32_t *) dst->op_params)[2]; + size_t offset = ((int32_t *) dst->op_params)[3]; + bool inplace = (bool) ((int32_t *) dst->op_params)[4]; + + if (!inplace) { + if (params->ith == 0) { + // memcpy needs to be synchronized across threads to avoid race conditions. + // => do it in INIT phase + memcpy(((char *) dst->data), ((char *) src0->data), ggml_nbytes(dst)); + } + ggml_barrier(params->threadpool); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nr = ggml_nrows(src1); + const int nc = src1->ne[0]; + + GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) + GGML_TENSOR_LOCALS(size_t, nb1, src1, nb) + + // src0 and dst as viewed during set + const size_t nb0 = ggml_element_size(src0); + + const int im0 = (ne10 == 0 ? 0 : ne10 - 1); + const int im1 = (ne11 == 0 ? 0 : ne11 - 1); + const int im2 = (ne12 == 0 ? 0 : ne12 - 1); + const int im3 = (ne13 == 0 ? 
0 : ne13 - 1); + + GGML_ASSERT(offset + im0 * nb0 + im1 * nb1 + im2 * nb2 + im3 * nb3 <= ggml_nbytes(dst)); + + GGML_ASSERT(nb10 == sizeof(int32_t)); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int ir = ir0; ir < ir1; ++ir) { + // src0 and dst are viewed with shape of src1 and offset + // => same indices + const int i3 = ir / (ne12 * ne11); + const int i2 = (ir - i3 * ne12 * ne11) / ne11; + const int i1 = (ir - i3 * ne12 * ne11 - i2 * ne11); + + ggml_vec_cpy_i32(nc, (int32_t *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1 + offset), + (int32_t *) ((char *) src1->data + i3 * nb13 + i2 * nb12 + i1 * nb11)); + } +} + +void ggml_compute_forward_set(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_set_f32(params, dst); + } + break; + case GGML_TYPE_I32: + { + ggml_compute_forward_set_i32(params, dst); + } + break; + case GGML_TYPE_F16: + case GGML_TYPE_BF16: + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: + case GGML_TYPE_Q5_0: + case GGML_TYPE_Q5_1: + case GGML_TYPE_Q8_0: + case GGML_TYPE_Q8_1: + case GGML_TYPE_MXFP4: + case GGML_TYPE_Q2_K: + case GGML_TYPE_Q3_K: + case GGML_TYPE_Q4_K: + case GGML_TYPE_Q5_K: + case GGML_TYPE_Q6_K: + case GGML_TYPE_TQ1_0: + case GGML_TYPE_TQ2_0: + case GGML_TYPE_IQ2_XXS: + case GGML_TYPE_IQ2_XS: + case GGML_TYPE_IQ3_XXS: + case GGML_TYPE_IQ1_S: + case GGML_TYPE_IQ1_M: + case GGML_TYPE_IQ4_NL: + case GGML_TYPE_IQ4_XS: + case GGML_TYPE_IQ3_S: + case GGML_TYPE_IQ2_S: + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_cpy + +void ggml_compute_forward_cpy(const ggml_compute_params * params, ggml_tensor * dst) { + ggml_compute_forward_dup(params, dst); +} + +// ggml_compute_forward_cont + +void ggml_compute_forward_cont(const ggml_compute_params * params, ggml_tensor * dst) { + ggml_compute_forward_dup(params, dst); +} + +// ggml_compute_forward_get_rows + +static void ggml_compute_forward_get_rows_q(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_TENSOR_BINARY_OP_LOCALS + + const int64_t nc = ne00; + const int64_t nr = ggml_nelements(src1); + + const ggml_type type = src0->type; + const ggml_to_float_t dequantize_row_q = ggml_get_type_traits(type)->to_float; + + assert(ne0 == nc); + assert(ne02 == ne11); + assert(nb00 == ggml_type_size(type)); + assert(ggml_nrows(dst) == nr); + + const int ith = params->ith; + const int nth = params->nth; + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int64_t i = ir0; i < ir1; ++i) { + const int64_t i12 = i / (ne11 * ne10); + const int64_t i11 = (i - i12 * ne11 * ne10) / ne10; + const int64_t i10 = (i - i12 * ne11 * ne10 - i11 * ne10); + const int64_t i01 = *(int32_t *) ((char *) src1->data + i10 * nb10 + i11 * nb11 + i12 * nb12); + + GGML_ASSERT(i01 >= 0 && i01 < ne01); + + dequantize_row_q((const void *) ((char *) src0->data + i01 * nb01 + i11 * nb02 + i12 * nb03), + (float *) ((char *) dst->data + i10 * nb1 + i11 * nb2 + i12 * nb3), nc); + } +} + +static void ggml_compute_forward_get_rows_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = 
dst->src[1]; + + GGML_TENSOR_BINARY_OP_LOCALS + + const int64_t nc = ne00; + const int64_t nr = ggml_nelements(src1); + + assert(ne0 == nc); + assert(ne02 == ne11); + assert(nb00 == sizeof(ggml_fp16_t)); + assert(ggml_nrows(dst) == nr); + + const int ith = params->ith; + const int nth = params->nth; + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int64_t i = ir0; i < ir1; ++i) { + const int64_t i12 = i / (ne11 * ne10); + const int64_t i11 = (i - i12 * ne11 * ne10) / ne10; + const int64_t i10 = (i - i12 * ne11 * ne10 - i11 * ne10); + const int64_t i01 = *(int32_t *) ((char *) src1->data + i10 * nb10 + i11 * nb11 + i12 * nb12); + + GGML_ASSERT(i01 >= 0 && i01 < ne01); + + ggml_cpu_fp16_to_fp32((const ggml_fp16_t *) ((char *) src0->data + i01 * nb01 + i11 * nb02 + i12 * nb03), + (float *) ((char *) dst->data + i10 * nb1 + i11 * nb2 + i12 * nb3), nc); + } +} + +static void ggml_compute_forward_get_rows_bf16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_TENSOR_BINARY_OP_LOCALS + + const int64_t nc = ne00; + const int64_t nr = ggml_nelements(src1); + + assert(ne0 == nc); + assert(ne02 == ne11); + assert(nb00 == sizeof(ggml_bf16_t)); + assert(ggml_nrows(dst) == nr); + + const int ith = params->ith; + const int nth = params->nth; + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int64_t i = ir0; i < ir1; ++i) { + const int64_t i12 = i / (ne11 * ne10); + const int64_t i11 = (i - i12 * ne11 * ne10) / ne10; + const int64_t i10 = (i - i12 * ne11 * ne10 - i11 * ne10); + const int64_t i01 = *(int32_t *) ((char *) src1->data + i10 * nb10 + i11 * nb11 + i12 * nb12); + + GGML_ASSERT(i01 >= 0 && i01 < ne01); + + ggml_cpu_bf16_to_fp32((const ggml_bf16_t *) ((char *) src0->data + i01 * nb01 + i11 * nb02 + i12 * nb03), + (float *) ((char *) dst->data + i10 * nb1 + i11 * nb2 + i12 * nb3), nc); + } +} + +static void ggml_compute_forward_get_rows_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_TENSOR_BINARY_OP_LOCALS + + const int64_t nc = ne00; + const int64_t nr = ggml_nelements(src1); + + assert(ne0 == nc); + assert(ne02 == ne11); + assert(nb00 == sizeof(float)); + assert(ggml_nrows(dst) == nr); + + const int ith = params->ith; + const int nth = params->nth; + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int64_t i = ir0; i < ir1; ++i) { + const int64_t i12 = i / (ne11 * ne10); + const int64_t i11 = (i - i12 * ne11 * ne10) / ne10; + const int64_t i10 = (i - i12 * ne11 * ne10 - i11 * ne10); + const int64_t i01 = *(int32_t *) ((char *) src1->data + i10 * nb10 + i11 * nb11 + i12 * nb12); + + GGML_ASSERT(i01 >= 0 && i01 < ne01); + + ggml_vec_cpy_f32(nc, (float *) ((char *) dst->data + i10 * nb1 + i11 * nb2 + i12 * nb3), + (float *) ((char *) src0->data + i01 * nb01 + i11 * nb02 + i12 * nb03)); + } +} + +void ggml_compute_forward_get_rows(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: + case GGML_TYPE_Q5_0: + case GGML_TYPE_Q5_1: + 
case GGML_TYPE_Q8_0: + case GGML_TYPE_Q8_1: + case GGML_TYPE_MXFP4: + case GGML_TYPE_Q2_K: + case GGML_TYPE_Q3_K: + case GGML_TYPE_Q4_K: + case GGML_TYPE_Q5_K: + case GGML_TYPE_Q6_K: + case GGML_TYPE_TQ1_0: + case GGML_TYPE_TQ2_0: + case GGML_TYPE_IQ2_XXS: + case GGML_TYPE_IQ2_XS: + case GGML_TYPE_IQ3_XXS: + case GGML_TYPE_IQ1_S: + case GGML_TYPE_IQ1_M: + case GGML_TYPE_IQ4_NL: + case GGML_TYPE_IQ4_XS: + case GGML_TYPE_IQ3_S: + case GGML_TYPE_IQ2_S: + { + ggml_compute_forward_get_rows_q(params, dst); + } + break; + case GGML_TYPE_F16: + { + ggml_compute_forward_get_rows_f16(params, dst); + } + break; + case GGML_TYPE_BF16: + { + ggml_compute_forward_get_rows_bf16(params, dst); + } + break; + case GGML_TYPE_F32: + case GGML_TYPE_I32: + { + ggml_compute_forward_get_rows_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } + + //static bool first = true; + //printf("ne0 = %d, ne1 = %d, ne2 = %d\n", dst->ne[0], dst->ne[1], dst->ne[2]); + //if (first) { + // first = false; + //} else { + // for (int k = 0; k < dst->ne[1]; ++k) { + // for (int j = 0; j < dst->ne[0]/16; ++j) { + // for (int i = 0; i < 16; ++i) { + // printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]); + // } + // printf("\n"); + // } + // printf("\n"); + // } + // printf("\n"); + // exit(0); + //} +} + +template <typename idx_t> +static void ggml_compute_forward_set_rows_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_TENSOR_BINARY_OP_LOCALS + + const int64_t nc = ne00; + const int64_t nr = ne01; + + assert(ne0 == nc); + assert(ne2 == ne02); + assert(ne3 == ne03); + assert(src0->type == GGML_TYPE_F32); + assert(ne02 % ne11 == 0); + assert(ne03 % ne12 == 0); + + const int ith = params->ith; + const int nth = params->nth; + + // rows per thread + const int64_t dr = (nr + nth - 1) / nth; + + // row range for this thread + const int64_t ir0 = dr * ith; + const int64_t ir1 = std::min(ir0 + dr, nr); + + const ggml_from_float_t from_float = ggml_get_type_traits_cpu(dst->type)->from_float; + + for (int64_t i03 = 0; i03 < ne03; ++i03) { + for (int64_t i02 = 0; i02 < ne02; ++i02) { + for (int64_t i = ir0; i < ir1; ++i) { + const int64_t i12 = i03 % ne12; + const int64_t i11 = i02 % ne11; + const int64_t i10 = i; + + const int64_t i1 = *(idx_t *) ((char *) src1->data + i10 * nb10 + i11 * nb11 + i12 * nb12); + + GGML_ASSERT(i1 >= 0 && i1 < ne1); + + from_float((const float *) ((char *) src0->data + i * nb01 + i02 * nb02 + i03 * nb03), + ((char *) dst->data + i1 * nb1 + i02 * nb2 + i03 * nb3), nc); + } + } + } +} + +void ggml_compute_forward_set_rows(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + if (src1->type == GGML_TYPE_I64) { + ggml_compute_forward_set_rows_f32<int64_t>(params, dst); + } else if (src1->type == GGML_TYPE_I32) { + ggml_compute_forward_set_rows_f32<int32_t>(params, dst); + } else { + GGML_ABORT("src1->type = %d (%s) not supported", src1->type, ggml_type_name(src1->type)); + } + } + break; + default: + { + GGML_ABORT("src0->type = %d (%s) not supported", src0->type, ggml_type_name(src0->type)); + } + } +} + +// ggml_compute_forward_get_rows_back + +static void ggml_compute_forward_get_rows_back_f32_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + if (params->ith != 0)
{ + return; + } + + GGML_ASSERT(ggml_is_contiguous(dst)); + + // ggml_compute_forward_dup_same_cont(params, opt0, dst); + + memset(dst->data, 0, ggml_nbytes(dst)); + + const int nc = src0->ne[0]; + const int nr = ggml_nelements(src1); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(src0->nb[0] == sizeof(ggml_fp16_t)); + + for (int i = 0; i < nr; ++i) { + const int r = ((int32_t *) src1->data)[i]; + + for (int j = 0; j < nc; ++j) { + ggml_fp16_t v = ((ggml_fp16_t *) ((char *) src0->data + i * src0->nb[1]))[j]; + ((float *) ((char *) dst->data + r * dst->nb[1]))[j] += GGML_CPU_FP16_TO_FP32(v); + } + } +} + +static void ggml_compute_forward_get_rows_back_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + if (params->ith != 0) { + return; + } + + GGML_ASSERT(ggml_is_contiguous(dst)); + + // ggml_compute_forward_dup_same_cont(params, opt0, dst); + + memset(dst->data, 0, ggml_nbytes(dst)); + + const int nc = src0->ne[0]; + const int nr = ggml_nelements(src1); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(src0->nb[0] == sizeof(float)); + + for (int i = 0; i < nr; ++i) { + const int r = ((int32_t *) src1->data)[i]; + + ggml_vec_add_f32(nc, (float *) ((char *) dst->data + r * dst->nb[1]), + (float *) ((char *) dst->data + r * dst->nb[1]), + (float *) ((char *) src0->data + i * src0->nb[1])); + } +} + +void ggml_compute_forward_get_rows_back(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F16: + { + ggml_compute_forward_get_rows_back_f32_f16(params, dst); + } + break; + case GGML_TYPE_F32: + { + ggml_compute_forward_get_rows_back_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } + + //static bool first = true; + //printf("ne0 = %d, ne1 = %d, ne2 = %d\n", dst->ne[0], dst->ne[1], dst->ne[2]); + //if (first) { + // first = false; + //} else { + // for (int k = 0; k < dst->ne[1]; ++k) { + // for (int j = 0; j < dst->ne[0]/16; ++j) { + // for (int i = 0; i < 16; ++i) { + // printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]); + // } + // printf("\n"); + // } + // printf("\n"); + // } + // printf("\n"); + // exit(0); + //} +} + +// ggml_compute_forward_diag + +static void ggml_compute_forward_diag_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + if (params->ith != 0) { + return; + } + + // TODO: handle transposed/permuted matrices + + GGML_TENSOR_UNARY_OP_LOCALS + + GGML_ASSERT(ne00 == ne0); + GGML_ASSERT(ne00 == ne1); + GGML_ASSERT(ne01 == 1); + GGML_ASSERT(ne02 == ne2); + GGML_ASSERT(ne03 == ne3); + + GGML_ASSERT(nb00 == sizeof(float)); + GGML_ASSERT(nb0 == sizeof(float)); + + for (int i3 = 0; i3 < ne3; i3++) { + for (int i2 = 0; i2 < ne2; i2++) { + for (int i1 = 0; i1 < ne1; i1++) { + float * d = (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1); + float * s = (float *) ((char *) src0->data + i3 * nb03 + i2 * nb02); + for (int i0 = 0; i0 < i1; i0++) { + d[i0] = 0; + } + d[i1] = s[i1]; + for (int i0 = i1 + 1; i0 < ne0; i0++) { + d[i0] = 0; + } + } + } + } +} + +void ggml_compute_forward_diag(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_diag_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_diag_mask_inf + 
+static void ggml_compute_forward_diag_mask_f32(const ggml_compute_params * params, + ggml_tensor * dst, + const float value) { + const ggml_tensor * src0 = dst->src[0]; + + const int ith = params->ith; + const int nth = params->nth; + + const int n_past = ((int32_t *) dst->op_params)[0]; + const bool inplace = src0->data == dst->data; + + GGML_ASSERT(n_past >= 0); + + if (!inplace) { + if (ith == 0) { + // memcpy needs to be synchronized across threads to avoid race conditions. + // => do it in INIT phase + GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); + GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0)); + memcpy(((char *) dst->data), ((char *) src0->data), ggml_nbytes(dst)); + } + ggml_barrier(params->threadpool); + } + + // TODO: handle transposed/permuted matrices + + const int n = ggml_nrows(src0); + const int nc = src0->ne[0]; + const int nr = src0->ne[1]; + const int nz = n / nr; + + GGML_ASSERT(dst->nb[0] == sizeof(float)); + GGML_ASSERT(src0->nb[0] == sizeof(float)); + + for (int k = 0; k < nz; k++) { + for (int j = ith; j < nr; j += nth) { + for (int i = n_past; i < nc; i++) { + if (i > n_past + j) { + *(float *) ((char *) dst->data + k * dst->nb[2] + j * dst->nb[1] + i * dst->nb[0]) = value; + } + } + } + } +} + +void ggml_compute_forward_diag_mask_inf(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_diag_mask_f32(params, dst, -INFINITY); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +void ggml_compute_forward_diag_mask_zero(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_diag_mask_f32(params, dst, 0); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_soft_max + +static void ggml_compute_forward_soft_max_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + const ggml_tensor * src2 = dst->src[2]; + + assert(ggml_is_contiguous(dst)); + assert(ggml_are_same_shape(src0, dst)); + + float scale = 1.0f; + float max_bias = 0.0f; + + memcpy(&scale, (float *) dst->op_params + 0, sizeof(float)); + memcpy(&max_bias, (float *) dst->op_params + 1, sizeof(float)); + + const int ith = params->ith; + const int nth = params->nth; + + GGML_TENSOR_UNARY_OP_LOCALS + + const int64_t nb11 = src1 ? src1->nb[1] : 1; + const int64_t nb12 = src1 ? src1->nb[2] : 1; + const int64_t nb13 = src1 ? src1->nb[3] : 1; + + const int64_t ne12 = src1 ? src1->ne[2] : 1; + const int64_t ne13 = src1 ? src1->ne[3] : 1; + + // TODO: is this supposed to be ceil instead of floor? + // https://huggingface.co/mosaicml/mpt-7b/blob/main/attention.py#L370 + const uint32_t n_head = ne02; + const uint32_t n_head_log2 = 1u << (uint32_t) floor(log2(n_head)); + + const float m0 = powf(2.0f, -(max_bias) / n_head_log2); + const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2); + + float * wp = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith; + + const bool use_f16 = (src1 && src1->type == GGML_TYPE_F16); + + // sinks + const float * sk = src2 ? 
(float *) ((char *) src2->data) : nullptr; + + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = ith; i01 < ne01; i01 += nth) { + const int64_t i11 = i01; + const int64_t i12 = i02 % ne12; + const int64_t i13 = i03 % ne13; + + // ALiBi + const uint32_t h = i02; // head + const float slope = + (max_bias > 0.0f) ? h < n_head_log2 ? powf(m0, h + 1) : powf(m1, 2 * (h - n_head_log2) + 1) : 1.0f; + + float * sp = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); + float * dp = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); + + // broadcast the mask across rows + ggml_fp16_t * mp_f16 = + src1 ? (ggml_fp16_t *) ((char *) src1->data + i11 * nb11 + i12 * nb12 + i13 * nb13) : NULL; + float * mp_f32 = src1 ? (float *) ((char *) src1->data + i11 * nb11 + i12 * nb12 + i13 * nb13) : NULL; + + ggml_vec_cpy_f32(ne00, wp, sp); + ggml_vec_scale_f32(ne00, wp, scale); + if (mp_f32) { + if (use_f16) { + for (int i = 0; i < ne00; ++i) { + wp[i] += slope * GGML_CPU_FP16_TO_FP32(mp_f16[i]); + } + } else { + for (int i = 0; i < ne00; ++i) { + wp[i] += slope * mp_f32[i]; + } + } + } + +#ifndef NDEBUG + for (int i = 0; i < ne00; ++i) { + //printf("p[%d] = %f\n", i, p[i]); + assert(!isnan(wp[i])); + } +#endif + + float max = -INFINITY; + ggml_vec_max_f32(ne00, &max, wp); + + // if we have sinks, make a correction as if they were included in the softmax + if (sk) { + max = MAX(max, sk[i02]); + } + + ggml_float sum = ggml_vec_soft_max_f32(ne00, dp, wp, max); + assert(sum > 0.0); + + if (sk) { + sum += (ggml_float) expf(sk[i02] - max); + } + + sum = 1.0 / sum; + ggml_vec_scale_f32(ne00, dp, sum); + +#ifndef NDEBUG + for (int i = 0; i < ne00; ++i) { + assert(!isnan(dp[i])); + assert(!isinf(dp[i])); + } +#endif + } + } + } +} + +void ggml_compute_forward_soft_max(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_soft_max_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_soft_max_ext_back + +static void ggml_compute_forward_soft_max_ext_back_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_is_contiguous(src0)); + GGML_ASSERT(ggml_is_contiguous(src1)); + GGML_ASSERT(ggml_is_contiguous(dst)); + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + GGML_ASSERT(ggml_are_same_shape(src1, dst)); + + float scale = 1.0f; + float max_bias = 0.0f; + + memcpy(&scale, (const float *) dst->op_params + 0, sizeof(float)); + memcpy(&max_bias, (const float *) dst->op_params + 1, sizeof(float)); + + GGML_ASSERT(max_bias == 0.0f); + + // TODO: handle transposed/permuted matrices + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src0->ne[0]; + const int nr = ggml_nrows(src0); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + float * dy = (float *) ((char *) src0->data + i1 * src0->nb[1]); + float * y = (float *) ((char *) src1->data + i1 * src1->nb[1]); + float * dx = (float *) ((char *) dst->data + i1 * dst->nb[1]); + +#ifndef NDEBUG + for (int i = 0; i < nc; ++i) { + //printf("p[%d] = %f\n", i, p[i]); + assert(!isnan(dy[i])); + assert(!isnan(y[i])); + } +#endif + // 
Jii = yi - yi*yi + // Jij = -yi*yj + // J = diag(y)-y.T*y + // dx = J * dy + // dxk = sum_i(Jki * dyi) + // dxk = sum_i(-yk*yi * dyi) - (-yk*yk)*dyk + (yk - yk*yk)*dyk + // dxk = sum_i(-yk*yi * dyi) + yk*yk*dyk + yk*dyk - yk*yk*dyk + // dxk = sum_i(-yk*yi * dyi) + yk*dyk + // dxk = -yk * sum_i(yi * dyi) + yk*dyk + // dxk = -yk * dot(y, dy) + yk*dyk + // dxk = yk * (- dot(y, dy) + dyk) + // dxk = yk * (dyk - dot(y, dy)) + // + // post-order: + // dot_y_dy := dot(y, dy) + // dx := dy + // dx := dx - dot_y_dy + // dx := dx * y + + // linear runtime, no additional memory + float dot_y_dy = 0; + ggml_vec_dot_f32(nc, &dot_y_dy, 0, y, 0, dy, 0, 1); + ggml_vec_cpy_f32(nc, dx, dy); + ggml_vec_acc1_f32(nc, dx, -dot_y_dy); + ggml_vec_mul_f32(nc, dx, dx, y); + ggml_vec_scale_f32(nc, dx, scale); + +#ifndef NDEBUG + for (int i = 0; i < nc; ++i) { + assert(!isnan(dx[i])); + assert(!isinf(dx[i])); + } +#endif + } +} + +void ggml_compute_forward_soft_max_ext_back(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_soft_max_ext_back_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_clamp + +static void ggml_compute_forward_clamp_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + float min; + float max; + memcpy(&min, (float *) dst->op_params + 0, sizeof(float)); + memcpy(&max, (float *) dst->op_params + 1, sizeof(float)); + + const int ith = params->ith; + const int nth = params->nth; + + const int n = ggml_nrows(src0); + const int nc = src0->ne[0]; + + const size_t nb00 = src0->nb[0]; + const size_t nb01 = src0->nb[1]; + + const size_t nb0 = dst->nb[0]; + const size_t nb1 = dst->nb[1]; + + GGML_ASSERT(nb0 == sizeof(float)); + GGML_ASSERT(nb00 == sizeof(float)); + + for (int j = ith; j < n; j += nth) { + float * dst_ptr = (float *) ((char *) dst->data + j * nb1); + float * src0_ptr = (float *) ((char *) src0->data + j * nb01); + + for (int i = 0; i < nc; i++) { + dst_ptr[i] = MAX(MIN(src0_ptr[i], max), min); + } + } +} + +static void ggml_compute_forward_clamp_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + float min; + float max; + memcpy(&min, (float *) dst->op_params + 0, sizeof(float)); + memcpy(&max, (float *) dst->op_params + 1, sizeof(float)); + + const int ith = params->ith; + const int nth = params->nth; + + const int n = ggml_nrows(src0); + const int nc = src0->ne[0]; + + const size_t nb00 = src0->nb[0]; + const size_t nb01 = src0->nb[1]; + + const size_t nb0 = dst->nb[0]; + const size_t nb1 = dst->nb[1]; + + GGML_ASSERT(nb0 == sizeof(ggml_fp16_t)); + GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); + + for (int j = ith; j < n; j += nth) { + ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + j * nb1); + ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + j * nb01); + + for (int i = 0; i < nc; i++) { + float v = GGML_CPU_FP16_TO_FP32(src0_ptr[i]); + dst_ptr[i] = GGML_CPU_FP32_TO_FP16(MAX(MIN(v, max), min)); + } + } +} + +void ggml_compute_forward_clamp(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_clamp_f32(params, dst); + } + break; + case GGML_TYPE_F16: + { + ggml_compute_forward_clamp_f16(params, dst); + } + break; + case GGML_TYPE_BF16: + case GGML_TYPE_Q4_0: 
+ case GGML_TYPE_Q4_1: + case GGML_TYPE_Q5_0: + case GGML_TYPE_Q5_1: + case GGML_TYPE_Q8_0: + case GGML_TYPE_Q8_1: + case GGML_TYPE_MXFP4: + case GGML_TYPE_Q2_K: + case GGML_TYPE_Q3_K: + case GGML_TYPE_Q4_K: + case GGML_TYPE_Q5_K: + case GGML_TYPE_Q6_K: + case GGML_TYPE_TQ1_0: + case GGML_TYPE_TQ2_0: + case GGML_TYPE_IQ2_XXS: + case GGML_TYPE_IQ2_XS: + case GGML_TYPE_IQ3_XXS: + case GGML_TYPE_IQ1_S: + case GGML_TYPE_IQ1_M: + case GGML_TYPE_IQ4_NL: + case GGML_TYPE_IQ4_XS: + case GGML_TYPE_IQ3_S: + case GGML_TYPE_IQ2_S: + case GGML_TYPE_Q8_K: + case GGML_TYPE_I8: + case GGML_TYPE_I16: + case GGML_TYPE_I32: + case GGML_TYPE_I64: + case GGML_TYPE_F64: + case GGML_TYPE_COUNT: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_rope + +static float rope_yarn_ramp(const float low, const float high, const int i0) { + const float y = (i0 / 2 - low) / MAX(0.001f, high - low); + return 1 - MIN(1, MAX(0, y)); +} + +// YaRN algorithm based on LlamaYaRNScaledRotaryEmbedding.py from https://github.com/jquesnelle/yarn +// MIT licensed. Copyright (c) 2023 Jeffrey Quesnelle and Bowen Peng. +static void rope_yarn(float theta_extrap, + float freq_scale, + float corr_dims[2], + int64_t i0, + float ext_factor, + float mscale, + float * cos_theta, + float * sin_theta) { + // Get n-d rotational scaling corrected for extrapolation + float theta_interp = freq_scale * theta_extrap; + float theta = theta_interp; + if (ext_factor != 0.0f) { + float ramp_mix = rope_yarn_ramp(corr_dims[0], corr_dims[1], i0) * ext_factor; + theta = theta_interp * (1 - ramp_mix) + theta_extrap * ramp_mix; + + // Get n-d magnitude scaling corrected for interpolation + mscale *= 1.0f + 0.1f * logf(1.0f / freq_scale); + } + *cos_theta = cosf(theta) * mscale; + *sin_theta = sinf(theta) * mscale; +} + +static void ggml_rope_cache_init(float theta_base, + float freq_scale, + const float * freq_factors, + float corr_dims[2], + int64_t ne0, + float ext_factor, + float mscale, + float * cache, + float sin_sign, + float theta_scale) { + // ref: https://github.com/jquesnelle/yarn/blob/master/scaled_rope/LlamaYaRNScaledRotaryEmbedding.py + float theta = theta_base; + for (int64_t i0 = 0; i0 < ne0; i0 += 2) { + const float ff = freq_factors ? freq_factors[i0 / 2] : 1.0f; + rope_yarn(theta / ff, freq_scale, corr_dims, i0, ext_factor, mscale, &cache[i0 + 0], &cache[i0 + 1]); + cache[i0 + 1] *= sin_sign; + + theta *= theta_scale; + } +} + +static void ggml_mrope_cache_init(float theta_base_t, + float theta_base_h, + float theta_base_w, + float theta_base_e, + int sections[4], + bool is_imrope, + bool indep_sects, + float freq_scale, + const float * freq_factors, + float corr_dims[2], + int64_t ne0, + float ext_factor, + float mscale, + float * cache, + float sin_sign, + float theta_scale) { + // ref: https://github.com/jquesnelle/yarn/blob/master/scaled_rope/LlamaYaRNScaledRotaryEmbedding.py + float theta_t = theta_base_t; + float theta_h = theta_base_h; + float theta_w = theta_base_w; + float theta_e = theta_base_e; // extra position id for vision encoder + int sect_dims = sections[0] + sections[1] + sections[2] + sections[3]; + int sec_w = sections[1] + sections[0]; + int sec_e = sections[2] + sec_w; + GGML_ASSERT(sect_dims <= ne0); + + for (int64_t i0 = 0; i0 < ne0; i0 += 2) { + const float ff = freq_factors ? freq_factors[i0 / 2] : 1.0f; + + int sector = (i0 / 2) % sect_dims; + if (indep_sects) { + // compute theta independently for each dim sections + // (i.e. 
reset corresponding theta when `i0` goes from one section to another) + if (sector == 0) { + theta_t = theta_base_t; + } else if (sector == sections[0]) { + theta_h = theta_base_h; + } else if (sector == sec_w) { + theta_w = theta_base_w; + } else if (sector == sec_e) { + theta_e = theta_base_e; + } + } + + float theta = theta_t; + if (is_imrope) { // qwen3vl apply interleaved mrope + if (sector % 3 == 1 && sector < 3 * sections[1]) { + theta = theta_h; + } else if (sector % 3 == 2 && sector < 3 * sections[2]) { + theta = theta_w; + } else if (sector % 3 == 0 && sector < 3 * sections[0]) { + theta = theta_t; + } else { + theta = theta_e; + } + } else { + if (sector >= sections[0] && sector < sec_w) { + theta = theta_h; + } else if (sector >= sec_w && sector < sec_w + sections[2]) { + theta = theta_w; + } else if (sector >= sec_w + sections[2]) { + theta = theta_e; + } + } + + rope_yarn(theta / ff, freq_scale, corr_dims, i0, ext_factor, mscale, &cache[i0 + 0], &cache[i0 + 1]); + cache[i0 + 1] *= sin_sign; + + theta_t *= theta_scale; + theta_w *= theta_scale; + theta_h *= theta_scale; + theta_e *= theta_scale; + } +} + +template <typename T> +static void rotate_pairs(const int64_t n, + const int64_t n_offset, + const float * cache, + const T * src_data, + T * dst_data, + const int scale = 2) { + for (int64_t i0 = 0; i0 < n; i0 += 2) { + const int64_t ic = + i0 / scale; // hack for GGML_ROPE_TYPE_NORMAL, where we need ic = i0; for all other cases, ic = i0/2 + + const float cos_theta = cache[i0 + 0]; + const float sin_theta = cache[i0 + 1]; + + const T * const src = src_data + ic; + T * dst = dst_data + ic; + + const float x0 = type_conversion_table<T>::to_f32(src[0]); + const float x1 = type_conversion_table<T>::to_f32(src[n_offset]); + + dst[0] = type_conversion_table<T>::from_f32(x0 * cos_theta - x1 * sin_theta); + dst[n_offset] = type_conversion_table<T>::from_f32(x0 * sin_theta + x1 * cos_theta); + } +} + +template <typename T> //float or ggml_fp16_t +static void ggml_compute_forward_rope_flt(const ggml_compute_params * params, ggml_tensor * dst, const bool forward) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + const ggml_tensor * src2 = dst->src[2]; + + GGML_ASSERT(src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16); + GGML_ASSERT(src1->type == GGML_TYPE_I32); + + float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow; + int sections[4]; + + //const int n_past = ((int32_t *) dst->op_params)[0]; + const int n_dims = ((int32_t *) dst->op_params)[1]; + const int mode = ((int32_t *) dst->op_params)[2]; + //const int n_ctx = ((int32_t *) dst->op_params)[3]; + const int n_ctx_orig = ((int32_t *) dst->op_params)[4]; + + memcpy(&freq_base, (int32_t *) dst->op_params + 5, sizeof(float)); + memcpy(&freq_scale, (int32_t *) dst->op_params + 6, sizeof(float)); + memcpy(&ext_factor, (int32_t *) dst->op_params + 7, sizeof(float)); + memcpy(&attn_factor, (int32_t *) dst->op_params + 8, sizeof(float)); + memcpy(&beta_fast, (int32_t *) dst->op_params + 9, sizeof(float)); + memcpy(&beta_slow, (int32_t *) dst->op_params + 10, sizeof(float)); + memcpy(&sections, (int32_t *) dst->op_params + 11, sizeof(int) * 4); + + GGML_TENSOR_UNARY_OP_LOCALS + + //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3); + //printf("n_past = %d, ne2 = %d\n", n_past, ne2); + + GGML_ASSERT(nb0 == nb00); + GGML_ASSERT(nb0 == sizeof(T)); + + const int ith = params->ith; + const int nth = params->nth; + + const int nr = ggml_nrows(dst); + + GGML_ASSERT(n_dims <= ne0); + GGML_ASSERT(n_dims
% 2 == 0); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + // row index used to determine which thread to use + int ir = 0; + + const float theta_scale = powf(freq_base, -2.0f / n_dims); + + float corr_dims[2]; + ggml_rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow, corr_dims); + + const bool is_imrope = mode == GGML_ROPE_TYPE_IMROPE; // qwen3vl apply interleaved mrope + const bool mrope_used = + mode & GGML_ROPE_TYPE_MROPE; // ggml_rope_multi, note: also true for vision (24 & 8 == true) and for imrope + const bool is_vision = mode == GGML_ROPE_TYPE_VISION; + + if (mrope_used) { + GGML_ASSERT(sections[0] > 0 || sections[1] > 0 || sections[2] > 0); + } + + if (is_vision) { + GGML_ASSERT(n_dims == ne0 / 2); + } + + const float * freq_factors = NULL; + if (src2 != NULL) { + GGML_ASSERT(src2->type == GGML_TYPE_F32); + GGML_ASSERT(src2->ne[0] >= n_dims / 2); + freq_factors = (const float *) src2->data; + } + + // backward process uses inverse rotation by cos and sin. + // cos and sin build a rotation matrix, where the inverse is the transpose. + // this essentially just switches the sign of sin. + const float sin_sign = forward ? 1.0f : -1.0f; + + const int32_t * pos = (const int32_t *) src1->data; + + for (int64_t i3 = 0; i3 < ne3; i3++) { // batch + for (int64_t i2 = 0; i2 < ne2; i2++) { // seq-len + + float * cache = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32) * ith; + if (!mrope_used) { + const int64_t p = pos[i2]; + ggml_rope_cache_init(p, freq_scale, freq_factors, corr_dims, ne0, ext_factor, attn_factor, cache, + sin_sign, theta_scale); + } else { + const int64_t p_t = pos[i2]; + const int64_t p_h = pos[i2 + ne2]; + const int64_t p_w = pos[i2 + ne2 * 2]; + const int64_t p_e = pos[i2 + ne2 * 3]; + ggml_mrope_cache_init(p_t, p_h, p_w, p_e, sections, is_imrope, is_vision, freq_scale, freq_factors, + corr_dims, ne0, ext_factor, attn_factor, cache, sin_sign, theta_scale); + } + + for (int64_t i1 = 0; i1 < ne1; i1++) { // attn-heads + if (ir++ < ir0) { + continue; + } + if (ir > ir1) { + break; + } + + T * src = (T *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01); + T * dst_data = (T *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1); + + switch (mode) { + case GGML_ROPE_TYPE_NORMAL: + rotate_pairs(n_dims, 1, cache, src, dst_data, 1); + break; + case GGML_ROPE_TYPE_NEOX: + case GGML_ROPE_TYPE_MROPE: + case GGML_ROPE_TYPE_IMROPE: + rotate_pairs(n_dims, n_dims / 2, cache, src, dst_data); + break; + case GGML_ROPE_TYPE_VISION: + rotate_pairs(ne0, n_dims, cache, src, dst_data); + break; + default: + GGML_ABORT("rope type not supported"); + } + + if (!is_vision) { + // fill the remaining channels with data from src tensor + for (int64_t i0 = n_dims; i0 < ne0; i0 += 2) { + const T * const src = + (T *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01 + i0 * nb00); + T * dst_data = (T *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1 + i0 * nb0); + + dst_data[0] = src[0]; + dst_data[1] = src[1]; + } + } + } //attn-heads + } + } +} + +void ggml_compute_forward_rope(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F16: + { + ggml_compute_forward_rope_flt<ggml_fp16_t>(params, dst, true); + } + break; + case GGML_TYPE_F32: + { + ggml_compute_forward_rope_flt<float>(params, dst, true); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// 
ggml_compute_forward_rope_back + +void ggml_compute_forward_rope_back(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F16: + { + ggml_compute_forward_rope_flt<ggml_fp16_t>(params, dst, false); + } + break; + case GGML_TYPE_F32: + { + ggml_compute_forward_rope_flt<float>(params, dst, false); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_conv_transpose_1d + +static void ggml_compute_forward_conv_transpose_1d_f16_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(src0->type == GGML_TYPE_F16); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_F32); + + GGML_TENSOR_BINARY_OP_LOCALS + + const int ith = params->ith; + const int nth = params->nth; + + const int nk = ne00 * ne01 * ne02; + + GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); + GGML_ASSERT(nb10 == sizeof(float)); + + if (ith == 0) { + memset(params->wdata, 0, params->wsize); + + // permute kernel data (src0) from (K x Cout x Cin) to (Cin x K x Cout) + { + ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; + + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = 0; i01 < ne01; i01++) { + const ggml_fp16_t * const src = (ggml_fp16_t *) ((char *) src0->data + i02 * nb02 + i01 * nb01); + ggml_fp16_t * dst_data = wdata + i01 * ne00 * ne02; + for (int64_t i00 = 0; i00 < ne00; i00++) { + dst_data[i00 * ne02 + i02] = src[i00]; + } + } + } + } + + // permute source data (src1) from (L x Cin) to (Cin x L) + { + ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + nk; + ggml_fp16_t * dst_data = wdata; + + for (int64_t i11 = 0; i11 < ne11; i11++) { + const float * const src = (float *) ((char *) src1->data + i11 * nb11); + for (int64_t i10 = 0; i10 < ne10; i10++) { + dst_data[i10 * ne11 + i11] = GGML_CPU_FP32_TO_FP16(src[i10]); + } + } + } + + // need to zero dst since we are accumulating into it + memset(dst->data, 0, ggml_nbytes(dst)); + } + ggml_barrier(params->threadpool); + + const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; + + // total rows in dst + const int nr = ne1; + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; + ggml_fp16_t * const wdata_src = wdata + nk; + + for (int i1 = ir0; i1 < ir1; i1++) { + float * dst_data = (float *) ((char *) dst->data + i1 * nb1); + ggml_fp16_t * wdata_kernel = wdata + i1 * ne02 * ne00; + for (int i10 = 0; i10 < ne10; i10++) { + const int i1n = i10 * ne11; + for (int i00 = 0; i00 < ne00; i00++) { + float v = 0; + ggml_vec_dot_f16(ne02, &v, 0, (ggml_fp16_t *) wdata_src + i1n, 0, + (ggml_fp16_t *) wdata_kernel + i00 * ne02, 0, 1); + dst_data[i10 * s0 + i00] += v; + } + } + } +} + +static void ggml_compute_forward_conv_transpose_1d_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_F32); + + GGML_TENSOR_BINARY_OP_LOCALS + + const int ith = params->ith; + const int nth = params->nth; + + const int nk = ne00 * ne01 * ne02; + + GGML_ASSERT(nb00 == sizeof(float)); + GGML_ASSERT(nb10 == sizeof(float)); + + if (ith == 0) {
memset(params->wdata, 0, params->wsize); + + // prepare kernel data (src0) from (K x Cout x Cin) to (Cin x K x Cout) + { + float * const wdata = (float *) params->wdata + 0; + + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = 0; i01 < ne01; i01++) { + const float * const src = (float *) ((char *) src0->data + i02 * nb02 + i01 * nb01); + float * dst_data = wdata + i01 * ne00 * ne02; + for (int64_t i00 = 0; i00 < ne00; i00++) { + dst_data[i00 * ne02 + i02] = src[i00]; + } + } + } + } + + // prepare source data (src1) + { + float * const wdata = (float *) params->wdata + nk; + float * dst_data = wdata; + + for (int64_t i11 = 0; i11 < ne11; i11++) { + const float * const src = (float *) ((char *) src1->data + i11 * nb11); + for (int64_t i10 = 0; i10 < ne10; i10++) { + dst_data[i10 * ne11 + i11] = src[i10]; + } + } + } + + // need to zero dst since we are accumulating into it + memset(dst->data, 0, ggml_nbytes(dst)); + } + ggml_barrier(params->threadpool); + + const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; + + // total rows in dst + const int nr = ne1; + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + float * const wdata = (float *) params->wdata + 0; + float * const wdata_src = wdata + nk; + + for (int i1 = ir0; i1 < ir1; i1++) { + float * dst_data = (float *) ((char *) dst->data + i1 * nb1); + float * wdata_kernel = wdata + i1 * ne02 * ne00; + for (int i10 = 0; i10 < ne10; i10++) { + const int i1n = i10 * ne11; + for (int i00 = 0; i00 < ne00; i00++) { + float v = 0; + ggml_vec_dot_f32(ne02, &v, 0, wdata_src + i1n, 0, wdata_kernel + i00 * ne02, 0, 1); + dst_data[i10 * s0 + i00] += v; + } + } + } +} + +void ggml_compute_forward_conv_transpose_1d(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F16: + { + ggml_compute_forward_conv_transpose_1d_f16_f32(params, dst); + } + break; + case GGML_TYPE_F32: + { + ggml_compute_forward_conv_transpose_1d_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_im2col_f32 +// src0: kernel [OC, IC, KH, KW] +// src1: image [N, IC, IH, IW] +// dst: result [N, OH, OW, IC*KH*KW] +static void ggml_compute_forward_im2col_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_F32); + + GGML_TENSOR_BINARY_OP_LOCALS; + + const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; + const int32_t s1 = ((const int32_t *) (dst->op_params))[1]; + const int32_t p0 = ((const int32_t *) (dst->op_params))[2]; + const int32_t p1 = ((const int32_t *) (dst->op_params))[3]; + const int32_t d0 = ((const int32_t *) (dst->op_params))[4]; + const int32_t d1 = ((const int32_t *) (dst->op_params))[5]; + const bool is_2D = ((const int32_t *) (dst->op_params))[6] == 1; + + const int ith = params->ith; + const int nth = params->nth; + + const int64_t N = is_2D ? ne13 : ne12; + const int64_t IC = is_2D ? ne12 : ne11; + const int64_t IH = is_2D ? ne11 : 1; + const int64_t IW = ne10; + + const int64_t KH = is_2D ? ne01 : 1; + const int64_t KW = ne00; + + const int64_t OH = is_2D ? ne2 : 1; + const int64_t OW = ne1; + + int ofs0 = is_2D ? nb13 : nb12; + int ofs1 = is_2D ? 
nb12 : nb11; + + GGML_ASSERT(nb10 == sizeof(float)); + + // im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW] + { + float * const wdata = (float *) dst->data; + + for (int64_t in = 0; in < N; in++) { + for (int64_t ioh = 0; ioh < OH; ioh++) { // 1 + for (int64_t iow = 0; iow < OW; iow++) { + for (int64_t iic = ith; iic < IC; iic += nth) { + // micro kernel + float * dst_data = wdata + (in * OH * OW + ioh * OW + iow) * (IC * KH * KW); // [IC, KH, KW] + const float * const src_data = + (float *) ((char *) src1->data + in * ofs0 + iic * ofs1); // [IH, IW] + + for (int64_t ikh = 0; ikh < KH; ikh++) { // 1 + for (int64_t ikw = 0; ikw < KW; ikw++) { + const int64_t iiw = iow * s0 + ikw * d0 - p0; + const int64_t iih = ioh * s1 + ikh * d1 - p1; + + if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) { + dst_data[iic * (KH * KW) + ikh * KW + ikw] = 0; + } else { + dst_data[iic * (KH * KW) + ikh * KW + ikw] = (src_data[iih * IW + iiw]); + } + } + } + } + } + } + } + } +} + +// ggml_compute_forward_im2col_f16 +// src0: kernel [OC, IC, KH, KW] +// src1: image [N, IC, IH, IW] +// dst: result [N, OH, OW, IC*KH*KW] +static void ggml_compute_forward_im2col_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(src0->type == GGML_TYPE_F16); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_F16); + + GGML_TENSOR_BINARY_OP_LOCALS; + + const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; + const int32_t s1 = ((const int32_t *) (dst->op_params))[1]; + const int32_t p0 = ((const int32_t *) (dst->op_params))[2]; + const int32_t p1 = ((const int32_t *) (dst->op_params))[3]; + const int32_t d0 = ((const int32_t *) (dst->op_params))[4]; + const int32_t d1 = ((const int32_t *) (dst->op_params))[5]; + const bool is_2D = ((const int32_t *) (dst->op_params))[6] == 1; + + const int ith = params->ith; + const int nth = params->nth; + + const int64_t N = is_2D ? ne13 : ne12; + const int64_t IC = is_2D ? ne12 : ne11; + const int64_t IH = is_2D ? ne11 : 1; + const int64_t IW = ne10; + + const int64_t KH = is_2D ? ne01 : 1; + const int64_t KW = ne00; + + const int64_t OH = is_2D ? ne2 : 1; + const int64_t OW = ne1; + + int ofs0 = is_2D ? nb13 : nb12; + int ofs1 = is_2D ? 
nb12 : nb11; + + GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); + GGML_ASSERT(nb10 == sizeof(float)); + + // im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW] + { + ggml_fp16_t * const wdata = (ggml_fp16_t *) dst->data; + + for (int64_t in = 0; in < N; in++) { + for (int64_t ioh = 0; ioh < OH; ioh++) { // 1 + for (int64_t iow = 0; iow < OW; iow++) { + for (int64_t iic = ith; iic < IC; iic += nth) { + // micro kernel + ggml_fp16_t * dst_data = + wdata + (in * OH * OW + ioh * OW + iow) * (IC * KH * KW); // [IC, KH, KW] + const float * const src_data = + (float *) ((char *) src1->data + in * ofs0 + iic * ofs1); // [IH, IW] + + for (int64_t ikh = 0; ikh < KH; ikh++) { // 1 + for (int64_t ikw = 0; ikw < KW; ikw++) { + const int64_t iiw = iow * s0 + ikw * d0 - p0; + const int64_t iih = ioh * s1 + ikh * d1 - p1; + + if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) { + dst_data[iic * (KH * KW) + ikh * KW + ikw] = 0; + } else { + dst_data[iic * (KH * KW) + ikh * KW + ikw] = + GGML_CPU_FP32_TO_FP16(src_data[iih * IW + iiw]); + } + } + } + } + } + } + } + } +} + +void ggml_compute_forward_im2col(const ggml_compute_params * params, ggml_tensor * dst) { + switch (dst->type) { + case GGML_TYPE_F16: + { + ggml_compute_forward_im2col_f16(params, dst); + } + break; + case GGML_TYPE_F32: + { + ggml_compute_forward_im2col_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_im2col_back_f32 + +void ggml_compute_forward_im2col_back_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; // gradients of forward pass output + const ggml_tensor * src1 = dst->src[1]; // convolution kernel + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_F32); + + GGML_TENSOR_BINARY_OP_LOCALS; + + const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; + const int32_t s1 = ((const int32_t *) (dst->op_params))[1]; + const int32_t p0 = ((const int32_t *) (dst->op_params))[2]; + const int32_t p1 = ((const int32_t *) (dst->op_params))[3]; + const int32_t d0 = ((const int32_t *) (dst->op_params))[4]; + const int32_t d1 = ((const int32_t *) (dst->op_params))[5]; + const bool is_2D = ((const int32_t *) (dst->op_params))[6] == 1; + + const int ith = params->ith; + const int nth = params->nth; + + const int64_t N = is_2D ? ne3 : ne2; + const int64_t IC = is_2D ? ne2 : ne1; + const int64_t IH = is_2D ? ne1 : 1; + const int64_t IW = ne0; + + const int64_t KH = is_2D ? ne11 : 1; + const int64_t KW = ne10; + + const int64_t OH = is_2D ? ne02 : 1; + const int64_t OW = ne01; + + int ofs0 = is_2D ? nb3 : nb2; + int ofs1 = is_2D ? nb2 : nb1; + + GGML_ASSERT(nb0 == sizeof(float)); + + // im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW] + { + float * const wdata = (float *) dst->data; + + for (int64_t in = 0; in < N; in++) { + for (int64_t iic = ith; iic < IC; iic += nth) { + for (int64_t iih = 0; iih < IH; iih++) { + for (int64_t iiw = 0; iiw < IW; iiw++) { + // micro kernel + float grad = 0.0f; + for (int64_t ikh = 0; ikh < KH; ikh++) { + for (int64_t ikw = 0; ikw < KW; ikw++) { + // For s0 > 1 some values were skipped over in the forward pass. + // These values have tmpw % s0 != 0 and need to be skipped in the backwards pass as well. + const int64_t tmpw = (iiw + p0 - ikw * d0); + if (tmpw % s0 != 0) { + continue; + } + const int64_t iow = tmpw / s0; + + // Equivalent logic as above except for s1. 
+ int64_t ioh; + if (is_2D) { + const int64_t tmph = iih + p1 - ikh * d1; + + if (tmph % s1 != 0) { + continue; + } + + ioh = tmph / s1; + } else { + ioh = 0; + } + + if (iow < 0 || iow >= OW || ioh < 0 || ioh >= OH) { + continue; + } + + const float * const grad_in = + (const float *) src0->data + + (in * OH * OW + ioh * OW + iow) * (IC * KH * KW); // [IC, KH, KW] + grad += grad_in[iic * (KH * KW) + ikh * KW + ikw]; + } + } + float * dst_data = (float *) ((char *) wdata + (in * ofs0 + iic * ofs1)); // [IH, IW] + dst_data[iih * IW + iiw] = grad; + } + } + } + } + } +} + +// ggml_compute_forward_im2col_3d_f16 +// src0: kernel [OC*IC, KD, KH, KW] +// src1: image [N*IC, ID, IH, IW] +// dst: result [N*OD, OH, OW, IC * KD * KH * KW] +static void ggml_compute_forward_im2col_3d_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(src0->type == GGML_TYPE_F16); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_F16); + + GGML_TENSOR_BINARY_OP_LOCALS; + + const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; + const int32_t s1 = ((const int32_t *) (dst->op_params))[1]; + const int32_t s2 = ((const int32_t *) (dst->op_params))[2]; + const int32_t p0 = ((const int32_t *) (dst->op_params))[3]; + const int32_t p1 = ((const int32_t *) (dst->op_params))[4]; + const int32_t p2 = ((const int32_t *) (dst->op_params))[5]; + const int32_t d0 = ((const int32_t *) (dst->op_params))[6]; + const int32_t d1 = ((const int32_t *) (dst->op_params))[7]; + const int32_t d2 = ((const int32_t *) (dst->op_params))[8]; + const int32_t IC = ((const int32_t *) (dst->op_params))[9]; + + const int ith = params->ith; + const int nth = params->nth; + + const int64_t N = ne13 / IC; + const int64_t ID = ne12; + const int64_t IH = ne11; + const int64_t IW = ne10; + + const int64_t OC = ne03 / IC; + GGML_UNUSED(OC); + const int64_t KD = ne02; + const int64_t KH = ne01; + const int64_t KW = ne00; + + const int64_t OD = ne3 / N; + const int64_t OH = ne2; + const int64_t OW = ne1; + const int64_t OH_OW = OH * OW; + const int64_t KD_KH_KW = KD * KH * KW; + const int64_t KH_KW = KH * KW; + const int64_t IC_KD_KH_KW = IC * KD * KH * KW; + + GGML_ASSERT(nb10 == sizeof(float)); + + // im2col: [N*IC, ID, IH, IW] => [N*OD, OH, OW, IC * KD * KH * KW] + { + ggml_fp16_t * const wdata = (ggml_fp16_t *) dst->data; + + for (int64_t in = 0; in < N; in++) { + for (int64_t iod = 0; iod < OD; iod++) { + for (int64_t ioh = 0; ioh < OH; ioh++) { + for (int64_t iow = 0; iow < OW; iow++) { + for (int64_t iic = ith; iic < IC; iic += nth) { + // micro kernel + ggml_fp16_t * dst_data = wdata + (in * OD * OH_OW + iod * OH_OW + ioh * OW + iow) * + IC_KD_KH_KW; // [IC, KD, KH, KW] + const float * const src_data = + (const float *) ((const char *) src1->data + (in * IC + iic) * nb13); // [ID, IH, IW] + + for (int64_t ikd = 0; ikd < KD; ikd++) { + for (int64_t ikh = 0; ikh < KH; ikh++) { + for (int64_t ikw = 0; ikw < KW; ikw++) { + const int64_t iiw = iow * s0 + ikw * d0 - p0; + const int64_t iih = ioh * s1 + ikh * d1 - p1; + const int64_t iid = iod * s2 + ikd * d2 - p2; + + if (iid < 0 || iid >= ID || iih < 0 || iih >= IH || iiw < 0 || iiw >= IW || + iid < 0 || iid >= ID) { + dst_data[iic * KD_KH_KW + ikd * KH_KW + ikh * KW + ikw] = 0; + } else { + const float * const s = + (const float *) ((const char *) src_data + iid * nb12 + iih * nb11 + + iiw * nb10); // [ID, IH, IW] + dst_data[iic * KD_KH_KW + ikd * KH_KW + ikh * KW 
+ ikw] = + GGML_CPU_FP32_TO_FP16(*s); + } + } + } + } + } + } + } + } + } + } +} + +// ggml_compute_forward_im2col_3d_f32 +// src0: kernel [OC*IC, KD, KH, KW] +// src1: image [N*IC, ID, IH, IW] +// dst: result [N*OD, OH, OW, IC * KD * KH * KW] +static void ggml_compute_forward_im2col_3d_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_F32); + + GGML_TENSOR_BINARY_OP_LOCALS; + + const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; + const int32_t s1 = ((const int32_t *) (dst->op_params))[1]; + const int32_t s2 = ((const int32_t *) (dst->op_params))[2]; + const int32_t p0 = ((const int32_t *) (dst->op_params))[3]; + const int32_t p1 = ((const int32_t *) (dst->op_params))[4]; + const int32_t p2 = ((const int32_t *) (dst->op_params))[5]; + const int32_t d0 = ((const int32_t *) (dst->op_params))[6]; + const int32_t d1 = ((const int32_t *) (dst->op_params))[7]; + const int32_t d2 = ((const int32_t *) (dst->op_params))[8]; + const int32_t IC = ((const int32_t *) (dst->op_params))[9]; + + const int ith = params->ith; + const int nth = params->nth; + + const int64_t N = ne13 / IC; + const int64_t ID = ne12; + const int64_t IH = ne11; + const int64_t IW = ne10; + + const int64_t OC = ne03 / IC; + GGML_UNUSED(OC); + const int64_t KD = ne02; + const int64_t KH = ne01; + const int64_t KW = ne00; + + const int64_t OD = ne3 / N; + const int64_t OH = ne2; + const int64_t OW = ne1; + + const int64_t OH_OW = OH * OW; + const int64_t KD_KH_KW = KD * KH * KW; + const int64_t KH_KW = KH * KW; + const int64_t IC_KD_KH_KW = IC * KD * KH * KW; + + GGML_ASSERT(nb10 == sizeof(float)); + + // im2col: [N*IC, ID, IH, IW] => [N*OD, OH, OW, IC * KD * KH * KW] + { + float * const wdata = (float *) dst->data; + + for (int64_t in = 0; in < N; in++) { + for (int64_t iod = 0; iod < OD; iod++) { + for (int64_t ioh = 0; ioh < OH; ioh++) { + for (int64_t iow = 0; iow < OW; iow++) { + for (int64_t iic = ith; iic < IC; iic += nth) { + // micro kernel + float * dst_data = wdata + (in * OD * OH_OW + iod * OH_OW + ioh * OW + iow) * + IC_KD_KH_KW; // [IC, KD, KH, KW] + const float * const src_data = + (const float *) ((const char *) src1->data + (in * IC + iic) * nb13); // [ID, IH, IW] + + for (int64_t ikd = 0; ikd < KD; ikd++) { + for (int64_t ikh = 0; ikh < KH; ikh++) { + for (int64_t ikw = 0; ikw < KW; ikw++) { + const int64_t iiw = iow * s0 + ikw * d0 - p0; + const int64_t iih = ioh * s1 + ikh * d1 - p1; + const int64_t iid = iod * s2 + ikd * d2 - p2; + + if (iid < 0 || iid >= ID || iih < 0 || iih >= IH || iiw < 0 || iiw >= IW || + iid < 0 || iid >= ID) { + dst_data[iic * KD_KH_KW + ikd * KH_KW + ikh * KW + ikw] = 0; + } else { + const float * const s = + (const float *) ((const char *) src_data + iid * nb12 + iih * nb11 + + iiw * nb10); // [ID, IH, IW] + dst_data[iic * KD_KH_KW + ikd * KH_KW + ikh * KW + ikw] = *s; + } + } + } + } + } + } + } + } + } + } +} + +void ggml_compute_forward_im2col_3d(const ggml_compute_params * params, ggml_tensor * dst) { + switch (dst->type) { + case GGML_TYPE_F16: + { + ggml_compute_forward_im2col_3d_f16(params, dst); + } + break; + case GGML_TYPE_F32: + { + ggml_compute_forward_im2col_3d_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +static void ggml_call_mul_mat(ggml_type type, + const ggml_compute_params * params, + int64_t m, + int64_t n, + int64_t k, + void * a, + void 
* b, + float * c) { + const ggml_type_traits * traits = ggml_get_type_traits(type); + struct ggml_tensor src1 = {}; + src1.type = type; + src1.ne[0] = k; + src1.ne[1] = m; + src1.ne[2] = 1; + src1.ne[3] = 1; + src1.nb[0] = traits->type_size; + src1.nb[1] = k * traits->type_size; + src1.nb[2] = src1.nb[1]; + src1.nb[3] = src1.nb[2]; + src1.data = a; + + struct ggml_tensor src0 = {}; + src0.type = type; + src0.ne[0] = k; + src0.ne[1] = n; + src0.ne[2] = 1; + src0.ne[3] = 1; + src0.nb[0] = traits->type_size; + src0.nb[1] = k * traits->type_size; + src0.nb[2] = src0.nb[1]; + src0.nb[3] = src0.nb[2]; + src0.data = b; + + struct ggml_tensor dst = {}; + dst.ne[0] = n; + dst.ne[1] = m; + dst.ne[2] = 1; + dst.ne[3] = 1; + dst.nb[0] = sizeof(float); + dst.nb[1] = n * sizeof(float); + dst.nb[2] = dst.nb[1]; + dst.nb[3] = dst.nb[2]; + dst.data = c; + dst.src[0] = &src0; + dst.src[1] = &src1; + + ggml_compute_forward_mul_mat(params, &dst); +} + +static inline int64_t ggml_wrap_around(int64_t coord, int64_t size) { + return (coord + size) % size; // adding size avoids negative number weirdness +} + +// ggml_compute_forward_conv_2d + +static void ggml_compute_forward_conv_2d_impl(const ggml_compute_params * params, + const ggml_tensor * kernel, // [KW, KH, IC, OC] + const ggml_tensor * src, // [W, H, C, N] + ggml_tensor * dst, // [OW, OH, OC, N] + ggml_type kernel_type) { + GGML_ASSERT(ggml_is_contiguous(kernel)); + GGML_ASSERT(kernel_type == GGML_TYPE_F16 || kernel_type == GGML_TYPE_F32); + GGML_ASSERT(kernel->type == kernel_type); + + const ggml_type_traits * traits = ggml_get_type_traits(kernel_type); + + const int32_t stride_x = dst->op_params[0]; + const int32_t stride_y = dst->op_params[1]; + const int32_t pad_x = dst->op_params[2]; + const int32_t pad_y = dst->op_params[3]; + const int32_t dilation_x = dst->op_params[4]; + const int32_t dilation_y = dst->op_params[5]; + + const int64_t c_in = src->ne[2]; + const int64_t c_out = kernel->ne[3]; + GGML_ASSERT(c_in == kernel->ne[2]); + + const int64_t src_w = src->ne[0]; + const int64_t src_h = src->ne[1]; + const int64_t knl_w = kernel->ne[0]; + const int64_t knl_h = kernel->ne[1]; + const int64_t dst_w = dst->ne[0]; + const int64_t dst_h = dst->ne[1]; + + const float * src_data = (float *) src->data; + void * knl_data = kernel->data; + float * dst_data = (float *) dst->data; + + const int64_t knl_n = knl_w * knl_h * c_in; + const int64_t patch_total = dst->ne[3] * dst_w * dst_h; + + const int64_t space_per_patch = knl_n * traits->type_size + c_out * sizeof(float); + const int64_t batch_size = params->wsize / space_per_patch; + const int64_t patches_per_batch = batch_size > 8 ? 
(batch_size / 8) * 8 : batch_size; + const int64_t batch_n = (patch_total + patches_per_batch - 1) / patches_per_batch; + + GGML_ASSERT(patches_per_batch > 0 && batch_size >= 1); + + void * tmp = params->wdata; + + for (int64_t batch_i = 0; batch_i < batch_n; ++batch_i) { + const int64_t patch_start_batch = batch_i * patches_per_batch; + const int64_t patch_end_batch = std::min(patch_start_batch + patches_per_batch, patch_total); + const int64_t patch_n = patch_end_batch - patch_start_batch; + + const int64_t patch_per_thread = (patch_n + params->nth - 1) / params->nth; + const int64_t patch_start = patch_start_batch + params->ith * patch_per_thread; + const int64_t patch_end = std::min(patch_start + patch_per_thread, patch_end_batch); + + //im2col for a patch + for (int64_t p = patch_start; p < patch_end; ++p) { + const int64_t batch_n = p / (dst_w * dst_h); + const int64_t src_x = (p / dst_w) % dst_h; + const int64_t src_y = p % dst_w; + + const float * src_base = (const float *) ((const char *) src_data + batch_n * src->nb[3]); + char * dst_row = (char *) tmp + (p % patches_per_batch) * knl_n * traits->type_size; + + for (int64_t ic = 0; ic < c_in; ++ic) { + for (int64_t ky = 0; ky < knl_h; ++ky) { + for (int64_t kx = 0; kx < knl_w; ++kx) { + const int64_t sy = src_x * stride_y + ky * dilation_y - pad_y; + const int64_t sx = src_y * stride_x + kx * dilation_x - pad_x; + + int64_t dst_idx = ic * (knl_h * knl_w) + ky * knl_w + kx; + + float src_val; + if (sy < 0 || sy >= src_h || sx < 0 || sx >= src_w) { + src_val = 0.0f; + } else { + const float * src_ptr = (const float *) ((const char *) src_base + sx * src->nb[0] + + sy * src->nb[1] + ic * src->nb[2]); + src_val = *src_ptr; + } + + char * element_ptr = dst_row + dst_idx * traits->type_size; + if (kernel_type == GGML_TYPE_F32) { + *(float *) element_ptr = src_val; + } else if (kernel_type == GGML_TYPE_F16) { + *(ggml_fp16_t *) element_ptr = GGML_CPU_FP32_TO_FP16(src_val); + } + } + } + } + } // patches handled by this thread + + ggml_barrier(params->threadpool); + + float * gemm_output = (float *) ((char *) tmp + patches_per_batch * knl_n * traits->type_size); + + GGML_ASSERT(gemm_output + patch_n * c_out <= (float *) tmp + params->wsize); + + // GEMM: patches[patch_n, knl_n] × kernel[knl_n, c_out] = output[patch_n, c_out] + ggml_call_mul_mat(kernel_type, params, patch_n, c_out, knl_n, tmp, knl_data, gemm_output); + + ggml_barrier(params->threadpool); + + //permute back [OC, N, OH, OW] to [N, OC, OH, OW] + const int64_t permute_per_thread = (patch_n + params->nth - 1) / params->nth; + const int64_t permute_start = params->ith * permute_per_thread; + const int64_t permute_end = std::min(permute_start + permute_per_thread, patch_n); + + for (int64_t i = permute_start; i < permute_end; ++i) { + const int64_t p = patch_start_batch + i; + const int64_t batch_n = p / (dst_w * dst_h); + const int64_t dst_y = (p / dst_w) % dst_h; + const int64_t dst_x = p % dst_w; + + for (int64_t oc = 0; oc < c_out; ++oc) { + const float value = gemm_output[i * c_out + oc]; + float * dst_ptr = (float *) ((char *) dst_data + dst_x * dst->nb[0] + dst_y * dst->nb[1] + + oc * dst->nb[2] + batch_n * dst->nb[3]); + *dst_ptr = value; + } + } + } +} + +void ggml_compute_forward_conv_2d(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + ggml_compute_forward_conv_2d_impl(params, src0, src1, dst, src0->type); +} + +// ggml_compute_forward_conv_3d + +static void 
ggml_compute_forward_conv_3d_impl(const ggml_compute_params * params, + const ggml_tensor * kernel, + const ggml_tensor * src, + ggml_tensor * dst, + ggml_type kernel_type) { + GGML_ASSERT(ggml_is_contiguous(kernel)); + GGML_ASSERT(kernel_type == GGML_TYPE_F16 || kernel_type == GGML_TYPE_F32); + GGML_ASSERT(kernel->type == kernel_type); + + const ggml_type_traits * traits = ggml_get_type_traits(kernel_type); + + const int32_t s0 = dst->op_params[0]; + const int32_t s1 = dst->op_params[1]; + const int32_t s2 = dst->op_params[2]; + const int32_t p0 = dst->op_params[3]; + const int32_t p1 = dst->op_params[4]; + const int32_t p2 = dst->op_params[5]; + const int32_t d0 = dst->op_params[6]; + const int32_t d1 = dst->op_params[7]; + const int32_t d2 = dst->op_params[8]; + const int32_t c = dst->op_params[9]; + const int32_t n = dst->op_params[10]; + const int32_t oc = dst->op_params[11]; + + const int64_t src_w = src->ne[0]; + const int64_t src_h = src->ne[1]; + const int64_t src_d = src->ne[2]; + const int64_t knl_w = kernel->ne[0]; + const int64_t knl_h = kernel->ne[1]; + const int64_t knl_d = kernel->ne[2]; + const int64_t dst_w = dst->ne[0]; + const int64_t dst_h = dst->ne[1]; + const int64_t dst_d = dst->ne[2]; + + const float * src_data = (float *) src->data; + void * knl_data = kernel->data; + float * dst_data = (float *) dst->data; + + const int64_t knl_n_per_channel = knl_w * knl_h * knl_d; + const int64_t knl_n_total = knl_n_per_channel * c; + const int64_t patch_total = n * dst_w * dst_h * dst_d; + + const int64_t space_per_patch = knl_n_total * traits->type_size + oc * sizeof(float); + const int64_t batch_size = params->wsize / space_per_patch; + const int64_t patches_per_batch = batch_size > 8 ? (batch_size / 8) * 8 : batch_size; + const int64_t batch_n = (patch_total + patches_per_batch - 1) / patches_per_batch; + + GGML_ASSERT(patches_per_batch > 0 && batch_size >= 1); + + void * tmp = params->wdata; + + for (int64_t batch_i = 0; batch_i < batch_n; ++batch_i) { + const int64_t patch_start_batch = batch_i * patches_per_batch; + const int64_t patch_end_batch = std::min(patch_start_batch + patches_per_batch, patch_total); + const int64_t patch_n_in_batch = patch_end_batch - patch_start_batch; + + const int64_t patch_per_thread = (patch_n_in_batch + params->nth - 1) / params->nth; + const int64_t patch_start = patch_start_batch + params->ith * patch_per_thread; + const int64_t patch_end = std::min(patch_start + patch_per_thread, patch_end_batch); + + for (int64_t p = patch_start; p < patch_end; ++p) { + const int64_t p_in_batch = p % (dst_w * dst_h * dst_d); + const int64_t p_in_depth = p_in_batch % (dst_w * dst_h); + const int64_t batch_idx = p / (dst_w * dst_h * dst_d); + const int64_t dst_z = p_in_batch / (dst_w * dst_h); + const int64_t dst_y = p_in_depth / dst_w; + const int64_t dst_x = p_in_depth % dst_w; + + char * dst_row = (char *) tmp + (p % patches_per_batch) * knl_n_total * traits->type_size; + + for (int64_t ic = 0; ic < c; ++ic) { + for (int64_t kz = 0; kz < knl_d; ++kz) { + for (int64_t ky = 0; ky < knl_h; ++ky) { + for (int64_t kx = 0; kx < knl_w; ++kx) { + const int64_t sz = dst_z * s2 + kz * d2 - p2; + const int64_t sy = dst_y * s1 + ky * d1 - p1; + const int64_t sx = dst_x * s0 + kx * d0 - p0; + + int64_t dst_idx = ic * knl_n_per_channel + kz * (knl_h * knl_w) + ky * knl_w + kx; + + float src_val; + if (sz < 0 || sz >= src_d || sy < 0 || sy >= src_h || sx < 0 || sx >= src_w) { + src_val = 0.0f; + } else { + const int64_t cn_idx = batch_idx * c + ic; + const float * 
src_ptr = + (const float *) ((const char *) src_data + sx * src->nb[0] + sy * src->nb[1] + + sz * src->nb[2] + cn_idx * src->nb[3]); + src_val = *src_ptr; + } + + char * element_ptr = dst_row + dst_idx * traits->type_size; + if (kernel_type == GGML_TYPE_F32) { + *(float *) element_ptr = src_val; + } else if (kernel_type == GGML_TYPE_F16) { + *(ggml_fp16_t *) element_ptr = GGML_CPU_FP32_TO_FP16(src_val); + } + } + } + } + } + } + + ggml_barrier(params->threadpool); + + float * gemm_output = (float *) ((char *) tmp + patches_per_batch * knl_n_total * traits->type_size); + ggml_call_mul_mat(kernel_type, params, patch_n_in_batch, oc, knl_n_total, tmp, knl_data, gemm_output); + + ggml_barrier(params->threadpool); + + const int64_t permute_per_thread = (patch_n_in_batch + params->nth - 1) / params->nth; + const int64_t permute_start = params->ith * permute_per_thread; + const int64_t permute_end = std::min(permute_start + permute_per_thread, patch_n_in_batch); + + for (int64_t i = permute_start; i < permute_end; ++i) { + const int64_t p = patch_start_batch + i; + const int64_t p_in_batch = p % (dst_w * dst_h * dst_d); + const int64_t p_in_depth = p_in_batch % (dst_w * dst_h); + const int64_t batch_idx = p / (dst_w * dst_h * dst_d); + const int64_t dst_z = p_in_batch / (dst_w * dst_h); + const int64_t dst_y = p_in_depth / dst_w; + const int64_t dst_x = p_in_depth % dst_w; + + for (int64_t ioc = 0; ioc < oc; ++ioc) { + const float value = gemm_output[i * oc + ioc]; + const int64_t ocn_idx = batch_idx * oc + ioc; + float * dst_ptr = (float *) ((char *) dst_data + dst_x * dst->nb[0] + dst_y * dst->nb[1] + + dst_z * dst->nb[2] + ocn_idx * dst->nb[3]); + *dst_ptr = value; + } + } + } +} + +void ggml_compute_forward_conv_3d(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + ggml_compute_forward_conv_3d_impl(params, src0, src1, dst, src0->type); +} + +// ggml_compute_forward_conv_transpose_2d + +void ggml_compute_forward_conv_transpose_2d(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(src0->type == GGML_TYPE_F16); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_F32); + + GGML_TENSOR_BINARY_OP_LOCALS + + const int ith = params->ith; + const int nth = params->nth; + + const int nk = ne00 * ne01 * ne02 * ne03; + + GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); + GGML_ASSERT(nb10 == sizeof(float)); + + if (ith == 0) { + memset(params->wdata, 0, params->wsize); + + // permute kernel data (src0) from (Kw x Kh x Cout x Cin) to (Cin x Kw x Kh x Cout) + { + ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; + + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + const ggml_fp16_t * const src = (ggml_fp16_t *) ((char *) src0->data + i03 * nb03 + i02 * nb02); + ggml_fp16_t * dst_data = wdata + i02 * ne01 * ne00 * ne03; + for (int64_t i01 = 0; i01 < ne01; i01++) { + for (int64_t i00 = 0; i00 < ne00; i00++) { + dst_data[i01 * ne00 * ne03 + i00 * ne03 + i03] = src[i01 * ne00 + i00]; + } + } + } + } + } + + // permute source data (src1) from (Sw x Sh x Cin) to (Cin x Sw x Sh) + { + ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + nk; + for (int i12 = 0; i12 < ne12; i12++) { + for (int i11 = 0; i11 < ne11; i11++) { + const float * const src = (float *) ((char *) src1->data + i12 * nb12 + i11 * nb11); + ggml_fp16_t * dst_data = wdata 
+ i11 * ne10 * ne12; + for (int i10 = 0; i10 < ne10; i10++) { + dst_data[i10 * ne12 + i12] = GGML_CPU_FP32_TO_FP16(src[i10]); + } + } + } + } + + memset(dst->data, 0, ggml_nbytes(dst)); + } + ggml_barrier(params->threadpool); + + const int32_t stride = ggml_get_op_params_i32(dst, 0); + + // total patches in dst + const int np = ne2; + + // patches per thread + const int dp = (np + nth - 1) / nth; + + // patch range for this thread + const int ip0 = dp * ith; + const int ip1 = MIN(ip0 + dp, np); + + ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; + ggml_fp16_t * const wdata_src = wdata + nk; + + for (int i2 = ip0; i2 < ip1; i2++) { // Cout + float * dst_data = (float *) ((char *) dst->data + i2 * nb2); + ggml_fp16_t * wdata_kernel = wdata + i2 * ne01 * ne00 * ne03; + for (int i11 = 0; i11 < ne11; i11++) { + for (int i10 = 0; i10 < ne10; i10++) { + const int i1n = i11 * ne10 * ne12 + i10 * ne12; + for (int i01 = 0; i01 < ne01; i01++) { + for (int i00 = 0; i00 < ne00; i00++) { + float v = 0; + ggml_vec_dot_f16(ne03, &v, 0, wdata_src + i1n, 0, wdata_kernel + i01 * ne00 * ne03 + i00 * ne03, + 0, 1); + dst_data[(i11 * stride + i01) * ne0 + i10 * stride + i00] += v; + } + } + } + } + } +} + +// ggml_compute_forward_conv_2d_dw + +struct ggml_conv_2d_dw_params { + int64_t channels; + int64_t batch; + int64_t src_w; + int64_t src_h; + int64_t dst_w; + int64_t dst_h; + int64_t knl_w; + int64_t knl_h; + int stride_x; + int stride_y; + int pad_x; + int pad_y; + int dilation_x; + int dilation_y; +}; + +static void ggml_compute_forward_conv_2d_dw_cwhn(const ggml_compute_params * params, + const ggml_tensor * src, + const ggml_tensor * kernel, + ggml_tensor * dst, + const ggml_conv_2d_dw_params & p) { + const int64_t c = p.channels; + const float * knl_data = (const float *) kernel->data; + + const int64_t rows_total = p.dst_h * p.batch; + const int64_t rows_per_thread = (rows_total + params->nth - 1) / params->nth; + const int64_t row_start = params->ith * rows_per_thread; + const int64_t row_end = MIN(row_start + rows_per_thread, rows_total); + +#ifdef GGML_SIMD +# if defined(__ARM_FEATURE_SVE) + const int64_t pkg_size = svcntw(); +# else + const int64_t pkg_size = GGML_F32_EPR; +# endif + const int64_t pkg_count = c / pkg_size; + const int64_t c_pkg_end = pkg_count * pkg_size; +#else + const int64_t c_pkg_end = 0; +#endif + + for (int64_t row = row_start; row < row_end; ++row) { + const int64_t dst_y = row % p.dst_h; + const float * src_data = (const float *) src->data + (row / p.dst_h) * p.src_w * p.src_h * c; + for (int64_t dst_x = 0; dst_x < p.dst_w; ++dst_x) { + float * dst_data = (float *) dst->data + (row * p.dst_w + dst_x) * c; + const int64_t src_y_base = dst_y * p.stride_y - p.pad_y; + const int64_t src_x_base = dst_x * p.stride_x - p.pad_x; + +#ifdef GGML_SIMD + // Vectorized loop + for (int64_t c_i = 0; c_i < c_pkg_end; c_i += pkg_size) { + GGML_F32_VEC sum = GGML_F32_VEC_ZERO; + for (int64_t knl_y = 0; knl_y < p.knl_h; ++knl_y) { + const int64_t src_y = src_y_base + knl_y * p.dilation_y; + if (src_y < 0 || src_y >= p.src_h) { + continue; + } + for (int64_t knl_x = 0; knl_x < p.knl_w; ++knl_x) { + const int64_t src_x = src_x_base + knl_x * p.dilation_x; + if (src_x < 0 || src_x >= p.src_w) { + continue; + } + GGML_F32_VEC k = GGML_F32_VEC_LOAD(knl_data + (knl_y * p.knl_w + knl_x) * c + c_i); + GGML_F32_VEC s = GGML_F32_VEC_LOAD(src_data + (src_y * p.src_w + src_x) * c + c_i); + sum = GGML_F32_VEC_FMA(sum, k, s); + } + } + GGML_F32_VEC_STORE(dst_data + c_i, sum); + } +#endif + 
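+            // channels below c_pkg_end were already handled by the SIMD loop above; the
+            // scalar loop below covers the remainder (when GGML_SIMD is not defined,
+            // c_pkg_end is 0, so every channel takes this path)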
// Scalar loop + for (int64_t c_i = c_pkg_end; c_i < c; ++c_i) { + float sum = 0.0f; + for (int64_t knl_y = 0; knl_y < p.knl_h; ++knl_y) { + const int64_t src_y = src_y_base + knl_y * p.dilation_y; + if (src_y < 0 || src_y >= p.src_h) { + continue; + } + for (int64_t knl_x = 0; knl_x < p.knl_w; ++knl_x) { + const int64_t src_x = src_x_base + knl_x * p.dilation_x; + if (src_x < 0 || src_x >= p.src_w) { + continue; + } + sum += knl_data[(knl_y * p.knl_w + knl_x) * c + c_i] * + src_data[(src_y * p.src_w + src_x) * c + c_i]; + } + } + dst_data[c_i] = sum; + } + } + } +} + +static void ggml_compute_forward_conv_2d_dw_whcn(const ggml_compute_params * params, + const ggml_tensor * src, + const ggml_tensor * kernel, + ggml_tensor * dst, + const ggml_conv_2d_dw_params & p) { + const int64_t n = p.channels * p.batch; + const int64_t per_thread = (n + params->nth - 1) / params->nth; + const int64_t start = params->ith * per_thread; + const int64_t end = MIN(start + per_thread, n); + + for (int64_t i = start; i < end; ++i) { + const float * knl_data = (const float *) kernel->data + (i % p.channels) * p.knl_w * p.knl_h; + const float * src_data = (const float *) src->data + i * p.src_w * p.src_h; + float * dst_data = (float *) dst->data + i * p.dst_w * p.dst_h; + + for (int64_t dst_y = 0; dst_y < p.dst_h; ++dst_y) { + for (int64_t dst_x = 0; dst_x < p.dst_w; ++dst_x) { + float sum = 0.0f; + for (int64_t knl_y = 0; knl_y < p.knl_h; ++knl_y) { + const int64_t src_y = dst_y * p.stride_y + knl_y * p.dilation_y - p.pad_y; + if (src_y < 0 || src_y >= p.src_h) { + continue; + } + for (int64_t knl_x = 0; knl_x < p.knl_w; ++knl_x) { + const int64_t src_x = dst_x * p.stride_x + knl_x * p.dilation_x - p.pad_x; + if (src_x < 0 || src_x >= p.src_w) { + continue; + } + sum += knl_data[knl_y * p.knl_w + knl_x] * src_data[src_y * p.src_w + src_x]; + } + } + dst_data[dst_y * p.dst_w + dst_x] = sum; + } + } + } +} + +void ggml_compute_forward_conv_2d_dw(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * kernel = dst->src[0]; + const ggml_tensor * src = dst->src[1]; + ggml_conv_2d_dw_params p; + p.channels = src->ne[2]; + p.batch = src->ne[3]; + p.src_w = src->ne[0]; + p.src_h = src->ne[1]; + p.dst_w = dst->ne[0]; + p.dst_h = dst->ne[1]; + p.knl_w = kernel->ne[0]; + p.knl_h = kernel->ne[1]; + p.stride_x = dst->op_params[0]; + p.stride_y = dst->op_params[1]; + p.pad_x = dst->op_params[2]; + p.pad_y = dst->op_params[3]; + p.dilation_x = dst->op_params[4]; + p.dilation_y = dst->op_params[5]; + + GGML_ASSERT(kernel->ne[3] == p.channels); + GGML_ASSERT(dst->ne[3] == p.batch); + + if (ggml_is_contiguous(src)) { + ggml_compute_forward_conv_2d_dw_whcn(params, src, kernel, dst, p); + } else if (ggml_is_contiguous_channels(src)) { + // kernel should also have channels most contiguous in memory + GGML_ASSERT(kernel->nb[0] >= kernel->nb[2] && kernel->nb[1] >= kernel->nb[0]); + ggml_compute_forward_conv_2d_dw_cwhn(params, src, kernel, dst, p); + } else { + GGML_ABORT("non-contiguous memory layout not supported"); + } +} + +// ggml_compute_forward_pool_1d_sk_p0 + +static void ggml_compute_forward_pool_1d_sk_p0(const ggml_compute_params * params, + const ggml_op_pool op, + const int k, + ggml_tensor * dst) { + const ggml_tensor * src = dst->src[0]; + + assert(src->type == GGML_TYPE_F32 || src->type == GGML_TYPE_F16); + + if (params->ith != 0) { + return; + } + + const char * cdata = (const char *) src->data; + const char * const data_end = cdata + ggml_nbytes(src); + float * drow = (float *) dst->data; + + 
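+    // rs is the number of output elements per row; each one pools k consecutive
+    // source elements (the caller asserts k == s and no padding, so the windows
+    // tile each source row exactly)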
const int64_t rs = dst->ne[0];
+
+    while (cdata < data_end) {
+        const void * srow = (const void *) cdata;
+        int j = 0;
+        for (int64_t i = 0; i < rs; ++i) {
+            switch (op) {
+                case GGML_OP_POOL_AVG:
+                    drow[i] = 0;
+                    break;
+                case GGML_OP_POOL_MAX:
+                    drow[i] = -FLT_MAX;
+                    break;
+                case GGML_OP_POOL_COUNT:
+                    GGML_ABORT("fatal error");
+            }
+            for (int ki = 0; ki < k; ++ki) {
+                const float srow_j = (src->type == GGML_TYPE_F32) ?
+                                         ((const float *) srow)[j] :
+                                         GGML_CPU_FP16_TO_FP32(((const ggml_fp16_t *) srow)[j]);
+                switch (op) {
+                    case GGML_OP_POOL_AVG:
+                        drow[i] += srow_j;
+                        break;
+                    case GGML_OP_POOL_MAX:
+                        if (srow_j > drow[i]) {
+                            drow[i] = srow_j;
+                        }
+                        break;
+                    case GGML_OP_POOL_COUNT:
+                        GGML_ABORT("fatal error");
+                }
+                ++j;
+            }
+            switch (op) {
+                case GGML_OP_POOL_AVG:
+                    drow[i] /= k;
+                    break;
+                case GGML_OP_POOL_MAX:
+                    break;
+                case GGML_OP_POOL_COUNT:
+                    GGML_ABORT("fatal error");
+            }
+        }
+
+        cdata += src->nb[1];
+        drow += rs;
+    }
+}
+
+// ggml_compute_forward_pool_1d
+
+void ggml_compute_forward_pool_1d(const ggml_compute_params * params, ggml_tensor * dst) {
+    const int32_t * opts = (const int32_t *) dst->op_params;
+    ggml_op_pool op = static_cast<ggml_op_pool>(opts[0]);
+    const int k0 = opts[1];
+    const int s0 = opts[2];
+    const int p0 = opts[3];
+    GGML_ASSERT(p0 == 0); // padding not supported
+    GGML_ASSERT(k0 == s0); // only s = k supported
+
+    ggml_compute_forward_pool_1d_sk_p0(params, op, k0, dst);
+}
+
+// ggml_compute_forward_pool_2d
+
+void ggml_compute_forward_pool_2d(const ggml_compute_params * params, ggml_tensor * dst) {
+    const ggml_tensor * src = dst->src[0];
+
+    assert(src->type == GGML_TYPE_F32 || src->type == GGML_TYPE_F16);
+
+    if (params->ith != 0) {
+        return;
+    }
+
+    const int32_t * opts = (const int32_t *) dst->op_params;
+    ggml_op_pool op = static_cast<ggml_op_pool>(opts[0]);
+    const int k0 = opts[1];
+    const int k1 = opts[2];
+    const int s0 = opts[3];
+    const int s1 = opts[4];
+    const int p0 = opts[5];
+    const int p1 = opts[6];
+    const char * cdata = (const char *) src->data;
+    const char * const data_end = cdata + ggml_nbytes(src);
+
+    const int64_t px = dst->ne[0];
+    const int64_t py = dst->ne[1];
+    const int64_t pa = px * py;
+
+    float * dplane = (float *) dst->data;
+
+    const int ka = k0 * k1;
+    const int offset0 = -p0;
+    const int offset1 = -p1;
+
+    while (cdata < data_end) {
+        for (int oy = 0; oy < py; ++oy) {
+            float * const drow = dplane + oy * px;
+            for (int ox = 0; ox < px; ++ox) {
+                float * const out = drow + ox;
+                switch (op) {
+                    case GGML_OP_POOL_AVG:
+                        *out = 0;
+                        break;
+                    case GGML_OP_POOL_MAX:
+                        *out = -FLT_MAX;
+                        break;
+                    case GGML_OP_POOL_COUNT:
+                        GGML_ABORT("fatal error");
+                }
+
+                const int ix = offset0 + ox * s0;
+                const int iy = offset1 + oy * s1;
+
+                for (int ky = 0; ky < k1; ++ky) {
+                    if (iy + ky < 0 || iy + ky >= src->ne[1]) {
+                        continue;
+                    }
+                    const void * srow = (const void *) (cdata + src->nb[1] * (iy + ky));
+                    for (int kx = 0; kx < k0; ++kx) {
+                        int j = ix + kx;
+                        if (j < 0 || j >= src->ne[0]) {
+                            continue;
+                        }
+                        const float srow_j = (src->type == GGML_TYPE_F32) ?
+                                                 ((const float *) srow)[j] :
+                                                 GGML_CPU_FP16_TO_FP32(((const ggml_fp16_t *) srow)[j]);
+                        switch (op) {
+                            case GGML_OP_POOL_AVG:
+                                *out += srow_j;
+                                break;
+                            case GGML_OP_POOL_MAX:
+                                if (srow_j > *out) {
+                                    *out = srow_j;
+                                }
+                                break;
+                            case GGML_OP_POOL_COUNT:
+                                GGML_ABORT("fatal error");
+                        }
+                    }
+                }
+                switch (op) {
+                    case GGML_OP_POOL_AVG:
+                        *out /= ka;
+                        break;
+                    case GGML_OP_POOL_MAX:
+                        break;
+                    case GGML_OP_POOL_COUNT:
+                        GGML_ABORT("fatal error");
+                }
+            }
+        }
+
+        cdata += src->nb[2];
+        dplane += pa;
+    }
+}
+
+// ggml_compute_forward_pool_2d_back
+
+void ggml_compute_forward_pool_2d_back(const ggml_compute_params * params, ggml_tensor * dst) {
+    const ggml_tensor * src = dst->src[0];
+    const ggml_tensor * dstf = dst->src[1]; // forward tensor of dst
+
+    assert(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
+
+    if (params->ith != 0) {
+        return;
+    }
+
+    const int32_t * opts = (const int32_t *) dst->op_params;
+    ggml_op_pool op = static_cast<ggml_op_pool>(opts[0]);
+    const int k0 = opts[1];
+    const int k1 = opts[2];
+    const int s0 = opts[3];
+    const int s1 = opts[4];
+    const int p0 = opts[5];
+    const int p1 = opts[6];
+
+    char * cdata = (char *) dst->data;
+    const char * cdataf = (const char *) dstf->data;
+    const char * const data_end = cdata + ggml_nbytes(dst);
+
+    GGML_ASSERT(params->ith == 0);
+    memset(cdata, 0, ggml_nbytes(dst));
+
+    const int64_t px = src->ne[0];
+    const int64_t py = src->ne[1];
+    const int64_t pa = px * py;
+
+    const float * splane = (const float *) src->data;
+
+    const int ka = k0 * k1;
+    const int offset0 = -p0;
+    const int offset1 = -p1;
+
+    while (cdata < data_end) {
+        for (int oy = 0; oy < py; ++oy) {
+            const float * const srow = splane + oy * px;
+            for (int ox = 0; ox < px; ++ox) {
+                const float grad0 = srow[ox];
+
+                const int ix = offset0 + ox * s0;
+                const int iy = offset1 + oy * s1;
+
+                if (op == GGML_OP_POOL_MAX) {
+                    float maxval = -FLT_MAX;
+                    int kxmax = -1;
+                    int kymax = -1;
+
+                    for (int ky = 0; ky < k1; ++ky) {
+                        if (iy + ky < 0 || iy + ky >= dst->ne[1]) {
+                            continue;
+                        }
+                        const void * drowf = (const void *) (cdataf + dst->nb[1] * (iy + ky));
+                        for (int kx = 0; kx < k0; ++kx) {
+                            int j = ix + kx;
+                            if (j < 0 || j >= dst->ne[0]) {
+                                continue;
+                            }
+
+                            const float val = dst->type == GGML_TYPE_F32 ?
+ ((const float *) drowf)[j] : + GGML_CPU_FP16_TO_FP32(((const ggml_fp16_t *) drowf)[j]); + if (val <= maxval) { + continue; + } + + maxval = val; + kxmax = kx; + kymax = ky; + } + } + + if (kxmax == -1 || kymax == -1) { + continue; + } + + void * drow = (void *) (cdata + dst->nb[1] * (iy + kymax)); + const int j = ix + kxmax; + if (dst->type == GGML_TYPE_F32) { + ((float *) drow)[j] += grad0; + } else { + ((ggml_fp16_t *) drow)[j] = + GGML_CPU_FP32_TO_FP16(grad0 + GGML_CPU_FP16_TO_FP32(((const ggml_fp16_t *) drow)[j])); + } + } else if (op == GGML_OP_POOL_AVG) { + const float grad = grad0 / ka; + + for (int ky = 0; ky < k1; ++ky) { + if (iy + ky < 0 || iy + ky >= dst->ne[1]) { + continue; + } + void * drow = (void *) (cdata + dst->nb[1] * (iy + ky)); + for (int kx = 0; kx < k0; ++kx) { + int j = ix + kx; + if (j < 0 || j >= dst->ne[0]) { + continue; + } + + if (dst->type == GGML_TYPE_F32) { + ((float *) drow)[j] += grad; + } else { + ((ggml_fp16_t *) drow)[j] += GGML_CPU_FP32_TO_FP16(grad); + } + } + } + } else { + GGML_ASSERT(false); + } + } + } + + cdata += dst->nb[2]; + cdataf += dst->nb[2]; + splane += pa; + } +} + +// ggml_compute_forward_upscale + +static void ggml_compute_forward_upscale_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + + const int ith = params->ith; + const int nth = params->nth; + + GGML_TENSOR_UNARY_OP_LOCALS + + float sf0 = (float) ne0 / src0->ne[0]; + float sf1 = (float) ne1 / src0->ne[1]; + float sf2 = (float) ne2 / src0->ne[2]; + float sf3 = (float) ne3 / src0->ne[3]; + float pixel_offset = 0.5f; + + const int32_t mode_flags = ggml_get_op_params_i32(dst, 0); + const ggml_scale_mode mode = (ggml_scale_mode) (mode_flags & 0xFF); + + if (mode_flags & GGML_SCALE_FLAG_ALIGN_CORNERS) { + pixel_offset = 0.0f; + sf0 = ne0 > 1 && ne00 > 1 ? (float) (ne0 - 1) / (ne00 - 1) : sf0; + sf1 = ne1 > 1 && ne01 > 1 ? 
(float) (ne1 - 1) / (ne01 - 1) : sf1; + } + + if (mode == GGML_SCALE_MODE_NEAREST) { + for (int64_t i3 = 0; i3 < ne3; i3++) { + const int64_t i03 = i3 / sf3; + for (int64_t i2 = ith; i2 < ne2; i2 += nth) { + const int64_t i02 = i2 / sf2; + for (int64_t i1 = 0; i1 < ne1; i1++) { + const int64_t i01 = i1 / sf1; + for (int64_t i0 = 0; i0 < ne0; i0++) { + const int64_t i00 = i0 / sf0; + + const float * x = + (float *) ((char *) src0->data + i00 * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03); + float * y = (float *) ((char *) dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3); + + *y = *x; + } + } + } + } + } else if (mode == GGML_SCALE_MODE_BILINEAR) { + for (int64_t i3 = 0; i3 < ne3; i3++) { + const int64_t i03 = i3 / sf3; + for (int64_t i2 = ith; i2 < ne2; i2 += nth) { + const int64_t i02 = i2 / sf2; + for (int64_t i1 = 0; i1 < ne1; i1++) { + const float y = ((float) i1 + pixel_offset) / sf1 - pixel_offset; + int64_t y0 = (int64_t) floorf(y); + int64_t y1 = y0 + 1; + + y0 = std::max(int64_t(0), std::min(y0, ne01 - 1)); + y1 = std::max(int64_t(0), std::min(y1, ne01 - 1)); + + float dy = y - (float) y0; + dy = std::max(0.0f, std::min(dy, 1.0f)); + + for (int64_t i0 = 0; i0 < ne0; i0++) { + const float x = ((float) i0 + pixel_offset) / sf0 - pixel_offset; + int64_t x0 = (int64_t) floorf(x); + int64_t x1 = x0 + 1; + + x0 = std::max(int64_t(0), std::min(x0, ne00 - 1)); + x1 = std::max(int64_t(0), std::min(x1, ne00 - 1)); + + float dx = x - (float) x0; + dx = std::max(0.0f, std::min(dx, 1.0f)); + + // fetch the four surrounding pixel values and interpolate + const float a = *(const float *) ((const char *) src0->data + x0 * nb00 + y0 * nb01 + + i02 * nb02 + i03 * nb03); + const float b = *(const float *) ((const char *) src0->data + x1 * nb00 + y0 * nb01 + + i02 * nb02 + i03 * nb03); + const float c = *(const float *) ((const char *) src0->data + x0 * nb00 + y1 * nb01 + + i02 * nb02 + i03 * nb03); + const float d = *(const float *) ((const char *) src0->data + x1 * nb00 + y1 * nb01 + + i02 * nb02 + i03 * nb03); + + const float val = a * (1 - dx) * (1 - dy) + b * dx * (1 - dy) + c * (1 - dx) * dy + d * dx * dy; + + float * y_dst = (float *) ((char *) dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3); + *y_dst = val; + } + } + } + } + } else if (mode == GGML_SCALE_MODE_BICUBIC) { + // https://en.wikipedia.org/wiki/Bicubic_interpolation#Bicubic_convolution_algorithm + const float a = -0.75f; // use alpha = -0.75 (same as PyTorch) + auto weight1 = [a](float x) { + return ((a + 2) * x - (a + 3)) * x * x + 1; + }; + auto weight2 = [a](float x) { + return ((a * x - 5 * a) * x + 8 * a) * x - 4 * a; + }; + auto bicubic = [=](float p0, float p1, float p2, float p3, float x) { + const float w0 = weight2(x + 1); + const float w1 = weight1(x + 0); + const float w2 = weight1(1 - x); + const float w3 = weight2(2 - x); + return p0 * w0 + p1 * w1 + p2 * w2 + p3 * w3; + }; + + for (int64_t i3 = 0; i3 < ne3; i3++) { + const int64_t i03 = i3 / sf3; + for (int64_t i2 = ith; i2 < ne2; i2 += nth) { + const int64_t i02 = i2 / sf2; + for (int64_t i1 = 0; i1 < ne1; i1++) { + const float y = ((float) i1 + pixel_offset) / sf1 - pixel_offset; + const int64_t y0 = (int64_t) floorf(y); + const float dy = y - (float) y0; + + for (int64_t i0 = 0; i0 < ne0; i0++) { + const float x = ((float) i0 + pixel_offset) / sf0 - pixel_offset; + const int64_t x0 = (int64_t) floorf(x); + const float dx = x - (float) x0; + + auto p = [=](int64_t x_off, int64_t y_off) -> float { + int64_t i00 = std::max(int64_t(0), std::min(x0 + x_off, 
ne00 - 1)); + int64_t i01 = std::max(int64_t(0), std::min(y0 + y_off, ne01 - 1)); + return *(const float *) ((const char *) src0->data + i00 * nb00 + i01 * nb01 + i02 * nb02 + + i03 * nb03); + }; + + const float val = bicubic(bicubic(p(-1, -1), p(0, -1), p(1, -1), p(2, -1), dx), + bicubic(p(-1, 0), p(0, 0), p(1, 0), p(2, 0), dx), + bicubic(p(-1, 1), p(0, 1), p(1, 1), p(2, 1), dx), + bicubic(p(-1, 2), p(0, 2), p(1, 2), p(2, 2), dx), dy); + + float * y_dst = (float *) ((char *) dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3); + *y_dst = val; + } + } + } + } + } else { + GGML_ABORT("unsupported upscale mode"); + } +} + +void ggml_compute_forward_upscale(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_upscale_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_pad + +static void ggml_compute_forward_pad_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + GGML_ASSERT(src0->nb[0] == sizeof(float)); + GGML_ASSERT(dst->nb[0] == sizeof(float)); + + const int ith = params->ith; + const int nth = params->nth; + + GGML_TENSOR_UNARY_OP_LOCALS + + float * dst_ptr = (float *) dst->data; + const int32_t lp0 = ggml_get_op_params_i32(dst, 0); + const int32_t rp0 = ggml_get_op_params_i32(dst, 1); + const int32_t lp1 = ggml_get_op_params_i32(dst, 2); + const int32_t rp1 = ggml_get_op_params_i32(dst, 3); + const int32_t lp2 = ggml_get_op_params_i32(dst, 4); + const int32_t rp2 = ggml_get_op_params_i32(dst, 5); + const int32_t lp3 = ggml_get_op_params_i32(dst, 6); + const int32_t rp3 = ggml_get_op_params_i32(dst, 7); + const bool circular = (bool) ggml_get_op_params_i32(dst, 8); + + // TODO: optimize + + for (int64_t i2 = 0; i2 < ne2; ++i2) { + for (int64_t i1 = ith; i1 < ne1; i1 += nth) { + for (int64_t i0 = 0; i0 < ne0; ++i0) { + for (int64_t i3 = 0; i3 < ne3; ++i3) { + // circular means wrap around on a torus, so x and y loop around + if (circular) { + const int64_t dst_idx = i3 * (ne0 * ne1 * ne2) + i2 * (ne0 * ne1) + i1 * ne0 + i0; + const int64_t src_i0 = ggml_wrap_around(i0 - lp0, ne00); + const int64_t src_i1 = ggml_wrap_around(i1 - lp1, ne01); + const int64_t src_i2 = ggml_wrap_around(i2 - lp2, ne02); + const int64_t src_i3 = ggml_wrap_around(i3 - lp3, ne03); + + const int64_t src_idx = src_i3 * nb03 + src_i2 * nb02 + src_i1 * nb01 + src_i0 * nb00; + + const float * src_ptr = (const float *) ((char *) src0->data + src_idx); + dst_ptr[dst_idx] = *src_ptr; + } else { + const int64_t dst_idx = i3 * (ne0 * ne1 * ne2) + i2 * (ne0 * ne1) + i1 * ne0 + i0; + if ((i0 >= lp0 && i0 < ne0 - rp0) && (i1 >= lp1 && i1 < ne1 - rp1) && + (i2 >= lp2 && i2 < ne2 - rp2) && (i3 >= lp3 && i3 < ne3 - rp3)) { + const int64_t src_idx = + (i3 - lp3) * nb03 + (i2 - lp2) * nb02 + (i1 - lp1) * nb01 + (i0 - lp0) * nb00; + const float * src_ptr = (const float *) ((char *) src0->data + src_idx); + dst_ptr[dst_idx] = *src_ptr; + } else { + dst_ptr[dst_idx] = 0; + } + } + } + } + } + } +} + +void ggml_compute_forward_pad(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_pad_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_pad_reflect_1d + +void ggml_compute_forward_pad_reflect_1d(const ggml_compute_params * 
params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_F32); + + const int ith = params->ith; + const int nth = params->nth; + + const int32_t * opts = (const int32_t *) dst->op_params; + const int p0 = opts[0]; + const int p1 = opts[1]; + + GGML_TENSOR_UNARY_OP_LOCALS + + for (int64_t i3 = 0; i3 < ne3; i3++) { + for (int64_t i2 = 0; i2 < ne2; i2++) { + for (int64_t i1 = ith; i1 < ne1; i1 += nth) { + float * left = (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1 + p0 * nb0); + float * right = (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1 + (ne0 - p1 - 1) * nb0); + + ggml_vec_cpy_f32(ne00, left, (float *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01)); + + for (int i0 = 1; i0 <= p0; i0++) { + left[-i0] = left[i0]; + } + for (int i0 = 1; i0 <= p1; i0++) { + right[i0] = right[-i0]; + } + } + } + } +} + +// ggml_compute_forward_roll + +static int64_t ggml_wrap_index(int64_t i, int64_t ne) { + if (i < 0) { + return i + ne; + } else if (i >= ne) { + return i - ne; + } + return i; +} + +static void ggml_compute_forward_roll_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const float * src_data = (const float *) src0->data; + float * dst_data = (float *) dst->data; + + GGML_TENSOR_UNARY_OP_LOCALS + + const int s0 = ggml_get_op_params_i32(dst, 0); + const int s1 = ggml_get_op_params_i32(dst, 1); + const int s2 = ggml_get_op_params_i32(dst, 2); + const int s3 = ggml_get_op_params_i32(dst, 3); + + const int64_t total = ne1 * ne2 * ne3; + const int64_t per_thread = (total + params->nth) / params->nth; + const int64_t start = params->ith * per_thread; + const int64_t end = std::min(start + per_thread, total); + + for (int64_t i = start; i < end; ++i) { + const int64_t i1 = i % ne1; + const int64_t i2 = (i / ne1) % ne2; + const int64_t i3 = i / (ne2 * ne1); + float * dst_row = dst_data + (i3 * nb3 + i2 * nb2 + i1 * nb1) / sizeof(float); + + const int64_t i01 = ggml_wrap_index(i1 - s1, ne01); + const int64_t i02 = ggml_wrap_index(i2 - s2, ne02); + const int64_t i03 = ggml_wrap_index(i3 - s3, ne03); + const float * src_row = src_data + (i03 * nb03 + i02 * nb02 + i01 * nb01) / sizeof(float); + + const int64_t s = ggml_wrap_index(-s0, ne00); + const int64_t n = ne00 - s; + ggml_vec_cpy_f32(n, dst_row, src_row + s); + ggml_vec_cpy_f32(s, dst_row + n, src_row); + } +} + +void ggml_compute_forward_roll(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_roll_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_arange + +static void ggml_compute_forward_arange_f32(const ggml_compute_params * params, ggml_tensor * dst) { + GGML_ASSERT(dst->nb[0] == sizeof(float)); + + const int ith = params->ith; + const int nth = params->nth; + + const float start = ggml_get_op_params_f32(dst, 0); + const float stop = ggml_get_op_params_f32(dst, 1); + const float step = ggml_get_op_params_f32(dst, 2); + + const int64_t steps = (int64_t) ceilf((stop - start) / step); + + GGML_ASSERT(ggml_nelements(dst) == steps); + + for (int64_t i = ith; i < steps; i += nth) { + float value = start + step * i; + ((float *) dst->data)[i] = value; + } +} + +void ggml_compute_forward_arange(const ggml_compute_params * params, ggml_tensor * dst) { + switch (dst->type) { + 
case GGML_TYPE_F32:
+            {
+                ggml_compute_forward_arange_f32(params, dst);
+            }
+            break;
+        default:
+            {
+                GGML_ABORT("fatal error");
+            }
+    }
+}
+
+static void ggml_compute_forward_timestep_embedding_f32(const ggml_compute_params * params, ggml_tensor * dst) {
+    const ggml_tensor * src0 = dst->src[0];
+
+    GGML_ASSERT(src0->nb[0] == sizeof(float));
+
+    const int ith = params->ith;
+    const int nth = params->nth;
+
+    GGML_TENSOR_UNARY_OP_LOCALS
+
+    const int dim = ggml_get_op_params_i32(dst, 0);
+    const int max_period = ggml_get_op_params_i32(dst, 1);
+
+    int half = dim / 2;
+
+    for (int64_t i = 0; i < ne00; i++) {
+        float * embed_data = (float *) ((char *) dst->data + i * nb1);
+        for (int64_t j = ith; j < half; j += nth) {
+            float timestep = ((float *) src0->data)[i];
+            float freq = (float) expf(-logf(max_period) * j / half);
+            float arg = timestep * freq;
+            embed_data[j] = cosf(arg);
+            embed_data[j + half] = sinf(arg);
+        }
+        if (dim % 2 != 0 && ith == 0) {
+            embed_data[2 * half] = 0.f;
+        }
+    }
+}
+
+void ggml_compute_forward_timestep_embedding(const ggml_compute_params * params, ggml_tensor * dst) {
+    const ggml_tensor * src0 = dst->src[0];
+
+    switch (src0->type) {
+        case GGML_TYPE_F32:
+            {
+                ggml_compute_forward_timestep_embedding_f32(params, dst);
+            }
+            break;
+        default:
+            {
+                GGML_ABORT("fatal error");
+            }
+    }
+}
+
+// ggml_compute_forward_argsort
+
+template <ggml_sort_order order> struct cmp_argsort {
+    const float * data;
+
+    bool operator()(int32_t a, int32_t b) const {
+        if constexpr (order == GGML_SORT_ORDER_ASC) {
+            return data[a] < data[b];
+        } else {
+            return data[a] > data[b];
+        }
+    }
+};
+
+static void ggml_compute_forward_argsort_f32(const ggml_compute_params * params, ggml_tensor * dst) {
+    const ggml_tensor * src0 = dst->src[0];
+
+    GGML_TENSOR_UNARY_OP_LOCALS
+
+    GGML_ASSERT(nb0 == sizeof(float));
+
+    const int ith = params->ith;
+    const int nth = params->nth;
+
+    const int64_t nr = ggml_nrows(src0);
+
+    ggml_sort_order order = (ggml_sort_order) ggml_get_op_params_i32(dst, 0);
+
+    for (int64_t i = ith; i < nr; i += nth) {
+        const float * src_data = (float *) ((char *) src0->data + i * nb01);
+
+        int32_t * dst_data = (int32_t *) ((char *) dst->data + i * nb1);
+
+        for (int64_t j = 0; j < ne0; j++) {
+            dst_data[j] = j;
+        }
+
+        switch (order) {
+            case GGML_SORT_ORDER_ASC:
+                std::sort(dst_data, dst_data + ne0, cmp_argsort<GGML_SORT_ORDER_ASC>{ src_data });
+                break;
+
+            case GGML_SORT_ORDER_DESC:
+                std::sort(dst_data, dst_data + ne0, cmp_argsort<GGML_SORT_ORDER_DESC>{ src_data });
+                break;
+
+            default:
+                GGML_ABORT("invalid sort order");
+        }
+    }
+}
+
+void ggml_compute_forward_argsort(const ggml_compute_params * params, ggml_tensor * dst) {
+    const ggml_tensor * src0 = dst->src[0];
+
+    switch (src0->type) {
+        case GGML_TYPE_F32:
+            {
+                ggml_compute_forward_argsort_f32(params, dst);
+            }
+            break;
+        default:
+            {
+                GGML_ABORT("fatal error");
+            }
+    }
+}
+
+// ggml_compute_forward_top_k
+
+struct cmp_top_k {
+    const float * data;
+
+    bool operator()(int32_t a, int32_t b) const { return data[a] > data[b]; }
+};
+
+static void ggml_compute_forward_top_k_f32(const ggml_compute_params * params, ggml_tensor * dst) {
+    const ggml_tensor * src0 = dst->src[0];
+
+    GGML_TENSOR_UNARY_OP_LOCALS
+
+    GGML_ASSERT(nb0 == sizeof(float));
+
+    const int ith = params->ith;
+    const int nth = params->nth;
+
+    const int64_t nr = ggml_nrows(src0);
+
+    const int top_k = ne0;
+
+    int32_t * tmp = (int32_t *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith;
+
+    for (int64_t i = ith; i < nr; i += nth) {
+        const float * src_data = (float *) ((char *) src0->data + i * nb01);
+
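+        // fill tmp with the row indices 0..ne00-1, then partially sort so that the
+        // top_k indices with the largest values end up at the front (their relative
+        // order is not significant)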
for (int64_t j = 0; j < ne00; j++) { + tmp[j] = j; + } + + std::partial_sort(tmp, tmp + top_k, tmp + ne00, cmp_top_k{ src_data }); + + int32_t * dst_data = (int32_t *) ((char *) dst->data + i * nb1); + + std::copy(tmp, tmp + top_k, dst_data); + + // emphasize that the order is not important + if (top_k > 1) { + std::swap(dst_data[0], dst_data[1]); + } + } +} + +void ggml_compute_forward_top_k(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_top_k_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_flash_attn_ext + +static void ggml_compute_forward_flash_attn_ext_f16_one_chunk(const ggml_compute_params * params, + ggml_tensor * dst, + int ir0, + int ir1) { + const ggml_tensor * q = dst->src[0]; + const ggml_tensor * k = dst->src[1]; + const ggml_tensor * v = dst->src[2]; + const ggml_tensor * mask = dst->src[3]; + const ggml_tensor * sinks = dst->src[4]; + + GGML_TENSOR_LOCALS(int64_t, neq, q, ne) + GGML_TENSOR_LOCALS(size_t, nbq, q, nb) + GGML_TENSOR_LOCALS(int64_t, nek, k, ne) + GGML_TENSOR_LOCALS(size_t, nbk, k, nb) + GGML_TENSOR_LOCALS(int64_t, nev, v, ne) + GGML_TENSOR_LOCALS(size_t, nbv, v, nb) + GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) + GGML_TENSOR_LOCALS(size_t, nb, dst, nb) + + const int64_t DK = nek0; + const int64_t DV = nev0; + const int64_t N = neq1; + + GGML_ASSERT(ne0 == DV); + GGML_ASSERT(ne2 == N); + + // input tensor rows must be contiguous + GGML_ASSERT(nbq0 == ggml_type_size(q->type)); + GGML_ASSERT(nbk0 == ggml_type_size(k->type)); + GGML_ASSERT(nbv0 == ggml_type_size(v->type)); + + GGML_ASSERT(neq0 == DK); + GGML_ASSERT(nek0 == DK); + GGML_ASSERT(nev0 == DV); + + GGML_ASSERT(neq1 == N); + + // dst cannot be transposed or permuted + GGML_ASSERT(nb0 == sizeof(float)); + GGML_ASSERT(nb0 <= nb1); + GGML_ASSERT(nb1 <= nb2); + GGML_ASSERT(nb2 <= nb3); + + // broadcast factors + const int64_t rk2 = neq2 / nek2; + const int64_t rk3 = neq3 / nek3; + + const int64_t rv2 = neq2 / nev2; + const int64_t rv3 = neq3 / nev3; + + // parallelize by q rows using ggml_vec_dot_f32 + + float scale = 1.0f; + float max_bias = 0.0f; + float logit_softcap = 0.0f; + + memcpy(&scale, (float *) dst->op_params + 0, sizeof(float)); + memcpy(&max_bias, (float *) dst->op_params + 1, sizeof(float)); + memcpy(&logit_softcap, (float *) dst->op_params + 2, sizeof(float)); + + if (logit_softcap != 0) { + scale /= logit_softcap; + } + + const uint32_t n_head = neq2; + const uint32_t n_head_log2 = 1u << (uint32_t) floor(log2(n_head)); + + const float m0 = powf(2.0f, -(max_bias) / n_head_log2); + const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2); + + const ggml_type k_vec_dot_type = ggml_get_type_traits_cpu(k->type)->vec_dot_type; + const ggml_from_float_t q_to_vec_dot = ggml_get_type_traits_cpu(k_vec_dot_type)->from_float; + const ggml_vec_dot_t kq_vec_dot = ggml_get_type_traits_cpu(k->type)->vec_dot; + const ggml_to_float_t v_to_float = ggml_get_type_traits(v->type)->to_float; + + GGML_ASSERT((q_to_vec_dot) && "fattn: unsupported K-type"); + GGML_ASSERT((v->type == GGML_TYPE_F32 || v_to_float) && "fattn: unsupported V-type"); + + int ith = params->ith; + + // loop over n_batch and n_head + for (int ir = ir0; ir < ir1; ++ir) { + // q indices + const int iq3 = ir / (neq2 * neq1); + const int iq2 = (ir - iq3 * neq2 * neq1) / neq1; + const int iq1 = (ir - iq3 * neq2 * neq1 - iq2 * neq1); + + const uint32_t h = iq2; 
// head index + const float slope = + (max_bias > 0.0f) ? h < n_head_log2 ? powf(m0, h + 1) : powf(m1, 2 * (h - n_head_log2) + 1) : 1.0f; + + float S = 0.0f; // sum + float M = -INFINITY; // maximum KQ value + + float * VKQ32 = + (float *) params->wdata + ith * (1 * DK + 2 * DV + CACHE_LINE_SIZE_F32); // FP32 VKQ accumulator + float * V32 = (VKQ32 + 1 * DV); // (temporary) FP32 V buffer + ggml_fp16_t * VKQ16 = (ggml_fp16_t *) (VKQ32 + 1 * DV); // (temporary) FP16 VKQ accumulator + ggml_fp16_t * Q_q = (ggml_fp16_t *) (VKQ32 + 2 * DV); // (temporary) buffer for Q converted to quantized/FP16 + + if (v->type == GGML_TYPE_F16) { + memset(VKQ16, 0, DV * sizeof(ggml_fp16_t)); + } else { + memset(VKQ32, 0, DV * sizeof(float)); + } + + const ggml_fp16_t * mp = + mask ? (ggml_fp16_t *) ((char *) mask->data + iq1 * mask->nb[1] + (iq2 % mask->ne[2]) * mask->nb[2] + + (iq3 % mask->ne[3]) * mask->nb[3]) : + NULL; + + // k indices + const int ik3 = iq3 / rk3; + const int ik2 = iq2 / rk2; + + // v indices + const int iv3 = iq3 / rv3; + const int iv2 = iq2 / rv2; + + const float * pq = (const float *) ((char *) q->data + (iq1 * nbq1 + iq2 * nbq2 + iq3 * nbq3)); + q_to_vec_dot(pq, Q_q, DK); + + // online softmax / attention + // loop over n_kv and n_head_kv + // ref: https://arxiv.org/pdf/2112.05682.pdf + for (int64_t ic = 0; ic < nek1; ++ic) { + const float mv = mp ? slope * GGML_CPU_FP16_TO_FP32(mp[ic]) : 0.0f; + if (mv == -INFINITY) { + continue; + } + + float s; // KQ value + + const char * k_data = (const char *) k->data + (ic * nbk1 + ik2 * nbk2 + ik3 * nbk3); + kq_vec_dot(DK, &s, 0, k_data, 0, Q_q, 0, 1); + + s = s * scale; // scale KQ value + + if (logit_softcap != 0.0f) { + s = logit_softcap * tanhf(s); + } + + s += mv; // apply mask + + const float Mold = M; + + float ms = 1.0f; // upon new higher max val, scale VKQ and KQ sum with this value + float vs = 1.0f; // post-softmax KQ value, expf(s - M) + + const char * v_data = ((const char *) v->data + (ic * nbv1 + iv2 * nbv2 + iv3 * nbv3)); + + if (v->type == GGML_TYPE_F16) { + if (s > M) { + // s is new maximum, ms < 1.0f, vs == expf(s - s) == 1.0f + M = s; + ms = expf(Mold - M); + + // V = V*expf(Mold - M) + ggml_vec_scale_f16(DV, VKQ16, ms); + } else { + // no new maximum, ms == 1.0f, vs != 1.0f + vs = expf(s - M); + } + + // V += v*expf(s - M) + ggml_vec_mad_f16(DV, VKQ16, (const ggml_fp16_t *) v_data, vs); + } else { + if (s > M) { + // s is new maximum, ms < 1.0f, vs == expf(s - s) == 1.0f + M = s; + ms = expf(Mold - M); + + // V = V*expf(Mold - M) + ggml_vec_scale_f32(DV, VKQ32, ms); + } else { + // no new maximum, ms == 1.0f, vs != 1.0f + vs = expf(s - M); + } + + // V += v*expf(s - M) + if (v_to_float) { + v_to_float(v_data, V32, DV); + ggml_vec_mad_f32(DV, VKQ32, V32, vs); + } else { + // V is F32 + ggml_vec_mad_f32(DV, VKQ32, (const float *) v_data, vs); + } + } + + S = S * ms + vs; // scale and increment sum with partial sum + } + + if (v->type == GGML_TYPE_F16) { + for (int64_t d = 0; d < DV; ++d) { + VKQ32[d] = GGML_CPU_FP16_TO_FP32(VKQ16[d]); + } + } + + // sinks + if (sinks) { + const float s = ((float *) ((char *) sinks->data))[h]; + + float ms = 1.0f; + float vs = 1.0f; + + if (s > M) { + ms = expf(M - s); + ggml_vec_scale_f32(DV, VKQ32, ms); + } else { + vs = expf(s - M); + } + + S = S * ms + vs; + } + + // V /= S + const float S_inv = S == 0.0f ? 
0.0f : 1.0f / S; + ggml_vec_scale_f32(DV, VKQ32, S_inv); + + // dst indices + const int i1 = iq1; + const int i2 = iq2; + const int i3 = iq3; + + // original + //memcpy((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3), V, nev0*sizeof(float)); + + // permute(0, 2, 1, 3) + memcpy((char *) dst->data + (i3 * ne2 * ne1 + i2 + i1 * ne1) * nb1, VKQ32, nb1); + } +} + +static void ggml_compute_forward_flash_attn_ext_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * q = dst->src[0]; + const ggml_tensor * k = dst->src[1]; + const ggml_tensor * v = dst->src[2]; + + GGML_TENSOR_LOCALS(int64_t, neq, q, ne) + GGML_TENSOR_LOCALS(size_t, nbq, q, nb) + GGML_TENSOR_LOCALS(int64_t, nek, k, ne) + GGML_TENSOR_LOCALS(size_t, nbk, k, nb) + GGML_TENSOR_LOCALS(int64_t, nev, v, ne) + GGML_TENSOR_LOCALS(size_t, nbv, v, nb) + GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) + GGML_TENSOR_LOCALS(size_t, nb, dst, nb) + + const int64_t DK = nek0; + const int64_t DV = nev0; + const int64_t N = neq1; + + GGML_ASSERT(ne0 == DV); + GGML_ASSERT(ne2 == N); + + // input tensor rows must be contiguous + GGML_ASSERT(nbq0 == ggml_type_size(q->type)); + GGML_ASSERT(nbk0 == ggml_type_size(k->type)); + GGML_ASSERT(nbv0 == ggml_type_size(v->type)); + + GGML_ASSERT(neq0 == DK); + GGML_ASSERT(nek0 == DK); + GGML_ASSERT(nev0 == DV); + + GGML_ASSERT(neq1 == N); + + // dst cannot be transposed or permuted + GGML_ASSERT(nb0 == sizeof(float)); + GGML_ASSERT(nb0 <= nb1); + GGML_ASSERT(nb1 <= nb2); + GGML_ASSERT(nb2 <= nb3); + + // parallelize by q rows using ggml_vec_dot_f32 + + // total rows in q + const int64_t nr = neq1 * neq2 * neq3; + + // rows per thread + const int ith = params->ith; + const int nth = params->nth; + + // disable for NUMA + const bool disable_chunking = ggml_is_numa(); + + // 4x chunks per thread + int nth_scaled = nth * 4; + int64_t chunk_size = (nr + nth_scaled - 1) / nth_scaled; + int64_t nchunk = (nr + chunk_size - 1) / chunk_size; + + if (nth == 1 || nchunk < nth || disable_chunking) { + nchunk = nth; + } + + if (ith == 0) { + // Every thread starts at ith, so the first unprocessed chunk is nth. This save a bit of coordination right at the start. + ggml_threadpool_chunk_set(params->threadpool, nth); + } + + ggml_barrier(params->threadpool); + + // The number of elements in each chunk + const int64_t dr = (nr + nchunk - 1) / nchunk; + + // The first chunk comes from our thread_id, the rest will get auto-assigned. 
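+    // work distribution: every thread starts on the chunk matching its index, then
+    // repeatedly fetches the next unprocessed chunk from the shared threadpool counter
+    // until all nchunk chunks have been consumed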
+ int current_chunk = ith; + + while (current_chunk < nchunk) { + const int64_t ir0 = dr * current_chunk; + const int64_t ir1 = MIN(ir0 + dr, nr); + + ggml_compute_forward_flash_attn_ext_f16_one_chunk(params, dst, ir0, ir1); + + current_chunk = ggml_threadpool_chunk_add(params->threadpool, 1); + } +} + +void ggml_compute_forward_flash_attn_ext(const ggml_compute_params * params, ggml_tensor * dst) { + switch (dst->op_params[3]) { + case GGML_PREC_DEFAULT: + case GGML_PREC_F32: + { + // uses F32 accumulators + ggml_compute_forward_flash_attn_ext_f16(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_flash_attn_back + +static void ggml_compute_forward_flash_attn_back_f32(const ggml_compute_params * params, + const bool masked, + ggml_tensor * dst) { + const ggml_tensor * q = dst->src[0]; + const ggml_tensor * k = dst->src[1]; + const ggml_tensor * v = dst->src[2]; + const ggml_tensor * d = dst->src[3]; + + GGML_TENSOR_LOCALS(int64_t, neq, q, ne) + GGML_TENSOR_LOCALS(size_t, nbq, q, nb) + GGML_TENSOR_LOCALS(int64_t, nek, k, ne) + GGML_TENSOR_LOCALS(size_t, nbk, k, nb) + GGML_TENSOR_LOCALS(int64_t, nev, v, ne) + GGML_TENSOR_LOCALS(size_t, nbv, v, nb) + GGML_TENSOR_LOCALS(int64_t, ned, d, ne) + GGML_TENSOR_LOCALS(size_t, nbd, d, nb) + GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) + GGML_TENSOR_LOCALS(size_t, nb, dst, nb) + + const int ith = params->ith; + const int nth = params->nth; + + const int64_t D = neq0; + const int64_t N = neq1; + const int64_t P = nek1 - N; + const int64_t M = P + N; + + const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL); + const int mxDM = MAX(D, Mup); + + // GGML_ASSERT(ne0 == D); + // GGML_ASSERT(ne1 == N); + GGML_ASSERT(P >= 0); + + GGML_ASSERT(nbq0 == sizeof(float)); + GGML_ASSERT(nbk0 == sizeof(float)); + GGML_ASSERT(nbv0 == sizeof(float)); + + GGML_ASSERT(neq0 == D); + GGML_ASSERT(nek0 == D); + GGML_ASSERT(nev1 == D); + GGML_ASSERT(ned0 == D); + + GGML_ASSERT(neq1 == N); + GGML_ASSERT(nek1 == N + P); + GGML_ASSERT(nev1 == D); + GGML_ASSERT(ned1 == N); + + // dst cannot be transposed or permuted + GGML_ASSERT(nb0 == sizeof(float)); + GGML_ASSERT(nb0 <= nb1); + GGML_ASSERT(nb1 <= nb2); + GGML_ASSERT(nb2 <= nb3); + + if (ith == 0) { + memset(dst->data, 0, nb0 * ne0 * ne1 * ne2 * ne3); + } + ggml_barrier(params->threadpool); + + const int64_t elem_q = ggml_nelements(q); + const int64_t elem_k = ggml_nelements(k); + + ggml_type result_type = dst->type; + GGML_ASSERT(ggml_blck_size(result_type) == 1); + const size_t tsize = ggml_type_size(result_type); + + const size_t offs_q = 0; + const size_t offs_k = offs_q + GGML_PAD(elem_q * tsize, GGML_MEM_ALIGN); + const size_t offs_v = offs_k + GGML_PAD(elem_k * tsize, GGML_MEM_ALIGN); + + void * grad_q = (char *) dst->data; + void * grad_k = (char *) dst->data + offs_k; + void * grad_v = (char *) dst->data + offs_v; + + const size_t nbgq1 = nb0 * neq0; + const size_t nbgq2 = nb0 * neq0 * neq1; + const size_t nbgq3 = nb0 * neq0 * neq1 * neq2; + + const size_t nbgk1 = nb0 * nek0; + const size_t nbgk2 = nb0 * nek0 * nek1; + const size_t nbgk3 = nb0 * nek0 * nek1 * neq2; + + const size_t nbgv1 = nb0 * nev0; + const size_t nbgv2 = nb0 * nev0 * nev1; + const size_t nbgv3 = nb0 * nev0 * nev1 * neq2; + + // parallelize by k rows using ggml_vec_dot_f32 + + // total rows in k + const int nr = nek2 * nek3; + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + const float scale 
= 1.0f / sqrtf(D); + + //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale); + + // how often k2 (and v2) is repeated in q2 + int nrep = neq2 / nek2; + + for (int ir = ir0; ir < ir1; ++ir) { + // q indices + const int ik3 = ir / (nek2); + const int ik2 = ir - ik3 * nek2; + + const int iq3 = ik3; + const int id3 = ik3; + const int iv3 = ik3; + const int iv2 = ik2; + + for (int irep = 0; irep < nrep; ++irep) { + const int iq2 = ik2 + irep * nek2; + const int id2 = iq2; + + // (ik2 + irep*nek2) % nek2 == ik2 + for (int iq1 = 0; iq1 < neq1; ++iq1) { + const int id1 = iq1; + + // not sure about CACHE_LINE_SIZE_F32.. + // - maybe it must not be multiplied by 2 and excluded from .. in SM 1*(..) offset? + float * S = + (float *) params->wdata + ith * 2 * (mxDM + CACHE_LINE_SIZE_F32) + 0 * (mxDM + CACHE_LINE_SIZE_F32); + float * SM = + (float *) params->wdata + ith * 2 * (mxDM + CACHE_LINE_SIZE_F32) + 1 * (mxDM + CACHE_LINE_SIZE_F32); + + for (int i = M; i < Mup; ++i) { + S[i] = -INFINITY; + } + + const int64_t masked_begin = masked ? (P + iq1 + 1) : M; + for (int64_t ic = 0; ic < masked_begin; ++ic) { + // k indices + const int ik1 = ic; + + // S indices + const int i1 = ik1; + + ggml_vec_dot_f32(neq0, S + i1, 0, + (float *) ((char *) k->data + (ik1 * nbk1 + ik2 * nbk2 + ik3 * nbk3)), 0, + (float *) ((char *) q->data + (iq1 * nbq1 + iq2 * nbq2 + iq3 * nbq3)), 0, 1); + } + + // scale + ggml_vec_scale_f32(masked_begin, S, scale); + + for (int64_t i = masked_begin; i < M; i++) { + S[i] = -INFINITY; + } + + // softmax + // exclude known -INF S[..] values from max and loop + // dont forget to set their SM values to zero + { + float max = -INFINITY; + ggml_vec_max_f32(masked_begin, &max, S); + + ggml_float sum = 0.0; + { +#ifdef GGML_SOFT_MAX_ACCELERATE + max = -max; + vDSP_vsadd(SM, 1, &max, SM, 1, Mup); + vvexpf(SM, SM, &Mup); + ggml_vec_sum_f32(Mup, &sum, SM); +#else + sum = ggml_vec_soft_max_f32(Mup, SM, S, max); +#endif + } + + assert(sum > 0.0); + + sum = 1.0 / sum; + ggml_vec_scale_f32(masked_begin, SM, sum); + } + + // step-by-step explanation + { + // forward-process shape grads from backward process + // parallel_for ik2,ik3: + // for irep: + // iq2 = ik2 + irep*nek2 + // k[:D,:M,:,:] [D,M,:,:] grad[k][:D,:M,ik2,ik3] += grad[kcur] + // q[:D,:N,:,:] [D,N,:,:] grad[q][:D,iq1,iq2,iq3] += grad[qcur] + // v[:M,:D,:,:] [M,D,:,:] grad[v][:M,:D,iv2,iv3] += grad[vcur] + // for iq1: + // kcur = k[:D,:M,ik2,ik3] [D,M,1,1] grad[kcur] = grad[S1].T @ qcur + // qcur = q[:D,iq1,iq2,iq3] [D,1,1,1] grad[qcur] = grad[S1] @ kcur + // vcur = v[:M,:D,iv2,iv3] [M,D,1,1] grad[vcur] = grad[S5].T @ S4 + // S0 = -Inf [D,1,1,1] + // ~S1[i] = dot(kcur[:D,i], qcur) + // S1 = qcur @ kcur.T [M,1,1,1] grad[S1] = grad[S2] * scale + // S2 = S1 * scale [M,1,1,1] grad[S2] = diag_mask_zero(grad[S3], P) + // S3 = diag_mask_inf(S2, P) [M,1,1,1] grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4])) + // S4 = softmax(S3) [M,1,1,1] grad[S4] = grad[S5] @ vcur + // ~S5[i] = dot(vcur[:,i], S4) + // S5 = S4 @ vcur.T [D,1,1,1] grad[S5] = d[:D,id1,id2,id3] + // ~dst[i,iq1,iq2,iq3] = S5[i] ^ + // dst[:D,iq1,iq2,iq3] = S5 | grad[dst[:D,iq1,iq2,iq3]] = d[:D,id1,id2,id3] + // dst backward-/ grad[dst] = d + // + // output gradients with their dependencies: + // + // grad[kcur] = grad[S1].T @ qcur + // grad[S1] = diag_mask_zero(grad[S3], P) * scale + // grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4])) + // grad[S4] = grad[S5] @ vcur + // grad[S4] = d[:D,id1,id2,id3] @ vcur + // grad[qcur] = grad[S1] @ kcur + // grad[vcur] = 
grad[S5].T @ S4 + // grad[vcur] = d[:D,id1,id2,id3].T @ S4 + // + // in post-order: + // + // S1 = qcur @ kcur.T + // S2 = S1 * scale + // S3 = diag_mask_inf(S2, P) + // S4 = softmax(S3) + // grad[S4] = d[:D,id1,id2,id3] @ vcur + // grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4])) + // grad[S1] = diag_mask_zero(grad[S3], P) * scale + // grad[qcur] = grad[S1] @ kcur + // grad[kcur] = grad[S1].T @ qcur + // grad[vcur] = d[:D,id1,id2,id3].T @ S4 + // + // using less variables (SM=S4): + // + // S = diag_mask_inf(qcur @ kcur.T * scale, P) + // SM = softmax(S) + // S = d[:D,iq1,iq2,iq3] @ vcur + // dot_SM_gradSM = dot(SM, S) + // S = SM * (S - dot(SM, S)) + // S = diag_mask_zero(S, P) * scale + // + // grad[q][:D,iq1,iq2,iq3] += S @ kcur + // grad[k][:D,:M,ik2,ik3] += S.T @ qcur + // grad[v][:M,:D,iv2,iv3] += d[:D,id1,id2,id3].T @ SM + } + + // S = gradSM = d[:D,id1,id2,id3] @ vcur[:,:,iv2,iv3] + // S = d[:D,id1,id2,id3] @ vcur[:,:,iv2,iv3] + // for ic: + // S[:M] += vcur[:M,ic,iv2,iv3] * d[ic,id1,id2,id3] + // exclude known future zero S[..] values from operation + ggml_vec_set_f32(masked_begin, S, 0); + for (int64_t ic = 0; ic < D; ++ic) { + ggml_vec_mad_f32( + masked_begin, S, (float *) ((char *) v->data + (ic * nbv1 + iv2 * nbv2 + iv3 * nbv3)), + *(float *) ((char *) d->data + (ic * nbd0 + id1 * nbd1 + id2 * nbd2 + id3 * nbd3))); + } + + // S = SM * (S - dot(SM, S)) + float dot_SM_gradSM = 0; + ggml_vec_dot_f32(masked_begin, &dot_SM_gradSM, 0, SM, 0, S, 0, 1); + ggml_vec_acc1_f32(M, S, -dot_SM_gradSM); + ggml_vec_mul_f32(masked_begin, S, S, SM); + + // S = diag_mask_zero(S, P) * scale + // already done by above ggml_vec_set_f32 + + // exclude known zero S[..] values from operation + ggml_vec_scale_f32(masked_begin, S, scale); + + // S shape [M,1] + // SM shape [M,1] + // kcur shape [D,M] + // qcur shape [D,1] + // vcur shape [M,D] + + // grad[q][:D,iq1,iq2,iq3] += S @ kcur + // grad[q][:D,iq1,iq2,iq3] += shape[M,1] @ shape[D,M] + // for ic: + // grad[q][:D,iq1,iq2,iq3] += S[ic] * kcur[:D,ic,ik2,ik3] + // exclude known zero S[..] values from loop + for (int64_t ic = 0; ic < masked_begin; ++ic) { + ggml_vec_mad_f32(D, (float *) ((char *) grad_q + (iq1 * nbgq1 + iq2 * nbgq2 + iq3 * nbgq3)), + (float *) ((char *) k->data + (ic * nbk1 + ik2 * nbk2 + ik3 * nbk3)), S[ic]); + } + + // grad[k][:D,:M,iq2,iq3] += S.T @ qcur + // for ic: + // grad[k][:D,ic,iq2,iq3] += S.T[0,ic] * qcur[:D,0] + // grad[k][:D,ic,iq2,iq3] += S[ic] * qcur[:D,0] + // exclude known zero S[..] values from loop + for (int64_t ic = 0; ic < masked_begin; ++ic) { + ggml_vec_mad_f32(D, (float *) ((char *) grad_k + (ic * nbgk1 + ik2 * nbgk2 + ik3 * nbgk3)), + (float *) ((char *) q->data + (iq1 * nbq1 + iq2 * nbq2 + iq3 * nbq3)), S[ic]); + } + + // grad[v][:M,:D,iv2,iv3] += d[:D,id1,id2,id3].T @ SM + // for ic: + // grad[v][:M,ic,iv2,iv3] += d[:D,id1,id2,id3].T[0,ic] * SM[:M] + // grad[v][:M,ic,iv2,iv3] += d[ic,id1,id2,id3] * SM[:M] + // exclude known zero SM[..] 
values from mad + for (int64_t ic = 0; ic < D; ++ic) { + ggml_vec_mad_f32( + masked_begin, (float *) ((char *) grad_v + (ic * nbgv1 + iv2 * nbgv2 + iv3 * nbgv3)), SM, + *(float *) ((char *) d->data + (ic * nbd0 + id1 * nbd1 + id2 * nbd2 + id3 * nbd3))); + } + } + } + } +} + +void ggml_compute_forward_flash_attn_back(const ggml_compute_params * params, const bool masked, ggml_tensor * dst) { + const ggml_tensor * q = dst->src[0]; + + switch (q->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_flash_attn_back_f32(params, masked, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_ssm_conv + +static void ggml_compute_forward_ssm_conv_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; // conv_x + const ggml_tensor * src1 = dst->src[1]; // conv1d.weight + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1->ne[0]; // d_conv + const int ncs = src0->ne[0]; // d_conv - 1 + n_t + const int nr = src0->ne[1]; // d_inner + const int n_t = dst->ne[1]; // tokens per sequence + const int n_s = dst->ne[2]; // number of sequences in the batch + + GGML_ASSERT(dst->ne[0] == nr); + GGML_ASSERT(src0->nb[0] == sizeof(float)); + GGML_ASSERT(src1->nb[0] == sizeof(float)); + GGML_ASSERT(src0->nb[1] == src0->ne[0] * sizeof(float)); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + const int ir = ir1 - ir0; + + for (int i3 = 0; i3 < n_s; ++i3) { + for (int i2 = 0; i2 < n_t; ++i2) { + // {d_conv - 1 + n_t, d_inner, n_seqs} + // sliding window + const float * s = (const float *) ((const char *) src0->data + ir0 * (src0->nb[1]) + i2 * (src0->nb[0]) + + i3 * (src0->nb[2])); // {d_conv, d_inner, n_s} + const float * c = (const float *) ((const char *) src1->data + ir0 * (src1->nb[1])); // {d_conv, d_inner} + float * x = (float *) ((char *) dst->data + ir0 * (dst->nb[0]) + i2 * (dst->nb[1]) + + i3 * (dst->nb[2])); // {d_inner, n_t, n_s} + + // TODO: transpose the output for smaller strides for big batches? 
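+            // Reference formulation (descriptive only): each output element computed by
+            // the loops below is a causal depthwise 1D convolution over the last nc
+            // inputs of its channel, i.e.
+            //
+            //     x[i1] = sum_{i0 = 0 .. nc-1} s[i1*ncs + i0] * c[i1*nc + i0]
+            //
+            // where s is the sliding-window view into conv_x selected by the i2*nb[0]
+            // offset above and c holds the per-channel conv1d weights.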
+ // d_inner + for (int i1 = 0; i1 < ir; ++i1) { + // rowwise dot product + // NOTE: not using ggml_vec_dot_f32, because its sum is in double precision + float sumf = 0.0f; + + // d_conv + for (int i0 = 0; i0 < nc; ++i0) { + sumf += s[i0 + i1 * ncs] * c[i0 + i1 * nc]; + } + x[i1] = sumf; + } + } + } +} + +void ggml_compute_forward_ssm_conv(const ggml_compute_params * params, ggml_tensor * dst) { + switch (dst->src[0]->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_ssm_conv_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_ssm_scan + +static void ggml_compute_forward_ssm_scan_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; // s {d_state, dim, n_head, n_seqs+} + const ggml_tensor * src1 = dst->src[1]; // x {dim, n_head, n_seq_tokens, n_seqs} + const ggml_tensor * src2 = dst->src[2]; // dt {n_head, n_seq_tokens, n_seqs} + const ggml_tensor * src3 = dst->src[3]; // A {d_state, n_head} or {1, n_head} + const ggml_tensor * src4 = dst->src[4]; // B {d_state, n_group, n_seq_tokens, n_seqs} + const ggml_tensor * src5 = dst->src[5]; // C {d_state, n_group, n_seq_tokens, n_seqs} + const ggml_tensor * src6 = dst->src[6]; // ids {n_seqs} + + const int ith = params->ith; + const int nth = params->nth; + + const int64_t nc = src0->ne[0]; // d_state + const int64_t nr = src0->ne[1]; // dim + const int64_t nh = src1->ne[1]; // n_head + const int64_t ng = src4->ne[1]; + const int64_t nt = src1->ne[2]; // number of tokens per sequence + const int64_t ns = src1->ne[3]; // number of sequences in the batch + + // can't use ggml_nbytes because src1 is not necessarily contiguous + const int64_t s_off = ggml_nelements(src1) * ggml_element_size(src1); + + GGML_ASSERT(ggml_nelements(src1) + nc * nr * nh * ns == ggml_nelements(dst)); + GGML_ASSERT(src0->nb[0] == sizeof(float)); + GGML_ASSERT(src1->nb[0] == sizeof(float)); + GGML_ASSERT(src2->nb[0] == sizeof(float)); + GGML_ASSERT(src3->nb[0] == sizeof(float)); + GGML_ASSERT(src4->nb[0] == sizeof(float)); + GGML_ASSERT(src5->nb[0] == sizeof(float)); + GGML_ASSERT(src6->nb[0] == sizeof(int32_t)); + GGML_ASSERT(nh % ng == 0); + + // heads per thread + const int dh = (nh + nth - 1) / nth; + + // head range for this thread + const int ih0 = dh * ith; + const int ih1 = MIN(ih0 + dh, nh); + + const int32_t * ids = (const int32_t *) src6->data; + + for (int i3 = 0; i3 < ns; ++i3) { + const float * s0 = + (const float *) ((const char *) src0->data + ids[i3] * (src0->nb[3])); // {d_state, dim, nh, ns} + float * s = (float *) ((char *) dst->data + i3 * (src0->nb[3]) + s_off); // {d_state, dim, nh, ns} + + for (int i2 = 0; i2 < nt; ++i2) { + const float * x = (const float *) ((const char *) src1->data + i2 * (src1->nb[2]) + + i3 * (src1->nb[3])); // {dim, nh, nt, ns} + const float * dt = + (const float *) ((const char *) src2->data + i2 * (src2->nb[1]) + i3 * (src2->nb[2])); // {nh, nt, ns} + const float * A = (const float *) ((const char *) src3->data); // {d_state, nh} or {1, nh} + const float * B = (const float *) ((const char *) src4->data + i2 * (src4->nb[2]) + + i3 * (src4->nb[3])); // {d_state, ng, nt, ns} + const float * C = (const float *) ((const char *) src5->data + i2 * (src5->nb[2]) + + i3 * (src5->nb[3])); // {d_state, ng, nt, ns} + float * y = (float *) ((char *) dst->data + i2 * (nh * nr * sizeof(float)) + + i3 * (nt * nh * nr * sizeof(float))); // {dim, nh, nt, ns} + + if (src3->ne[0] == 1) { + // Mamba-2 has a scalar decay factor per 
head; dA can be outside the state-wise loop + + // n_head + for (int h = ih0; h < ih1; ++h) { + // ref: https://github.com/state-spaces/mamba/blob/62db608da60f6fc790b8ed9f4b3225e95ca15fde/mamba_ssm/ops/triton/softplus.py#L16 + const float dt_soft_plus = ggml_compute_softplus_f32(dt[h]); + const float dA = expf(dt_soft_plus * A[h]); + const int g = h / (nh / ng); // repeat_interleave + + // dim + for (int i1 = 0; i1 < nr; ++i1) { + const int ii = i1 + h * nr; + const float x_dt = x[ii] * dt_soft_plus; + float sumf = 0.0f; +#if defined(GGML_SIMD) +# if defined(__ARM_FEATURE_SVE) + const int ggml_f32_epr = svcntw(); + const int ggml_f32_step = 1 * ggml_f32_epr; + + const int np = (nc & ~(ggml_f32_step - 1)); + + GGML_F32_VEC sum = GGML_F32_VEC_ZERO; + + GGML_F32_VEC adA = GGML_F32_VEC_SET1(dA); + GGML_F32_VEC axdt = GGML_F32_VEC_SET1(x_dt); + + for (int i = 0; i < np; i += ggml_f32_step) { + // TODO: maybe unroll more? + for (int j = 0; j < 1; j++) { + GGML_F32_VEC t0 = GGML_F32_VEC_LOAD(s0 + i + j * ggml_f32_epr + ii * nc); + GGML_F32_VEC t1 = GGML_F32_VEC_LOAD(B + i + j * ggml_f32_epr + g * nc); + GGML_F32_VEC t2 = GGML_F32_VEC_LOAD(C + i + j * ggml_f32_epr + g * nc); + + t0 = GGML_F32_VEC_MUL(t0, adA); + t1 = GGML_F32_VEC_MUL(t1, axdt); + + t0 = GGML_F32_VEC_ADD(t0, t1); + + sum = GGML_F32_VEC_FMA(sum, t0, t2); + + GGML_F32_VEC_STORE(s + i + j * ggml_f32_epr + ii * nc, t0); + } + } + + sumf = GGML_F32xt_REDUCE_ONE(sum); +# elif defined(__riscv_v_intrinsic) + // todo: RVV implementation + const int np = 0; +# else + const int np = (nc & ~(GGML_F32_STEP - 1)); + + GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; + + GGML_F32_VEC adA = GGML_F32_VEC_SET1(dA); + GGML_F32_VEC axdt = GGML_F32_VEC_SET1(x_dt); + + GGML_F32_VEC ax[GGML_F32_ARR]; + GGML_F32_VEC ay[GGML_F32_ARR]; + GGML_F32_VEC az[GGML_F32_ARR]; + + for (int i = 0; i < np; i += GGML_F32_STEP) { + for (int j = 0; j < GGML_F32_ARR; j++) { + ax[j] = GGML_F32_VEC_LOAD(s0 + i + j * GGML_F32_EPR + ii * nc); + ay[j] = GGML_F32_VEC_LOAD(B + i + j * GGML_F32_EPR + g * nc); + az[j] = GGML_F32_VEC_LOAD(C + i + j * GGML_F32_EPR + g * nc); + + ax[j] = GGML_F32_VEC_MUL(ax[j], adA); + ay[j] = GGML_F32_VEC_MUL(ay[j], axdt); + + ax[j] = GGML_F32_VEC_ADD(ax[j], ay[j]); + + sum[j] = GGML_F32_VEC_FMA(sum[j], ax[j], az[j]); + + GGML_F32_VEC_STORE(s + i + j * GGML_F32_EPR + ii * nc, ax[j]); + } + } + + // reduce sum0..sum3 to sum0 + GGML_F32_VEC_REDUCE(sumf, sum); +# endif +#else + const int np = 0; +#endif + // d_state + for (int i0 = np; i0 < nc; ++i0) { + const int i = i0 + ii * nc; + const int ig = i0 + g * nc; + // state = prev_state * dA + dB * x + const float state = (s0[i] * dA) + (B[ig] * x_dt); + // y = rowwise_dotprod(state, C) + sumf += state * C[ig]; + s[i] = state; + } + y[ii] = sumf; + } + } + } else { + // Mamba-1 has an element-wise decay factor for the states + + // n_head + for (int h = ih0; h < ih1; ++h) { + // ref: https://github.com/state-spaces/mamba/blob/62db608da60f6fc790b8ed9f4b3225e95ca15fde/mamba_ssm/ops/triton/softplus.py#L16 + const float dt_soft_plus = ggml_compute_softplus_f32(dt[h]); + const int g = h / (nh / ng); // repeat_interleave + + // dim + for (int i1 = 0; i1 < nr; ++i1) { + const int ii = i1 + h * nr; + const float x_dt = x[ii] * dt_soft_plus; +#if defined(__ARM_FEATURE_SVE) + svfloat32_t vx_dt = GGML_F32_VEC_SET1(x_dt); + svfloat32_t vdt_soft_plus = GGML_F32_VEC_SET1(dt_soft_plus); + svfloat32_t r1_vector = GGML_F32_VEC_ZERO; + + // d_state + // TODO: what happens when (d_state % svcntw()) != 0? 
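+                    // Note: the vector loop below steps by svcntw() using full-width
+                    // loads/stores with no predicated tail, so it assumes nc (d_state)
+                    // is a multiple of the SVE vector length; the scalar path below
+                    // notes d_state is usually 16, which satisfies this for 128/256/512-bit SVE.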
+ for (int64_t k = 0; k < nc; k += svcntw()) { + svfloat32_t vA = GGML_F32_VEC_LOAD(&A[h * nc + k]); + svfloat32_t vB = GGML_F32_VEC_LOAD(&B[k + g * nc]); + svfloat32_t vC = GGML_F32_VEC_LOAD(&C[k + g * nc]); + svfloat32_t vs0 = GGML_F32_VEC_LOAD(&s0[ii * nc + k]); + + svfloat32_t t1 = GGML_F32_VEC_MUL(vdt_soft_plus, vA); + t1 = exp_ps_sve(svptrue_b32(), t1); + svfloat32_t t2 = GGML_F32_VEC_MUL(vx_dt, vB); + + vs0 = GGML_F32_VEC_FMA(t2, vs0, t1); + r1_vector = GGML_F32_VEC_ADD(GGML_F32_VEC_MUL(vs0, vC), r1_vector); + + GGML_F32_VEC_STORE(&s[ii * nc + k], vs0); + } + y[ii] = GGML_F32xt_REDUCE_ONE(r1_vector); +#else + float sumf = 0.0f; + // NOTE: can't really use GGML_SIMD here because d_state is usually 16 + // and also because expf is used within the loop. + // d_state + for (int i0 = 0; i0 < nc; ++i0) { + const int i = i0 + ii * nc; + const int ig = i0 + g * nc; + // state = prev_state * dA + dB * x + const float state = (s0[i] * expf(dt_soft_plus * A[i0 + h * nc])) + (B[ig] * x_dt); + // y = rowwise_dotprod(state, C) + sumf += state * C[ig]; + s[i] = state; + } + y[ii] = sumf; +#endif + } + } + } + // use the output as the source when it's not the first token-wise iteration + s0 = s; + } + } +} + +void ggml_compute_forward_ssm_scan(const ggml_compute_params * params, ggml_tensor * dst) { + switch (dst->src[0]->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_ssm_scan_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_win_part + +static void ggml_compute_forward_win_part_f32(const ggml_compute_params * params, ggml_tensor * dst) { + GGML_UNUSED(params); + + const ggml_tensor * src0 = dst->src[0]; + + GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) + GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) + + const int32_t nep0 = ((const int32_t *) (dst->op_params))[0]; + const int32_t nep1 = ((const int32_t *) (dst->op_params))[1]; + const int32_t w = ((const int32_t *) (dst->op_params))[2]; + + assert(ne00 == ne0); + assert(ne3 == nep0 * nep1); + + // TODO: optimize / multi-thread + for (int py = 0; py < nep1; ++py) { + for (int px = 0; px < nep0; ++px) { + const int64_t i3 = py * nep0 + px; + for (int64_t i2 = 0; i2 < ne2; ++i2) { + for (int64_t i1 = 0; i1 < ne1; ++i1) { + for (int64_t i0 = 0; i0 < ne0; ++i0) { + const int64_t i02 = py * w + i2; + const int64_t i01 = px * w + i1; + const int64_t i00 = i0; + + const int64_t i = i3 * ne2 * ne1 * ne0 + i2 * ne1 * ne0 + i1 * ne0 + i0; + const int64_t j = i02 * ne01 * ne00 + i01 * ne00 + i00; + + if (py * w + i2 >= ne02 || px * w + i1 >= ne01) { + ((float *) dst->data)[i] = 0.0f; + } else { + ((float *) dst->data)[i] = ((float *) src0->data)[j]; + } + } + } + } + } + } +} + +void ggml_compute_forward_win_part(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_win_part_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_win_unpart + +static void ggml_compute_forward_win_unpart_f32(const ggml_compute_params * params, ggml_tensor * dst) { + GGML_UNUSED(params); + + const ggml_tensor * src0 = dst->src[0]; + + GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) + GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) + + const int32_t w = ((const int32_t *) (dst->op_params))[0]; + + // padding + const int px = (w - ne1 % w) % w; + //const int py = (w - ne2%w)%w; + + const int npx = (px + ne1) / w; + //const int npy = (py + ne2)/w; + + 
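+    // px is the horizontal padding that ggml_win_part added so that ne1 became a
+    // multiple of the window size w, and npx is the resulting number of window
+    // partitions along x. For example, w = 7 and ne1 = 20 give px = 1 and npx = 3.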
assert(ne0 == ne00); + + // TODO: optimize / multi-thread + for (int64_t i2 = 0; i2 < ne2; ++i2) { + for (int64_t i1 = 0; i1 < ne1; ++i1) { + for (int64_t i0 = 0; i0 < ne0; ++i0) { + const int ip2 = i2 / w; + const int ip1 = i1 / w; + + const int64_t i02 = i2 % w; + const int64_t i01 = i1 % w; + const int64_t i00 = i0; + + const int64_t i = (ip2 * npx + ip1) * ne02 * ne01 * ne00 + i02 * ne01 * ne00 + i01 * ne00 + i00; + const int64_t j = i2 * ne1 * ne0 + i1 * ne0 + i0; + + ((float *) dst->data)[j] = ((float *) src0->data)[i]; + } + } + } +} + +void ggml_compute_forward_win_unpart(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_win_unpart_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +//gmml_compute_forward_unary + +void ggml_compute_forward_unary(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_unary_op op = ggml_get_unary_op(dst); + + switch (op) { + case GGML_UNARY_OP_ABS: + { + ggml_compute_forward_abs(params, dst); + } + break; + case GGML_UNARY_OP_SGN: + { + ggml_compute_forward_sgn(params, dst); + } + break; + case GGML_UNARY_OP_NEG: + { + ggml_compute_forward_neg(params, dst); + } + break; + case GGML_UNARY_OP_STEP: + { + ggml_compute_forward_step(params, dst); + } + break; + case GGML_UNARY_OP_TANH: + { + ggml_compute_forward_tanh(params, dst); + } + break; + case GGML_UNARY_OP_ELU: + { + ggml_compute_forward_elu(params, dst); + } + break; + case GGML_UNARY_OP_RELU: + { + ggml_compute_forward_relu(params, dst); + } + break; + case GGML_UNARY_OP_SIGMOID: + { + ggml_compute_forward_sigmoid(params, dst); + } + break; + case GGML_UNARY_OP_GELU: + { + ggml_compute_forward_gelu(params, dst); + } + break; + case GGML_UNARY_OP_GELU_ERF: + { + ggml_compute_forward_gelu_erf(params, dst); + } + break; + case GGML_UNARY_OP_GELU_QUICK: + { + ggml_compute_forward_gelu_quick(params, dst); + } + break; + case GGML_UNARY_OP_SILU: + { + ggml_compute_forward_silu(params, dst); + } + break; + case GGML_UNARY_OP_HARDSWISH: + { + ggml_compute_forward_hardswish(params, dst); + } + break; + case GGML_UNARY_OP_HARDSIGMOID: + { + ggml_compute_forward_hardsigmoid(params, dst); + } + break; + case GGML_UNARY_OP_EXP: + { + ggml_compute_forward_exp(params, dst); + } + break; + case GGML_UNARY_OP_FLOOR: + { + ggml_compute_forward_floor(params, dst); + } + break; + case GGML_UNARY_OP_CEIL: + { + ggml_compute_forward_ceil(params, dst); + } + break; + case GGML_UNARY_OP_ROUND: + { + ggml_compute_forward_round(params, dst); + } + break; + case GGML_UNARY_OP_TRUNC: + { + ggml_compute_forward_trunc(params, dst); + } + break; + case GGML_UNARY_OP_XIELU: + { + ggml_compute_forward_xielu(params, dst); + } + break; + case GGML_UNARY_OP_EXPM1: + { + ggml_compute_forward_expm1(params, dst); + } + break; + case GGML_UNARY_OP_SOFTPLUS: + { + ggml_compute_forward_softplus(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +//ggml_compute_forward_glu + +void ggml_compute_forward_glu(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_glu_op op = ggml_get_glu_op(dst); + + switch (op) { + case GGML_GLU_OP_REGLU: + { + ggml_compute_forward_reglu(params, dst); + } + break; + case GGML_GLU_OP_GEGLU: + { + ggml_compute_forward_geglu(params, dst); + } + break; + case GGML_GLU_OP_SWIGLU: + { + ggml_compute_forward_swiglu(params, dst); + } + break; + case GGML_GLU_OP_SWIGLU_OAI: + { + 
ggml_compute_forward_swiglu_oai(params, dst); + } + break; + case GGML_GLU_OP_GEGLU_ERF: + { + ggml_compute_forward_geglu_erf(params, dst); + } + break; + case GGML_GLU_OP_GEGLU_QUICK: + { + ggml_compute_forward_geglu_quick(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_get_rel_pos + +static void ggml_compute_forward_get_rel_pos_f16(const ggml_compute_params * params, ggml_tensor * dst) { + GGML_UNUSED(params); + + const ggml_tensor * src0 = dst->src[0]; + + // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/image_encoder.py#L292-L322 + + GGML_TENSOR_UNARY_OP_LOCALS + + const int64_t w = ne1; + + ggml_fp16_t * src0_data = (ggml_fp16_t *) src0->data; + ggml_fp16_t * dst_data = (ggml_fp16_t *) dst->data; + + for (int64_t i2 = 0; i2 < ne2; ++i2) { + for (int64_t i1 = 0; i1 < ne1; ++i1) { + const int64_t pos = (w - i1 - 1) + i2; + for (int64_t i0 = 0; i0 < ne0; ++i0) { + dst_data[i2 * ne1 * ne0 + i1 * ne0 + i0] = src0_data[pos * ne00 + i0]; + } + } + } +} + +void ggml_compute_forward_get_rel_pos(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F16: + case GGML_TYPE_BF16: + { + ggml_compute_forward_get_rel_pos_f16(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_add_rel_pos + +static void ggml_compute_forward_add_rel_pos_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + const ggml_tensor * src2 = dst->src[2]; + + const bool inplace = (bool) ((int32_t *) dst->op_params)[0]; + if (!inplace) { + if (params->ith == 0) { + memcpy((char *) dst->data, (char *) src0->data, ggml_nbytes(dst)); + } + ggml_barrier(params->threadpool); + } + // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/image_encoder.py#L357-L359 + + float * src1_data = (float *) src1->data; + float * src2_data = (float *) src2->data; + float * dst_data = (float *) dst->data; + + const int64_t ne10 = src1->ne[0]; + const int64_t ne11 = src1->ne[1]; + const int64_t ne12 = src1->ne[2]; + const int64_t ne13 = src1->ne[3]; + + const int ith = params->ith; + const int nth = params->nth; + + // total patches in dst + const int np = ne13; + + // patches per thread + const int dp = (np + nth - 1) / nth; + + // patch range for this thread + const int ip0 = dp * ith; + const int ip1 = MIN(ip0 + dp, np); + + for (int64_t i13 = ip0; i13 < ip1; ++i13) { + for (int64_t i12 = 0; i12 < ne12; ++i12) { + for (int64_t i11 = 0; i11 < ne11; ++i11) { + const int64_t jp1 = i13 * ne12 * ne11 * ne10 + i12 * ne11 * ne10 + i11 * ne10; + for (int64_t i10 = 0; i10 < ne10; ++i10) { + const int64_t jp0 = jp1 + i10; + const float src1_e = src1_data[jp0]; + const float src2_e = src2_data[jp0]; + + const int64_t jdh = jp0 * ne10; + const int64_t jdw = jdh - (ne10 - 1) * i10; + + for (int64_t j = 0; j < ne10; ++j) { + dst_data[jdh + j] += src2_e; + dst_data[jdw + j * ne10] += src1_e; + } + } + } + } + } +} + +void ggml_compute_forward_add_rel_pos(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_add_rel_pos_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_rwkv_wkv6 + +static void 
ggml_compute_forward_rwkv_wkv6_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const int64_t T = dst->src[1]->ne[2]; + const int64_t C = dst->ne[0]; + const int64_t HEADS = dst->src[1]->ne[1]; + const int64_t n_seqs = dst->src[5]->ne[1]; + const int64_t head_size = C / HEADS; + + float * dst_data = (float *) dst->data; + float * state = ((float *) dst->data) + C * T; + + const int ith = params->ith; + const int nth = params->nth; + + if (ith >= HEADS) { + return; + } + + const int h_start = (HEADS * ith) / nth; + const int h_end = ((HEADS * (ith + 1)) / nth < HEADS) ? (HEADS * (ith + 1)) / nth : HEADS; + + float * k = (float *) dst->src[0]->data; + float * v = (float *) dst->src[1]->data; + float * r = (float *) dst->src[2]->data; + float * time_faaaa = (float *) dst->src[3]->data; + float * time_decay = (float *) dst->src[4]->data; + + size_t t_stride = HEADS * head_size; // Same to C + + size_t h_stride = C / HEADS; + GGML_ASSERT(C % HEADS == 0); // C must be divisible by HEADS + size_t h_stride_2d = head_size * head_size; + + if (ith == 0) { + memset(dst_data, 0, T * C * sizeof(float)); + } + ggml_barrier(params->threadpool); + +#if defined(__AVX__) && !defined(__AVX512F__) +# define GGML_F32X GGML_F32x8 +# define GGML_F32X_SET1 GGML_F32x8_SET1 +# define GGML_F32X_LOAD GGML_F32x8_LOAD +# define GGML_F32X_STORE GGML_F32x8_STORE +# define GGML_F32X_MUL GGML_F32x8_MUL +# define GGML_F32X_FMA GGML_F32x8_FMA +# define WKV_VECTOR_SIZE 8 +#elif defined(__AVX512F__) +# define GGML_F32X GGML_F32x16 +# define GGML_F32X_SET1 GGML_F32x16_SET1 +# define GGML_F32X_LOAD GGML_F32x16_LOAD +# define GGML_F32X_STORE GGML_F32x16_STORE +# define GGML_F32X_MUL GGML_F32x16_MUL +# define GGML_F32X_FMA GGML_F32x16_FMA +# define WKV_VECTOR_SIZE 16 +#elif defined(__ARM_FEATURE_SVE) && defined(__aarch64__) +# define GGML_F32X GGML_F32xt +# define GGML_F32X_SET1 GGML_F32xt_SET1 +# define GGML_F32X_LOAD GGML_F32xt_LOAD +# define GGML_F32X_STORE GGML_F32xt_STORE +# define GGML_F32X_MUL GGML_F32xt_MUL +# define GGML_F32X_FMA GGML_F32xt_FMA +# define WKV_VECTOR_SIZE 8 +#elif defined(__ARM_NEON) && defined(__aarch64__) +# define GGML_F32X GGML_F32x4 +# define GGML_F32X_SET1 GGML_F32x4_SET1 +# define GGML_F32X_LOAD GGML_F32x4_LOAD +# define GGML_F32X_STORE GGML_F32x4_STORE +# define GGML_F32X_MUL GGML_F32x4_MUL +# define GGML_F32X_FMA GGML_F32x4_FMA +# define WKV_VECTOR_SIZE 4 +#endif + +#ifdef WKV_VECTOR_SIZE + int wkv_vector_size; +# if defined(__ARM_FEATURE_SVE) + wkv_vector_size = svcntw(); +# else + wkv_vector_size = WKV_VECTOR_SIZE; +# endif + const int64_t vec_count = head_size / wkv_vector_size; + + for (int64_t t = 0; t < T; t++) { + size_t t_offset = t * t_stride; + size_t state_offset = head_size * C * (t / (T / n_seqs)); + float * state_cur = state + state_offset; + float * state_prev = t % (T / n_seqs) ? 
state_cur : (float *) dst->src[5]->data + state_offset; + + for (int64_t h = h_start; h < h_end; h++) { + size_t h_offset = h * h_stride; + size_t t_h_offset = t_offset + h_offset; + size_t h_2d_offset = h * h_stride_2d; + + for (int64_t i = 0; i < head_size; i++) { + size_t t_h_i_offset = t_h_offset + i; + size_t h_i_offset = h_offset + i; + size_t h_2d_i_offset = h_2d_offset + i * h_stride; + + float k_val = k[t_h_i_offset]; + float r_val = r[t_h_i_offset]; + float time_faaaa_val = time_faaaa[h_i_offset]; + float time_decay_val = time_decay[t_h_i_offset]; + + // Broadcast scalar values to vectors + GGML_F32X k_vec = GGML_F32X_SET1(k_val); + GGML_F32X r_vec = GGML_F32X_SET1(r_val); + GGML_F32X time_faaaa_vec = GGML_F32X_SET1(time_faaaa_val); + GGML_F32X time_decay_vec = GGML_F32X_SET1(time_decay_val); + + for (int64_t j = 0; j < vec_count; j++) { + size_t base_j = j * wkv_vector_size; + size_t t_h_j_offset = t_h_offset + base_j; + size_t h_2d_i_j_offset = h_2d_i_offset + base_j; + + // Load x elements at once + GGML_F32X v_vec = GGML_F32X_LOAD(&v[t_h_j_offset]); + GGML_F32X prev_state_vec = GGML_F32X_LOAD(&state_prev[h_2d_i_j_offset]); + GGML_F32X dst_vec = GGML_F32X_LOAD(&dst_data[t_h_j_offset]); + + // Compute kv = v * k + GGML_F32X kv_vec = GGML_F32X_MUL(v_vec, k_vec); + + // Compute temp = kv * time_faaaa + prev_state + GGML_F32X temp_vec = GGML_F32X_FMA(prev_state_vec, kv_vec, time_faaaa_vec); + + // Update dst: dst += temp * r + dst_vec = GGML_F32X_FMA(dst_vec, temp_vec, r_vec); + GGML_F32X_STORE(&dst_data[t_h_j_offset], dst_vec); + + // Update state: state = prev_state * time_decay + kv + GGML_F32X new_state_vec = GGML_F32X_FMA(kv_vec, prev_state_vec, time_decay_vec); + GGML_F32X_STORE(&state_cur[h_2d_i_j_offset], new_state_vec); + } + + // Handle remaining elements, this will not be used. + for (int64_t j = vec_count * wkv_vector_size; j < head_size; j++) { + size_t t_h_j_offset = t_h_offset + j; + size_t h_2d_i_j_offset = h_2d_i_offset + j; + float v_val = v[t_h_j_offset]; + float kv_val = v_val * k_val; + float prev_state_val = state_prev[h_2d_i_j_offset]; + float temp_val = kv_val * time_faaaa_val + prev_state_val; + dst_data[t_h_j_offset] += temp_val * r_val; + state_cur[h_2d_i_j_offset] = prev_state_val * time_decay_val + kv_val; + } + } + } + } + +#else + // basically fused operations: + // dst = r @ (time_faaaa * (k @ v) + state), + // state = time_decay * state + (k @ v), + // recursive through each token + for (int64_t t = 0; t < T; t++) { + size_t t_offset = t * t_stride; + size_t state_offset = head_size * C * (t / (T / n_seqs)); + float * state_cur = state + state_offset; + float * state_prev = t % (T / n_seqs) ? state_cur : (float *) dst->src[5]->data + state_offset; + + for (int64_t h = h_start; h < h_end; h++) { + size_t h_offset = h * h_stride; + size_t t_h_offset = t_offset + h_offset; + size_t h_2d_offset = h * h_stride_2d; + + for (int64_t i = 0; i < head_size; i++) { + size_t t_h_i_offset = t_h_offset + i; + size_t h_i_offset = h_offset + i; + size_t h_2d_i_offset = h_2d_offset + i * h_stride; + + float k_val = k[t_h_i_offset]; + float r_val = r[t_h_i_offset]; + float time_faaaa_val = time_faaaa[h_i_offset]; + // RWKV v6: different time_decay for each token. 
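+                // In scalar form, the j-loop below computes, per (head h, row i, col j):
+                //     kv           = k[t,h,i] * v[t,h,j]
+                //     dst[t,h,j]  += r[t,h,i] * (time_faaaa[h,i] * kv + state[i,j])
+                //     state[i,j]   = state[i,j] * time_decay[t,h,i] + kv
+                // which matches the vectorized path above.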
+ float time_decay_val = time_decay[t_h_i_offset]; + + for (int64_t j = 0; j < head_size; j++) { + size_t t_h_j_offset = t_h_offset + j; + size_t h_2d_i_j_offset = h_2d_i_offset + j; + + float v_val = v[t_h_j_offset]; + float kv_val = v_val * k_val; + float prev_state_val = state_prev[h_2d_i_j_offset]; + float temp_val = kv_val * time_faaaa_val + prev_state_val; + dst_data[t_h_j_offset] += temp_val * r_val; + state_cur[h_2d_i_j_offset] = prev_state_val * time_decay_val + kv_val; + } + } + } + } +#endif +} + +void ggml_compute_forward_rwkv_wkv6(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_rwkv_wkv6_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_gla + +static void ggml_compute_forward_gla_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const int64_t T = dst->src[1]->ne[2]; + const int64_t C = dst->ne[0]; + const int64_t HEADS = dst->src[1]->ne[1]; + const int64_t n_seqs = dst->src[4]->ne[1]; + const int64_t head_size = C / HEADS; + const float scale = ggml_get_op_params_f32(dst, 0); + + float * dst_data = (float *) dst->data; + float * state = ((float *) dst->data) + C * T; + + const int ith = params->ith; + const int nth = params->nth; + + if (ith >= HEADS) { + return; + } + + const int h_start = (HEADS * ith) / nth; + const int h_end = ((HEADS * (ith + 1)) / nth < HEADS) ? (HEADS * (ith + 1)) / nth : HEADS; + + float * k = (float *) dst->src[0]->data; + float * v = (float *) dst->src[1]->data; + float * q = (float *) dst->src[2]->data; + float * g = (float *) dst->src[3]->data; + + size_t t_stride = HEADS * head_size; // Same to C + + size_t h_stride = C / HEADS; + GGML_ASSERT(C % HEADS == 0); // C must be divisible by HEADS + size_t h_stride_2d = head_size * head_size; + + if (ith == 0) { + memset(dst_data, 0, T * C * sizeof(float)); + } + ggml_barrier(params->threadpool); + +#if defined(__AVX__) && !defined(__AVX512F__) +# define GGML_F32X GGML_F32x8 +# define GGML_F32X_SET1 GGML_F32x8_SET1 +# define GGML_F32X_LOAD GGML_F32x8_LOAD +# define GGML_F32X_STORE GGML_F32x8_STORE +# define GGML_F32X_MUL GGML_F32x8_MUL +# define GGML_F32X_FMA GGML_F32x8_FMA +# define GLA_VECTOR_SIZE 8 +#elif defined(__AVX512F__) +# define GGML_F32X GGML_F32x16 +# define GGML_F32X_SET1 GGML_F32x16_SET1 +# define GGML_F32X_LOAD GGML_F32x16_LOAD +# define GGML_F32X_STORE GGML_F32x16_STORE +# define GGML_F32X_MUL GGML_F32x16_MUL +# define GGML_F32X_FMA GGML_F32x16_FMA +# define GLA_VECTOR_SIZE 16 +#elif defined(__ARM_FEATURE_SVE) && defined(__aarch64__) +# define GGML_F32X GGML_F32xt +# define GGML_F32X_SET1 GGML_F32xt_SET1 +# define GGML_F32X_LOAD GGML_F32xt_LOAD +# define GGML_F32X_STORE GGML_F32xt_STORE +# define GGML_F32X_MUL GGML_F32xt_MUL +# define GGML_F32X_FMA GGML_F32xt_FMA +# define GLA_VECTOR_SIZE 8 +#elif defined(__ARM_NEON) && defined(__aarch64__) +# define GGML_F32X GGML_F32x4 +# define GGML_F32X_SET1 GGML_F32x4_SET1 +# define GGML_F32X_LOAD GGML_F32x4_LOAD +# define GGML_F32X_STORE GGML_F32x4_STORE +# define GGML_F32X_MUL GGML_F32x4_MUL +# define GGML_F32X_FMA GGML_F32x4_FMA +# define GLA_VECTOR_SIZE 4 +#endif + +#ifdef GLA_VECTOR_SIZE + int gla_vector_size; +# if defined(__ARM_FEATURE_SVE) + gla_vector_size = svcntw(); +# else + gla_vector_size = GLA_VECTOR_SIZE; +# endif + const int64_t vec_count = head_size / gla_vector_size; + + for (int64_t t = 0; t < T; t++) { + size_t t_offset = t * 
t_stride; + size_t state_offset = head_size * C * (t / (T / n_seqs)); + float * state_cur = state + state_offset; + float * state_prev = t % (T / n_seqs) ? state_cur : (float *) dst->src[4]->data + state_offset; + + for (int64_t h = h_start; h < h_end; h++) { + size_t h_offset = h * h_stride; + size_t t_h_offset = t_offset + h_offset; + size_t h_2d_offset = h * h_stride_2d; + + for (int64_t i = 0; i < head_size; i++) { + size_t t_h_i_offset = t_h_offset + i; + size_t h_2d_i_offset = h_2d_offset + i * h_stride; + + float k_val = k[t_h_i_offset]; + float q_val = q[t_h_i_offset] * scale; + float g_val = g[t_h_i_offset]; + + // Broadcast scalar values to vectors + GGML_F32X k_vec = GGML_F32X_SET1(k_val); + GGML_F32X q_vec = GGML_F32X_SET1(q_val); + GGML_F32X g_vec = GGML_F32X_SET1(g_val); + + for (int64_t j = 0; j < vec_count; j++) { + size_t base_j = j * gla_vector_size; + size_t t_h_j_offset = t_h_offset + base_j; + size_t h_2d_i_j_offset = h_2d_i_offset + base_j; + + // Load x elements at once + GGML_F32X v_vec = GGML_F32X_LOAD(&v[t_h_j_offset]); + GGML_F32X prev_state_vec = GGML_F32X_LOAD(&state_prev[h_2d_i_j_offset]); + GGML_F32X dst_vec = GGML_F32X_LOAD(&dst_data[t_h_j_offset]); + + // Compute kv = v * k + GGML_F32X kv_vec = GGML_F32X_MUL(v_vec, k_vec); + + // Compute temp = prev_state * g + kv + GGML_F32X temp_vec = GGML_F32X_FMA(kv_vec, prev_state_vec, g_vec); + + // Update dst: dst += temp * q + dst_vec = GGML_F32X_FMA(dst_vec, temp_vec, q_vec); + GGML_F32X_STORE(&dst_data[t_h_j_offset], dst_vec); + + // Update state + GGML_F32X_STORE(&state_cur[h_2d_i_j_offset], temp_vec); + } + + // Handle remaining elements, this will not be used. + for (int64_t j = vec_count * gla_vector_size; j < head_size; j++) { + size_t t_h_j_offset = t_h_offset + j; + size_t h_2d_i_j_offset = h_2d_i_offset + j; + float v_val = v[t_h_j_offset]; + float kv_val = v_val * k_val; + float prev_state_val = state_prev[h_2d_i_j_offset]; + float temp_val = kv_val + prev_state_val * g_val; + dst_data[t_h_j_offset] += temp_val * q_val; + state_cur[h_2d_i_j_offset] = temp_val; + } + } + } + } + +#else + for (int64_t t = 0; t < T; t++) { + size_t t_offset = t * t_stride; + size_t state_offset = head_size * C * (t / (T / n_seqs)); + float * state_cur = state + state_offset; + float * state_prev = t % (T / n_seqs) ? 
state_cur : (float *) dst->src[4]->data + state_offset; + + for (int64_t h = h_start; h < h_end; h++) { + size_t h_offset = h * h_stride; + size_t t_h_offset = t_offset + h_offset; + size_t h_2d_offset = h * h_stride_2d; + + for (int64_t i = 0; i < head_size; i++) { + size_t t_h_i_offset = t_h_offset + i; + size_t h_2d_i_offset = h_2d_offset + i * h_stride; + + float k_val = k[t_h_i_offset]; + float q_val = q[t_h_i_offset] * scale; + float g_val = g[t_h_i_offset]; + + for (int64_t j = 0; j < head_size; j++) { + size_t t_h_j_offset = t_h_offset + j; + size_t h_2d_i_j_offset = h_2d_i_offset + j; + + float v_val = v[t_h_j_offset]; + float kv_val = v_val * k_val; + float prev_state_val = state_prev[h_2d_i_j_offset]; + float temp_val = prev_state_val * g_val + kv_val; + dst_data[t_h_j_offset] += temp_val * q_val; + state_cur[h_2d_i_j_offset] = temp_val; + } + } + } + } +#endif +} + +void ggml_compute_forward_gla(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_gla_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +static void ggml_compute_forward_solve_tri_f32(const struct ggml_compute_params * params, struct ggml_tensor * dst) { + const struct ggml_tensor * src0 = dst->src[0]; // A (lower triangular) + const struct ggml_tensor * src1 = dst->src[1]; // B (RHS) + + GGML_TENSOR_BINARY_OP_LOCALS; + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_F32); + + GGML_ASSERT(ne00 == ne01); // A must be square + GGML_ASSERT(ne0 == ne10); // solution cols == B cols + GGML_ASSERT(ne1 == ne11); // solution rows == B rows + + GGML_ASSERT(ne02 == ne12 && ne12 == ne2); + GGML_ASSERT(ne03 == ne13 && ne13 == ne3); + + const int ith = params->ith; + const int nth = params->nth; + + const int64_t k = ne10; // number of RHS columns + const int64_t n = ne11; // A is n×n + const int64_t nr = + ne02 * ne03 * k; // we're parallelizing on columns here, so seq x token x column will be the unit + + // chunks per thread + const int64_t dr = (nr + nth - 1) / nth; + + // chunk range for this thread + const int64_t ir0 = dr * ith; + const int64_t ir1 = MIN(ir0 + dr, nr); + + const float * A = (const float *) src0->data; // [n, n, B1, B2] + const float * B = (const float *) src1->data; // [n, k, B1, B2] + float * X = (float *) dst->data; // [n, k, B1, B2] + + for (int64_t ir = ir0; ir < ir1; ++ir) { + const int64_t i03 = ir / (ne02 * k); + const int64_t i02 = (ir - i03 * ne02 * k) / k; + const int64_t i01 = (ir - i03 * ne02 * k - i02 * k); + + const float * A_batch = A + i02 * nb02 / sizeof(float) + i03 * nb03 / sizeof(float); + const float * B_batch = B + i02 * nb12 / sizeof(float) + i03 * nb13 / sizeof(float); + + float * X_batch = X + i02 * nb2 / sizeof(float) + i03 * nb3 / sizeof(float); + + for (int64_t i00 = 0; i00 < n; ++i00) { + float sum = 0.0f; + for (int64_t t = 0; t < i00; ++t) { + sum += A_batch[i00 * n + t] * X_batch[t * k + i01]; + } + + const float diag = A_batch[i00 * n + i00]; + GGML_ASSERT(diag != 0.0f && "Zero diagonal in triangular matrix"); + X_batch[i00 * k + i01] = (B_batch[i00 * k + i01] - sum) / diag; + } + } +} + +void ggml_compute_forward_solve_tri(const struct ggml_compute_params * params, struct ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + if (src0->type == GGML_TYPE_F32 && src1->type == 
GGML_TYPE_F32) { + ggml_compute_forward_solve_tri_f32(params, dst); + } else { + GGML_ABORT("fatal error"); + } +} + +// ggml_compute_forward_rwkv_wkv7 + +static void ggml_compute_forward_rwkv_wkv7_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const int64_t T = dst->src[1]->ne[2]; + const int64_t C = dst->ne[0]; + const int64_t HEADS = dst->src[1]->ne[1]; + const int64_t n_seqs = dst->src[6]->ne[1]; + const int64_t head_size = C / HEADS; + + float * dst_data = (float *) dst->data; + float * state = ((float *) dst->data) + C * T; + + const int ith = params->ith; + const int nth = params->nth; + + if (ith >= HEADS) { + return; + } + + const int h_start = (HEADS * ith) / nth; + const int h_end = ((HEADS * (ith + 1)) / nth < HEADS) ? (HEADS * (ith + 1)) / nth : HEADS; + + float * r = (float *) dst->src[0]->data; + float * w = (float *) dst->src[1]->data; + float * k = (float *) dst->src[2]->data; + float * v = (float *) dst->src[3]->data; + float * a = (float *) dst->src[4]->data; + float * b = (float *) dst->src[5]->data; + + int64_t t_stride = HEADS * head_size; // Same to C + + int64_t h_stride = C / HEADS; + GGML_ASSERT(C % HEADS == 0); // C must be divisible by HEADS + int64_t h_stride_2d = head_size * head_size; + +#if defined(GGML_SIMD) +# if defined(__ARM_FEATURE_SVE) || defined(__riscv_v_intrinsic) + // scalar Route to scalar implementation //TODO: Write SVE code and RVV code + for (int64_t t = 0; t < T; t++) { + int64_t t_offset = t * t_stride; + int64_t state_offset = head_size * C * (t / (T / n_seqs)); + float * state_cur = state + state_offset; + float * state_prev = t % (T / n_seqs) ? state_cur : (float *) dst->src[6]->data + state_offset; + + for (int64_t h = h_start; h < h_end; h++) { + int64_t h_offset = h * h_stride; + int64_t t_h_offset = t_offset + h_offset; + int64_t h_2d_offset = h * h_stride_2d; + + for (int64_t i = 0; i < head_size; i++) { + int64_t t_h_i_offset = t_h_offset + i; + int64_t h_2d_i_offset = h_2d_offset + i * h_stride; + + float v_val = v[t_h_i_offset]; + + float sa = 0, result = 0; + for (int64_t j = 0; j < head_size; j++) { + sa += a[t_h_offset + j] * state_prev[h_2d_i_offset + j]; + } + + for (int64_t j = 0; j < head_size; j++) { + int64_t t_h_j_offset = t_h_offset + j; + int64_t h_2d_i_j_offset = h_2d_i_offset + j; + + float r_val = r[t_h_j_offset]; + float w_val = w[t_h_j_offset]; + float k_val = k[t_h_j_offset]; + float b_val = b[t_h_j_offset]; + float kv_val = v_val * k_val; + float prev_state_val = state_prev[h_2d_i_j_offset]; + state_cur[h_2d_i_j_offset] = prev_state_val * w_val + kv_val + sa * b_val; + result += state_cur[h_2d_i_j_offset] * r_val; + } + dst_data[t_h_i_offset] = result; + } + } + } +# else + for (int64_t t = 0; t < T; t++) { + int64_t t_offset = t * t_stride; + int64_t state_offset = head_size * C * (t / (T / n_seqs)); + float * state_cur = state + state_offset; + float * state_prev = t % (T / n_seqs) ? 
state_cur : (float *) dst->src[6]->data + state_offset; + + for (int64_t h = h_start; h < h_end; h++) { + int64_t h_offset = h * h_stride; + int64_t t_h_offset = t_offset + h_offset; + int64_t h_2d_offset = h * h_stride_2d; + + for (int64_t ii = 0; ii < head_size; ii++) { + int64_t t_h_i_offset = t_h_offset + ii; + int64_t h_2d_i_offset = h_2d_offset + ii * h_stride; + + GGML_F32_VEC v_vec = GGML_F32_VEC_SET1(v[t_h_i_offset]); + + float sa = 0; + { + GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; + GGML_F32_VEC ax[GGML_F32_ARR]; + GGML_F32_VEC ay[GGML_F32_ARR]; + for (int64_t j = 0; j < head_size; j += GGML_F32_STEP) { + for (int64_t kk = 0; kk < GGML_F32_ARR; kk++) { + ax[kk] = GGML_F32_VEC_LOAD(&a[t_h_offset + j + kk * GGML_F32_EPR]); + ay[kk] = GGML_F32_VEC_LOAD(&state_prev[h_2d_i_offset + j + kk * GGML_F32_EPR]); + sum[kk] = GGML_F32_VEC_FMA(sum[kk], ax[kk], ay[kk]); + } + } + GGML_F32_VEC_REDUCE(sa, sum); + } + + GGML_F32_VEC sa_vec = GGML_F32_VEC_SET1(sa); + + int64_t j = 0; + GGML_F32_VEC result_vec[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; + for (; j < head_size; j += GGML_F32_STEP) { + for (int64_t kk = 0; kk < GGML_F32_ARR; kk++) { + int64_t t_h_j_offset = t_h_offset + j + kk * GGML_F32_EPR; + int64_t h_2d_i_j_offset = h_2d_i_offset + j + kk * GGML_F32_EPR; + + GGML_F32_VEC r_vec = GGML_F32_VEC_LOAD(&r[t_h_j_offset]); + GGML_F32_VEC w_vec = GGML_F32_VEC_LOAD(&w[t_h_j_offset]); + GGML_F32_VEC k_vec = GGML_F32_VEC_LOAD(&k[t_h_j_offset]); + GGML_F32_VEC b_vec = GGML_F32_VEC_LOAD(&b[t_h_j_offset]); + + k_vec = GGML_F32_VEC_MUL(v_vec, k_vec); + + GGML_F32_VEC state_vec = GGML_F32_VEC_LOAD(&state_prev[h_2d_i_j_offset]); + // kv + s * decay + sa * b + state_vec = GGML_F32_VEC_FMA(k_vec, state_vec, w_vec); + state_vec = GGML_F32_VEC_FMA(state_vec, sa_vec, b_vec); + GGML_F32_VEC_STORE(&state_cur[h_2d_i_j_offset], state_vec); + + result_vec[kk] = GGML_F32_VEC_FMA(result_vec[kk], state_vec, r_vec); + } + } + GGML_F32_VEC_REDUCE(dst_data[t_h_i_offset], result_vec); + + // There shouldn't be left-overs though. + for (; j < head_size; j++) { + int64_t t_h_j_offset = t_h_offset + j; + int64_t h_2d_i_j_offset = h_2d_i_offset + j; + + float r_val = r[t_h_j_offset]; + float w_val = w[t_h_j_offset]; + float k_val = k[t_h_j_offset]; + float b_val = b[t_h_j_offset]; + float kv_val = v[t_h_i_offset] * k_val; + + float prev_state_val = state_prev[h_2d_i_j_offset]; + state_cur[h_2d_i_j_offset] = prev_state_val * w_val + kv_val + sa * b_val; + dst_data[t_h_i_offset] += state_cur[h_2d_i_j_offset] * r_val; + } + } + } + } +# endif +#else + for (int64_t t = 0; t < T; t++) { + int64_t t_offset = t * t_stride; + int64_t state_offset = head_size * C * (t / (T / n_seqs)); + float * state_cur = state + state_offset; + float * state_prev = t % (T / n_seqs) ? 
state_cur : (float *) dst->src[6]->data + state_offset; + + for (int64_t h = h_start; h < h_end; h++) { + int64_t h_offset = h * h_stride; + int64_t t_h_offset = t_offset + h_offset; + int64_t h_2d_offset = h * h_stride_2d; + + for (int64_t i = 0; i < head_size; i++) { + int64_t t_h_i_offset = t_h_offset + i; + int64_t h_2d_i_offset = h_2d_offset + i * h_stride; + + float v_val = v[t_h_i_offset]; + + float sa = 0, result = 0; + for (int64_t j = 0; j < head_size; j++) { + sa += a[t_h_offset + j] * state_prev[h_2d_i_offset + j]; + } + + for (int64_t j = 0; j < head_size; j++) { + int64_t t_h_j_offset = t_h_offset + j; + int64_t h_2d_i_j_offset = h_2d_i_offset + j; + + float r_val = r[t_h_j_offset]; + float w_val = w[t_h_j_offset]; + float k_val = k[t_h_j_offset]; + float b_val = b[t_h_j_offset]; + float kv_val = v_val * k_val; + float prev_state_val = state_prev[h_2d_i_j_offset]; + state_cur[h_2d_i_j_offset] = prev_state_val * w_val + kv_val + sa * b_val; + result += state_cur[h_2d_i_j_offset] * r_val; + } + dst_data[t_h_i_offset] = result; + } + } + } +#endif +} + +void ggml_compute_forward_rwkv_wkv7(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_rwkv_wkv7_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_map_custom1 + +void ggml_compute_forward_map_custom1(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * a = dst->src[0]; + + struct ggml_map_custom1_op_params p; + memcpy(&p, dst->op_params, sizeof(p)); + + p.fun(dst, a, params->ith, params->nth, p.userdata); +} + +// ggml_compute_forward_map_custom2 + +void ggml_compute_forward_map_custom2(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * a = dst->src[0]; + const ggml_tensor * b = dst->src[1]; + + struct ggml_map_custom2_op_params p; + memcpy(&p, dst->op_params, sizeof(p)); + + p.fun(dst, a, b, params->ith, params->nth, p.userdata); +} + +// ggml_compute_forward_map_custom3 + +void ggml_compute_forward_map_custom3(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * a = dst->src[0]; + const ggml_tensor * b = dst->src[1]; + const ggml_tensor * c = dst->src[2]; + + struct ggml_map_custom3_op_params p; + memcpy(&p, dst->op_params, sizeof(p)); + + p.fun(dst, a, b, c, params->ith, params->nth, p.userdata); +} + +// ggml_compute_forward_custom + +void ggml_compute_forward_custom(const struct ggml_compute_params * params, struct ggml_tensor * dst) { + struct ggml_custom_op_params p; + memcpy(&p, dst->op_params, sizeof(p)); + + p.fun(dst, params->ith, params->nth, p.userdata); +} + +// ggml_compute_forward_cross_entropy_loss + +static void ggml_compute_forward_cross_entropy_loss_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT(src0->nb[0] == ggml_type_size(src0->type)); + GGML_ASSERT(src1->nb[0] == ggml_type_size(src1->type)); + GGML_ASSERT(ggml_are_same_shape(src0, src1)); + GGML_ASSERT(ggml_is_scalar(dst)); + GGML_ASSERT(dst->type == GGML_TYPE_F32); + + // TODO: handle transposed/permuted matrices + const int64_t nc = src0->ne[0]; + const int64_t nr = ggml_nrows(src0); + + const int ith = params->ith; + const int nth = params->nth; + + float * sums = (float *) params->wdata; + 
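+    // Workspace layout (see the wsize assert below): the first nth floats hold one
+    // partial sum per thread, followed by an nc-element scratch row per thread used
+    // for the log-softmax. The value accumulated here is
+    //     loss = -(1/nr) * sum_rows sum_c s1[c] * log_softmax(s0)[c]
+    // with the -1/nr factor applied by thread 0 after the barrier.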
float * st = ((float *) params->wdata) + nth + ith * nc; + float sum_thread = 0.0f; + + GGML_ASSERT(params->wsize >= sizeof(float) * (nth + nth * nc)); + + // rows per thread + const int64_t dr = (nr + nth - 1) / nth; + + // row range for this thread + const int64_t ir0 = dr * ith; + const int64_t ir1 = MIN(ir0 + dr, nr); + + for (int64_t i1 = ir0; i1 < ir1; ++i1) { + const float * s0 = (const float *) ((const char *) src0->data + i1 * src0->nb[1]); + const float * s1 = (const float *) ((const char *) src1->data + i1 * src1->nb[1]); + +#ifndef NDEBUG + for (int64_t i = 0; i < nc; ++i) { + //printf("p[%d] = %f\n", i, p[i]); + assert(!isnan(s0[i])); + assert(!isnan(s1[i])); + } +#endif + + float max = -INFINITY; + ggml_vec_max_f32(nc, &max, s0); + const ggml_float sum_softmax = ggml_vec_log_soft_max_f32(nc, st, s0, max); + assert(sum_softmax >= 0.0); + + ggml_vec_add1_f32(nc, st, st, -sum_softmax); + ggml_vec_mul_f32(nc, st, st, s1); + + float sum_st = 0.0f; + ggml_vec_sum_f32(nc, &sum_st, st); + sum_thread += sum_st; + +#ifndef NDEBUG + for (int64_t i = 0; i < nc; ++i) { + assert(!isnan(st[i])); + assert(!isinf(st[i])); + } +#endif + } + sums[ith] = sum_thread; + ggml_barrier(params->threadpool); + + if (ith == 0) { + float * dp = (float *) dst->data; + ggml_vec_sum_f32(nth, dp, sums); + dp[0] *= -1.0f / (float) nr; + } +} + +void ggml_compute_forward_cross_entropy_loss(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_cross_entropy_loss_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_cross_entropy_loss_back + +static void ggml_compute_forward_cross_entropy_loss_back_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * grad = dst->src[0]; // gradient of forward pass output + const ggml_tensor * src0f = dst->src[1]; // src0 of forward pass + const ggml_tensor * src1f = dst->src[2]; // src1 of forward pass + + GGML_ASSERT(ggml_is_contiguous(dst)); + GGML_ASSERT(ggml_is_contiguous(src0f)); + GGML_ASSERT(ggml_is_contiguous(src1f)); + GGML_ASSERT(ggml_is_contiguous(grad)); + GGML_ASSERT(ggml_are_same_shape(src0f, src1f) && ggml_are_same_shape(src0f, dst)); + + const int64_t ith = params->ith; + const int64_t nth = params->nth; + + // TODO: handle transposed/permuted matrices + const int64_t nc = src0f->ne[0]; + const int64_t nr = ggml_nrows(src0f); + + // rows per thread + const int64_t dr = (nr + nth - 1) / nth; + + // row range for this thread + const int64_t ir0 = dr * ith; + const int64_t ir1 = MIN(ir0 + dr, nr); + + const float d_by_nr = ((const float *) grad->data)[0] / (float) nr; + + for (int64_t i1 = ir0; i1 < ir1; i1++) { + float * ds0 = (float *) ((char *) dst->data + i1 * dst->nb[1]); + const float * s0 = (const float *) ((const char *) src0f->data + i1 * src0f->nb[1]); + const float * s1 = (const float *) ((const char *) src1f->data + i1 * src1f->nb[1]); + +#ifndef NDEBUG + for (int64_t i = 0; i < nc; ++i) { + //printf("p[%d] = %f\n", i, p[i]); + assert(!isnan(s0[i])); + assert(!isnan(s1[i])); + } +#endif + + // soft_max + float max = -INFINITY; + ggml_vec_max_f32(nc, &max, s0); + const ggml_float sum = ggml_vec_soft_max_f32(nc, ds0, s0, max); + assert(sum > 0.0); + ggml_vec_scale_f32(nc, ds0, 1.0 / sum); + + // grad(src0f) = (softmax(src0f) - src1f) * grad(cross_entropy_loss(src0f, src1f)) / nr + ggml_vec_sub_f32(nc, ds0, ds0, s1); + ggml_vec_scale_f32(nc, ds0, 
d_by_nr); + +#ifndef NDEBUG + for (int64_t i = 0; i < nc; ++i) { + assert(!isnan(ds0[i])); + assert(!isinf(ds0[i])); + } +#endif + } +} + +void ggml_compute_forward_cross_entropy_loss_back(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_cross_entropy_loss_back_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +static void ggml_compute_forward_opt_step_adamw_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src0_grad = dst->src[1]; + const ggml_tensor * src0_grad_m = dst->src[2]; + const ggml_tensor * src0_grad_v = dst->src[3]; + const ggml_tensor * adamw_params = dst->src[4]; + + GGML_ASSERT(ggml_are_same_shape(src0, src0_grad)); + GGML_ASSERT(ggml_are_same_shape(src0, src0_grad_m)); + GGML_ASSERT(ggml_are_same_shape(src0, src0_grad_v)); + GGML_ASSERT(ggml_nelements(adamw_params) == 7); + + const int ith = params->ith; + const int nth = params->nth; + + const int nr = ggml_nrows(src0); + + GGML_TENSOR_UNARY_OP_LOCALS + GGML_ASSERT(nb00 == sizeof(float)); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + const float * adamw_params_ptr = ggml_get_data_f32(adamw_params); + + const float alpha = adamw_params_ptr[0]; + const float beta1 = adamw_params_ptr[1]; + const float beta2 = adamw_params_ptr[2]; + const float eps = adamw_params_ptr[3]; + const float wd = adamw_params_ptr[4]; + const float beta1h = adamw_params_ptr[5]; + const float beta2h = adamw_params_ptr[6]; + const float keep = 1.f - alpha * wd; + for (int ir = ir0; ir < ir1; ++ir) { + const int64_t i03 = ir / (ne02 * ne01); + const int64_t i02 = (ir - i03 * ne02 * ne01) / ne01; + const int64_t i01 = (ir - i03 * ne02 * ne01 - i02 * ne01); + + const size_t offset = i03 * nb03 + i02 * nb02 + i01 * nb01; + + float * w = (float *) ((char *) src0->data + offset); // weight + const float * g = (const float *) ((const char *) src0_grad->data + offset); // grad + float * m = (float *) ((char *) src0_grad_m->data + offset); + float * v = (float *) ((char *) src0_grad_v->data + offset); + + for (int i00 = 0; i00 < ne00; ++i00) { + m[i00] = m[i00] * beta1 + g[i00] * (1.0f - beta1); + v[i00] = v[i00] * beta2 + g[i00] * g[i00] * (1.0f - beta2); + + const float mh = m[i00] * beta1h; + const float vh = sqrtf(v[i00] * beta2h) + eps; + + // The weight decay is applied independently of the Adam momenta m and v. + // This is NOT equivalent to l2 regularization that adds w[i00]*w[i00] to the loss. 
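+            // In terms of the local variables, the update below is
+            //     w <- keep * w - alpha * (m * beta1h) / (sqrt(v * beta2h) + eps)
+            // with keep = 1 - alpha * wd, i.e. decoupled weight decay as in AdamW.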
+ // See: https://arxiv.org/pdf/1711.05101v3.pdf + w[i00] = w[i00] * keep - alpha * mh / vh; + } + } +} + +void ggml_compute_forward_opt_step_adamw(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_opt_step_adamw_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +static void ggml_compute_forward_opt_step_sgd_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src0_grad = dst->src[1]; + const ggml_tensor * sgd_params = dst->src[2]; + + GGML_ASSERT(ggml_are_same_shape(src0, src0_grad)); + GGML_ASSERT(ggml_nelements(sgd_params) == 2); + + const int ith = params->ith; + const int nth = params->nth; + + const int nr = ggml_nrows(src0); + + GGML_TENSOR_UNARY_OP_LOCALS + GGML_ASSERT(nb00 == sizeof(float)); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + // using adamw param subset we care about - alpha, wd - could have a separate struct + const float * sgd_params_ptr = ggml_get_data_f32(sgd_params); + const float alpha = sgd_params_ptr[0]; + const float keep = 1.f - alpha * sgd_params_ptr[1]; + + for (int ir = ir0; ir < ir1; ++ir) { + const int64_t i03 = ir / (ne02 * ne01); + const int64_t i02 = (ir - i03 * ne02 * ne01) / ne01; + const int64_t i01 = (ir - i03 * ne02 * ne01 - i02 * ne01); + + const size_t offset = i03 * nb03 + i02 * nb02 + i01 * nb01; + + float * w = (float *) ((char *) src0->data + offset); // weight + const float * g = (const float *) ((const char *) src0_grad->data + offset); // grad + + for (int i00 = 0; i00 < ne00; ++i00) { + w[i00] = w[i00] * keep - alpha * g[i00]; + } + } +} + +void ggml_compute_forward_opt_step_sgd(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_opt_step_sgd_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error - sgd is F32 only"); + } + } +} + +#include "binary-ops.h" +#include "ggml-cpu.h" +#include "ggml-impl.h" +#include "ggml.h" +#include "ops.h" +#include "unary-ops.h" +#include "vec.h" + +#include +#include +#include +#include + +// ggml_compute_forward_dup + +static void ggml_compute_forward_dup_same_cont(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); + GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0)); + GGML_ASSERT(src0->type == dst->type); + + const size_t nb0 = ggml_type_size(src0->type); + + const int ith = params->ith; // thread index + const int nth = params->nth; // number of threads + + // parallelize by blocks + const int nk = ggml_nelements(src0) / ggml_blck_size(src0->type); + const int dr = (nk + nth - 1) / nth; + const int k0 = dr * ith; + const int k1 = MIN(k0 + dr, nk); + + if (k0 < k1) { + memcpy(((char *) dst->data + k0 * nb0), ((char *) src0->data + k0 * nb0), (k1 - k0) * nb0); + } +} + +template +static void ggml_compute_forward_dup_flt(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); + GGML_ASSERT(!ggml_is_quantized(src0->type) && !ggml_is_quantized(dst->type)); + + GGML_TENSOR_UNARY_OP_LOCALS + + const 
int ith = params->ith; // thread index + const int nth = params->nth; // number of threads + + // parallelize by rows + const int nr = ne01; + // number of rows per thread + const int dr = (nr + nth - 1) / nth; + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + // case: type & row size equal + if (src0->type == dst->type && ne00 == ne0 && nb00 == ggml_type_size(src0->type) && + nb0 == ggml_type_size(dst->type)) { + // copy by rows + const size_t rs = ne00 * nb00; + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = ir0; i01 < ir1; i01++) { + memcpy(((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3), + ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03), rs); + } + } + } + return; + } + + // case: dst tensor is contiguous + if (ggml_is_contiguous(dst)) { + if (nb00 == sizeof(src_t)) { + if constexpr (std::is_same_v) { + // same type + size_t id = 0; + const size_t rs = ne00 * nb00; + char * dst_ptr = (char *) dst->data; + + for (int i03 = 0; i03 < ne03; i03++) { + for (int i02 = 0; i02 < ne02; i02++) { + id += rs * ir0; + for (int i01 = ir0; i01 < ir1; i01++) { + const char * src0_ptr = (char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03; + memcpy(dst_ptr + id, src0_ptr, rs); + id += rs; + } + id += rs * (ne01 - ir1); + } + } + } else { + // casting between non-quantized types + size_t id = 0; + dst_t * dst_ptr = (dst_t *) dst->data; + + for (int i03 = 0; i03 < ne03; i03++) { + for (int i02 = 0; i02 < ne02; i02++) { + id += ne00 * ir0; + for (int i01 = ir0; i01 < ir1; i01++) { + const src_t * src0_ptr = + (src_t *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); + for (int i00 = 0; i00 < ne00; i00++) { + float tmp = type_conversion_table::to_f32(src0_ptr[i00]); + dst_ptr[id] = type_conversion_table::from_f32(tmp); + id++; + } + } + id += ne00 * (ne01 - ir1); + } + } + } + } else { + //printf("%s: this is not optimal - fix me\n", __func__); + + size_t id = 0; + dst_t * dst_ptr = (dst_t *) dst->data; + + for (int i03 = 0; i03 < ne03; i03++) { + for (int i02 = 0; i02 < ne02; i02++) { + id += ne00 * ir0; + for (int i01 = ir0; i01 < ir1; i01++) { + for (int i00 = 0; i00 < ne00; i00++) { + const src_t * src0_ptr = + (src_t *) ((char *) src0->data + i00 * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03); + + float tmp = type_conversion_table::to_f32(*src0_ptr); + dst_ptr[id] = type_conversion_table::from_f32(tmp); + id++; + } + } + id += ne00 * (ne01 - ir1); + } + } + } + return; + } + + // dst counters + int64_t i10 = 0; + int64_t i11 = 0; + int64_t i12 = 0; + int64_t i13 = 0; + + if constexpr (std::is_same_v) { + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + i10 += ne00 * ir0; + while (i10 >= ne0) { + i10 -= ne0; + if (++i11 == ne1) { + i11 = 0; + if (++i12 == ne2) { + i12 = 0; + if (++i13 == ne3) { + i13 = 0; + } + } + } + } + for (int64_t i01 = ir0; i01 < ir1; i01++) { + for (int64_t i00 = 0; i00 < ne00; i00++) { + const char * src0_ptr = + ((char *) src0->data + i00 * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03); + char * dst_ptr = ((char *) dst->data + i10 * nb0 + i11 * nb1 + i12 * nb2 + i13 * nb3); + + memcpy(dst_ptr, src0_ptr, sizeof(dst_t)); + + if (++i10 == ne00) { + i10 = 0; + if (++i11 == ne01) { + i11 = 0; + if (++i12 == ne02) { + i12 = 0; + if (++i13 == ne03) { + i13 = 0; + } + } + } + } + } + } + i10 += ne00 * (ne01 - ir1); + while (i10 >= ne0) { + i10 -= ne0; + if (++i11 == ne1) { + i11 = 0; + if 
(++i12 == ne2) { + i12 = 0; + if (++i13 == ne3) { + i13 = 0; + } + } + } + } + } + } + + } else { + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + i10 += ne00 * ir0; + while (i10 >= ne0) { + i10 -= ne0; + if (++i11 == ne1) { + i11 = 0; + if (++i12 == ne2) { + i12 = 0; + if (++i13 == ne3) { + i13 = 0; + } + } + } + } + for (int64_t i01 = ir0; i01 < ir1; i01++) { + for (int64_t i00 = 0; i00 < ne00; i00++) { + const char * src0_ptr = + ((char *) src0->data + i00 * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03); + char * dst_ptr = ((char *) dst->data + i10 * nb0 + i11 * nb1 + i12 * nb2 + i13 * nb3); + + float tmp = type_conversion_table::to_f32(*(const src_t *) src0_ptr); + *(dst_t *) dst_ptr = type_conversion_table::from_f32(tmp); + + if (++i10 == ne0) { + i10 = 0; + if (++i11 == ne1) { + i11 = 0; + if (++i12 == ne2) { + i12 = 0; + if (++i13 == ne3) { + i13 = 0; + } + } + } + } + } + } + i10 += ne00 * (ne01 - ir1); + while (i10 >= ne0) { + i10 -= ne0; + if (++i11 == ne1) { + i11 = 0; + if (++i12 == ne2) { + i12 = 0; + if (++i13 == ne3) { + i13 = 0; + } + } + } + } + } + } + } +} + +template +static void ggml_compute_forward_dup_to_q(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); + GGML_ASSERT(!ggml_is_quantized(src0->type)); + + GGML_TENSOR_UNARY_OP_LOCALS + + const int ith = params->ith; // thread index + const int nth = params->nth; // number of threads + + // parallelize by rows + const int nr = ne01; + // number of rows per thread + const int dr = (nr + nth - 1) / nth; + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + if (ggml_is_contiguous(dst) && nb00 == sizeof(src_t) && ggml_get_type_traits_cpu(dst->type)->from_float) { + // casting non-quantized types --> intermediate f32 --> quantized + const ggml_from_float_t quantize_row_q = ggml_get_type_traits_cpu(dst->type)->from_float; + float * src0_f32 = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith; + + size_t id = 0; + size_t rs = nb0 * (ne00 / ggml_blck_size(dst->type)); + char * dst_ptr = (char *) dst->data; + + for (int i03 = 0; i03 < ne03; i03++) { + for (int i02 = 0; i02 < ne02; i02++) { + id += rs * ir0; + for (int i01 = ir0; i01 < ir1; i01++) { + const src_t * src0_ptr = (src_t *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); + + for (int i00 = 0; i00 < ne00; i00++) { + src0_f32[i00] = type_conversion_table::to_f32(src0_ptr[i00]); + } + + quantize_row_q(src0_f32, dst_ptr + id, ne00); + id += rs; + } + id += rs * (ne01 - ir1); + } + } + } else { + // printf("%s %s\n", ggml_type_name(src0->type), ggml_type_name(dst->type)); + GGML_ABORT("not implemented"); + } +} + +// A simplified version of ggml_compute_forward_dup that doesn't do float upcasting, and just plain old memcpy. 
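+// It is selected by ggml_compute_forward_dup whenever src0->type == dst->type, so no type
+// conversion is needed and rows (or, in the strided fallback, individual blocks) are copied
+// with memcpy.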
+static void ggml_compute_forward_dup_bytes(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); + GGML_ASSERT(src0->type == dst->type); + + GGML_TENSOR_UNARY_OP_LOCALS; + + if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst)) { + ggml_compute_forward_dup_same_cont(params, dst); + return; + } + + const size_t type_size = ggml_type_size(src0->type); + + const int ith = params->ith; // thread index + const int nth = params->nth; // number of threads + + // parallelize by rows + const int nr = ne01; + // number of rows per thread + const int dr = (nr + nth - 1) / nth; + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + if (src0->type == dst->type && ggml_are_same_shape(src0, dst) && nb00 == type_size && nb0 == type_size) { + // copy by rows + const size_t rs = ggml_row_size(src0->type, ne00); + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = ir0; i01 < ir1; i01++) { + memcpy(((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3), + ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03), rs); + } + } + } + return; + } + + if (ggml_is_contiguous(dst)) { + size_t id = 0; + char * dst_ptr = (char *) dst->data; + const size_t rs = ne00 * type_size; + + if (nb00 == type_size) { + // src0 is contigous on first dimension, copy by rows + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + id += rs * ir0; + for (int64_t i01 = ir0; i01 < ir1; i01++) { + const char * src0_ptr = (char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03; + memcpy(dst_ptr + id, src0_ptr, rs); + id += rs; + } + id += rs * (ne01 - ir1); + } + } + } else { + //printf("%s: this is not optimal - fix me\n", __func__); + + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + id += rs * ir0; + for (int64_t i01 = ir0; i01 < ir1; i01++) { + for (int64_t i00 = 0; i00 < ne00; i00++) { + const char * src0_ptr = + (char *) src0->data + i00 * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03; + memcpy(dst_ptr + id, src0_ptr, type_size); + + id += type_size; + } + } + id += rs * (ne01 - ir1); + } + } + } + + return; + } + + // dst counters + int64_t k10 = 0; + int64_t i11 = 0; + int64_t i12 = 0; + int64_t i13 = 0; + + // number of blocks in a row + const int64_t nk00 = ne00 / ggml_blck_size(src0->type); + const int64_t nk0 = ne0 / ggml_blck_size(dst->type); + + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + k10 += nk00 * ir0; + while (k10 >= nk0) { + k10 -= nk0; + if (++i11 == ne1) { + i11 = 0; + if (++i12 == ne2) { + i12 = 0; + if (++i13 == ne3) { + i13 = 0; + } + } + } + } + for (int64_t i01 = ir0; i01 < ir1; i01++) { + for (int64_t k00 = 0; k00 < nk00; k00++) { + const char * src0_ptr = ((char *) src0->data + k00 * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03); + char * dst_ptr = ((char *) dst->data + k10 * nb0 + i11 * nb1 + i12 * nb2 + i13 * nb3); + + memcpy(dst_ptr, src0_ptr, type_size); + + if (++k10 == nk0) { + k10 = 0; + if (++i11 == ne1) { + i11 = 0; + if (++i12 == ne2) { + i12 = 0; + if (++i13 == ne3) { + i13 = 0; + } + } + } + } + } + } + k10 += nk00 * (ne01 - ir1); + while (k10 >= nk0) { + k10 -= nk0; + if (++i11 == ne1) { + i11 = 0; + if (++i12 == ne2) { + i12 = 0; + if (++i13 == ne3) { + i13 = 0; + } + } + } + } + } + } +} + +static void ggml_compute_forward_dup_from_q(const 
ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_TENSOR_BINARY_OP_LOCALS + + const ggml_type type = src0->type; + const ggml_to_float_t dequantize_row_q = ggml_get_type_traits(type)->to_float; + + size_t qk = ggml_blck_size(type); + const int64_t nr = ggml_nelements(src1) / qk; + + // destination must be contiguous in the first dimension + GGML_ASSERT(nb10 == ggml_type_size(dst->type)); + // must either have first dimension large enough to hold a row, or fully contiguous + GGML_ASSERT((ne10 % qk) == 0 || ggml_is_contiguous(dst)); + + const int ith = params->ith; + const int nth = params->nth; + + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int64_t ir = ir0; ir < ir1; ++ir) { + uint32_t i = ir * qk; + + const int64_t i03 = i / (ne00 * ne01 * ne02); + const int64_t i02 = (i - i03 * ne00 * ne01 * ne02) / (ne00 * ne01); + const int64_t i01 = (i - i03 * ne00 * ne01 * ne02 - i02 * ne01 * ne00) / ne00; + const int64_t i00 = i - i03 * ne00 * ne01 * ne02 - i02 * ne01 * ne00 - i01 * ne00; + const int64_t x_offset = (i00 / qk) * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03; + + const int64_t i13 = i / (ne10 * ne11 * ne12); + const int64_t i12 = (i - i13 * ne10 * ne11 * ne12) / (ne10 * ne11); + const int64_t i11 = (i - i13 * ne10 * ne11 * ne12 - i12 * ne10 * ne11) / ne10; + const int64_t i10 = i - i13 * ne10 * ne11 * ne12 - i12 * ne10 * ne11 - i11 * ne10; + const int64_t dst_offset = i10 * nb10 + i11 * nb11 + i12 * nb12 + i13 * nb13; + + dequantize_row_q((const void *) ((char *) src0->data + x_offset), (float *) ((char *) dst->data + dst_offset), + qk); + } +} + +void ggml_compute_forward_dup(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + if (src0->type == dst->type) { + ggml_compute_forward_dup_bytes(params, dst); + return; + } + + switch (src0->type) { + case GGML_TYPE_F16: + { + /**/ if (dst->type == GGML_TYPE_F16) { + ggml_compute_forward_dup_flt(params, dst); + } else if (dst->type == GGML_TYPE_BF16) { + ggml_compute_forward_dup_flt(params, dst); + } else if (dst->type == GGML_TYPE_F32) { + ggml_compute_forward_dup_flt(params, dst); + } else { + ggml_compute_forward_dup_to_q(params, dst); + } + } + break; + case GGML_TYPE_BF16: + { + /**/ if (dst->type == GGML_TYPE_F16) { + ggml_compute_forward_dup_flt(params, dst); + } else if (dst->type == GGML_TYPE_BF16) { + ggml_compute_forward_dup_flt(params, dst); + } else if (dst->type == GGML_TYPE_F32) { + ggml_compute_forward_dup_flt(params, dst); + } else { + ggml_compute_forward_dup_to_q(params, dst); + } + } + break; + case GGML_TYPE_F32: + { + /**/ if (dst->type == GGML_TYPE_F16) { + ggml_compute_forward_dup_flt(params, dst); + } else if (dst->type == GGML_TYPE_BF16) { + ggml_compute_forward_dup_flt(params, dst); + } else if (dst->type == GGML_TYPE_F32) { + ggml_compute_forward_dup_flt(params, dst); + } else if (dst->type == GGML_TYPE_I32) { + ggml_compute_forward_dup_flt(params, dst); + } else { + ggml_compute_forward_dup_to_q(params, dst); + } + } + break; + case GGML_TYPE_I32: + { + if (dst->type == GGML_TYPE_F32) { + ggml_compute_forward_dup_flt(params, dst); + } else { + GGML_ABORT("not implemented"); + } + } + break; + default: + { + if (ggml_is_quantized(src0->type) && dst->type == GGML_TYPE_F32) { + ggml_compute_forward_dup_from_q(params, dst); + break; + } + GGML_ABORT("fatal error"); + } + } +} 
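+
+// Dispatch summary for ggml_compute_forward_dup:
+//   - src0->type == dst->type                  -> ggml_compute_forward_dup_bytes (raw memcpy)
+//   - F32/F16/BF16 (and F32 <-> I32) casts     -> ggml_compute_forward_dup_flt
+//   - float types -> quantized                 -> ggml_compute_forward_dup_to_q
+//   - quantized   -> F32                       -> ggml_compute_forward_dup_from_q
+// Rough usage sketch (tensor names are illustrative): on the CPU backend a graph node built
+// with ggml_dup(ctx, x), ggml_cast(ctx, x, GGML_TYPE_F16) or ggml_cpy(ctx, x, y) typically
+// ends up here, and the node's src/dst types pick one of the helpers above.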
+ +// ggml_compute_forward_add + +static void ggml_compute_forward_add_q_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst)); + + const int nr = ggml_nrows(src0); + + GGML_TENSOR_BINARY_OP_LOCALS + + const int ith = params->ith; + const int nth = params->nth; + + const ggml_type type = src0->type; + const ggml_type dtype = dst->type; + const ggml_to_float_t dequantize_row_q = ggml_get_type_traits(type)->to_float; + const ggml_from_float_t quantize_row_q = ggml_get_type_traits_cpu(dtype)->from_float; + + // we don't support permuted src0 or src1 + GGML_ASSERT(nb00 == ggml_type_size(type)); + GGML_ASSERT(nb10 == sizeof(float)); + + // dst cannot be transposed or permuted + GGML_ASSERT(nb0 <= nb1); + GGML_ASSERT(nb1 <= nb2); + GGML_ASSERT(nb2 <= nb3); + + GGML_ASSERT(ggml_is_quantized(src0->type)); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + float * wdata = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith; + + for (int ir = ir0; ir < ir1; ++ir) { + // src0 indices + const int i03 = ir / (ne02 * ne01); + const int i02 = (ir - i03 * ne02 * ne01) / ne01; + const int i01 = (ir - i03 * ne02 * ne01 - i02 * ne01); + + // src1 and dst are same shape as src0 => same indices + const int i13 = i03; + const int i12 = i02; + const int i11 = i01; + + const int i3 = i03; + const int i2 = i02; + const int i1 = i01; + + void * src0_row = (void *) ((char *) src0->data + (i01 * nb01 + i02 * nb02 + i03 * nb03)); + float * src1_row = (float *) ((char *) src1->data + (i11 * nb11 + i12 * nb12 + i13 * nb13)); + void * dst_row = (void *) ((char *) dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb3)); + + assert(ne00 % 32 == 0); + + // unquantize row from src0 to temp buffer + dequantize_row_q(src0_row, wdata, ne00); + // add src1 + ggml_vec_acc_f32(ne00, wdata, src1_row); + // quantize row to dst + if (quantize_row_q != NULL) { + quantize_row_q(wdata, dst_row, ne00); + } else { + memcpy(dst_row, wdata, ne0 * nb0); + } + } +} + +void ggml_compute_forward_add(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + case GGML_TYPE_F16: + case GGML_TYPE_BF16: + { + ggml_compute_forward_add_non_quantized(params, dst); + } + break; + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: + case GGML_TYPE_Q5_0: + case GGML_TYPE_Q5_1: + case GGML_TYPE_Q8_0: + case GGML_TYPE_MXFP4: + case GGML_TYPE_Q2_K: + case GGML_TYPE_Q3_K: + case GGML_TYPE_Q4_K: + case GGML_TYPE_Q5_K: + case GGML_TYPE_Q6_K: + case GGML_TYPE_TQ1_0: + case GGML_TYPE_TQ2_0: + case GGML_TYPE_IQ2_XXS: + case GGML_TYPE_IQ2_XS: + case GGML_TYPE_IQ3_XXS: + case GGML_TYPE_IQ1_S: + case GGML_TYPE_IQ1_M: + case GGML_TYPE_IQ4_NL: + case GGML_TYPE_IQ4_XS: + case GGML_TYPE_IQ3_S: + case GGML_TYPE_IQ2_S: + { + ggml_compute_forward_add_q_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_add_id + +static void ggml_compute_forward_add_id_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + const ggml_tensor * src2 = dst->src[2]; + + GGML_ASSERT(dst->type == GGML_TYPE_F32); + GGML_ASSERT(src0->type == 
GGML_TYPE_F32); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT(src2->type == GGML_TYPE_I32); + + GGML_ASSERT(src0->nb[0] == sizeof(float)); + GGML_ASSERT(src1->nb[0] == sizeof(float)); + + const int ith = params->ith; + const int nth = params->nth; + + const int nr = ggml_nrows(src0); + + GGML_TENSOR_TERNARY_OP_LOCALS + + GGML_ASSERT(nb0 == sizeof(float)); + GGML_ASSERT(nb10 == sizeof(float)); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int ir = ir0; ir < ir1; ++ir) { + // src0 indices + const int i3 = ir / (ne2 * ne1); + const int i2 = (ir - i3 * ne2 * ne1) / ne1; + const int i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); + + // src1 indices + const int i11 = *(int32_t *) ((char *) src2->data + i1 * nb20 + i2 * nb21); + + GGML_ASSERT(i11 >= 0 && i11 < ne11); + + ggml_vec_add_f32(ne0, (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1), + (float *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01), + (float *) ((char *) src1->data + i11 * nb11)); + } +} + +void ggml_compute_forward_add_id(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_add_id_f32(params, dst); + } + break; + default: + { + GGML_ABORT("unsupported type for ggml_compute_forward_add_id: %s", ggml_type_name(src0->type)); + } + } +} + +// ggml_compute_forward_add1 + +static void ggml_compute_forward_add1_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + GGML_ASSERT(ggml_is_scalar(src1)); + + const int ith = params->ith; + const int nth = params->nth; + + const int nr = ggml_nrows(src0); + + GGML_TENSOR_UNARY_OP_LOCALS + + GGML_ASSERT(nb0 == sizeof(float)); + GGML_ASSERT(nb00 == sizeof(float)); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int ir = ir0; ir < ir1; ++ir) { + // src0 and dst are same shape => same indices + const int i3 = ir / (ne2 * ne1); + const int i2 = (ir - i3 * ne2 * ne1) / ne1; + const int i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); + +#ifdef GGML_USE_ACCELERATE + GGML_UNUSED(ggml_vec_add1_f32); + + vDSP_vadd((float *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01), 1, + (float *) ((char *) src1->data), 0, (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1), + 1, ne0); +#else + ggml_vec_add1_f32(ne0, (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1), + (float *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01), *(float *) src1->data); +#endif + } +} + +static void ggml_compute_forward_add1_f16_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + GGML_ASSERT(ggml_is_scalar(src1)); + + // scalar to add + const float v = *(float *) src1->data; + + const int ith = params->ith; + const int nth = params->nth; + + const int nr = ggml_nrows(src0); + + GGML_TENSOR_UNARY_OP_LOCALS + + GGML_ASSERT(src0->type == GGML_TYPE_F16); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_F16); + + GGML_ASSERT(nb0 == sizeof(ggml_fp16_t)); + GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); + + // 
rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int ir = ir0; ir < ir1; ++ir) { + // src0 and dst are same shape => same indices + const int i3 = ir / (ne2 * ne1); + const int i2 = (ir - i3 * ne2 * ne1) / ne1; + const int i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); + + ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1); + ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01); + for (int i = 0; i < ne0; i++) { + dst_ptr[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(src0_ptr[i]) + v); + } + } +} + +static void ggml_compute_forward_add1_f16_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + GGML_ASSERT(ggml_is_scalar(src1)); + + // scalar to add + const float v = GGML_CPU_FP16_TO_FP32(*(ggml_fp16_t *) src1->data); + + const int ith = params->ith; + const int nth = params->nth; + + const int nr = ggml_nrows(src0); + + GGML_TENSOR_UNARY_OP_LOCALS + + GGML_ASSERT(src0->type == GGML_TYPE_F16); + GGML_ASSERT(src1->type == GGML_TYPE_F16); + GGML_ASSERT(dst->type == GGML_TYPE_F16); + + GGML_ASSERT(nb0 == sizeof(ggml_fp16_t)); + GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int ir = ir0; ir < ir1; ++ir) { + // src0 and dst are same shape => same indices + const int i3 = ir / (ne2 * ne1); + const int i2 = (ir - i3 * ne2 * ne1) / ne1; + const int i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); + + ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1); + ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01); + for (int i = 0; i < ne0; i++) { + dst_ptr[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(src0_ptr[i]) + v); + } + } +} + +static void ggml_compute_forward_add1_q_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + GGML_ASSERT(ggml_is_scalar(src1)); + + // scalar to add + const float v = *(float *) src1->data; + + const int ith = params->ith; + const int nth = params->nth; + + const int nr = ggml_nrows(src0); + + GGML_TENSOR_UNARY_OP_LOCALS + + const ggml_type type = src0->type; + const ggml_to_float_t dequantize_row_q = ggml_get_type_traits(type)->to_float; + const ggml_from_float_t quantize_row_q = ggml_get_type_traits_cpu(type)->from_float; + + // we don't support permuted src0 + GGML_ASSERT(nb00 == ggml_type_size(type)); + + // dst cannot be transposed or permuted + GGML_ASSERT(nb0 <= nb1); + GGML_ASSERT(nb1 <= nb2); + GGML_ASSERT(nb2 <= nb3); + + GGML_ASSERT(ggml_is_quantized(src0->type)); + GGML_ASSERT(dst->type == src0->type); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + float * wdata = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32) * ith; + + for (int ir = ir0; ir < ir1; ++ir) { + // src0 and dst are same shape => same indices + const int i3 = ir / (ne2 * ne1); + const int i2 = (ir - i3 * ne2 * ne1) / ne1; 
+ const int i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); + + void * src0_row = (void *) ((char *) src0->data + (i1 * nb01 + i2 * nb02 + i3 * nb03)); + void * dst_row = (void *) ((char *) dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb0)); + + assert(ne0 % 32 == 0); + + // unquantize row from src0 to temp buffer + dequantize_row_q(src0_row, wdata, ne0); + // add src1 + ggml_vec_acc1_f32(ne0, wdata, v); + // quantize row to dst + quantize_row_q(wdata, dst_row, ne0); + } +} + +static void ggml_compute_forward_add1_bf16_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + GGML_ASSERT(ggml_is_scalar(src1)); + + // scalar to add + const float v = *(float *) src1->data; + + const int ith = params->ith; + const int nth = params->nth; + + const int nr = ggml_nrows(src0); + + GGML_TENSOR_UNARY_OP_LOCALS + + GGML_ASSERT(src0->type == GGML_TYPE_BF16); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_BF16); + + GGML_ASSERT(nb0 == sizeof(ggml_bf16_t)); + GGML_ASSERT(nb00 == sizeof(ggml_bf16_t)); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int ir = ir0; ir < ir1; ++ir) { + // src0 and dst are same shape => same indices + const int i3 = ir / (ne2 * ne1); + const int i2 = (ir - i3 * ne2 * ne1) / ne1; + const int i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); + + ggml_bf16_t * dst_ptr = (ggml_bf16_t *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1); + ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01); + for (int i = 0; i < ne0; i++) { + dst_ptr[i] = GGML_FP32_TO_BF16(GGML_BF16_TO_FP32(src0_ptr[i]) + v); + } + } +} + +static void ggml_compute_forward_add1_bf16_bf16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + GGML_ASSERT(ggml_is_scalar(src1)); + + // scalar to add + const float v = GGML_BF16_TO_FP32(*(ggml_bf16_t *) src1->data); + + const int ith = params->ith; + const int nth = params->nth; + + const int nr = ggml_nrows(src0); + + GGML_TENSOR_UNARY_OP_LOCALS + + GGML_ASSERT(src0->type == GGML_TYPE_BF16); + GGML_ASSERT(src1->type == GGML_TYPE_BF16); + GGML_ASSERT(dst->type == GGML_TYPE_BF16); + + GGML_ASSERT(nb0 == sizeof(ggml_bf16_t)); + GGML_ASSERT(nb00 == sizeof(ggml_bf16_t)); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int ir = ir0; ir < ir1; ++ir) { + // src0 and dst are same shape => same indices + const int i3 = ir / (ne2 * ne1); + const int i2 = (ir - i3 * ne2 * ne1) / ne1; + const int i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); + + ggml_bf16_t * dst_ptr = (ggml_bf16_t *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1); + ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01); + for (int i = 0; i < ne0; i++) { + dst_ptr[i] = GGML_FP32_TO_BF16(GGML_BF16_TO_FP32(src0_ptr[i]) + v); + } + } +} + +void ggml_compute_forward_add1(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_add1_f32(params, dst); + 
} + break; + case GGML_TYPE_F16: + { + if (src1->type == GGML_TYPE_F16) { + ggml_compute_forward_add1_f16_f16(params, dst); + } else if (src1->type == GGML_TYPE_F32) { + ggml_compute_forward_add1_f16_f32(params, dst); + } else { + GGML_ABORT("fatal error"); + } + } + break; + case GGML_TYPE_BF16: + { + if (src1->type == GGML_TYPE_BF16) { + ggml_compute_forward_add1_bf16_bf16(params, dst); + } else if (src1->type == GGML_TYPE_F32) { + ggml_compute_forward_add1_bf16_f32(params, dst); + } else { + GGML_ABORT("fatal error"); + } + } + break; + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: + case GGML_TYPE_Q5_0: + case GGML_TYPE_Q5_1: + case GGML_TYPE_Q8_0: + case GGML_TYPE_Q8_1: + case GGML_TYPE_MXFP4: + case GGML_TYPE_Q2_K: + case GGML_TYPE_Q3_K: + case GGML_TYPE_Q4_K: + case GGML_TYPE_Q5_K: + case GGML_TYPE_Q6_K: + case GGML_TYPE_TQ1_0: + case GGML_TYPE_TQ2_0: + case GGML_TYPE_IQ2_XXS: + case GGML_TYPE_IQ2_XS: + case GGML_TYPE_IQ3_XXS: + case GGML_TYPE_IQ1_S: + case GGML_TYPE_IQ1_M: + case GGML_TYPE_IQ4_NL: + case GGML_TYPE_IQ4_XS: + case GGML_TYPE_IQ3_S: + case GGML_TYPE_IQ2_S: + { + ggml_compute_forward_add1_q_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_acc + +static void ggml_compute_forward_acc_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0)); + + // view src0 and dst with these strides and data offset inbytes during acc + // nb0 is implicitly element_size because src0 and dst are contiguous + size_t nb1 = ((int32_t *) dst->op_params)[0]; + size_t nb2 = ((int32_t *) dst->op_params)[1]; + size_t nb3 = ((int32_t *) dst->op_params)[2]; + size_t offset = ((int32_t *) dst->op_params)[3]; + bool inplace = (bool) ((int32_t *) dst->op_params)[4]; + + if (!inplace) { + if (params->ith == 0) { + // memcpy needs to be synchronized across threads to avoid race conditions. + // => do it in INIT phase + memcpy(((char *) dst->data), ((char *) src0->data), ggml_nbytes(dst)); + } + ggml_barrier(params->threadpool); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nr = ggml_nrows(src1); + const int nc = src1->ne[0]; + + GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) + GGML_TENSOR_LOCALS(size_t, nb1, src1, nb) + + // src0 and dst as viewed during acc + const size_t nb0 = ggml_element_size(src0); + + const size_t nb00 = nb0; + const size_t nb01 = nb1; + const size_t nb02 = nb2; + const size_t nb03 = nb3; + + GGML_ASSERT(offset + (ne10 == 0 ? 0 : ne10 - 1) * nb0 + (ne11 == 0 ? 0 : ne11 - 1) * nb1 + + (ne12 == 0 ? 0 : ne12 - 1) * nb2 + (ne13 == 0 ? 0 : ne13 - 1) * nb3 < + ggml_nbytes(dst)); + GGML_ASSERT(offset + (ne10 == 0 ? 0 : ne10 - 1) * nb00 + (ne11 == 0 ? 0 : ne11 - 1) * nb01 + + (ne12 == 0 ? 0 : ne12 - 1) * nb02 + (ne13 == 0 ? 
0 : ne13 - 1) * nb03 < + ggml_nbytes(src0)); + + GGML_ASSERT(nb10 == sizeof(float)); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int ir = ir0; ir < ir1; ++ir) { + // src0 and dst are viewed with shape of src1 and offset + // => same indices + const int i3 = ir / (ne12 * ne11); + const int i2 = (ir - i3 * ne12 * ne11) / ne11; + const int i1 = (ir - i3 * ne12 * ne11 - i2 * ne11); + +#ifdef GGML_USE_ACCELERATE + vDSP_vadd((float *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01 + offset), 1, + (float *) ((char *) src1->data + i3 * nb13 + i2 * nb12 + i1 * nb11), 1, + (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1 + offset), 1, nc); +#else + ggml_vec_add_f32(nc, (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1 + offset), + (float *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01 + offset), + (float *) ((char *) src1->data + i3 * nb13 + i2 * nb12 + i1 * nb11)); +#endif + } +} + +void ggml_compute_forward_acc(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_acc_f32(params, dst); + } + break; + case GGML_TYPE_F16: + case GGML_TYPE_BF16: + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: + case GGML_TYPE_Q5_0: + case GGML_TYPE_Q5_1: + case GGML_TYPE_Q8_0: + case GGML_TYPE_Q8_1: + case GGML_TYPE_MXFP4: + case GGML_TYPE_Q2_K: + case GGML_TYPE_Q3_K: + case GGML_TYPE_Q4_K: + case GGML_TYPE_Q5_K: + case GGML_TYPE_Q6_K: + case GGML_TYPE_TQ1_0: + case GGML_TYPE_TQ2_0: + case GGML_TYPE_IQ2_XXS: + case GGML_TYPE_IQ2_XS: + case GGML_TYPE_IQ3_XXS: + case GGML_TYPE_IQ1_S: + case GGML_TYPE_IQ1_M: + case GGML_TYPE_IQ4_NL: + case GGML_TYPE_IQ4_XS: + case GGML_TYPE_IQ3_S: + case GGML_TYPE_IQ2_S: + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_sum + +static void ggml_compute_forward_sum_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + if (params->ith != 0) { + return; + } + + assert(ggml_is_scalar(dst)); + assert(src0->nb[0] == sizeof(float)); + + GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) + GGML_TENSOR_LOCALS(size_t, nb0, src0, nb) + + ggml_float sum = 0; + ggml_float row_sum = 0; + + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = 0; i01 < ne01; i01++) { + ggml_vec_sum_f32_ggf(ne00, &row_sum, + (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03)); + sum += row_sum; + } + } + } + ((float *) dst->data)[0] = sum; +} + +static void ggml_compute_forward_sum_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + if (params->ith != 0) { + return; + } + + assert(ggml_is_scalar(dst)); + + assert(src0->nb[0] == sizeof(ggml_fp16_t)); + + GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) + GGML_TENSOR_LOCALS(size_t, nb0, src0, nb) + + float sum = 0; + float row_sum = 0; + + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = 0; i01 < ne01; i01++) { + ggml_vec_sum_f16_ggf(ne00, &row_sum, + (ggml_fp16_t *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03)); + sum += row_sum; + } + } + } + ((ggml_fp16_t *) dst->data)[0] = GGML_CPU_FP32_TO_FP16(sum); +} + +static void ggml_compute_forward_sum_bf16(const ggml_compute_params * params, ggml_tensor * dst) { + 
const ggml_tensor * src0 = dst->src[0]; + + if (params->ith != 0) { + return; + } + + assert(ggml_is_scalar(dst)); + + assert(src0->nb[0] == sizeof(ggml_bf16_t)); + + GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) + GGML_TENSOR_LOCALS(size_t, nb0, src0, nb) + + float sum = 0; + float row_sum = 0; + + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = 0; i01 < ne01; i01++) { + ggml_vec_sum_bf16_ggf(ne00, &row_sum, + (ggml_bf16_t *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03)); + sum += row_sum; + } + } + } + ((ggml_bf16_t *) dst->data)[0] = GGML_FP32_TO_BF16(sum); +} + +void ggml_compute_forward_sum(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_sum_f32(params, dst); + } + break; + case GGML_TYPE_F16: + { + ggml_compute_forward_sum_f16(params, dst); + } + break; + case GGML_TYPE_BF16: + { + ggml_compute_forward_sum_bf16(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_cumsum + +static void ggml_compute_forward_cumsum_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + GGML_ASSERT(src0->nb[0] == sizeof(float)); + GGML_ASSERT(dst->nb[0] == sizeof(float)); + + GGML_TENSOR_UNARY_OP_LOCALS + + GGML_ASSERT(ne0 == ne00); + GGML_ASSERT(ne1 == ne01); + GGML_ASSERT(ne2 == ne02); + GGML_ASSERT(ne3 == ne03); + + const auto [ir0, ir1] = get_thread_range(params, src0); + + for (int64_t ir = ir0; ir < ir1; ++ir) { + const int64_t i03 = ir / (ne02 * ne01); + const int64_t i02 = (ir - i03 * ne02 * ne01) / ne01; + const int64_t i01 = (ir - i03 * ne02 * ne01 - i02 * ne01); + + float * src_row = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); + float * dst_row = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); + + ggml_vec_cumsum_f32(ne00, dst_row, src_row); + } +} + +void ggml_compute_forward_cumsum(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_cumsum_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_sum_rows + +static void ggml_compute_forward_sum_rows_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + if (params->ith != 0) { + return; + } + + GGML_ASSERT(src0->nb[0] == sizeof(float)); + GGML_ASSERT(dst->nb[0] == sizeof(float)); + + GGML_TENSOR_UNARY_OP_LOCALS + + GGML_ASSERT(ne0 == 1); + GGML_ASSERT(ne1 == ne01); + GGML_ASSERT(ne2 == ne02); + GGML_ASSERT(ne3 == ne03); + + for (int64_t i3 = 0; i3 < ne03; i3++) { + for (int64_t i2 = 0; i2 < ne02; i2++) { + for (int64_t i1 = 0; i1 < ne01; i1++) { + float * src_row = (float *) ((char *) src0->data + i1 * nb01 + i2 * nb02 + i3 * nb03); + float * dst_row = (float *) ((char *) dst->data + i1 * nb1 + i2 * nb2 + i3 * nb3); + float row_sum = 0; + ggml_vec_sum_f32(ne00, &row_sum, src_row); + dst_row[0] = row_sum; + } + } + } +} + +void ggml_compute_forward_sum_rows(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_sum_rows_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_mean + +static void 
ggml_compute_forward_mean_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + if (params->ith != 0) { + return; + } + + assert(src0->nb[0] == sizeof(float)); + + GGML_TENSOR_UNARY_OP_LOCALS + + assert(ne0 == 1); + assert(ne1 == ne01); + assert(ne2 == ne02); + assert(ne3 == ne03); + + GGML_UNUSED(ne0); + GGML_UNUSED(ne1); + GGML_UNUSED(ne2); + GGML_UNUSED(ne3); + + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = 0; i01 < ne01; i01++) { + ggml_vec_sum_f32(ne00, (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3), + (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03)); + + *(float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3) /= (float) ne00; + } + } + } +} + +void ggml_compute_forward_mean(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_mean_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_argmax + +static void ggml_compute_forward_argmax_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + if (params->ith != 0) { + return; + } + + assert(src0->nb[0] == sizeof(float)); + assert(dst->nb[0] == sizeof(float)); + + const int64_t ne00 = src0->ne[0]; + const int64_t ne01 = src0->ne[1]; + + const size_t nb01 = src0->nb[1]; + const size_t nb0 = dst->nb[0]; + + for (int64_t i1 = 0; i1 < ne01; i1++) { + float * src = (float *) ((char *) src0->data + i1 * nb01); + int32_t * dst_ = (int32_t *) ((char *) dst->data + i1 * nb0); + int v = 0; + ggml_vec_argmax_f32(ne00, &v, src); + dst_[0] = v; + } +} + +void ggml_compute_forward_argmax(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_argmax_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_count_equal + +static void ggml_compute_forward_count_equal_i32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_TENSOR_BINARY_OP_LOCALS; + + GGML_ASSERT(src0->type == GGML_TYPE_I32); + GGML_ASSERT(src1->type == GGML_TYPE_I32); + GGML_ASSERT(ggml_are_same_shape(src0, src1)); + GGML_ASSERT(ggml_is_scalar(dst)); + GGML_ASSERT(dst->type == GGML_TYPE_I64); + + const int64_t nr = ggml_nrows(src0); + + const int ith = params->ith; + const int nth = params->nth; + + int64_t * sums = (int64_t *) params->wdata; + int64_t sum_thread = 0; + + // rows per thread + const int64_t dr = (nr + nth - 1) / nth; + + // row range for this thread + const int64_t ir0 = dr * ith; + const int64_t ir1 = MIN(ir0 + dr, nr); + + for (int64_t ir = ir0; ir < ir1; ++ir) { + const int64_t i03 = ir / (ne02 * ne01); + const int64_t i02 = (ir - i03 * ne03) / ne01; + const int64_t i01 = ir - i03 * ne03 - i02 * ne02; + + const char * data0 = (const char *) src0->data + i03 * nb03 + i02 * nb02 + i01 * nb01; + const char * data1 = (const char *) src1->data + i03 * nb13 + i02 * nb12 + i01 * nb11; + + for (int64_t i00 = 0; i00 < ne00; ++i00) { + const int32_t val0 = *((const int32_t *) (data0 + i00 * nb00)); + const int32_t val1 = *((const int32_t *) (data1 + i00 * nb10)); + + sum_thread += val0 == val1; + } + } + if (ith != 0) 
{ + sums[ith] = sum_thread; + } + ggml_barrier(params->threadpool); + + if (ith != 0) { + return; + } + + for (int ith_other = 1; ith_other < nth; ++ith_other) { + sum_thread += sums[ith_other]; + } + *((int64_t *) dst->data) = sum_thread; +} + +void ggml_compute_forward_count_equal(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_I32: + { + ggml_compute_forward_count_equal_i32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_repeat + +static void ggml_compute_forward_repeat_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + if (params->ith != 0) { + return; + } + + GGML_ASSERT(ggml_can_repeat(src0, dst)); + + GGML_TENSOR_UNARY_OP_LOCALS + + // guaranteed to be an integer due to the check in ggml_can_repeat + const int nr0 = (int) (ne0 / ne00); + const int nr1 = (int) (ne1 / ne01); + const int nr2 = (int) (ne2 / ne02); + const int nr3 = (int) (ne3 / ne03); + + // TODO: support for transposed / permuted tensors + GGML_ASSERT(nb0 == sizeof(float)); + GGML_ASSERT(nb00 == sizeof(float)); + + // TODO: maybe this is not optimal? + for (int i3 = 0; i3 < nr3; i3++) { + for (int k3 = 0; k3 < ne03; k3++) { + for (int i2 = 0; i2 < nr2; i2++) { + for (int k2 = 0; k2 < ne02; k2++) { + for (int i1 = 0; i1 < nr1; i1++) { + for (int k1 = 0; k1 < ne01; k1++) { + for (int i0 = 0; i0 < nr0; i0++) { + ggml_vec_cpy_f32( + ne00, + (float *) ((char *) dst->data + (i3 * ne03 + k3) * nb3 + (i2 * ne02 + k2) * nb2 + + (i1 * ne01 + k1) * nb1 + (i0 * ne00) * nb0), + (float *) ((char *) src0->data + (k3) *nb03 + (k2) *nb02 + (k1) *nb01)); + } + } + } + } + } + } + } +} + +static void ggml_compute_forward_repeat_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + if (params->ith != 0) { + return; + } + + GGML_ASSERT(ggml_can_repeat(src0, dst)); + + GGML_TENSOR_UNARY_OP_LOCALS + + // guaranteed to be an integer due to the check in ggml_can_repeat + const int nr0 = (int) (ne0 / ne00); + const int nr1 = (int) (ne1 / ne01); + const int nr2 = (int) (ne2 / ne02); + const int nr3 = (int) (ne3 / ne03); + + // TODO: support for transposed / permuted tensors + GGML_ASSERT(nb0 == sizeof(ggml_fp16_t)); + GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); + + // TODO: maybe this is not optimal? 
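+    // The i3/i2/i1/i0 loops walk the nr* repetitions along each dimension, while the
+    // k3/k2/k1 loops walk the rows of src0 itself; each innermost iteration copies one
+    // ne00-element row of src0 to its repeated position in dst (no k0 loop is needed
+    // because dimension 0 is repeated a whole row at a time).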
+ for (int i3 = 0; i3 < nr3; i3++) { + for (int k3 = 0; k3 < ne03; k3++) { + for (int i2 = 0; i2 < nr2; i2++) { + for (int k2 = 0; k2 < ne02; k2++) { + for (int i1 = 0; i1 < nr1; i1++) { + for (int k1 = 0; k1 < ne01; k1++) { + for (int i0 = 0; i0 < nr0; i0++) { + ggml_fp16_t * y = (ggml_fp16_t *) ((char *) dst->data + (i3 * ne03 + k3) * nb3 + + (i2 * ne02 + k2) * nb2 + (i1 * ne01 + k1) * nb1 + + (i0 * ne00) * nb0); + ggml_fp16_t * x = + (ggml_fp16_t *) ((char *) src0->data + (k3) *nb03 + (k2) *nb02 + (k1) *nb01); + // ggml_vec_cpy_f16(ne00, y, x) + for (int i = 0; i < ne00; ++i) { + y[i] = x[i]; + } + } + } + } + } + } + } + } +} + +void ggml_compute_forward_repeat(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F16: + case GGML_TYPE_BF16: + case GGML_TYPE_I16: + { + ggml_compute_forward_repeat_f16(params, dst); + } + break; + case GGML_TYPE_F32: + case GGML_TYPE_I32: + { + ggml_compute_forward_repeat_f32(params, dst); + } + break; + // TODO: templateify the implemenation and support for I64 + // ref https://github.com/ggml-org/llama.cpp/pull/14274#discussion_r2169492225 + //case GGML_TYPE_I64: + // { + // ggml_compute_forward_repeat_i64(params, dst); + // } break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_repeat_back + +static void ggml_compute_forward_repeat_back_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + if (params->ith != 0) { + return; + } + + GGML_ASSERT(ggml_can_repeat(dst, src0)); + + GGML_TENSOR_UNARY_OP_LOCALS + + // guaranteed to be an integer due to the check in ggml_can_repeat + const int nr0 = (int) (ne00 / ne0); + const int nr1 = (int) (ne01 / ne1); + const int nr2 = (int) (ne02 / ne2); + const int nr3 = (int) (ne03 / ne3); + + // TODO: support for transposed / permuted tensors + GGML_ASSERT(nb0 == sizeof(float)); + GGML_ASSERT(nb00 == sizeof(float)); + + if (ggml_is_contiguous(dst)) { + ggml_vec_set_f32(ne0 * ne1 * ne2 * ne3, (float *) dst->data, 0); + } else { + for (int k3 = 0; k3 < ne3; k3++) { + for (int k2 = 0; k2 < ne2; k2++) { + for (int k1 = 0; k1 < ne1; k1++) { + ggml_vec_set_f32(ne0, (float *) ((char *) dst->data + k1 * nb1 + k2 * nb2 + k3 * nb3), 0); + } + } + } + } + + // TODO: maybe this is not optimal? 
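+    // This is the reverse of ggml_compute_forward_repeat: every repeated tile of src0 is
+    // accumulated into the smaller dst (hence dst is zeroed above before the summation).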
+ for (int i3 = 0; i3 < nr3; i3++) { + for (int k3 = 0; k3 < ne3; k3++) { + for (int i2 = 0; i2 < nr2; i2++) { + for (int k2 = 0; k2 < ne2; k2++) { + for (int i1 = 0; i1 < nr1; i1++) { + for (int k1 = 0; k1 < ne1; k1++) { + for (int i0 = 0; i0 < nr0; i0++) { + ggml_vec_acc_f32( + ne0, (float *) ((char *) dst->data + (k3) *nb3 + (k2) *nb2 + (k1) *nb1), + (float *) ((char *) src0->data + (i3 * ne3 + k3) * nb03 + (i2 * ne2 + k2) * nb02 + + (i1 * ne1 + k1) * nb01 + (i0 * ne0) * nb00)); + } + } + } + } + } + } + } +} + +void ggml_compute_forward_repeat_back(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_repeat_back_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_concat + +static void ggml_compute_forward_concat_any(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + const size_t len = ggml_type_size(src0->type); + + const int ith = params->ith; + const int nth = params->nth; + + GGML_TENSOR_BINARY_OP_LOCALS + + const int32_t dim = ggml_get_op_params_i32(dst, 0); + + GGML_ASSERT(dim >= 0 && dim < 4); + + int64_t o[4] = { 0, 0, 0, 0 }; + o[dim] = src0->ne[dim]; + + const char * x; + + // TODO: smarter multi-theading + for (int i3 = 0; i3 < ne3; i3++) { + for (int i2 = ith; i2 < ne2; i2 += nth) { + for (int i1 = 0; i1 < ne1; i1++) { + for (int i0 = 0; i0 < ne0; i0++) { + if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 < ne03) { + x = (const char *) src0->data + (i0) *nb00 + (i1) *nb01 + (i2) *nb02 + (i3) *nb03; + } else { + x = (const char *) src1->data + (i0 - o[0]) * nb10 + (i1 - o[1]) * nb11 + (i2 - o[2]) * nb12 + + (i3 - o[3]) * nb13; + } + + char * y = (char *) dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3; + + memcpy(y, x, len); + } + } + } + } +} + +static void ggml_compute_forward_concat_i8(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_type_size(src0->type) == sizeof(int8_t)); + + const int ith = params->ith; + const int nth = params->nth; + + GGML_TENSOR_BINARY_OP_LOCALS + + const int32_t dim = ggml_get_op_params_i32(dst, 0); + + GGML_ASSERT(dim >= 0 && dim < 4); + + int64_t o[4] = { 0, 0, 0, 0 }; + o[dim] = src0->ne[dim]; + + const int8_t * x; + + // TODO: smarter multi-theading + for (int i3 = 0; i3 < ne3; i3++) { + for (int i2 = ith; i2 < ne2; i2 += nth) { + for (int i1 = 0; i1 < ne1; i1++) { + for (int i0 = 0; i0 < ne0; i0++) { + if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 < ne03) { + x = (const int8_t *) ((const char *) src0->data + (i0) *nb00 + (i1) *nb01 + (i2) *nb02 + + (i3) *nb03); + } else { + x = (const int8_t *) ((const char *) src1->data + (i0 - o[0]) * nb10 + (i1 - o[1]) * nb11 + + (i2 - o[2]) * nb12 + (i3 - o[3]) * nb13); + } + + int8_t * y = (int8_t *) ((char *) dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3); + + *y = *x; + } + } + } + } +} + +static void ggml_compute_forward_concat_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_type_size(src0->type) == sizeof(ggml_fp16_t)); + + const int ith = params->ith; + const int nth = params->nth; + + GGML_TENSOR_BINARY_OP_LOCALS + + const int32_t dim = ggml_get_op_params_i32(dst, 0); + + GGML_ASSERT(dim >= 
0 && dim < 4); + + int64_t o[4] = { 0, 0, 0, 0 }; + o[dim] = src0->ne[dim]; + + const ggml_fp16_t * x; + + // TODO: smarter multi-theading + for (int i3 = 0; i3 < ne3; i3++) { + for (int i2 = ith; i2 < ne2; i2 += nth) { + for (int i1 = 0; i1 < ne1; i1++) { + for (int i0 = 0; i0 < ne0; i0++) { + if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 < ne03) { + x = (const ggml_fp16_t *) ((const char *) src0->data + (i0) *nb00 + (i1) *nb01 + (i2) *nb02 + + (i3) *nb03); + } else { + x = (const ggml_fp16_t *) ((const char *) src1->data + (i0 - o[0]) * nb10 + (i1 - o[1]) * nb11 + + (i2 - o[2]) * nb12 + (i3 - o[3]) * nb13); + } + + ggml_fp16_t * y = (ggml_fp16_t *) ((char *) dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3); + + *y = *x; + } + } + } + } +} + +static void ggml_compute_forward_concat_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_type_size(src0->type) == sizeof(float)); + + const int ith = params->ith; + const int nth = params->nth; + + GGML_TENSOR_BINARY_OP_LOCALS + + const int32_t dim = ggml_get_op_params_i32(dst, 0); + + GGML_ASSERT(dim >= 0 && dim < 4); + + int64_t o[4] = { 0, 0, 0, 0 }; + o[dim] = src0->ne[dim]; + + const float * x; + + // TODO: smarter multi-theading + for (int i3 = 0; i3 < ne3; i3++) { + for (int i2 = ith; i2 < ne2; i2 += nth) { + for (int i1 = 0; i1 < ne1; i1++) { + for (int i0 = 0; i0 < ne0; i0++) { + if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 < ne03) { + x = (const float *) ((const char *) src0->data + (i0) *nb00 + (i1) *nb01 + (i2) *nb02 + + (i3) *nb03); + } else { + x = (const float *) ((const char *) src1->data + (i0 - o[0]) * nb10 + (i1 - o[1]) * nb11 + + (i2 - o[2]) * nb12 + (i3 - o[3]) * nb13); + } + + float * y = (float *) ((char *) dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3); + + *y = *x; + } + } + } + } +} + +void ggml_compute_forward_concat(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F16: + case GGML_TYPE_BF16: + case GGML_TYPE_I16: + { + ggml_compute_forward_concat_f16(params, dst); + } + break; + case GGML_TYPE_I8: + { + ggml_compute_forward_concat_i8(params, dst); + } + break; + case GGML_TYPE_F32: + case GGML_TYPE_I32: + { + ggml_compute_forward_concat_f32(params, dst); + } + break; + default: + { + ggml_compute_forward_concat_any(params, dst); + } + } +} + +// ggml_compute_forward_gelu + +static void ggml_compute_forward_gelu_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + assert(ggml_is_contiguous_1(src0)); + assert(ggml_is_contiguous_1(dst)); + assert(ggml_are_same_shape(src0, dst)); + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src0->ne[0]; + const int nr = ggml_nrows(src0); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_vec_gelu_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), + (float *) ((char *) src0->data + i1 * (src0->nb[1]))); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + GGML_UNUSED(x); + assert(!isnan(x)); + assert(!isinf(x)); + } +#endif + } +} + +static void ggml_compute_forward_gelu_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const 
ggml_tensor * src0 = dst->src[0]; + + assert(ggml_is_contiguous_1(src0)); + assert(ggml_is_contiguous_1(dst)); + assert(ggml_are_same_shape(src0, dst)); + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src0->ne[0]; + const int nr = ggml_nrows(src0); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_vec_gelu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), + (ggml_fp16_t *) ((char *) src0->data + i1 * (src0->nb[1]))); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + const float v = GGML_CPU_FP16_TO_FP32(x); + GGML_UNUSED(v); + assert(!isnan(v)); + assert(!isinf(v)); + } +#endif + } +} + +static void ggml_compute_forward_gelu(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_gelu_f32(params, dst); + } + break; + case GGML_TYPE_F16: + { + ggml_compute_forward_gelu_f16(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_fill + +static void ggml_compute_forward_fill_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const float c = ggml_get_op_params_f32(dst, 0); + + GGML_TENSOR_LOCALS(int64_t, ne, dst, ne); + GGML_TENSOR_LOCALS(size_t, nb, dst, nb); + + const auto [ir0, ir1] = get_thread_range(params, dst); + + for (int64_t ir = ir0; ir < ir1; ++ir) { + const int64_t i03 = ir / (ne2 * ne1); + const int64_t i02 = (ir - i03 * ne2 * ne1) / ne1; + const int64_t i01 = (ir - i03 * ne2 * ne1 - i02 * ne1); + + float * dst_ptr = (float *) ((char *) dst->data + i03 * nb3 + i02 * nb2 + i01 * nb1); + + ggml_vec_set_f32(ne0, dst_ptr, c); + } +} + +void ggml_compute_forward_fill(const ggml_compute_params * params, ggml_tensor * dst) { + ggml_compute_forward_fill_f32(params, dst); +} + +// ggml_compute_tri + +static void ggml_compute_forward_tri_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + const ggml_tri_type ttype = (ggml_tri_type) ggml_get_op_params_i32(dst, 0); + + GGML_ASSERT(ggml_is_contiguous(src0)); + + GGML_TENSOR_UNARY_OP_LOCALS + + const auto [ir0, ir1] = get_thread_range(params, src0); + + bool (*bipred)(int, int); + + switch (ttype) { + case GGML_TRI_TYPE_LOWER: + bipred = [](int i, int r) { + return i < r; + }; + break; + case GGML_TRI_TYPE_LOWER_DIAG: + bipred = [](int i, int r) { + return i <= r; + }; + break; + case GGML_TRI_TYPE_UPPER: + bipred = [](int i, int r) { + return i > r; + }; + break; + case GGML_TRI_TYPE_UPPER_DIAG: + bipred = [](int i, int r) { + return i >= r; + }; + break; + default: + GGML_ABORT("invalid tri type"); + } + + for (int64_t ir = ir0; ir < ir1; ++ir) { + const int64_t i03 = ir / (ne02 * ne01); + const int64_t i02 = (ir - i03 * ne02 * ne01) / ne01; + const int64_t i01 = (ir - i03 * ne02 * ne01 - i02 * ne01); + + const float * src_ptr = (const float *) ((const char *) src0->data + i03 * nb03 + i02 * nb02 + i01 * nb01); + float * dst_ptr = (float *) ((char *) dst->data + i03 * nb3 + i02 * nb2 + i01 * nb1); + + for (int i0 = 0; i0 < ne0; ++i0) { + dst_ptr[i0] = bipred(i0, i01) ? 
src_ptr[i0] : 0.0f; + } + } +} + +void ggml_compute_forward_tri(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_tri_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_gelu_erf + +static void ggml_compute_forward_gelu_erf_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + assert(ggml_is_contiguous_1(src0)); + assert(ggml_is_contiguous_1(dst)); + assert(ggml_are_same_shape(src0, dst)); + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src0->ne[0]; + const int nr = ggml_nrows(src0); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_vec_gelu_erf_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), + (float *) ((char *) src0->data + i1 * (src0->nb[1]))); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + GGML_UNUSED(x); + assert(!isnan(x)); + assert(!isinf(x)); + } +#endif + } +} + +static void ggml_compute_forward_gelu_erf_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + assert(ggml_is_contiguous_1(src0)); + assert(ggml_is_contiguous_1(dst)); + assert(ggml_are_same_shape(src0, dst)); + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src0->ne[0]; + const int nr = ggml_nrows(src0); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_vec_gelu_erf_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), + (ggml_fp16_t *) ((char *) src0->data + i1 * (src0->nb[1]))); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + const float v = GGML_CPU_FP16_TO_FP32(x); + GGML_UNUSED(v); + assert(!isnan(v)); + assert(!isinf(v)); + } +#endif + } +} + +static void ggml_compute_forward_gelu_erf(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_gelu_erf_f32(params, dst); + } + break; + case GGML_TYPE_F16: + { + ggml_compute_forward_gelu_erf_f16(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_gelu_quick + +static void ggml_compute_forward_gelu_quick_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + assert(ggml_is_contiguous_1(src0)); + assert(ggml_is_contiguous_1(dst)); + assert(ggml_are_same_shape(src0, dst)); + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src0->ne[0]; + const int nr = ggml_nrows(src0); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_vec_gelu_quick_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), + (float *) ((char *) src0->data + i1 * (src0->nb[1]))); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const 
float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + GGML_UNUSED(x); + assert(!isnan(x)); + assert(!isinf(x)); + } +#endif + } +} + +static void ggml_compute_forward_gelu_quick_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + assert(ggml_is_contiguous_1(src0)); + assert(ggml_is_contiguous_1(dst)); + assert(ggml_are_same_shape(src0, dst)); + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src0->ne[0]; + const int nr = ggml_nrows(src0); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_vec_gelu_quick_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), + (ggml_fp16_t *) ((char *) src0->data + i1 * (src0->nb[1]))); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + const float v = GGML_CPU_FP16_TO_FP32(x); + GGML_UNUSED(v); + assert(!isnan(v)); + assert(!isinf(v)); + } +#endif + } +} + +static void ggml_compute_forward_gelu_quick(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_gelu_quick_f32(params, dst); + } + break; + case GGML_TYPE_F16: + { + ggml_compute_forward_gelu_quick_f16(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_silu + +static void ggml_compute_forward_silu_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + assert(ggml_is_contiguous_1(src0)); + assert(ggml_is_contiguous_1(dst)); + assert(ggml_are_same_shape(src0, dst)); + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src0->ne[0]; + const int nr = ggml_nrows(src0); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_vec_silu_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), + (float *) ((char *) src0->data + i1 * (src0->nb[1]))); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + GGML_UNUSED(x); + assert(!isnan(x)); + assert(!isinf(x)); + } +#endif + } +} + +static void ggml_compute_forward_silu_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + assert(ggml_is_contiguous_1(src0)); + assert(ggml_is_contiguous_1(dst)); + assert(ggml_are_same_shape(src0, dst)); + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src0->ne[0]; + const int nr = ggml_nrows(src0); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_vec_silu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), + (ggml_fp16_t *) ((char *) src0->data + i1 * (src0->nb[1]))); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + const float v = GGML_CPU_FP16_TO_FP32(x); + GGML_UNUSED(v); + assert(!isnan(v)); + assert(!isinf(v)); + } +#endif + } +} + +static void 
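// Scalar references for the vector helpers used above, assuming ggml's usual
// definitions (the exact constants live in the CPU vec helpers): GELU "quick"
// is the sigmoid approximation of GELU, and SiLU is x*sigmoid(x). The ref_*
// names are illustrative only.
#include <math.h>

static inline float ref_gelu_quick(float x) {
    return x / (1.0f + expf(-1.702f * x));   // ~ x * sigmoid(1.702 * x)
}

static inline float ref_silu(float x) {
    return x / (1.0f + expf(-x));            // x * sigmoid(x)
}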
ggml_compute_forward_silu(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_silu_f32(params, dst); + } + break; + case GGML_TYPE_F16: + { + ggml_compute_forward_silu_f16(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_leaky_relu + +static void ggml_compute_forward_leaky_relu_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + if (params->ith != 0) { + return; + } + + assert(ggml_is_contiguous_1(src0)); + assert(ggml_is_contiguous_1(dst)); + assert(ggml_are_same_shape(src0, dst)); + + const int n = ggml_nrows(src0); + const int nc = src0->ne[0]; + + float negative_slope; + memcpy(&negative_slope, dst->op_params, sizeof(float)); + + assert(dst->nb[0] == sizeof(float)); + assert(src0->nb[0] == sizeof(float)); + + for (int i = 0; i < n; i++) { + ggml_vec_leaky_relu_f32(nc, (float *) ((char *) dst->data + i * (dst->nb[1])), + (float *) ((char *) src0->data + i * (src0->nb[1])), negative_slope); + } +} + +static void ggml_compute_forward_leaky_relu_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + if (params->ith != 0) { + return; + } + + assert(ggml_is_contiguous_1(src0)); + assert(ggml_is_contiguous_1(dst)); + assert(ggml_are_same_shape(src0, dst)); + + const int n = ggml_nrows(src0); + const int nc = src0->ne[0]; + + float negative_slope; + memcpy(&negative_slope, dst->op_params, sizeof(float)); + + assert(dst->nb[0] == sizeof(ggml_fp16_t)); + assert(src0->nb[0] == sizeof(ggml_fp16_t)); + + for (int i = 0; i < n; i++) { + ggml_vec_leaky_relu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i * (dst->nb[1])), + (ggml_fp16_t *) ((char *) src0->data + i * (src0->nb[1])), negative_slope); + } +} + +void ggml_compute_forward_leaky_relu(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_leaky_relu_f32(params, dst); + } + break; + case GGML_TYPE_F16: + { + ggml_compute_forward_leaky_relu_f16(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_silu_back + +static void ggml_compute_forward_silu_back_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * grad = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + assert(ggml_is_contiguous_1(grad)); + assert(ggml_is_contiguous_1(src1)); + assert(ggml_is_contiguous_1(dst)); + assert(ggml_are_same_shape(src1, dst)); + assert(ggml_are_same_shape(src1, grad)); + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1->ne[0]; + const int nr = ggml_nrows(src1); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_vec_silu_backward_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), + (float *) ((char *) src1->data + i1 * (src1->nb[1])), + (float *) ((char *) grad->data + i1 * (grad->nb[1]))); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + GGML_UNUSED(x); + assert(!isnan(x)); + assert(!isinf(x)); + } +#endif + } +} + +static void ggml_compute_forward_silu_back_f16(const ggml_compute_params * 
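// Scalar sketches, assuming the standard definitions, of what the leaky-ReLU
// and SiLU-backward helpers used above compute per element; ref_* names are
// illustrative. For the SiLU gradient, with s = sigmoid(x):
// d/dx [x*s] = s*(1 + x*(1 - s)), chained with the incoming gradient dz.
#include <math.h>

static inline float ref_leaky_relu(float x, float negative_slope) {
    return x > 0.0f ? x : negative_slope * x;
}

static inline float ref_silu_backward(float x, float dz) {
    const float s = 1.0f / (1.0f + expf(-x));    // sigmoid(x)
    return dz * s * (1.0f + x * (1.0f - s));     // dz * silu'(x)
}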
params, ggml_tensor * dst) { + const ggml_tensor * grad = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + assert(ggml_is_contiguous_1(grad)); + assert(ggml_is_contiguous_1(src1)); + assert(ggml_is_contiguous_1(dst)); + assert(ggml_are_same_shape(src1, dst)); + assert(ggml_are_same_shape(src1, grad)); + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1->ne[0]; + const int nr = ggml_nrows(src1); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_vec_silu_backward_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), + (ggml_fp16_t *) ((char *) src1->data + i1 * (src1->nb[1])), + (ggml_fp16_t *) ((char *) grad->data + i1 * (grad->nb[1]))); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const float x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + const float v = GGML_CPU_FP16_TO_FP32(x); + GGML_UNUSED(v); + assert(!isnan(v)); + assert(!isinf(v)); + } +#endif + } +} + +void ggml_compute_forward_silu_back(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_silu_back_f32(params, dst); + } + break; + case GGML_TYPE_F16: + { + ggml_compute_forward_silu_back_f16(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_reglu + +static void ggml_compute_forward_reglu_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + float * src0_p = (float *) (src0_d + i1 * src0_o); + float * src1_p = (float *) (src1_d + i1 * src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + ggml_vec_reglu_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + GGML_UNUSED(x); + assert(!isnan(x)); + assert(!isinf(x)); + } +#endif + } +} + +static void ggml_compute_forward_reglu_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? 
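// Illustrative sketch of the GLU input layout handled here and by the other
// *glu kernels below: with a single fused input, each row holds both halves
// [a | b] of width nc = ne0/2 and op param 1 ("swapped") selects which half is
// gated; with two inputs, a and b come from separate tensors. REGLU gates b
// with relu(a), assuming the usual convention that the activation applies to
// the first operand. ref_reglu_row is a hypothetical helper.
static void ref_reglu_row(int nc, float * dst, const float * fused_row, int swapped) {
    const float * a = fused_row + (swapped ? nc : 0);  // activated half
    const float * b = fused_row + (swapped ? 0 : nc);  // linear half
    for (int k = 0; k < nc; ++k) {
        const float r = a[k] > 0.0f ? a[k] : 0.0f;     // relu(a)
        dst[k] = r * b[k];
    }
}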
src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_fp16_t * src0_p = (ggml_fp16_t *) (src0_d + i1 * src0_o); + ggml_fp16_t * src1_p = (ggml_fp16_t *) (src1_d + i1 * src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + ggml_vec_reglu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + const float v = GGML_FP16_TO_FP32(x); + GGML_UNUSED(v); + assert(!isnan(v)); + assert(!isinf(v)); + } +#endif + } +} + +static void ggml_compute_forward_reglu(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_reglu_f32(params, dst); + } + break; + case GGML_TYPE_F16: + { + ggml_compute_forward_reglu_f16(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_geglu + +static void ggml_compute_forward_geglu_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + float * src0_p = (float *) (src0_d + i1 * src0_o); + float * src1_p = (float *) (src1_d + i1 * src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + ggml_vec_geglu_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + GGML_UNUSED(x); + assert(!isnan(x)); + assert(!isinf(x)); + } +#endif + } +} + +static void ggml_compute_forward_geglu_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? 
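// Scalar references for the two GELU flavours these kernels rely on, assuming
// ggml's usual constants: the erf ("exact") form behind the *_erf variants and
// the tanh approximation behind the plain GELU/GEGLU path. ref_* names are
// illustrative only; the authoritative definitions are in the CPU vec helpers.
#include <math.h>

static inline float ref_gelu_erf(float x) {
    return 0.5f * x * (1.0f + erff(x * 0.70710678f));   // erf(x / sqrt(2))
}

static inline float ref_gelu_tanh(float x) {
    const float c = 0.79788456f;                        // sqrt(2 / pi)
    return 0.5f * x * (1.0f + tanhf(c * (x + 0.044715f * x * x * x)));
}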
src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_fp16_t * src0_p = (ggml_fp16_t *) (src0_d + i1 * src0_o); + ggml_fp16_t * src1_p = (ggml_fp16_t *) (src1_d + i1 * src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + ggml_vec_geglu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + const float v = GGML_FP16_TO_FP32(x); + GGML_UNUSED(v); + assert(!isnan(v)); + assert(!isinf(v)); + } +#endif + } +} + +static void ggml_compute_forward_geglu(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_geglu_f32(params, dst); + } + break; + case GGML_TYPE_F16: + { + ggml_compute_forward_geglu_f16(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_swiglu + +static void ggml_compute_forward_swiglu_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + float * src0_p = (float *) (src0_d + i1 * src0_o); + float * src1_p = (float *) (src1_d + i1 * src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 
0 : nc; + } + + ggml_vec_swiglu_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + GGML_UNUSED(x); + assert(!isnan(x)); + assert(!isinf(x)); + } +#endif + } +} + +static void ggml_compute_forward_swiglu_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_fp16_t * src0_p = (ggml_fp16_t *) (src0_d + i1 * src0_o); + ggml_fp16_t * src1_p = (ggml_fp16_t *) (src1_d + i1 * src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + ggml_vec_swiglu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + const float v = GGML_FP16_TO_FP32(x); + GGML_UNUSED(v); + assert(!isnan(v)); + assert(!isinf(v)); + } +#endif + } +} + +static void ggml_compute_forward_swiglu(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_swiglu_f32(params, dst); + } + break; + case GGML_TYPE_F16: + { + ggml_compute_forward_swiglu_f16(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_swiglu_oai + +static void ggml_compute_forward_swiglu_oai_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? 
src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + const float alpha = ggml_get_op_params_f32(dst, 2); + const float limit = ggml_get_op_params_f32(dst, 3); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + float * src0_p = (float *) (src0_d + i1 * src0_o); + float * src1_p = (float *) (src1_d + i1 * src1_o); + float * dst_p = (float *) ((char *) dst->data + i1 * (dst->nb[1])); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + for (int k = 0; k < nc; k++) { + const float x = std::min(src0_p[k], limit); + const float y = std::clamp(src1_p[k], -limit, limit); + const float out_glu = x / (1.f + expf(alpha * (-x))); + dst_p[k] = out_glu * (y + 1.f); + } + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const float x = dst_p[k]; + GGML_UNUSED(x); + assert(!isnan(x)); + assert(!isinf(x)); + } +#endif + } +} + +static void ggml_compute_forward_swiglu_oai(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_swiglu_oai_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_geglu_erf + +static void ggml_compute_forward_geglu_erf_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + float * src0_p = (float *) (src0_d + i1 * src0_o); + float * src1_p = (float *) (src1_d + i1 * src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + ggml_vec_geglu_erf_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + GGML_UNUSED(x); + assert(!isnan(x)); + assert(!isinf(x)); + } +#endif + } +} + +static void ggml_compute_forward_geglu_erf_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? 
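// Scalar restatement (illustrative) of the swiglu_oai inner loop above: the
// gated branch is clipped at +limit, the linear branch is clamped to
// [-limit, limit], the gate goes through a sigmoid with slope alpha, and the
// linear branch gets a +1 offset. ref_swiglu_oai is a hypothetical helper.
#include <math.h>

static inline float ref_swiglu_oai(float a, float b, float alpha, float limit) {
    const float x   = a < limit ? a : limit;                          // min(a, limit)
    const float y   = b < -limit ? -limit : (b > limit ? limit : b);  // clamp(b, -limit, limit)
    const float glu = x / (1.0f + expf(-alpha * x));                  // x * sigmoid(alpha * x)
    return glu * (y + 1.0f);
}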
src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_fp16_t * src0_p = (ggml_fp16_t *) (src0_d + i1 * src0_o); + ggml_fp16_t * src1_p = (ggml_fp16_t *) (src1_d + i1 * src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + ggml_vec_geglu_erf_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + const float v = GGML_FP16_TO_FP32(x); + GGML_UNUSED(v); + assert(!isnan(v)); + assert(!isinf(v)); + } +#endif + } +} + +static void ggml_compute_forward_geglu_erf(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_geglu_erf_f32(params, dst); + } + break; + case GGML_TYPE_F16: + { + ggml_compute_forward_geglu_erf_f16(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_geglu_quick + +static void ggml_compute_forward_geglu_quick_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + float * src0_p = (float *) (src0_d + i1 * src0_o); + float * src1_p = (float *) (src1_d + i1 * src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 
0 : nc; + } + + ggml_vec_geglu_quick_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + GGML_UNUSED(x); + assert(!isnan(x)); + assert(!isinf(x)); + } +#endif + } +} + +static void ggml_compute_forward_geglu_quick_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + char * src0_d = (char *) src0->data; + char * src1_d = (char *) (src1 ? src1->data : src0->data); + const size_t src0_o = src0->nb[1]; + const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; + + GGML_ASSERT(ggml_is_contiguous_1(src0)); + GGML_ASSERT(ggml_is_contiguous_1(dst)); + + if (src1) { + GGML_ASSERT(ggml_is_contiguous_1(src1)); + GGML_ASSERT(src0->type == src1->type); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; + const int nr = ggml_nrows(src0); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(ggml_nrows(dst) == nr); + + const int32_t swapped = ggml_get_op_params_i32(dst, 1); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_fp16_t * src0_p = (ggml_fp16_t *) (src0_d + i1 * src0_o); + ggml_fp16_t * src1_p = (ggml_fp16_t *) (src1_d + i1 * src1_o); + + if (!src1) { + src0_p += swapped ? nc : 0; + src1_p += swapped ? 0 : nc; + } + + ggml_vec_geglu_quick_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); + +#ifndef NDEBUG + for (int k = 0; k < nc; k++) { + const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; + const float v = GGML_FP16_TO_FP32(x); + GGML_UNUSED(v); + assert(!isnan(v)); + assert(!isinf(v)); + } +#endif + } +} + +static void ggml_compute_forward_geglu_quick(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_geglu_quick_f32(params, dst); + } + break; + case GGML_TYPE_F16: + { + ggml_compute_forward_geglu_quick_f16(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_norm + +static void ggml_compute_forward_norm_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + + GGML_ASSERT(src0->nb[0] == sizeof(float)); + + const int ith = params->ith; + const int nth = params->nth; + + GGML_TENSOR_UNARY_OP_LOCALS + + float eps; + memcpy(&eps, dst->op_params, sizeof(float)); + + GGML_ASSERT(eps >= 0.0f); + + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = ith; i01 < ne01; i01 += nth) { + const float * x = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); + + float sum = 0.0; + ggml_vec_sum_f32(ne00, &sum, x); + float mean = sum / ne00; + + float * y = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); + float variance = 0; + +#ifdef GGML_USE_ACCELERATE + mean = -mean; + vDSP_vsadd(x, 1, &mean, y, 1, ne00); + vDSP_measqv(y, 1, &variance, ne00); +#else + variance = ggml_vec_cvar_f32(ne00, y, x, mean); +#endif //GGML_USE_ACCELERATE + + const float scale = 1.0f / sqrtf(variance + eps); + ggml_vec_scale_f32(ne00, y, 
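// Minimal scalar sketch (not the patch's code) of the per-row layer norm
// computed above: subtract the row mean, then scale by 1/sqrt(variance + eps).
// This assumes the Accelerate and ggml_vec_cvar_f32 paths both leave the
// centered values in y and yield the variance of the row.
#include <math.h>

static void ref_norm_row(int n, float * y, const float * x, float eps) {
    float mean = 0.0f;
    for (int i = 0; i < n; ++i) mean += x[i];
    mean /= n;

    float var = 0.0f;
    for (int i = 0; i < n; ++i) {
        y[i] = x[i] - mean;          // centered value, written in place like the kernel
        var += y[i] * y[i];
    }
    var /= n;

    const float scale = 1.0f / sqrtf(var + eps);
    for (int i = 0; i < n; ++i) y[i] *= scale;
}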
scale); + } + } + } +} + +void ggml_compute_forward_norm(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_norm_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_group_rms_norm + +static void ggml_compute_forward_rms_norm_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + + GGML_ASSERT(src0->nb[0] == sizeof(float)); + + const int ith = params->ith; + const int nth = params->nth; + + GGML_TENSOR_UNARY_OP_LOCALS + + float eps; + memcpy(&eps, dst->op_params, sizeof(float)); + + GGML_ASSERT(eps >= 0.0f); + + // TODO: optimize + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = ith; i01 < ne01; i01 += nth) { + const float * x = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); + + ggml_float sum = 0.0; + for (int64_t i00 = 0; i00 < ne00; i00++) { + sum += (ggml_float) (x[i00] * x[i00]); + } + + const float mean = sum / ne00; + + float * y = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); + + memcpy(y, x, ne00 * sizeof(float)); + // for (int i00 = 0; i00 < ne00; i00++) { + // y[i00] = x[i00]; + // } + + const float scale = 1.0f / sqrtf(mean + eps); + + // if you hit this, likely you got an inf somewhere earlier + assert(scale > 0.0f); + + ggml_vec_scale_f32(ne00, y, scale); + } + } + } +} + +void ggml_compute_forward_rms_norm(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_rms_norm_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +static void ggml_compute_forward_rms_norm_back_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; // gradients from forward pass output + const ggml_tensor * src1 = dst->src[1]; // src1 from forward pass + + GGML_ASSERT(ggml_are_same_shape(src0, dst) && ggml_are_same_shape(src0, src1)); + + GGML_ASSERT(src0->nb[0] == sizeof(float)); + GGML_ASSERT(src1->nb[0] == sizeof(float)); + + const int ith = params->ith; + const int nth = params->nth; + + GGML_TENSOR_BINARY_OP_LOCALS + + float eps; + memcpy(&eps, dst->op_params, sizeof(float)); + + // TODO: optimize + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = ith; i01 < ne01; i01 += nth) { + // src1 is same shape as src0 => same indices + const int64_t i11 = i01; + const int64_t i12 = i02; + const int64_t i13 = i03; + + const float * dz = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); + const float * x = (float *) ((char *) src1->data + i11 * nb11 + i12 * nb12 + i13 * nb13); + + ggml_float sum_xx = 0.0; + ggml_float sum_xdz = 0.0; + + for (int64_t i00 = 0; i00 < ne00; i00++) { + sum_xx += (ggml_float) (x[i00] * x[i00]); + sum_xdz += (ggml_float) (x[i00] * dz[i00]); + } + + //const float mean = (float)(sum_xx)/ne00; + const float mean_eps = (float) (sum_xx) / ne00 + eps; + const float sum_eps = (float) (sum_xx) + eps * ne00; + //const float mean_xdz = (float)(sum_xdz)/ne00; + // we could cache rms from forward pass to improve performance. + // to do this implement ggml_rms and compose ggml_rms_norm using ggml_rms. 
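// Scalar sketch of the RMS-norm row above: no mean subtraction, just scale the
// row by 1/sqrt(mean(x^2) + eps). ref_rms_norm_row is illustrative only.
#include <math.h>

static void ref_rms_norm_row(int n, float * y, const float * x, float eps) {
    float ss = 0.0f;
    for (int i = 0; i < n; ++i) ss += x[i] * x[i];
    const float scale = 1.0f / sqrtf(ss / n + eps);
    for (int i = 0; i < n; ++i) y[i] = x[i] * scale;
}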
+ //const float rms = sqrtf(mean_eps); + const float rrms = 1.0f / sqrtf(mean_eps); + //const float scale = -rrms/(ne00 * mean_eps); // -1/(n*rms**3) + + { + // z = rms_norm(x) + // + // rms_norm(src1) = + // scale( + // src1, + // div( + // 1, + // sqrt( + // add( + // scale( + // sum( + // sqr( + // src1)), + // (1.0/N)), + // eps)))); + + // postorder: + // ## op args grad + // 00 param src1 grad[#00] + // 01 const 1 + // 02 sqr (#00) grad[#02] + // 03 sum (#02) grad[#03] + // 04 const 1/N + // 05 scale (#03, #04) grad[#05] + // 06 const eps + // 07 add (#05, #06) grad[#07] + // 08 sqrt (#07) grad[#08] + // 09 div (#01,#08) grad[#09] + // 10 scale (#00,#09) grad[#10] + // + // backward pass, given grad[#10] + // #10: scale + // grad[#00] += scale(grad[#10],#09) + // grad[#09] += sum(mul(grad[#10],#00)) + // #09: div + // grad[#08] += neg(mul(grad[#09], div(#09,#08))) + // #08: sqrt + // grad[#07] += mul(grad[#08], div(0.5, #08)) + // #07: add + // grad[#05] += grad[#07] + // #05: scale + // grad[#03] += scale(grad[#05],#04) + // #03: sum + // grad[#02] += repeat(grad[#03], #02) + // #02: + // grad[#00] += scale(mul(#00, grad[#02]), 2.0) + // + // substitute and simplify: + // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, grad[#02]), 2.0) + // grad[#02] = repeat(grad[#03], #02) + // grad[#02] = repeat(scale(grad[#05],#04), #02) + // grad[#02] = repeat(scale(grad[#07],#04), #02) + // grad[#02] = repeat(scale(mul(grad[#08], div(0.5, #08)),#04), #02) + // grad[#02] = repeat(scale(mul(neg(mul(grad[#09], div(#09,#08))), div(0.5, #08)),#04), #02) + // grad[#02] = repeat(scale(mul(neg(mul(sum(mul(grad[#10],#00)), div(#09,#08))), div(0.5, #08)),#04), #02) + // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(#09,#08) * div(0.5, #08) * (1/N)), #02) + // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(div(#01,#08),#08) * div(0.5, #08) * (1/N)), #02) + // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(1,#08*#08) * div(0.5, #08) * (1/N)), #02) + // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N)), #02) + // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, grad[#02]), 2.0) + // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, repeat(-(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N)), #02)), 2.0) + // grad[#00] = scale(grad(#10), #09) + scale(scale(#00, -(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N))), 2.0) + // grad[#00] = scale(grad(#10), #09) + scale(#00, -(sum(mul(grad[#10],#00)) * div(1,#07) * div(1,#08) * (1/N))) + // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,#07*#08) * (-1/N)) + // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,#07*#08) * (-1/N)) + // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,mean_eps*rms) * (-1/N)) + // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*mean_eps)) + // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*(sum_xx/N+eps))) + // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*sum_xx+rms*N*eps)) + // grad[#00] = scale(dz, rrms) + scale(x, sum(mul(dz,x)) * div(-1,rms*N*mean_eps)) + // grad[#00] = scale(dz, rrms) + scale(x, sum_xdz * div(-1,rms*N*mean_eps)) + // a = b*c + d*e + // a = b*c*f/f + d*e*f/f + // a = (b*c*f + d*e*f)*(1/f) + // a = (b*c*(1/c) + d*e*(1/c))*(1/(1/c)) + // a = (b + d*e/c)*c + // b = dz, c = rrms, d = x, e = sum_xdz * div(-1,rms*N*mean_eps) + // a = (dz + 
x*sum_xdz * div(-1,rms*N*mean_eps)/rrms)*rrms + // a = (dz + x*sum_xdz * div(-1,rms*N*mean_eps)*rms)*rrms + // a = (dz + x*sum_xdz * div(-rms,rms*N*mean_eps))*rrms + // a = (dz + x*sum_xdz * div(-1,N*mean_eps))*rrms + // a = (dz + x*div(-sum_xdz,N*mean_eps))*rrms + // a = (dz + x*div(-mean_xdz,mean_eps))*rrms + // grad[#00] = scale(dz + scale(x, div(-mean_xdz,mean_eps)),rrms) + // grad[#00] = scale(dz + scale(x, -mean_xdz/mean_eps),rrms) + // dx = scale(dz + scale(x, -mean_xdz/mean_eps),rrms) + } + // dx = scale(dz + scale(x, -mean_xdz/mean_eps),rrms) + // post-order: + // dx := x + // dx := scale(dx,-mean_xdz/mean_eps) + // dx := add(dx, dz) + // dx := scale(dx, rrms) + float * dx = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); + + // dx[i00] = (x*(-sum_xdz/sum_eps) + dz) / sqrtf(mean_eps) + ggml_vec_cpy_f32(ne00, dx, x); + // ggml_vec_scale_f32(ne00, dx, -mean_xdz/mean_eps); + ggml_vec_scale_f32(ne00, dx, (float) (-sum_xdz) / sum_eps); + ggml_vec_acc_f32(ne00, dx, dz); + ggml_vec_scale_f32(ne00, dx, rrms); + } + } + } +} + +void ggml_compute_forward_rms_norm_back(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_rms_norm_back_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_group_norm + +static void ggml_compute_forward_group_norm_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + + GGML_ASSERT(src0->nb[0] == sizeof(float)); + + const int ith = params->ith; + const int nth = params->nth; + + GGML_TENSOR_UNARY_OP_LOCALS + + // TODO: optimize + + float eps; + memcpy(&eps, dst->op_params + 1, sizeof(float)); + + int n_channels = src0->ne[2]; + int n_groups = dst->op_params[0]; + int n_channels_per_group = (n_channels + n_groups - 1) / n_groups; + for (int i = ith; i < n_groups; i += nth) { + int start = i * n_channels_per_group; + int end = start + n_channels_per_group; + if (end > n_channels) { + end = n_channels; + } + int step = end - start; + + for (int64_t i03 = 0; i03 < ne03; i03++) { + ggml_float sum = 0.0; + for (int64_t i02 = start; i02 < end; i02++) { + for (int64_t i01 = 0; i01 < ne01; i01++) { + const float * x = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); + + ggml_float sumr = 0.0; + for (int64_t i00 = 0; i00 < ne00; i00++) { + sumr += (ggml_float) x[i00]; + } + sum += sumr; + } + } + const float mean = sum / (ne00 * ne01 * step); + + ggml_float sum2 = 0.0; + for (int64_t i02 = start; i02 < end; i02++) { + for (int64_t i01 = 0; i01 < ne01; i01++) { + const float * x = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); + + float * y = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); + + ggml_float sumr = 0.0; + for (int64_t i00 = 0; i00 < ne00; i00++) { + float v = x[i00] - mean; + y[i00] = v; + sumr += (ggml_float) (v * v); + } + sum2 += sumr; + } + } + const float variance = sum2 / (ne00 * ne01 * step); + const float scale = 1.0f / sqrtf(variance + eps); + + for (int64_t i02 = start; i02 < end; i02++) { + for (int64_t i01 = 0; i01 < ne01; i01++) { + float * y = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); + ggml_vec_scale_f32(ne00, y, scale); + } + } + } + } +} + +void ggml_compute_forward_group_norm(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * 
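// Compact scalar restatement (illustrative) of the result of the RMS-norm
// backward derivation and kernel above: with sum_xx = sum_i x_i^2,
// sum_xdz = sum_i x_i*dz_i, mean_eps = sum_xx/N + eps and rrms = 1/sqrt(mean_eps),
// the row gradient is dx = (dz - x * sum_xdz / (sum_xx + N*eps)) * rrms.
#include <math.h>

static void ref_rms_norm_back_row(int n, float * dx, const float * x, const float * dz, float eps) {
    float sum_xx = 0.0f, sum_xdz = 0.0f;
    for (int i = 0; i < n; ++i) {
        sum_xx  += x[i] * x[i];
        sum_xdz += x[i] * dz[i];
    }
    const float mean_eps = sum_xx / n + eps;
    const float sum_eps  = sum_xx + eps * n;          // == n * mean_eps
    const float rrms     = 1.0f / sqrtf(mean_eps);
    for (int i = 0; i < n; ++i) {
        dx[i] = (dz[i] - x[i] * sum_xdz / sum_eps) * rrms;
    }
}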
src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_group_norm_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_l2_norm + +static void ggml_compute_forward_l2_norm_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + + GGML_ASSERT(src0->nb[0] == sizeof(float)); + + const int ith = params->ith; + const int nth = params->nth; + + GGML_TENSOR_UNARY_OP_LOCALS + + float eps; + memcpy(&eps, dst->op_params, sizeof(float)); + + GGML_ASSERT(eps >= 0.0f); + + // TODO: optimize + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = ith; i01 < ne01; i01 += nth) { + const float * x = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); + + ggml_float sum = 0.0; + for (int64_t i00 = 0; i00 < ne00; i00++) { + sum += (ggml_float) (x[i00] * x[i00]); + } + + float * y = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); + + memcpy(y, x, ne00 * sizeof(float)); + + const float scale = 1.0f / fmaxf(sqrtf(sum), eps); + + ggml_vec_scale_f32(ne00, y, scale); + } + } + } +} + +void ggml_compute_forward_l2_norm(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_l2_norm_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_out_prod + +static void ggml_compute_forward_out_prod_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_TENSOR_BINARY_OP_LOCALS + + GGML_ASSERT(dst->type == GGML_TYPE_F32); + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + + const int ith = params->ith; + const int nth = params->nth; + + GGML_ASSERT(ne0 == ne00); + GGML_ASSERT(ne1 == ne10); + GGML_ASSERT(ne2 == ne12); + GGML_ASSERT(ne3 == ne13); + + GGML_ASSERT(ne2 % ne02 == 0); + GGML_ASSERT(ne3 % ne03 == 0); + + // we don't support permuted src0 or src1 + GGML_ASSERT(nb00 == sizeof(float)); + + // dst cannot be transposed or permuted + GGML_ASSERT(nb0 == sizeof(float)); + // GGML_ASSERT(nb0 <= nb1); + // GGML_ASSERT(nb1 <= nb2); + // GGML_ASSERT(nb2 <= nb3); + + // nb01 >= nb00 - src0 is not transposed + // compute by src0 rows + + if (ith == 0) { + ggml_vec_set_f32(ne0 * ne1 * ne2 * ne3, (float *) dst->data, 0); + } + ggml_barrier(params->threadpool); + + // dst[:,:,:,:] = 0 + // for i2,i3: + // for i1: + // for i01: + // for i0: + // dst[i0,i1,i2,i3] += src0[i0,i01,i2,i3] * src1[i1,i01,i2,i3] + + // parallelize by last three dimensions + + // total rows in dst + const int64_t nr = ne1 * ne2 * ne3; + + // rows per thread + const int64_t dr = (nr + nth - 1) / nth; + + // row range for this thread + const int64_t ir0 = dr * ith; + const int64_t ir1 = MIN(ir0 + dr, nr); + + // block-tiling attempt + const int64_t blck_0 = MAX(GGML_VEC_MAD_UNROLL, 32); + const int64_t blck_1 = 16; + + // dps == dst per src0, used for group query attention + const int64_t dps2 = ne2 / ne02; + const int64_t dps3 = ne3 / ne03; + + for (int64_t bir = ir0; bir < ir1; bir += blck_1) { + const int64_t bir1 = MIN(bir + blck_1, ir1); + for (int64_t bi01 = 0; bi01 < ne01; bi01 += blck_0) { + const int64_t bne01 = MIN(bi01 + blck_0, ne01); + for (int64_t ir = 
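// Scalar sketch of the L2 normalization above: scale the row by
// 1 / max(||x||_2, eps), so eps acts as a floor on the norm rather than being
// added under the square root. ref_l2_norm_row is illustrative only.
#include <math.h>

static void ref_l2_norm_row(int n, float * y, const float * x, float eps) {
    float ss = 0.0f;
    for (int i = 0; i < n; ++i) ss += x[i] * x[i];
    const float scale = 1.0f / fmaxf(sqrtf(ss), eps);
    for (int i = 0; i < n; ++i) y[i] = x[i] * scale;
}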
bir; ir < bir1; ++ir) { + // dst indices + const int64_t i3 = ir / (ne2 * ne1); + const int64_t i2 = (ir - i3 * ne2 * ne1) / ne1; + const int64_t i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); + + const int64_t i02 = i2 / dps2; + const int64_t i03 = i3 / dps3; + + //const int64_t i10 = i1; + const int64_t i12 = i2; + const int64_t i13 = i3; + +#if GGML_VEC_MAD_UNROLL > 2 + const int64_t bne01_unroll = bne01 - (bne01 % GGML_VEC_MAD_UNROLL); + for (int64_t i01 = bi01; i01 < bne01_unroll; i01 += GGML_VEC_MAD_UNROLL) { + const int64_t i11 = i01; + + float * s0 = (float *) ((char *) src0->data + (i01 * nb01 + i02 * nb02 + i03 * nb03)); + float * s1 = (float *) ((char *) src1->data + (i1 * nb10 + i11 * nb11 + i12 * nb12 + i13 * nb13)); + float * d = (float *) ((char *) dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb3)); + + ggml_vec_mad_f32_unroll(ne0, nb01, nb11, d, s0, s1); + } + for (int64_t i01 = bne01_unroll; i01 < bne01; ++i01) { + const int64_t i11 = i01; + + float * s0 = (float *) ((char *) src0->data + (i01 * nb01 + i02 * nb02 + i03 * nb03)); + float * s1 = (float *) ((char *) src1->data + (i1 * nb10 + i11 * nb11 + i12 * nb12 + i13 * nb13)); + float * d = (float *) ((char *) dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb3)); + + ggml_vec_mad_f32(ne0, d, s0, *s1); + } +#else + for (int64_t i01 = bi01; i01 < bne01; ++i01) { + const int64_t i11 = i01; + + float * s0 = (float *) ((char *) src0->data + (i01 * nb01 + i02 * nb02 + i03 * nb03)); + float * s1 = (float *) ((char *) src1->data + (i1 * nb10 + i11 * nb11 + i12 * nb12 + i13 * nb13)); + float * d = (float *) ((char *) dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb3)); + + ggml_vec_mad_f32(ne0, d, s0, *s1); + } +#endif + } + } + } +} + +static void ggml_compute_forward_out_prod_q_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_TENSOR_BINARY_OP_LOCALS; + + const int ith = params->ith; + const int nth = params->nth; + + const ggml_type type = src0->type; + const ggml_to_float_t dequantize_row_q = ggml_get_type_traits(type)->to_float; + + GGML_ASSERT(ne02 == ne12); + GGML_ASSERT(ne03 == ne13); + GGML_ASSERT(ne2 == ne12); + GGML_ASSERT(ne3 == ne13); + + // we don't support permuted src0 dim0 + GGML_ASSERT(nb00 == ggml_type_size(type)); + + // dst dim0 cannot be transposed or permuted + GGML_ASSERT(nb0 == sizeof(float)); + // GGML_ASSERT(nb0 <= nb1); + // GGML_ASSERT(nb1 <= nb2); + // GGML_ASSERT(nb2 <= nb3); + + GGML_ASSERT(ne0 == ne00); + GGML_ASSERT(ne1 == ne10); + GGML_ASSERT(ne2 == ne02); + GGML_ASSERT(ne3 == ne03); + + // nb01 >= nb00 - src0 is not transposed + // compute by src0 rows + + if (ith == 0) { + ggml_vec_set_f32(ne0 * ne1 * ne2 * ne3, (float *) dst->data, 0); + } + ggml_barrier(params->threadpool); + + // parallelize by last three dimensions + + // total rows in dst + const int64_t nr = ne1 * ne2 * ne3; + + // rows per thread + const int64_t dr = (nr + nth - 1) / nth; + + // row range for this thread + const int64_t ir0 = dr * ith; + const int64_t ir1 = MIN(ir0 + dr, nr); + + // dst[:,:,:,:] = 0 + // for i2,i3: + // for i1: + // for i01: + // for i0: + // dst[i0,i1,i2,i3] += src0[i0,i01,i2,i3] * src1[i1,i01,i2,i3] + + float * wdata = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32) * ith; + + for (int64_t ir = ir0; ir < ir1; ++ir) { + // dst indices + const int64_t i3 = ir / (ne2 * ne1); + const int64_t i2 = (ir - i3 * ne2 * ne1) / ne1; + const int64_t i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); + + const int64_t i02 = i2; + const int64_t 
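// Naive 2D reference (illustrative) for the accumulation the out_prod comment
// above describes, ignoring the batch dimensions and the block tiling/unrolling:
// dst[i0, i1] = sum_{i01} src0[i0, i01] * src1[i1, i01], i.e. dst = src0 * src1^T,
// built up column by column with an axpy (the role ggml_vec_mad_f32 plays here).
// Layout here is a stated assumption: element [i, j] lives at i + j*rows.
static void ref_out_prod_2d(int ne0, int ne01, int ne1,
                            float * dst, const float * s0, const float * s1) {
    // dst: ne0 x ne1 (zero-initialised by the caller), s0: ne0 x ne01, s1: ne1 x ne01
    for (int i1 = 0; i1 < ne1; ++i1) {
        for (int i01 = 0; i01 < ne01; ++i01) {
            const float v = s1[i1 + i01 * ne1];                   // src1[i1, i01]
            for (int i0 = 0; i0 < ne0; ++i0) {
                dst[i0 + i1 * ne0] += s0[i0 + i01 * ne0] * v;     // axpy accumulate
            }
        }
    }
}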
i03 = i3; + + //const int64_t i10 = i1; + const int64_t i12 = i2; + const int64_t i13 = i3; + + for (int64_t i01 = 0; i01 < ne01; ++i01) { + const int64_t i11 = i01; + + float * s0 = (float *) ((char *) src0->data + (i01 * nb01 + i02 * nb02 + i03 * nb03)); + float * s1 = (float *) ((char *) src1->data + (i1 * nb10 + i11 * nb11 + i12 * nb12 + i13 * nb13)); + float * d = (float *) ((char *) dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb3)); + + dequantize_row_q(s0, wdata, ne0); + ggml_vec_mad_f32(ne0, d, wdata, *s1); + } + } +} + +void ggml_compute_forward_out_prod(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: + case GGML_TYPE_Q5_0: + case GGML_TYPE_Q5_1: + case GGML_TYPE_Q8_0: + case GGML_TYPE_MXFP4: + case GGML_TYPE_Q2_K: + case GGML_TYPE_Q3_K: + case GGML_TYPE_Q4_K: + case GGML_TYPE_Q5_K: + case GGML_TYPE_Q6_K: + case GGML_TYPE_TQ1_0: + case GGML_TYPE_TQ2_0: + case GGML_TYPE_IQ2_XXS: + case GGML_TYPE_IQ2_XS: + case GGML_TYPE_IQ3_XXS: + case GGML_TYPE_IQ1_S: + case GGML_TYPE_IQ1_M: + case GGML_TYPE_IQ4_NL: + case GGML_TYPE_IQ4_XS: + case GGML_TYPE_IQ3_S: + case GGML_TYPE_IQ2_S: + { + ggml_compute_forward_out_prod_q_f32(params, dst); + } + break; + case GGML_TYPE_F16: + { + GGML_ABORT("fatal error"); // todo + // ggml_compute_forward_out_prod_f16_f32(params, dst); + } + case GGML_TYPE_F32: + { + ggml_compute_forward_out_prod_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_scale + +static void ggml_compute_forward_scale_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + GGML_ASSERT(ggml_is_contiguous(src0)); + GGML_ASSERT(ggml_is_contiguous(dst)); + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + + float s; // scale factor + float b; // bias + + memcpy(&s, (float *) dst->op_params + 0, sizeof(float)); + memcpy(&b, (float *) dst->op_params + 1, sizeof(float)); + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src0->ne[0]; + const int nr = ggml_nrows(src0); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + const size_t nb01 = src0->nb[1]; + + const size_t nb1 = dst->nb[1]; + + if (b == 0.0f) { + for (int i1 = ir0; i1 < ir1; i1++) { + if (dst->data != src0->data) { + // src0 is same shape as dst => same indices + // TODO: add x parameter to ggml_vec_scale_f32 and remove this memcpy + memcpy((char *) dst->data + i1 * nb1, (char *) src0->data + i1 * nb01, nc * sizeof(float)); + } + ggml_vec_scale_f32(nc, (float *) ((char *) dst->data + i1 * nb1), s); + } + } else { + for (int i1 = ir0; i1 < ir1; i1++) { + ggml_vec_mad1_f32(nc, (float *) ((char *) dst->data + i1 * nb1), (float *) ((char *) src0->data + i1 * nb1), + s, b); + } + } +} + +void ggml_compute_forward_scale(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_scale_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_set + +static void ggml_compute_forward_set_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + 
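// Scalar restatement of the scale kernel above: dst = s*src0 + b, where s and b
// are read from dst->op_params; the b == 0 path is just a copy (when not in
// place) followed by an in-place scale. ref_scale_row is illustrative only.
static void ref_scale_row(int n, float * dst, const float * src, float s, float b) {
    for (int i = 0; i < n; ++i) {
        dst[i] = s * src[i] + b;
    }
}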
GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0)); + + // view src0 and dst with these strides and data offset inbytes during set + // nb0 is implicitly element_size because src0 and dst are contiguous + size_t nb1 = ((int32_t *) dst->op_params)[0]; + size_t nb2 = ((int32_t *) dst->op_params)[1]; + size_t nb3 = ((int32_t *) dst->op_params)[2]; + size_t offset = ((int32_t *) dst->op_params)[3]; + bool inplace = (bool) ((int32_t *) dst->op_params)[4]; + + if (!inplace) { + if (params->ith == 0) { + // memcpy needs to be synchronized across threads to avoid race conditions. + // => do it in INIT phase + memcpy(((char *) dst->data), ((char *) src0->data), ggml_nbytes(dst)); + } + ggml_barrier(params->threadpool); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nr = ggml_nrows(src1); + const int nc = src1->ne[0]; + + GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) + GGML_TENSOR_LOCALS(size_t, nb1, src1, nb) + + // src0 and dst as viewed during set + const size_t nb0 = ggml_element_size(src0); + + const int im0 = (ne10 == 0 ? 0 : ne10 - 1); + const int im1 = (ne11 == 0 ? 0 : ne11 - 1); + const int im2 = (ne12 == 0 ? 0 : ne12 - 1); + const int im3 = (ne13 == 0 ? 0 : ne13 - 1); + + GGML_ASSERT(offset + im0 * nb0 + im1 * nb1 + im2 * nb2 + im3 * nb3 <= ggml_nbytes(dst)); + + GGML_ASSERT(nb10 == sizeof(float)); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int ir = ir0; ir < ir1; ++ir) { + // src0 and dst are viewed with shape of src1 and offset + // => same indices + const int i3 = ir / (ne12 * ne11); + const int i2 = (ir - i3 * ne12 * ne11) / ne11; + const int i1 = (ir - i3 * ne12 * ne11 - i2 * ne11); + + ggml_vec_cpy_f32(nc, (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1 + offset), + (float *) ((char *) src1->data + i3 * nb13 + i2 * nb12 + i1 * nb11)); + } +} + +static void ggml_compute_forward_set_i32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0)); + + // view src0 and dst with these strides and data offset inbytes during set + // nb0 is implicitly element_size because src0 and dst are contiguous + size_t nb1 = ((int32_t *) dst->op_params)[0]; + size_t nb2 = ((int32_t *) dst->op_params)[1]; + size_t nb3 = ((int32_t *) dst->op_params)[2]; + size_t offset = ((int32_t *) dst->op_params)[3]; + bool inplace = (bool) ((int32_t *) dst->op_params)[4]; + + if (!inplace) { + if (params->ith == 0) { + // memcpy needs to be synchronized across threads to avoid race conditions. + // => do it in INIT phase + memcpy(((char *) dst->data), ((char *) src0->data), ggml_nbytes(dst)); + } + ggml_barrier(params->threadpool); + } + + const int ith = params->ith; + const int nth = params->nth; + + const int nr = ggml_nrows(src1); + const int nc = src1->ne[0]; + + GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) + GGML_TENSOR_LOCALS(size_t, nb1, src1, nb) + + // src0 and dst as viewed during set + const size_t nb0 = ggml_element_size(src0); + + const int im0 = (ne10 == 0 ? 0 : ne10 - 1); + const int im1 = (ne11 == 0 ? 0 : ne11 - 1); + const int im2 = (ne12 == 0 ? 0 : ne12 - 1); + const int im3 = (ne13 == 0 ? 
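// Illustrative sketch (hypothetical names) of how the set kernels above
// interpret dst->op_params: src1 is copied into a view of dst described by
// byte strides nb1..nb3 and a byte offset, optionally after seeding dst with a
// copy of src0 when the op is not in place.
#include <stdint.h>
#include <stddef.h>

struct set_view_params {
    size_t nb1, nb2, nb3;  // byte strides of the view of dst
    size_t offset;         // byte offset of the view inside dst
    int    inplace;        // when 0, dst is first overwritten with src0
};

static struct set_view_params read_set_params(const int32_t op_params[5]) {
    struct set_view_params p;
    p.nb1     = (size_t) op_params[0];
    p.nb2     = (size_t) op_params[1];
    p.nb3     = (size_t) op_params[2];
    p.offset  = (size_t) op_params[3];
    p.inplace = (int)    op_params[4];
    return p;
}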
0 : ne13 - 1); + + GGML_ASSERT(offset + im0 * nb0 + im1 * nb1 + im2 * nb2 + im3 * nb3 <= ggml_nbytes(dst)); + + GGML_ASSERT(nb10 == sizeof(int32_t)); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int ir = ir0; ir < ir1; ++ir) { + // src0 and dst are viewed with shape of src1 and offset + // => same indices + const int i3 = ir / (ne12 * ne11); + const int i2 = (ir - i3 * ne12 * ne11) / ne11; + const int i1 = (ir - i3 * ne12 * ne11 - i2 * ne11); + + ggml_vec_cpy_i32(nc, (int32_t *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1 + offset), + (int32_t *) ((char *) src1->data + i3 * nb13 + i2 * nb12 + i1 * nb11)); + } +} + +void ggml_compute_forward_set(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_set_f32(params, dst); + } + break; + case GGML_TYPE_I32: + { + ggml_compute_forward_set_i32(params, dst); + } + break; + case GGML_TYPE_F16: + case GGML_TYPE_BF16: + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: + case GGML_TYPE_Q5_0: + case GGML_TYPE_Q5_1: + case GGML_TYPE_Q8_0: + case GGML_TYPE_Q8_1: + case GGML_TYPE_MXFP4: + case GGML_TYPE_Q2_K: + case GGML_TYPE_Q3_K: + case GGML_TYPE_Q4_K: + case GGML_TYPE_Q5_K: + case GGML_TYPE_Q6_K: + case GGML_TYPE_TQ1_0: + case GGML_TYPE_TQ2_0: + case GGML_TYPE_IQ2_XXS: + case GGML_TYPE_IQ2_XS: + case GGML_TYPE_IQ3_XXS: + case GGML_TYPE_IQ1_S: + case GGML_TYPE_IQ1_M: + case GGML_TYPE_IQ4_NL: + case GGML_TYPE_IQ4_XS: + case GGML_TYPE_IQ3_S: + case GGML_TYPE_IQ2_S: + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_cpy + +void ggml_compute_forward_cpy(const ggml_compute_params * params, ggml_tensor * dst) { + ggml_compute_forward_dup(params, dst); +} + +// ggml_compute_forward_cont + +void ggml_compute_forward_cont(const ggml_compute_params * params, ggml_tensor * dst) { + ggml_compute_forward_dup(params, dst); +} + +// ggml_compute_forward_get_rows + +static void ggml_compute_forward_get_rows_q(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_TENSOR_BINARY_OP_LOCALS + + const int64_t nc = ne00; + const int64_t nr = ggml_nelements(src1); + + const ggml_type type = src0->type; + const ggml_to_float_t dequantize_row_q = ggml_get_type_traits(type)->to_float; + + assert(ne0 == nc); + assert(ne02 == ne11); + assert(nb00 == ggml_type_size(type)); + assert(ggml_nrows(dst) == nr); + + const int ith = params->ith; + const int nth = params->nth; + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int64_t i = ir0; i < ir1; ++i) { + const int64_t i12 = i / (ne11 * ne10); + const int64_t i11 = (i - i12 * ne11 * ne10) / ne10; + const int64_t i10 = (i - i12 * ne11 * ne10 - i11 * ne10); + const int64_t i01 = *(int32_t *) ((char *) src1->data + i10 * nb10 + i11 * nb11 + i12 * nb12); + + GGML_ASSERT(i01 >= 0 && i01 < ne01); + + dequantize_row_q((const void *) ((char *) src0->data + i01 * nb01 + i11 * nb02 + i12 * nb03), + (float *) ((char *) dst->data + i10 * nb1 + i11 * nb2 + i12 * nb3), nc); + } +} + +static void ggml_compute_forward_get_rows_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = 
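// Minimal sketch of the row gather these get_rows kernels implement: for every
// index in src1, copy (or dequantize/convert) the matching src0 row into dst.
// The f32 case below is the simplest variant; the quantized/f16/bf16 paths do
// the same thing with a per-type to-float conversion instead of memcpy.
// ref_get_rows_f32 is a hypothetical helper.
#include <stdint.h>
#include <string.h>

static void ref_get_rows_f32(int nc, int n_idx,
                             float * dst, const float * src, const int32_t * rows) {
    for (int i = 0; i < n_idx; ++i) {
        const int32_t r = rows[i];   // row index taken from src1
        memcpy(dst + (size_t) i * nc, src + (size_t) r * nc, nc * sizeof(float));
    }
}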
dst->src[1]; + + GGML_TENSOR_BINARY_OP_LOCALS + + const int64_t nc = ne00; + const int64_t nr = ggml_nelements(src1); + + assert(ne0 == nc); + assert(ne02 == ne11); + assert(nb00 == sizeof(ggml_fp16_t)); + assert(ggml_nrows(dst) == nr); + + const int ith = params->ith; + const int nth = params->nth; + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int64_t i = ir0; i < ir1; ++i) { + const int64_t i12 = i / (ne11 * ne10); + const int64_t i11 = (i - i12 * ne11 * ne10) / ne10; + const int64_t i10 = (i - i12 * ne11 * ne10 - i11 * ne10); + const int64_t i01 = *(int32_t *) ((char *) src1->data + i10 * nb10 + i11 * nb11 + i12 * nb12); + + GGML_ASSERT(i01 >= 0 && i01 < ne01); + + ggml_cpu_fp16_to_fp32((const ggml_fp16_t *) ((char *) src0->data + i01 * nb01 + i11 * nb02 + i12 * nb03), + (float *) ((char *) dst->data + i10 * nb1 + i11 * nb2 + i12 * nb3), nc); + } +} + +static void ggml_compute_forward_get_rows_bf16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_TENSOR_BINARY_OP_LOCALS + + const int64_t nc = ne00; + const int64_t nr = ggml_nelements(src1); + + assert(ne0 == nc); + assert(ne02 == ne11); + assert(nb00 == sizeof(ggml_bf16_t)); + assert(ggml_nrows(dst) == nr); + + const int ith = params->ith; + const int nth = params->nth; + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int64_t i = ir0; i < ir1; ++i) { + const int64_t i12 = i / (ne11 * ne10); + const int64_t i11 = (i - i12 * ne11 * ne10) / ne10; + const int64_t i10 = (i - i12 * ne11 * ne10 - i11 * ne10); + const int64_t i01 = *(int32_t *) ((char *) src1->data + i10 * nb10 + i11 * nb11 + i12 * nb12); + + GGML_ASSERT(i01 >= 0 && i01 < ne01); + + ggml_cpu_bf16_to_fp32((const ggml_bf16_t *) ((char *) src0->data + i01 * nb01 + i11 * nb02 + i12 * nb03), + (float *) ((char *) dst->data + i10 * nb1 + i11 * nb2 + i12 * nb3), nc); + } +} + +static void ggml_compute_forward_get_rows_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_TENSOR_BINARY_OP_LOCALS + + const int64_t nc = ne00; + const int64_t nr = ggml_nelements(src1); + + assert(ne0 == nc); + assert(ne02 == ne11); + assert(nb00 == sizeof(float)); + assert(ggml_nrows(dst) == nr); + + const int ith = params->ith; + const int nth = params->nth; + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int64_t i = ir0; i < ir1; ++i) { + const int64_t i12 = i / (ne11 * ne10); + const int64_t i11 = (i - i12 * ne11 * ne10) / ne10; + const int64_t i10 = (i - i12 * ne11 * ne10 - i11 * ne10); + const int64_t i01 = *(int32_t *) ((char *) src1->data + i10 * nb10 + i11 * nb11 + i12 * nb12); + + GGML_ASSERT(i01 >= 0 && i01 < ne01); + + ggml_vec_cpy_f32(nc, (float *) ((char *) dst->data + i10 * nb1 + i11 * nb2 + i12 * nb3), + (float *) ((char *) src0->data + i01 * nb01 + i11 * nb02 + i12 * nb03)); + } +} + +void ggml_compute_forward_get_rows(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_Q4_0: + case GGML_TYPE_Q4_1: + case GGML_TYPE_Q5_0: + case GGML_TYPE_Q5_1: + 
case GGML_TYPE_Q8_0:
+ case GGML_TYPE_Q8_1:
+ case GGML_TYPE_MXFP4:
+ case GGML_TYPE_Q2_K:
+ case GGML_TYPE_Q3_K:
+ case GGML_TYPE_Q4_K:
+ case GGML_TYPE_Q5_K:
+ case GGML_TYPE_Q6_K:
+ case GGML_TYPE_TQ1_0:
+ case GGML_TYPE_TQ2_0:
+ case GGML_TYPE_IQ2_XXS:
+ case GGML_TYPE_IQ2_XS:
+ case GGML_TYPE_IQ3_XXS:
+ case GGML_TYPE_IQ1_S:
+ case GGML_TYPE_IQ1_M:
+ case GGML_TYPE_IQ4_NL:
+ case GGML_TYPE_IQ4_XS:
+ case GGML_TYPE_IQ3_S:
+ case GGML_TYPE_IQ2_S:
+ {
+ ggml_compute_forward_get_rows_q(params, dst);
+ }
+ break;
+ case GGML_TYPE_F16:
+ {
+ ggml_compute_forward_get_rows_f16(params, dst);
+ }
+ break;
+ case GGML_TYPE_BF16:
+ {
+ ggml_compute_forward_get_rows_bf16(params, dst);
+ }
+ break;
+ case GGML_TYPE_F32:
+ case GGML_TYPE_I32:
+ {
+ ggml_compute_forward_get_rows_f32(params, dst);
+ }
+ break;
+ default:
+ {
+ GGML_ABORT("fatal error");
+ }
+ }
+
+ //static bool first = true;
+ //printf("ne0 = %d, ne1 = %d, ne2 = %d\n", dst->ne[0], dst->ne[1], dst->ne[2]);
+ //if (first) {
+ // first = false;
+ //} else {
+ // for (int k = 0; k < dst->ne[1]; ++k) {
+ // for (int j = 0; j < dst->ne[0]/16; ++j) {
+ // for (int i = 0; i < 16; ++i) {
+ // printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]);
+ // }
+ // printf("\n");
+ // }
+ // printf("\n");
+ // }
+ // printf("\n");
+ // exit(0);
+ //}
+}
+
+template <typename idx_t>
+static void ggml_compute_forward_set_rows_f32(const ggml_compute_params * params, ggml_tensor * dst) {
+ const ggml_tensor * src0 = dst->src[0];
+ const ggml_tensor * src1 = dst->src[1];
+
+ GGML_TENSOR_BINARY_OP_LOCALS
+
+ const int64_t nc = ne00;
+ const int64_t nr = ne01;
+
+ assert(ne0 == nc);
+ assert(ne2 == ne02);
+ assert(ne3 == ne03);
+ assert(src0->type == GGML_TYPE_F32);
+ assert(ne02 % ne11 == 0);
+ assert(ne03 % ne12 == 0);
+
+ const int ith = params->ith;
+ const int nth = params->nth;
+
+ // rows per thread
+ const int64_t dr = (nr + nth - 1) / nth;
+
+ // row range for this thread
+ const int64_t ir0 = dr * ith;
+ const int64_t ir1 = std::min(ir0 + dr, nr);
+
+ const ggml_from_float_t from_float = ggml_get_type_traits_cpu(dst->type)->from_float;
+
+ for (int64_t i03 = 0; i03 < ne03; ++i03) {
+ for (int64_t i02 = 0; i02 < ne02; ++i02) {
+ for (int64_t i = ir0; i < ir1; ++i) {
+ const int64_t i12 = i03 % ne12;
+ const int64_t i11 = i02 % ne11;
+ const int64_t i10 = i;
+
+ const int64_t i1 = *(idx_t *) ((char *) src1->data + i10 * nb10 + i11 * nb11 + i12 * nb12);
+
+ GGML_ASSERT(i1 >= 0 && i1 < ne1);
+
+ from_float((const float *) ((char *) src0->data + i * nb01 + i02 * nb02 + i03 * nb03),
+ ((char *) dst->data + i1 * nb1 + i02 * nb2 + i03 * nb3), nc);
+ }
+ }
+ }
+}
+
+void ggml_compute_forward_set_rows(const ggml_compute_params * params, ggml_tensor * dst) {
+ const ggml_tensor * src0 = dst->src[0];
+ const ggml_tensor * src1 = dst->src[1];
+
+ switch (src0->type) {
+ case GGML_TYPE_F32:
+ {
+ if (src1->type == GGML_TYPE_I64) {
+ ggml_compute_forward_set_rows_f32<int64_t>(params, dst);
+ } else if (src1->type == GGML_TYPE_I32) {
+ ggml_compute_forward_set_rows_f32<int32_t>(params, dst);
+ } else {
+ GGML_ABORT("src1->type = %d (%s) not supported", src1->type, ggml_type_name(src1->type));
+ }
+ }
+ break;
+ default:
+ {
+ GGML_ABORT("src0->type = %d (%s) not supported", src0->type, ggml_type_name(src0->type));
+ }
+ }
+}
+
+// ggml_compute_forward_get_rows_back
+
+static void ggml_compute_forward_get_rows_back_f32_f16(const ggml_compute_params * params, ggml_tensor * dst) {
+ const ggml_tensor * src0 = dst->src[0];
+ const ggml_tensor * src1 = dst->src[1];
+
+ if (params->ith != 0)
{ + return; + } + + GGML_ASSERT(ggml_is_contiguous(dst)); + + // ggml_compute_forward_dup_same_cont(params, opt0, dst); + + memset(dst->data, 0, ggml_nbytes(dst)); + + const int nc = src0->ne[0]; + const int nr = ggml_nelements(src1); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(src0->nb[0] == sizeof(ggml_fp16_t)); + + for (int i = 0; i < nr; ++i) { + const int r = ((int32_t *) src1->data)[i]; + + for (int j = 0; j < nc; ++j) { + ggml_fp16_t v = ((ggml_fp16_t *) ((char *) src0->data + i * src0->nb[1]))[j]; + ((float *) ((char *) dst->data + r * dst->nb[1]))[j] += GGML_CPU_FP16_TO_FP32(v); + } + } +} + +static void ggml_compute_forward_get_rows_back_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + if (params->ith != 0) { + return; + } + + GGML_ASSERT(ggml_is_contiguous(dst)); + + // ggml_compute_forward_dup_same_cont(params, opt0, dst); + + memset(dst->data, 0, ggml_nbytes(dst)); + + const int nc = src0->ne[0]; + const int nr = ggml_nelements(src1); + + GGML_ASSERT(dst->ne[0] == nc); + GGML_ASSERT(src0->nb[0] == sizeof(float)); + + for (int i = 0; i < nr; ++i) { + const int r = ((int32_t *) src1->data)[i]; + + ggml_vec_add_f32(nc, (float *) ((char *) dst->data + r * dst->nb[1]), + (float *) ((char *) dst->data + r * dst->nb[1]), + (float *) ((char *) src0->data + i * src0->nb[1])); + } +} + +void ggml_compute_forward_get_rows_back(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F16: + { + ggml_compute_forward_get_rows_back_f32_f16(params, dst); + } + break; + case GGML_TYPE_F32: + { + ggml_compute_forward_get_rows_back_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } + + //static bool first = true; + //printf("ne0 = %d, ne1 = %d, ne2 = %d\n", dst->ne[0], dst->ne[1], dst->ne[2]); + //if (first) { + // first = false; + //} else { + // for (int k = 0; k < dst->ne[1]; ++k) { + // for (int j = 0; j < dst->ne[0]/16; ++j) { + // for (int i = 0; i < 16; ++i) { + // printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]); + // } + // printf("\n"); + // } + // printf("\n"); + // } + // printf("\n"); + // exit(0); + //} +} + +// ggml_compute_forward_diag + +static void ggml_compute_forward_diag_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + if (params->ith != 0) { + return; + } + + // TODO: handle transposed/permuted matrices + + GGML_TENSOR_UNARY_OP_LOCALS + + GGML_ASSERT(ne00 == ne0); + GGML_ASSERT(ne00 == ne1); + GGML_ASSERT(ne01 == 1); + GGML_ASSERT(ne02 == ne2); + GGML_ASSERT(ne03 == ne3); + + GGML_ASSERT(nb00 == sizeof(float)); + GGML_ASSERT(nb0 == sizeof(float)); + + for (int i3 = 0; i3 < ne3; i3++) { + for (int i2 = 0; i2 < ne2; i2++) { + for (int i1 = 0; i1 < ne1; i1++) { + float * d = (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1); + float * s = (float *) ((char *) src0->data + i3 * nb03 + i2 * nb02); + for (int i0 = 0; i0 < i1; i0++) { + d[i0] = 0; + } + d[i1] = s[i1]; + for (int i0 = i1 + 1; i0 < ne0; i0++) { + d[i0] = 0; + } + } + } + } +} + +void ggml_compute_forward_diag(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_diag_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_diag_mask_inf + 
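// The helper below writes `value` (-INF for diag_mask_inf, 0 for diag_mask_zero) into every element
// above the diagonal shifted by n_past: column i of row j is overwritten when i > n_past + j.
// A minimal standalone sketch of that rule, assuming a contiguous row-major [nr, nc] float buffer
// (hypothetical helper for illustration only, not the strided ggml layout handled below):
//
//   static void mask_future_positions(float * x, int nr, int nc, int n_past, float value) {
//       for (int j = 0; j < nr; ++j) {
//           for (int i = n_past + j + 1; i < nc; ++i) {
//               x[j * nc + i] = value;   // future positions are masked before soft_max
//           }
//       }
//   }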
+static void ggml_compute_forward_diag_mask_f32(const ggml_compute_params * params, + ggml_tensor * dst, + const float value) { + const ggml_tensor * src0 = dst->src[0]; + + const int ith = params->ith; + const int nth = params->nth; + + const int n_past = ((int32_t *) dst->op_params)[0]; + const bool inplace = src0->data == dst->data; + + GGML_ASSERT(n_past >= 0); + + if (!inplace) { + if (ith == 0) { + // memcpy needs to be synchronized across threads to avoid race conditions. + // => do it in INIT phase + GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); + GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0)); + memcpy(((char *) dst->data), ((char *) src0->data), ggml_nbytes(dst)); + } + ggml_barrier(params->threadpool); + } + + // TODO: handle transposed/permuted matrices + + const int n = ggml_nrows(src0); + const int nc = src0->ne[0]; + const int nr = src0->ne[1]; + const int nz = n / nr; + + GGML_ASSERT(dst->nb[0] == sizeof(float)); + GGML_ASSERT(src0->nb[0] == sizeof(float)); + + for (int k = 0; k < nz; k++) { + for (int j = ith; j < nr; j += nth) { + for (int i = n_past; i < nc; i++) { + if (i > n_past + j) { + *(float *) ((char *) dst->data + k * dst->nb[2] + j * dst->nb[1] + i * dst->nb[0]) = value; + } + } + } + } +} + +void ggml_compute_forward_diag_mask_inf(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_diag_mask_f32(params, dst, -INFINITY); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +void ggml_compute_forward_diag_mask_zero(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_diag_mask_f32(params, dst, 0); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_soft_max + +static void ggml_compute_forward_soft_max_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + const ggml_tensor * src2 = dst->src[2]; + + assert(ggml_is_contiguous(dst)); + assert(ggml_are_same_shape(src0, dst)); + + float scale = 1.0f; + float max_bias = 0.0f; + + memcpy(&scale, (float *) dst->op_params + 0, sizeof(float)); + memcpy(&max_bias, (float *) dst->op_params + 1, sizeof(float)); + + const int ith = params->ith; + const int nth = params->nth; + + GGML_TENSOR_UNARY_OP_LOCALS + + const int64_t nb11 = src1 ? src1->nb[1] : 1; + const int64_t nb12 = src1 ? src1->nb[2] : 1; + const int64_t nb13 = src1 ? src1->nb[3] : 1; + + const int64_t ne12 = src1 ? src1->ne[2] : 1; + const int64_t ne13 = src1 ? src1->ne[3] : 1; + + // TODO: is this supposed to be ceil instead of floor? + // https://huggingface.co/mosaicml/mpt-7b/blob/main/attention.py#L370 + const uint32_t n_head = ne02; + const uint32_t n_head_log2 = 1u << (uint32_t) floor(log2(n_head)); + + const float m0 = powf(2.0f, -(max_bias) / n_head_log2); + const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2); + + float * wp = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith; + + const bool use_f16 = (src1 && src1->type == GGML_TYPE_F16); + + // sinks + const float * sk = src2 ? 
(float *) ((char *) src2->data) : nullptr; + + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = ith; i01 < ne01; i01 += nth) { + const int64_t i11 = i01; + const int64_t i12 = i02 % ne12; + const int64_t i13 = i03 % ne13; + + // ALiBi + const uint32_t h = i02; // head + const float slope = + (max_bias > 0.0f) ? h < n_head_log2 ? powf(m0, h + 1) : powf(m1, 2 * (h - n_head_log2) + 1) : 1.0f; + + float * sp = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); + float * dp = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); + + // broadcast the mask across rows + ggml_fp16_t * mp_f16 = + src1 ? (ggml_fp16_t *) ((char *) src1->data + i11 * nb11 + i12 * nb12 + i13 * nb13) : NULL; + float * mp_f32 = src1 ? (float *) ((char *) src1->data + i11 * nb11 + i12 * nb12 + i13 * nb13) : NULL; + + ggml_vec_cpy_f32(ne00, wp, sp); + ggml_vec_scale_f32(ne00, wp, scale); + if (mp_f32) { + if (use_f16) { + for (int i = 0; i < ne00; ++i) { + wp[i] += slope * GGML_CPU_FP16_TO_FP32(mp_f16[i]); + } + } else { + for (int i = 0; i < ne00; ++i) { + wp[i] += slope * mp_f32[i]; + } + } + } + +#ifndef NDEBUG + for (int i = 0; i < ne00; ++i) { + //printf("p[%d] = %f\n", i, p[i]); + assert(!isnan(wp[i])); + } +#endif + + float max = -INFINITY; + ggml_vec_max_f32(ne00, &max, wp); + + // if we have sinks, make a correction as if they were included in the softmax + if (sk) { + max = MAX(max, sk[i02]); + } + + ggml_float sum = ggml_vec_soft_max_f32(ne00, dp, wp, max); + assert(sum > 0.0); + + if (sk) { + sum += (ggml_float) expf(sk[i02] - max); + } + + sum = 1.0 / sum; + ggml_vec_scale_f32(ne00, dp, sum); + +#ifndef NDEBUG + for (int i = 0; i < ne00; ++i) { + assert(!isnan(dp[i])); + assert(!isinf(dp[i])); + } +#endif + } + } + } +} + +void ggml_compute_forward_soft_max(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_soft_max_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_soft_max_ext_back + +static void ggml_compute_forward_soft_max_ext_back_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(ggml_is_contiguous(src0)); + GGML_ASSERT(ggml_is_contiguous(src1)); + GGML_ASSERT(ggml_is_contiguous(dst)); + GGML_ASSERT(ggml_are_same_shape(src0, dst)); + GGML_ASSERT(ggml_are_same_shape(src1, dst)); + + float scale = 1.0f; + float max_bias = 0.0f; + + memcpy(&scale, (const float *) dst->op_params + 0, sizeof(float)); + memcpy(&max_bias, (const float *) dst->op_params + 1, sizeof(float)); + + GGML_ASSERT(max_bias == 0.0f); + + // TODO: handle transposed/permuted matrices + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src0->ne[0]; + const int nr = ggml_nrows(src0); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + for (int i1 = ir0; i1 < ir1; i1++) { + float * dy = (float *) ((char *) src0->data + i1 * src0->nb[1]); + float * y = (float *) ((char *) src1->data + i1 * src1->nb[1]); + float * dx = (float *) ((char *) dst->data + i1 * dst->nb[1]); + +#ifndef NDEBUG + for (int i = 0; i < nc; ++i) { + //printf("p[%d] = %f\n", i, p[i]); + assert(!isnan(dy[i])); + assert(!isnan(y[i])); + } +#endif + // 
Jii = yi - yi*yi + // Jij = -yi*yj + // J = diag(y)-y.T*y + // dx = J * dy + // dxk = sum_i(Jki * dyi) + // dxk = sum_i(-yk*yi * dyi) - (-yk*yk)*dyk + (yk - yk*yk)*dyk + // dxk = sum_i(-yk*yi * dyi) + yk*yk*dyk + yk*dyk - yk*yk*dyk + // dxk = sum_i(-yk*yi * dyi) + yk*dyk + // dxk = -yk * sum_i(yi * dyi) + yk*dyk + // dxk = -yk * dot(y, dy) + yk*dyk + // dxk = yk * (- dot(y, dy) + dyk) + // dxk = yk * (dyk - dot(y, dy)) + // + // post-order: + // dot_y_dy := dot(y, dy) + // dx := dy + // dx := dx - dot_y_dy + // dx := dx * y + + // linear runtime, no additional memory + float dot_y_dy = 0; + ggml_vec_dot_f32(nc, &dot_y_dy, 0, y, 0, dy, 0, 1); + ggml_vec_cpy_f32(nc, dx, dy); + ggml_vec_acc1_f32(nc, dx, -dot_y_dy); + ggml_vec_mul_f32(nc, dx, dx, y); + ggml_vec_scale_f32(nc, dx, scale); + +#ifndef NDEBUG + for (int i = 0; i < nc; ++i) { + assert(!isnan(dx[i])); + assert(!isinf(dx[i])); + } +#endif + } +} + +void ggml_compute_forward_soft_max_ext_back(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_soft_max_ext_back_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_clamp + +static void ggml_compute_forward_clamp_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + float min; + float max; + memcpy(&min, (float *) dst->op_params + 0, sizeof(float)); + memcpy(&max, (float *) dst->op_params + 1, sizeof(float)); + + const int ith = params->ith; + const int nth = params->nth; + + const int n = ggml_nrows(src0); + const int nc = src0->ne[0]; + + const size_t nb00 = src0->nb[0]; + const size_t nb01 = src0->nb[1]; + + const size_t nb0 = dst->nb[0]; + const size_t nb1 = dst->nb[1]; + + GGML_ASSERT(nb0 == sizeof(float)); + GGML_ASSERT(nb00 == sizeof(float)); + + for (int j = ith; j < n; j += nth) { + float * dst_ptr = (float *) ((char *) dst->data + j * nb1); + float * src0_ptr = (float *) ((char *) src0->data + j * nb01); + + for (int i = 0; i < nc; i++) { + dst_ptr[i] = MAX(MIN(src0_ptr[i], max), min); + } + } +} + +static void ggml_compute_forward_clamp_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + float min; + float max; + memcpy(&min, (float *) dst->op_params + 0, sizeof(float)); + memcpy(&max, (float *) dst->op_params + 1, sizeof(float)); + + const int ith = params->ith; + const int nth = params->nth; + + const int n = ggml_nrows(src0); + const int nc = src0->ne[0]; + + const size_t nb00 = src0->nb[0]; + const size_t nb01 = src0->nb[1]; + + const size_t nb0 = dst->nb[0]; + const size_t nb1 = dst->nb[1]; + + GGML_ASSERT(nb0 == sizeof(ggml_fp16_t)); + GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); + + for (int j = ith; j < n; j += nth) { + ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + j * nb1); + ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + j * nb01); + + for (int i = 0; i < nc; i++) { + float v = GGML_CPU_FP16_TO_FP32(src0_ptr[i]); + dst_ptr[i] = GGML_CPU_FP32_TO_FP16(MAX(MIN(v, max), min)); + } + } +} + +void ggml_compute_forward_clamp(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_clamp_f32(params, dst); + } + break; + case GGML_TYPE_F16: + { + ggml_compute_forward_clamp_f16(params, dst); + } + break; + case GGML_TYPE_BF16: + case GGML_TYPE_Q4_0: 
+ case GGML_TYPE_Q4_1: + case GGML_TYPE_Q5_0: + case GGML_TYPE_Q5_1: + case GGML_TYPE_Q8_0: + case GGML_TYPE_Q8_1: + case GGML_TYPE_MXFP4: + case GGML_TYPE_Q2_K: + case GGML_TYPE_Q3_K: + case GGML_TYPE_Q4_K: + case GGML_TYPE_Q5_K: + case GGML_TYPE_Q6_K: + case GGML_TYPE_TQ1_0: + case GGML_TYPE_TQ2_0: + case GGML_TYPE_IQ2_XXS: + case GGML_TYPE_IQ2_XS: + case GGML_TYPE_IQ3_XXS: + case GGML_TYPE_IQ1_S: + case GGML_TYPE_IQ1_M: + case GGML_TYPE_IQ4_NL: + case GGML_TYPE_IQ4_XS: + case GGML_TYPE_IQ3_S: + case GGML_TYPE_IQ2_S: + case GGML_TYPE_Q8_K: + case GGML_TYPE_I8: + case GGML_TYPE_I16: + case GGML_TYPE_I32: + case GGML_TYPE_I64: + case GGML_TYPE_F64: + case GGML_TYPE_COUNT: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_rope + +static float rope_yarn_ramp(const float low, const float high, const int i0) { + const float y = (i0 / 2 - low) / MAX(0.001f, high - low); + return 1 - MIN(1, MAX(0, y)); +} + +// YaRN algorithm based on LlamaYaRNScaledRotaryEmbedding.py from https://github.com/jquesnelle/yarn +// MIT licensed. Copyright (c) 2023 Jeffrey Quesnelle and Bowen Peng. +static void rope_yarn(float theta_extrap, + float freq_scale, + float corr_dims[2], + int64_t i0, + float ext_factor, + float mscale, + float * cos_theta, + float * sin_theta) { + // Get n-d rotational scaling corrected for extrapolation + float theta_interp = freq_scale * theta_extrap; + float theta = theta_interp; + if (ext_factor != 0.0f) { + float ramp_mix = rope_yarn_ramp(corr_dims[0], corr_dims[1], i0) * ext_factor; + theta = theta_interp * (1 - ramp_mix) + theta_extrap * ramp_mix; + + // Get n-d magnitude scaling corrected for interpolation + mscale *= 1.0f + 0.1f * logf(1.0f / freq_scale); + } + *cos_theta = cosf(theta) * mscale; + *sin_theta = sinf(theta) * mscale; +} + +static void ggml_rope_cache_init(float theta_base, + float freq_scale, + const float * freq_factors, + float corr_dims[2], + int64_t ne0, + float ext_factor, + float mscale, + float * cache, + float sin_sign, + float theta_scale) { + // ref: https://github.com/jquesnelle/yarn/blob/master/scaled_rope/LlamaYaRNScaledRotaryEmbedding.py + float theta = theta_base; + for (int64_t i0 = 0; i0 < ne0; i0 += 2) { + const float ff = freq_factors ? freq_factors[i0 / 2] : 1.0f; + rope_yarn(theta / ff, freq_scale, corr_dims, i0, ext_factor, mscale, &cache[i0 + 0], &cache[i0 + 1]); + cache[i0 + 1] *= sin_sign; + + theta *= theta_scale; + } +} + +static void ggml_mrope_cache_init(float theta_base_t, + float theta_base_h, + float theta_base_w, + float theta_base_e, + int sections[4], + bool is_imrope, + bool indep_sects, + float freq_scale, + const float * freq_factors, + float corr_dims[2], + int64_t ne0, + float ext_factor, + float mscale, + float * cache, + float sin_sign, + float theta_scale) { + // ref: https://github.com/jquesnelle/yarn/blob/master/scaled_rope/LlamaYaRNScaledRotaryEmbedding.py + float theta_t = theta_base_t; + float theta_h = theta_base_h; + float theta_w = theta_base_w; + float theta_e = theta_base_e; // extra position id for vision encoder + int sect_dims = sections[0] + sections[1] + sections[2] + sections[3]; + int sec_w = sections[1] + sections[0]; + int sec_e = sections[2] + sec_w; + GGML_ASSERT(sect_dims <= ne0); + + for (int64_t i0 = 0; i0 < ne0; i0 += 2) { + const float ff = freq_factors ? freq_factors[i0 / 2] : 1.0f; + + int sector = (i0 / 2) % sect_dims; + if (indep_sects) { + // compute theta independently for each dim sections + // (i.e. 
reset the corresponding theta when `i0` goes from one section to another)
+ if (sector == 0) {
+ theta_t = theta_base_t;
+ } else if (sector == sections[0]) {
+ theta_h = theta_base_h;
+ } else if (sector == sec_w) {
+ theta_w = theta_base_w;
+ } else if (sector == sec_e) {
+ theta_e = theta_base_e;
+ }
+ }
+
+ float theta = theta_t;
+ if (is_imrope) { // qwen3vl apply interleaved mrope
+ if (sector % 3 == 1 && sector < 3 * sections[1]) {
+ theta = theta_h;
+ } else if (sector % 3 == 2 && sector < 3 * sections[2]) {
+ theta = theta_w;
+ } else if (sector % 3 == 0 && sector < 3 * sections[0]) {
+ theta = theta_t;
+ } else {
+ theta = theta_e;
+ }
+ } else {
+ if (sector >= sections[0] && sector < sec_w) {
+ theta = theta_h;
+ } else if (sector >= sec_w && sector < sec_w + sections[2]) {
+ theta = theta_w;
+ } else if (sector >= sec_w + sections[2]) {
+ theta = theta_e;
+ }
+ }
+
+ rope_yarn(theta / ff, freq_scale, corr_dims, i0, ext_factor, mscale, &cache[i0 + 0], &cache[i0 + 1]);
+ cache[i0 + 1] *= sin_sign;
+
+ theta_t *= theta_scale;
+ theta_w *= theta_scale;
+ theta_h *= theta_scale;
+ theta_e *= theta_scale;
+ }
+}
+
+template <typename T>
+static void rotate_pairs(const int64_t n,
+ const int64_t n_offset,
+ const float * cache,
+ const T * src_data,
+ T * dst_data,
+ const int scale = 2) {
+ for (int64_t i0 = 0; i0 < n; i0 += 2) {
+ const int64_t ic =
+ i0 / scale; // hack for GGML_ROPE_TYPE_NORMAL, where we need ic = i0; for all other cases, ic = i0/2
+
+ const float cos_theta = cache[i0 + 0];
+ const float sin_theta = cache[i0 + 1];
+
+ const T * const src = src_data + ic;
+ T * dst = dst_data + ic;
+
+ const float x0 = type_conversion_table<T>::to_f32(src[0]);
+ const float x1 = type_conversion_table<T>::to_f32(src[n_offset]);
+
+ dst[0] = type_conversion_table<T>::from_f32(x0 * cos_theta - x1 * sin_theta);
+ dst[n_offset] = type_conversion_table<T>::from_f32(x0 * sin_theta + x1 * cos_theta);
+ }
+}
+
+template <typename T> //float or ggml_fp16_t
+static void ggml_compute_forward_rope_flt(const ggml_compute_params * params, ggml_tensor * dst, const bool forward) {
+ const ggml_tensor * src0 = dst->src[0];
+ const ggml_tensor * src1 = dst->src[1];
+ const ggml_tensor * src2 = dst->src[2];
+
+ GGML_ASSERT(src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16);
+ GGML_ASSERT(src1->type == GGML_TYPE_I32);
+
+ float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow;
+ int sections[4];
+
+ //const int n_past = ((int32_t *) dst->op_params)[0];
+ const int n_dims = ((int32_t *) dst->op_params)[1];
+ const int mode = ((int32_t *) dst->op_params)[2];
+ //const int n_ctx = ((int32_t *) dst->op_params)[3];
+ const int n_ctx_orig = ((int32_t *) dst->op_params)[4];
+
+ memcpy(&freq_base, (int32_t *) dst->op_params + 5, sizeof(float));
+ memcpy(&freq_scale, (int32_t *) dst->op_params + 6, sizeof(float));
+ memcpy(&ext_factor, (int32_t *) dst->op_params + 7, sizeof(float));
+ memcpy(&attn_factor, (int32_t *) dst->op_params + 8, sizeof(float));
+ memcpy(&beta_fast, (int32_t *) dst->op_params + 9, sizeof(float));
+ memcpy(&beta_slow, (int32_t *) dst->op_params + 10, sizeof(float));
+ memcpy(&sections, (int32_t *) dst->op_params + 11, sizeof(int) * 4);
+
+ GGML_TENSOR_UNARY_OP_LOCALS
+
+ //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3);
+ //printf("n_past = %d, ne2 = %d\n", n_past, ne2);
+
+ GGML_ASSERT(nb0 == nb00);
+ GGML_ASSERT(nb0 == sizeof(T));
+
+ const int ith = params->ith;
+ const int nth = params->nth;
+
+ const int nr = ggml_nrows(dst);
+
+ GGML_ASSERT(n_dims <= ne0);
+ GGML_ASSERT(n_dims % 2 == 0);
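// For reference: each (x0, x1) pair handled by rotate_pairs() above is rotated as
//   x0' = x0*cos(theta) - x1*sin(theta)
//   x1' = x0*sin(theta) + x1*cos(theta)
// where theta for the pair at channel i0 of a token at position p is roughly p * freq_base^(-i0/n_dims)
// (freq_scale, freq_factors and the YaRN ramp in rope_yarn() adjust this), and the pair partner is either
// the neighbouring element (NORMAL mode, n_offset = 1) or the element n_dims/2 away (NEOX-style modes).
// A rough numeric sketch, assuming freq_base = 10000, n_dims = 4 and position p = 2:
//   theta(i0 = 0) = 2 * 10000^(0)    = 2.0
//   theta(i0 = 2) = 2 * 10000^(-0.5) = 0.02
// so low channels rotate quickly with position and high channels slowly.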
+
+ // rows per thread
+ const int dr = (nr + nth - 1) / nth;
+
+ // row range for this thread
+ const int ir0 = dr * ith;
+ const int ir1 = MIN(ir0 + dr, nr);
+
+ // row index used to determine which thread to use
+ int ir = 0;
+
+ const float theta_scale = powf(freq_base, -2.0f / n_dims);
+
+ float corr_dims[2];
+ ggml_rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow, corr_dims);
+
+ const bool is_imrope = mode == GGML_ROPE_TYPE_IMROPE; // qwen3vl apply interleaved mrope
+ const bool mrope_used =
+ mode & GGML_ROPE_TYPE_MROPE; // ggml_rope_multi, note: also true for vision (24 & 8 == true) and for imrope
+ const bool is_vision = mode == GGML_ROPE_TYPE_VISION;
+
+ if (mrope_used) {
+ GGML_ASSERT(sections[0] > 0 || sections[1] > 0 || sections[2] > 0);
+ }
+
+ if (is_vision) {
+ GGML_ASSERT(n_dims == ne0 / 2);
+ }
+
+ const float * freq_factors = NULL;
+ if (src2 != NULL) {
+ GGML_ASSERT(src2->type == GGML_TYPE_F32);
+ GGML_ASSERT(src2->ne[0] >= n_dims / 2);
+ freq_factors = (const float *) src2->data;
+ }
+
+ // backward process uses inverse rotation by cos and sin.
+ // cos and sin build a rotation matrix, where the inverse is the transpose.
+ // this essentially just switches the sign of sin.
+ const float sin_sign = forward ? 1.0f : -1.0f;
+
+ const int32_t * pos = (const int32_t *) src1->data;
+
+ for (int64_t i3 = 0; i3 < ne3; i3++) { // batch
+ for (int64_t i2 = 0; i2 < ne2; i2++) { // seq-len
+
+ float * cache = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32) * ith;
+ if (!mrope_used) {
+ const int64_t p = pos[i2];
+ ggml_rope_cache_init(p, freq_scale, freq_factors, corr_dims, ne0, ext_factor, attn_factor, cache,
+ sin_sign, theta_scale);
+ } else {
+ const int64_t p_t = pos[i2];
+ const int64_t p_h = pos[i2 + ne2];
+ const int64_t p_w = pos[i2 + ne2 * 2];
+ const int64_t p_e = pos[i2 + ne2 * 3];
+ ggml_mrope_cache_init(p_t, p_h, p_w, p_e, sections, is_imrope, is_vision, freq_scale, freq_factors,
+ corr_dims, ne0, ext_factor, attn_factor, cache, sin_sign, theta_scale);
+ }
+
+ for (int64_t i1 = 0; i1 < ne1; i1++) { // attn-heads
+ if (ir++ < ir0) {
+ continue;
+ }
+ if (ir > ir1) {
+ break;
+ }
+
+ T * src = (T *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01);
+ T * dst_data = (T *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1);
+
+ switch (mode) {
+ case GGML_ROPE_TYPE_NORMAL:
+ rotate_pairs(n_dims, 1, cache, src, dst_data, 1);
+ break;
+ case GGML_ROPE_TYPE_NEOX:
+ case GGML_ROPE_TYPE_MROPE:
+ case GGML_ROPE_TYPE_IMROPE:
+ rotate_pairs(n_dims, n_dims / 2, cache, src, dst_data);
+ break;
+ case GGML_ROPE_TYPE_VISION:
+ rotate_pairs(ne0, n_dims, cache, src, dst_data);
+ break;
+ default:
+ GGML_ABORT("rope type not supported");
+ }
+
+ if (!is_vision) {
+ // fill the remaining channels with data from the src tensor
+ for (int64_t i0 = n_dims; i0 < ne0; i0 += 2) {
+ const T * const src =
+ (T *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01 + i0 * nb00);
+ T * dst_data = (T *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1 + i0 * nb0);
+
+ dst_data[0] = src[0];
+ dst_data[1] = src[1];
+ }
+ }
+ } //attn-heads
+ }
+ }
+}
+
+void ggml_compute_forward_rope(const ggml_compute_params * params, ggml_tensor * dst) {
+ const ggml_tensor * src0 = dst->src[0];
+
+ switch (src0->type) {
+ case GGML_TYPE_F16:
+ {
+ ggml_compute_forward_rope_flt<ggml_fp16_t>(params, dst, true);
+ }
+ break;
+ case GGML_TYPE_F32:
+ {
+ ggml_compute_forward_rope_flt<float>(params, dst, true);
+ }
+ break;
+ default:
+ {
+ GGML_ABORT("fatal error");
+ }
+ }
+}
+
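// The backward pass below reuses the same kernel with forward = false, i.e. sin_sign = -1.0f:
// the 2D rotation matrix R(theta) = [[cos, -sin], [sin, cos]] is orthogonal, so its inverse is its
// transpose R(-theta), and gradients are rotated back simply by negating sin. A quick sanity sketch
// (illustration only): rotating (x0, x1) by +theta and then by -theta gives
//   x0'' = (x0*c - x1*s)*c + (x0*s + x1*c)*s = x0*(c*c + s*s) = x0
//   x1'' = -(x0*c - x1*s)*s + (x0*s + x1*c)*c = x1*(s*s + c*c) = x1
// which recovers the original pair.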
+// ggml_compute_forward_rope_back
+
+void ggml_compute_forward_rope_back(const ggml_compute_params * params, ggml_tensor * dst) {
+ const ggml_tensor * src0 = dst->src[0];
+
+ switch (src0->type) {
+ case GGML_TYPE_F16:
+ {
+ ggml_compute_forward_rope_flt<ggml_fp16_t>(params, dst, false);
+ }
+ break;
+ case GGML_TYPE_F32:
+ {
+ ggml_compute_forward_rope_flt<float>(params, dst, false);
+ }
+ break;
+ default:
+ {
+ GGML_ABORT("fatal error");
+ }
+ }
+}
+
+// ggml_compute_forward_conv_transpose_1d
+
+static void ggml_compute_forward_conv_transpose_1d_f16_f32(const ggml_compute_params * params, ggml_tensor * dst) {
+ const ggml_tensor * src0 = dst->src[0];
+ const ggml_tensor * src1 = dst->src[1];
+
+ GGML_ASSERT(src0->type == GGML_TYPE_F16);
+ GGML_ASSERT(src1->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+
+ GGML_TENSOR_BINARY_OP_LOCALS
+
+ const int ith = params->ith;
+ const int nth = params->nth;
+
+ const int nk = ne00 * ne01 * ne02;
+
+ GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
+ GGML_ASSERT(nb10 == sizeof(float));
+
+ if (ith == 0) {
+ memset(params->wdata, 0, params->wsize);
+
+ // permute kernel data (src0) from (K x Cout x Cin) to (Cin x K x Cout)
+ {
+ ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
+
+ for (int64_t i02 = 0; i02 < ne02; i02++) {
+ for (int64_t i01 = 0; i01 < ne01; i01++) {
+ const ggml_fp16_t * const src = (ggml_fp16_t *) ((char *) src0->data + i02 * nb02 + i01 * nb01);
+ ggml_fp16_t * dst_data = wdata + i01 * ne00 * ne02;
+ for (int64_t i00 = 0; i00 < ne00; i00++) {
+ dst_data[i00 * ne02 + i02] = src[i00];
+ }
+ }
+ }
+ }
+
+ // permute source data (src1) from (L x Cin) to (Cin x L)
+ {
+ ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + nk;
+ ggml_fp16_t * dst_data = wdata;
+
+ for (int64_t i11 = 0; i11 < ne11; i11++) {
+ const float * const src = (float *) ((char *) src1->data + i11 * nb11);
+ for (int64_t i10 = 0; i10 < ne10; i10++) {
+ dst_data[i10 * ne11 + i11] = GGML_CPU_FP32_TO_FP16(src[i10]);
+ }
+ }
+ }
+
+ // need to zero dst since we are accumulating into it
+ memset(dst->data, 0, ggml_nbytes(dst));
+ }
+ ggml_barrier(params->threadpool);
+
+ const int32_t s0 = ((const int32_t *) (dst->op_params))[0];
+
+ // total rows in dst
+ const int nr = ne1;
+
+ // rows per thread
+ const int dr = (nr + nth - 1) / nth;
+
+ // row range for this thread
+ const int ir0 = dr * ith;
+ const int ir1 = MIN(ir0 + dr, nr);
+
+ ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
+ ggml_fp16_t * const wdata_src = wdata + nk;
+
+ for (int i1 = ir0; i1 < ir1; i1++) {
+ float * dst_data = (float *) ((char *) dst->data + i1 * nb1);
+ ggml_fp16_t * wdata_kernel = wdata + i1 * ne02 * ne00;
+ for (int i10 = 0; i10 < ne10; i10++) {
+ const int i1n = i10 * ne11;
+ for (int i00 = 0; i00 < ne00; i00++) {
+ float v = 0;
+ ggml_vec_dot_f16(ne02, &v, 0, (ggml_fp16_t *) wdata_src + i1n, 0,
+ (ggml_fp16_t *) wdata_kernel + i00 * ne02, 0, 1);
+ dst_data[i10 * s0 + i00] += v;
+ }
+ }
+ }
+}
+
+static void ggml_compute_forward_conv_transpose_1d_f32(const ggml_compute_params * params, ggml_tensor * dst) {
+ const ggml_tensor * src0 = dst->src[0];
+ const ggml_tensor * src1 = dst->src[1];
+
+ GGML_ASSERT(src0->type == GGML_TYPE_F32);
+ GGML_ASSERT(src1->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+
+ GGML_TENSOR_BINARY_OP_LOCALS
+
+ const int ith = params->ith;
+ const int nth = params->nth;
+
+ const int nk = ne00 * ne01 * ne02;
+
+ GGML_ASSERT(nb00 == sizeof(float));
+ GGML_ASSERT(nb10 == sizeof(float));
+
+ if (ith == 0) {
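    // Thread 0 does the one-time setup below while the other threads wait at ggml_barrier():
    // the kernel is repacked from (K x Cout x Cin) to (Cin x K x Cout) and the source from
    // (L x Cin) to (Cin x L), so that the ggml_vec_dot_f32 in the main loop reduces over a
    // contiguous Cin stretch, and dst is zeroed up front because transposed convolution
    // accumulates overlapping contributions with dst_data[i10 * s0 + i00] += v.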
memset(params->wdata, 0, params->wsize); + + // prepare kernel data (src0) from (K x Cout x Cin) to (Cin x K x Cout) + { + float * const wdata = (float *) params->wdata + 0; + + for (int64_t i02 = 0; i02 < ne02; i02++) { + for (int64_t i01 = 0; i01 < ne01; i01++) { + const float * const src = (float *) ((char *) src0->data + i02 * nb02 + i01 * nb01); + float * dst_data = wdata + i01 * ne00 * ne02; + for (int64_t i00 = 0; i00 < ne00; i00++) { + dst_data[i00 * ne02 + i02] = src[i00]; + } + } + } + } + + // prepare source data (src1) + { + float * const wdata = (float *) params->wdata + nk; + float * dst_data = wdata; + + for (int64_t i11 = 0; i11 < ne11; i11++) { + const float * const src = (float *) ((char *) src1->data + i11 * nb11); + for (int64_t i10 = 0; i10 < ne10; i10++) { + dst_data[i10 * ne11 + i11] = src[i10]; + } + } + } + + // need to zero dst since we are accumulating into it + memset(dst->data, 0, ggml_nbytes(dst)); + } + ggml_barrier(params->threadpool); + + const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; + + // total rows in dst + const int nr = ne1; + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + float * const wdata = (float *) params->wdata + 0; + float * const wdata_src = wdata + nk; + + for (int i1 = ir0; i1 < ir1; i1++) { + float * dst_data = (float *) ((char *) dst->data + i1 * nb1); + float * wdata_kernel = wdata + i1 * ne02 * ne00; + for (int i10 = 0; i10 < ne10; i10++) { + const int i1n = i10 * ne11; + for (int i00 = 0; i00 < ne00; i00++) { + float v = 0; + ggml_vec_dot_f32(ne02, &v, 0, wdata_src + i1n, 0, wdata_kernel + i00 * ne02, 0, 1); + dst_data[i10 * s0 + i00] += v; + } + } + } +} + +void ggml_compute_forward_conv_transpose_1d(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F16: + { + ggml_compute_forward_conv_transpose_1d_f16_f32(params, dst); + } + break; + case GGML_TYPE_F32: + { + ggml_compute_forward_conv_transpose_1d_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_im2col_f32 +// src0: kernel [OC, IC, KH, KW] +// src1: image [N, IC, IH, IW] +// dst: result [N, OH, OW, IC*KH*KW] +static void ggml_compute_forward_im2col_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_F32); + + GGML_TENSOR_BINARY_OP_LOCALS; + + const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; + const int32_t s1 = ((const int32_t *) (dst->op_params))[1]; + const int32_t p0 = ((const int32_t *) (dst->op_params))[2]; + const int32_t p1 = ((const int32_t *) (dst->op_params))[3]; + const int32_t d0 = ((const int32_t *) (dst->op_params))[4]; + const int32_t d1 = ((const int32_t *) (dst->op_params))[5]; + const bool is_2D = ((const int32_t *) (dst->op_params))[6] == 1; + + const int ith = params->ith; + const int nth = params->nth; + + const int64_t N = is_2D ? ne13 : ne12; + const int64_t IC = is_2D ? ne12 : ne11; + const int64_t IH = is_2D ? ne11 : 1; + const int64_t IW = ne10; + + const int64_t KH = is_2D ? ne01 : 1; + const int64_t KW = ne00; + + const int64_t OH = is_2D ? ne2 : 1; + const int64_t OW = ne1; + + int ofs0 = is_2D ? nb13 : nb12; + int ofs1 = is_2D ? 
nb12 : nb11; + + GGML_ASSERT(nb10 == sizeof(float)); + + // im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW] + { + float * const wdata = (float *) dst->data; + + for (int64_t in = 0; in < N; in++) { + for (int64_t ioh = 0; ioh < OH; ioh++) { // 1 + for (int64_t iow = 0; iow < OW; iow++) { + for (int64_t iic = ith; iic < IC; iic += nth) { + // micro kernel + float * dst_data = wdata + (in * OH * OW + ioh * OW + iow) * (IC * KH * KW); // [IC, KH, KW] + const float * const src_data = + (float *) ((char *) src1->data + in * ofs0 + iic * ofs1); // [IH, IW] + + for (int64_t ikh = 0; ikh < KH; ikh++) { // 1 + for (int64_t ikw = 0; ikw < KW; ikw++) { + const int64_t iiw = iow * s0 + ikw * d0 - p0; + const int64_t iih = ioh * s1 + ikh * d1 - p1; + + if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) { + dst_data[iic * (KH * KW) + ikh * KW + ikw] = 0; + } else { + dst_data[iic * (KH * KW) + ikh * KW + ikw] = (src_data[iih * IW + iiw]); + } + } + } + } + } + } + } + } +} + +// ggml_compute_forward_im2col_f16 +// src0: kernel [OC, IC, KH, KW] +// src1: image [N, IC, IH, IW] +// dst: result [N, OH, OW, IC*KH*KW] +static void ggml_compute_forward_im2col_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(src0->type == GGML_TYPE_F16); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_F16); + + GGML_TENSOR_BINARY_OP_LOCALS; + + const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; + const int32_t s1 = ((const int32_t *) (dst->op_params))[1]; + const int32_t p0 = ((const int32_t *) (dst->op_params))[2]; + const int32_t p1 = ((const int32_t *) (dst->op_params))[3]; + const int32_t d0 = ((const int32_t *) (dst->op_params))[4]; + const int32_t d1 = ((const int32_t *) (dst->op_params))[5]; + const bool is_2D = ((const int32_t *) (dst->op_params))[6] == 1; + + const int ith = params->ith; + const int nth = params->nth; + + const int64_t N = is_2D ? ne13 : ne12; + const int64_t IC = is_2D ? ne12 : ne11; + const int64_t IH = is_2D ? ne11 : 1; + const int64_t IW = ne10; + + const int64_t KH = is_2D ? ne01 : 1; + const int64_t KW = ne00; + + const int64_t OH = is_2D ? ne2 : 1; + const int64_t OW = ne1; + + int ofs0 = is_2D ? nb13 : nb12; + int ofs1 = is_2D ? 
nb12 : nb11; + + GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); + GGML_ASSERT(nb10 == sizeof(float)); + + // im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW] + { + ggml_fp16_t * const wdata = (ggml_fp16_t *) dst->data; + + for (int64_t in = 0; in < N; in++) { + for (int64_t ioh = 0; ioh < OH; ioh++) { // 1 + for (int64_t iow = 0; iow < OW; iow++) { + for (int64_t iic = ith; iic < IC; iic += nth) { + // micro kernel + ggml_fp16_t * dst_data = + wdata + (in * OH * OW + ioh * OW + iow) * (IC * KH * KW); // [IC, KH, KW] + const float * const src_data = + (float *) ((char *) src1->data + in * ofs0 + iic * ofs1); // [IH, IW] + + for (int64_t ikh = 0; ikh < KH; ikh++) { // 1 + for (int64_t ikw = 0; ikw < KW; ikw++) { + const int64_t iiw = iow * s0 + ikw * d0 - p0; + const int64_t iih = ioh * s1 + ikh * d1 - p1; + + if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) { + dst_data[iic * (KH * KW) + ikh * KW + ikw] = 0; + } else { + dst_data[iic * (KH * KW) + ikh * KW + ikw] = + GGML_CPU_FP32_TO_FP16(src_data[iih * IW + iiw]); + } + } + } + } + } + } + } + } +} + +void ggml_compute_forward_im2col(const ggml_compute_params * params, ggml_tensor * dst) { + switch (dst->type) { + case GGML_TYPE_F16: + { + ggml_compute_forward_im2col_f16(params, dst); + } + break; + case GGML_TYPE_F32: + { + ggml_compute_forward_im2col_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_im2col_back_f32 + +void ggml_compute_forward_im2col_back_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; // gradients of forward pass output + const ggml_tensor * src1 = dst->src[1]; // convolution kernel + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_F32); + + GGML_TENSOR_BINARY_OP_LOCALS; + + const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; + const int32_t s1 = ((const int32_t *) (dst->op_params))[1]; + const int32_t p0 = ((const int32_t *) (dst->op_params))[2]; + const int32_t p1 = ((const int32_t *) (dst->op_params))[3]; + const int32_t d0 = ((const int32_t *) (dst->op_params))[4]; + const int32_t d1 = ((const int32_t *) (dst->op_params))[5]; + const bool is_2D = ((const int32_t *) (dst->op_params))[6] == 1; + + const int ith = params->ith; + const int nth = params->nth; + + const int64_t N = is_2D ? ne3 : ne2; + const int64_t IC = is_2D ? ne2 : ne1; + const int64_t IH = is_2D ? ne1 : 1; + const int64_t IW = ne0; + + const int64_t KH = is_2D ? ne11 : 1; + const int64_t KW = ne10; + + const int64_t OH = is_2D ? ne02 : 1; + const int64_t OW = ne01; + + int ofs0 = is_2D ? nb3 : nb2; + int ofs1 = is_2D ? nb2 : nb1; + + GGML_ASSERT(nb0 == sizeof(float)); + + // im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW] + { + float * const wdata = (float *) dst->data; + + for (int64_t in = 0; in < N; in++) { + for (int64_t iic = ith; iic < IC; iic += nth) { + for (int64_t iih = 0; iih < IH; iih++) { + for (int64_t iiw = 0; iiw < IW; iiw++) { + // micro kernel + float grad = 0.0f; + for (int64_t ikh = 0; ikh < KH; ikh++) { + for (int64_t ikw = 0; ikw < KW; ikw++) { + // For s0 > 1 some values were skipped over in the forward pass. + // These values have tmpw % s0 != 0 and need to be skipped in the backwards pass as well. + const int64_t tmpw = (iiw + p0 - ikw * d0); + if (tmpw % s0 != 0) { + continue; + } + const int64_t iow = tmpw / s0; + + // Equivalent logic as above except for s1. 
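    // In the forward im2col above, column iiw was read as
    //   iiw = iow * s0 + ikw * d0 - p0
    // so here the mapping is inverted per kernel tap: iow = (iiw + p0 - ikw * d0) / s0, and taps whose
    // tmpw is not a multiple of s0 never sampled this input column, hence the `continue`.
    // E.g. with s0 = 2, p0 = 0, d0 = 1 and ikw = 0, input column iiw = 3 gives tmpw = 3 and 3 % 2 != 0,
    // so no output position read column 3 through that tap and the loop moves on to the next ikw.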
+ int64_t ioh; + if (is_2D) { + const int64_t tmph = iih + p1 - ikh * d1; + + if (tmph % s1 != 0) { + continue; + } + + ioh = tmph / s1; + } else { + ioh = 0; + } + + if (iow < 0 || iow >= OW || ioh < 0 || ioh >= OH) { + continue; + } + + const float * const grad_in = + (const float *) src0->data + + (in * OH * OW + ioh * OW + iow) * (IC * KH * KW); // [IC, KH, KW] + grad += grad_in[iic * (KH * KW) + ikh * KW + ikw]; + } + } + float * dst_data = (float *) ((char *) wdata + (in * ofs0 + iic * ofs1)); // [IH, IW] + dst_data[iih * IW + iiw] = grad; + } + } + } + } + } +} + +// ggml_compute_forward_im2col_3d_f16 +// src0: kernel [OC*IC, KD, KH, KW] +// src1: image [N*IC, ID, IH, IW] +// dst: result [N*OD, OH, OW, IC * KD * KH * KW] +static void ggml_compute_forward_im2col_3d_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(src0->type == GGML_TYPE_F16); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_F16); + + GGML_TENSOR_BINARY_OP_LOCALS; + + const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; + const int32_t s1 = ((const int32_t *) (dst->op_params))[1]; + const int32_t s2 = ((const int32_t *) (dst->op_params))[2]; + const int32_t p0 = ((const int32_t *) (dst->op_params))[3]; + const int32_t p1 = ((const int32_t *) (dst->op_params))[4]; + const int32_t p2 = ((const int32_t *) (dst->op_params))[5]; + const int32_t d0 = ((const int32_t *) (dst->op_params))[6]; + const int32_t d1 = ((const int32_t *) (dst->op_params))[7]; + const int32_t d2 = ((const int32_t *) (dst->op_params))[8]; + const int32_t IC = ((const int32_t *) (dst->op_params))[9]; + + const int ith = params->ith; + const int nth = params->nth; + + const int64_t N = ne13 / IC; + const int64_t ID = ne12; + const int64_t IH = ne11; + const int64_t IW = ne10; + + const int64_t OC = ne03 / IC; + GGML_UNUSED(OC); + const int64_t KD = ne02; + const int64_t KH = ne01; + const int64_t KW = ne00; + + const int64_t OD = ne3 / N; + const int64_t OH = ne2; + const int64_t OW = ne1; + const int64_t OH_OW = OH * OW; + const int64_t KD_KH_KW = KD * KH * KW; + const int64_t KH_KW = KH * KW; + const int64_t IC_KD_KH_KW = IC * KD * KH * KW; + + GGML_ASSERT(nb10 == sizeof(float)); + + // im2col: [N*IC, ID, IH, IW] => [N*OD, OH, OW, IC * KD * KH * KW] + { + ggml_fp16_t * const wdata = (ggml_fp16_t *) dst->data; + + for (int64_t in = 0; in < N; in++) { + for (int64_t iod = 0; iod < OD; iod++) { + for (int64_t ioh = 0; ioh < OH; ioh++) { + for (int64_t iow = 0; iow < OW; iow++) { + for (int64_t iic = ith; iic < IC; iic += nth) { + // micro kernel + ggml_fp16_t * dst_data = wdata + (in * OD * OH_OW + iod * OH_OW + ioh * OW + iow) * + IC_KD_KH_KW; // [IC, KD, KH, KW] + const float * const src_data = + (const float *) ((const char *) src1->data + (in * IC + iic) * nb13); // [ID, IH, IW] + + for (int64_t ikd = 0; ikd < KD; ikd++) { + for (int64_t ikh = 0; ikh < KH; ikh++) { + for (int64_t ikw = 0; ikw < KW; ikw++) { + const int64_t iiw = iow * s0 + ikw * d0 - p0; + const int64_t iih = ioh * s1 + ikh * d1 - p1; + const int64_t iid = iod * s2 + ikd * d2 - p2; + + if (iid < 0 || iid >= ID || iih < 0 || iih >= IH || iiw < 0 || iiw >= IW || + iid < 0 || iid >= ID) { + dst_data[iic * KD_KH_KW + ikd * KH_KW + ikh * KW + ikw] = 0; + } else { + const float * const s = + (const float *) ((const char *) src_data + iid * nb12 + iih * nb11 + + iiw * nb10); // [ID, IH, IW] + dst_data[iic * KD_KH_KW + ikd * KH_KW + ikh * KW 
+ ikw] = + GGML_CPU_FP32_TO_FP16(*s); + } + } + } + } + } + } + } + } + } + } +} + +// ggml_compute_forward_im2col_3d_f32 +// src0: kernel [OC*IC, KD, KH, KW] +// src1: image [N*IC, ID, IH, IW] +// dst: result [N*OD, OH, OW, IC * KD * KH * KW] +static void ggml_compute_forward_im2col_3d_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_F32); + + GGML_TENSOR_BINARY_OP_LOCALS; + + const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; + const int32_t s1 = ((const int32_t *) (dst->op_params))[1]; + const int32_t s2 = ((const int32_t *) (dst->op_params))[2]; + const int32_t p0 = ((const int32_t *) (dst->op_params))[3]; + const int32_t p1 = ((const int32_t *) (dst->op_params))[4]; + const int32_t p2 = ((const int32_t *) (dst->op_params))[5]; + const int32_t d0 = ((const int32_t *) (dst->op_params))[6]; + const int32_t d1 = ((const int32_t *) (dst->op_params))[7]; + const int32_t d2 = ((const int32_t *) (dst->op_params))[8]; + const int32_t IC = ((const int32_t *) (dst->op_params))[9]; + + const int ith = params->ith; + const int nth = params->nth; + + const int64_t N = ne13 / IC; + const int64_t ID = ne12; + const int64_t IH = ne11; + const int64_t IW = ne10; + + const int64_t OC = ne03 / IC; + GGML_UNUSED(OC); + const int64_t KD = ne02; + const int64_t KH = ne01; + const int64_t KW = ne00; + + const int64_t OD = ne3 / N; + const int64_t OH = ne2; + const int64_t OW = ne1; + + const int64_t OH_OW = OH * OW; + const int64_t KD_KH_KW = KD * KH * KW; + const int64_t KH_KW = KH * KW; + const int64_t IC_KD_KH_KW = IC * KD * KH * KW; + + GGML_ASSERT(nb10 == sizeof(float)); + + // im2col: [N*IC, ID, IH, IW] => [N*OD, OH, OW, IC * KD * KH * KW] + { + float * const wdata = (float *) dst->data; + + for (int64_t in = 0; in < N; in++) { + for (int64_t iod = 0; iod < OD; iod++) { + for (int64_t ioh = 0; ioh < OH; ioh++) { + for (int64_t iow = 0; iow < OW; iow++) { + for (int64_t iic = ith; iic < IC; iic += nth) { + // micro kernel + float * dst_data = wdata + (in * OD * OH_OW + iod * OH_OW + ioh * OW + iow) * + IC_KD_KH_KW; // [IC, KD, KH, KW] + const float * const src_data = + (const float *) ((const char *) src1->data + (in * IC + iic) * nb13); // [ID, IH, IW] + + for (int64_t ikd = 0; ikd < KD; ikd++) { + for (int64_t ikh = 0; ikh < KH; ikh++) { + for (int64_t ikw = 0; ikw < KW; ikw++) { + const int64_t iiw = iow * s0 + ikw * d0 - p0; + const int64_t iih = ioh * s1 + ikh * d1 - p1; + const int64_t iid = iod * s2 + ikd * d2 - p2; + + if (iid < 0 || iid >= ID || iih < 0 || iih >= IH || iiw < 0 || iiw >= IW || + iid < 0 || iid >= ID) { + dst_data[iic * KD_KH_KW + ikd * KH_KW + ikh * KW + ikw] = 0; + } else { + const float * const s = + (const float *) ((const char *) src_data + iid * nb12 + iih * nb11 + + iiw * nb10); // [ID, IH, IW] + dst_data[iic * KD_KH_KW + ikd * KH_KW + ikh * KW + ikw] = *s; + } + } + } + } + } + } + } + } + } + } +} + +void ggml_compute_forward_im2col_3d(const ggml_compute_params * params, ggml_tensor * dst) { + switch (dst->type) { + case GGML_TYPE_F16: + { + ggml_compute_forward_im2col_3d_f16(params, dst); + } + break; + case GGML_TYPE_F32: + { + ggml_compute_forward_im2col_3d_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +static void ggml_call_mul_mat(ggml_type type, + const ggml_compute_params * params, + int64_t m, + int64_t n, + int64_t k, + void * a, + void 
* b, + float * c) { + const ggml_type_traits * traits = ggml_get_type_traits(type); + struct ggml_tensor src1 = {}; + src1.type = type; + src1.ne[0] = k; + src1.ne[1] = m; + src1.ne[2] = 1; + src1.ne[3] = 1; + src1.nb[0] = traits->type_size; + src1.nb[1] = k * traits->type_size; + src1.nb[2] = src1.nb[1]; + src1.nb[3] = src1.nb[2]; + src1.data = a; + + struct ggml_tensor src0 = {}; + src0.type = type; + src0.ne[0] = k; + src0.ne[1] = n; + src0.ne[2] = 1; + src0.ne[3] = 1; + src0.nb[0] = traits->type_size; + src0.nb[1] = k * traits->type_size; + src0.nb[2] = src0.nb[1]; + src0.nb[3] = src0.nb[2]; + src0.data = b; + + struct ggml_tensor dst = {}; + dst.ne[0] = n; + dst.ne[1] = m; + dst.ne[2] = 1; + dst.ne[3] = 1; + dst.nb[0] = sizeof(float); + dst.nb[1] = n * sizeof(float); + dst.nb[2] = dst.nb[1]; + dst.nb[3] = dst.nb[2]; + dst.data = c; + dst.src[0] = &src0; + dst.src[1] = &src1; + + ggml_compute_forward_mul_mat(params, &dst); +} + +static inline int64_t ggml_wrap_around(int64_t coord, int64_t size) { + return (coord + size) % size; // adding size avoids negative number weirdness +} + +// ggml_compute_forward_conv_2d + +static void ggml_compute_forward_conv_2d_impl(const ggml_compute_params * params, + const ggml_tensor * kernel, // [KW, KH, IC, OC] + const ggml_tensor * src, // [W, H, C, N] + ggml_tensor * dst, // [OW, OH, OC, N] + ggml_type kernel_type) { + GGML_ASSERT(ggml_is_contiguous(kernel)); + GGML_ASSERT(kernel_type == GGML_TYPE_F16 || kernel_type == GGML_TYPE_F32); + GGML_ASSERT(kernel->type == kernel_type); + + const ggml_type_traits * traits = ggml_get_type_traits(kernel_type); + + const int32_t stride_x = dst->op_params[0]; + const int32_t stride_y = dst->op_params[1]; + const int32_t pad_x = dst->op_params[2]; + const int32_t pad_y = dst->op_params[3]; + const int32_t dilation_x = dst->op_params[4]; + const int32_t dilation_y = dst->op_params[5]; + + const int64_t c_in = src->ne[2]; + const int64_t c_out = kernel->ne[3]; + GGML_ASSERT(c_in == kernel->ne[2]); + + const int64_t src_w = src->ne[0]; + const int64_t src_h = src->ne[1]; + const int64_t knl_w = kernel->ne[0]; + const int64_t knl_h = kernel->ne[1]; + const int64_t dst_w = dst->ne[0]; + const int64_t dst_h = dst->ne[1]; + + const float * src_data = (float *) src->data; + void * knl_data = kernel->data; + float * dst_data = (float *) dst->data; + + const int64_t knl_n = knl_w * knl_h * c_in; + const int64_t patch_total = dst->ne[3] * dst_w * dst_h; + + const int64_t space_per_patch = knl_n * traits->type_size + c_out * sizeof(float); + const int64_t batch_size = params->wsize / space_per_patch; + const int64_t patches_per_batch = batch_size > 8 ? 
(batch_size / 8) * 8 : batch_size; + const int64_t batch_n = (patch_total + patches_per_batch - 1) / patches_per_batch; + + GGML_ASSERT(patches_per_batch > 0 && batch_size >= 1); + + void * tmp = params->wdata; + + for (int64_t batch_i = 0; batch_i < batch_n; ++batch_i) { + const int64_t patch_start_batch = batch_i * patches_per_batch; + const int64_t patch_end_batch = std::min(patch_start_batch + patches_per_batch, patch_total); + const int64_t patch_n = patch_end_batch - patch_start_batch; + + const int64_t patch_per_thread = (patch_n + params->nth - 1) / params->nth; + const int64_t patch_start = patch_start_batch + params->ith * patch_per_thread; + const int64_t patch_end = std::min(patch_start + patch_per_thread, patch_end_batch); + + //im2col for a patch + for (int64_t p = patch_start; p < patch_end; ++p) { + const int64_t batch_n = p / (dst_w * dst_h); + const int64_t src_x = (p / dst_w) % dst_h; + const int64_t src_y = p % dst_w; + + const float * src_base = (const float *) ((const char *) src_data + batch_n * src->nb[3]); + char * dst_row = (char *) tmp + (p % patches_per_batch) * knl_n * traits->type_size; + + for (int64_t ic = 0; ic < c_in; ++ic) { + for (int64_t ky = 0; ky < knl_h; ++ky) { + for (int64_t kx = 0; kx < knl_w; ++kx) { + const int64_t sy = src_x * stride_y + ky * dilation_y - pad_y; + const int64_t sx = src_y * stride_x + kx * dilation_x - pad_x; + + int64_t dst_idx = ic * (knl_h * knl_w) + ky * knl_w + kx; + + float src_val; + if (sy < 0 || sy >= src_h || sx < 0 || sx >= src_w) { + src_val = 0.0f; + } else { + const float * src_ptr = (const float *) ((const char *) src_base + sx * src->nb[0] + + sy * src->nb[1] + ic * src->nb[2]); + src_val = *src_ptr; + } + + char * element_ptr = dst_row + dst_idx * traits->type_size; + if (kernel_type == GGML_TYPE_F32) { + *(float *) element_ptr = src_val; + } else if (kernel_type == GGML_TYPE_F16) { + *(ggml_fp16_t *) element_ptr = GGML_CPU_FP32_TO_FP16(src_val); + } + } + } + } + } // patches handled by this thread + + ggml_barrier(params->threadpool); + + float * gemm_output = (float *) ((char *) tmp + patches_per_batch * knl_n * traits->type_size); + + GGML_ASSERT(gemm_output + patch_n * c_out <= (float *) tmp + params->wsize); + + // GEMM: patches[patch_n, knl_n] × kernel[knl_n, c_out] = output[patch_n, c_out] + ggml_call_mul_mat(kernel_type, params, patch_n, c_out, knl_n, tmp, knl_data, gemm_output); + + ggml_barrier(params->threadpool); + + //permute back [OC, N, OH, OW] to [N, OC, OH, OW] + const int64_t permute_per_thread = (patch_n + params->nth - 1) / params->nth; + const int64_t permute_start = params->ith * permute_per_thread; + const int64_t permute_end = std::min(permute_start + permute_per_thread, patch_n); + + for (int64_t i = permute_start; i < permute_end; ++i) { + const int64_t p = patch_start_batch + i; + const int64_t batch_n = p / (dst_w * dst_h); + const int64_t dst_y = (p / dst_w) % dst_h; + const int64_t dst_x = p % dst_w; + + for (int64_t oc = 0; oc < c_out; ++oc) { + const float value = gemm_output[i * c_out + oc]; + float * dst_ptr = (float *) ((char *) dst_data + dst_x * dst->nb[0] + dst_y * dst->nb[1] + + oc * dst->nb[2] + batch_n * dst->nb[3]); + *dst_ptr = value; + } + } + } +} + +void ggml_compute_forward_conv_2d(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + ggml_compute_forward_conv_2d_impl(params, src0, src1, dst, src0->type); +} + +// ggml_compute_forward_conv_3d + +static void 
ggml_compute_forward_conv_3d_impl(const ggml_compute_params * params, + const ggml_tensor * kernel, + const ggml_tensor * src, + ggml_tensor * dst, + ggml_type kernel_type) { + GGML_ASSERT(ggml_is_contiguous(kernel)); + GGML_ASSERT(kernel_type == GGML_TYPE_F16 || kernel_type == GGML_TYPE_F32); + GGML_ASSERT(kernel->type == kernel_type); + + const ggml_type_traits * traits = ggml_get_type_traits(kernel_type); + + const int32_t s0 = dst->op_params[0]; + const int32_t s1 = dst->op_params[1]; + const int32_t s2 = dst->op_params[2]; + const int32_t p0 = dst->op_params[3]; + const int32_t p1 = dst->op_params[4]; + const int32_t p2 = dst->op_params[5]; + const int32_t d0 = dst->op_params[6]; + const int32_t d1 = dst->op_params[7]; + const int32_t d2 = dst->op_params[8]; + const int32_t c = dst->op_params[9]; + const int32_t n = dst->op_params[10]; + const int32_t oc = dst->op_params[11]; + + const int64_t src_w = src->ne[0]; + const int64_t src_h = src->ne[1]; + const int64_t src_d = src->ne[2]; + const int64_t knl_w = kernel->ne[0]; + const int64_t knl_h = kernel->ne[1]; + const int64_t knl_d = kernel->ne[2]; + const int64_t dst_w = dst->ne[0]; + const int64_t dst_h = dst->ne[1]; + const int64_t dst_d = dst->ne[2]; + + const float * src_data = (float *) src->data; + void * knl_data = kernel->data; + float * dst_data = (float *) dst->data; + + const int64_t knl_n_per_channel = knl_w * knl_h * knl_d; + const int64_t knl_n_total = knl_n_per_channel * c; + const int64_t patch_total = n * dst_w * dst_h * dst_d; + + const int64_t space_per_patch = knl_n_total * traits->type_size + oc * sizeof(float); + const int64_t batch_size = params->wsize / space_per_patch; + const int64_t patches_per_batch = batch_size > 8 ? (batch_size / 8) * 8 : batch_size; + const int64_t batch_n = (patch_total + patches_per_batch - 1) / patches_per_batch; + + GGML_ASSERT(patches_per_batch > 0 && batch_size >= 1); + + void * tmp = params->wdata; + + for (int64_t batch_i = 0; batch_i < batch_n; ++batch_i) { + const int64_t patch_start_batch = batch_i * patches_per_batch; + const int64_t patch_end_batch = std::min(patch_start_batch + patches_per_batch, patch_total); + const int64_t patch_n_in_batch = patch_end_batch - patch_start_batch; + + const int64_t patch_per_thread = (patch_n_in_batch + params->nth - 1) / params->nth; + const int64_t patch_start = patch_start_batch + params->ith * patch_per_thread; + const int64_t patch_end = std::min(patch_start + patch_per_thread, patch_end_batch); + + for (int64_t p = patch_start; p < patch_end; ++p) { + const int64_t p_in_batch = p % (dst_w * dst_h * dst_d); + const int64_t p_in_depth = p_in_batch % (dst_w * dst_h); + const int64_t batch_idx = p / (dst_w * dst_h * dst_d); + const int64_t dst_z = p_in_batch / (dst_w * dst_h); + const int64_t dst_y = p_in_depth / dst_w; + const int64_t dst_x = p_in_depth % dst_w; + + char * dst_row = (char *) tmp + (p % patches_per_batch) * knl_n_total * traits->type_size; + + for (int64_t ic = 0; ic < c; ++ic) { + for (int64_t kz = 0; kz < knl_d; ++kz) { + for (int64_t ky = 0; ky < knl_h; ++ky) { + for (int64_t kx = 0; kx < knl_w; ++kx) { + const int64_t sz = dst_z * s2 + kz * d2 - p2; + const int64_t sy = dst_y * s1 + ky * d1 - p1; + const int64_t sx = dst_x * s0 + kx * d0 - p0; + + int64_t dst_idx = ic * knl_n_per_channel + kz * (knl_h * knl_w) + ky * knl_w + kx; + + float src_val; + if (sz < 0 || sz >= src_d || sy < 0 || sy >= src_h || sx < 0 || sx >= src_w) { + src_val = 0.0f; + } else { + const int64_t cn_idx = batch_idx * c + ic; + const float * 
src_ptr = + (const float *) ((const char *) src_data + sx * src->nb[0] + sy * src->nb[1] + + sz * src->nb[2] + cn_idx * src->nb[3]); + src_val = *src_ptr; + } + + char * element_ptr = dst_row + dst_idx * traits->type_size; + if (kernel_type == GGML_TYPE_F32) { + *(float *) element_ptr = src_val; + } else if (kernel_type == GGML_TYPE_F16) { + *(ggml_fp16_t *) element_ptr = GGML_CPU_FP32_TO_FP16(src_val); + } + } + } + } + } + } + + ggml_barrier(params->threadpool); + + float * gemm_output = (float *) ((char *) tmp + patches_per_batch * knl_n_total * traits->type_size); + ggml_call_mul_mat(kernel_type, params, patch_n_in_batch, oc, knl_n_total, tmp, knl_data, gemm_output); + + ggml_barrier(params->threadpool); + + const int64_t permute_per_thread = (patch_n_in_batch + params->nth - 1) / params->nth; + const int64_t permute_start = params->ith * permute_per_thread; + const int64_t permute_end = std::min(permute_start + permute_per_thread, patch_n_in_batch); + + for (int64_t i = permute_start; i < permute_end; ++i) { + const int64_t p = patch_start_batch + i; + const int64_t p_in_batch = p % (dst_w * dst_h * dst_d); + const int64_t p_in_depth = p_in_batch % (dst_w * dst_h); + const int64_t batch_idx = p / (dst_w * dst_h * dst_d); + const int64_t dst_z = p_in_batch / (dst_w * dst_h); + const int64_t dst_y = p_in_depth / dst_w; + const int64_t dst_x = p_in_depth % dst_w; + + for (int64_t ioc = 0; ioc < oc; ++ioc) { + const float value = gemm_output[i * oc + ioc]; + const int64_t ocn_idx = batch_idx * oc + ioc; + float * dst_ptr = (float *) ((char *) dst_data + dst_x * dst->nb[0] + dst_y * dst->nb[1] + + dst_z * dst->nb[2] + ocn_idx * dst->nb[3]); + *dst_ptr = value; + } + } + } +} + +void ggml_compute_forward_conv_3d(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + ggml_compute_forward_conv_3d_impl(params, src0, src1, dst, src0->type); +} + +// ggml_compute_forward_conv_transpose_2d + +void ggml_compute_forward_conv_transpose_2d(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(src0->type == GGML_TYPE_F16); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_F32); + + GGML_TENSOR_BINARY_OP_LOCALS + + const int ith = params->ith; + const int nth = params->nth; + + const int nk = ne00 * ne01 * ne02 * ne03; + + GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); + GGML_ASSERT(nb10 == sizeof(float)); + + if (ith == 0) { + memset(params->wdata, 0, params->wsize); + + // permute kernel data (src0) from (Kw x Kh x Cout x Cin) to (Cin x Kw x Kh x Cout) + { + ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; + + for (int64_t i03 = 0; i03 < ne03; i03++) { + for (int64_t i02 = 0; i02 < ne02; i02++) { + const ggml_fp16_t * const src = (ggml_fp16_t *) ((char *) src0->data + i03 * nb03 + i02 * nb02); + ggml_fp16_t * dst_data = wdata + i02 * ne01 * ne00 * ne03; + for (int64_t i01 = 0; i01 < ne01; i01++) { + for (int64_t i00 = 0; i00 < ne00; i00++) { + dst_data[i01 * ne00 * ne03 + i00 * ne03 + i03] = src[i01 * ne00 + i00]; + } + } + } + } + } + + // permute source data (src1) from (Sw x Sh x Cin) to (Cin x Sw x Sh) + { + ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + nk; + for (int i12 = 0; i12 < ne12; i12++) { + for (int i11 = 0; i11 < ne11; i11++) { + const float * const src = (float *) ((char *) src1->data + i12 * nb12 + i11 * nb11); + ggml_fp16_t * dst_data = wdata 
+ i11 * ne10 * ne12; + for (int i10 = 0; i10 < ne10; i10++) { + dst_data[i10 * ne12 + i12] = GGML_CPU_FP32_TO_FP16(src[i10]); + } + } + } + } + + memset(dst->data, 0, ggml_nbytes(dst)); + } + ggml_barrier(params->threadpool); + + const int32_t stride = ggml_get_op_params_i32(dst, 0); + + // total patches in dst + const int np = ne2; + + // patches per thread + const int dp = (np + nth - 1) / nth; + + // patch range for this thread + const int ip0 = dp * ith; + const int ip1 = MIN(ip0 + dp, np); + + ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; + ggml_fp16_t * const wdata_src = wdata + nk; + + for (int i2 = ip0; i2 < ip1; i2++) { // Cout + float * dst_data = (float *) ((char *) dst->data + i2 * nb2); + ggml_fp16_t * wdata_kernel = wdata + i2 * ne01 * ne00 * ne03; + for (int i11 = 0; i11 < ne11; i11++) { + for (int i10 = 0; i10 < ne10; i10++) { + const int i1n = i11 * ne10 * ne12 + i10 * ne12; + for (int i01 = 0; i01 < ne01; i01++) { + for (int i00 = 0; i00 < ne00; i00++) { + float v = 0; + ggml_vec_dot_f16(ne03, &v, 0, wdata_src + i1n, 0, wdata_kernel + i01 * ne00 * ne03 + i00 * ne03, + 0, 1); + dst_data[(i11 * stride + i01) * ne0 + i10 * stride + i00] += v; + } + } + } + } + } +} + +// ggml_compute_forward_conv_2d_dw + +struct ggml_conv_2d_dw_params { + int64_t channels; + int64_t batch; + int64_t src_w; + int64_t src_h; + int64_t dst_w; + int64_t dst_h; + int64_t knl_w; + int64_t knl_h; + int stride_x; + int stride_y; + int pad_x; + int pad_y; + int dilation_x; + int dilation_y; +}; + +static void ggml_compute_forward_conv_2d_dw_cwhn(const ggml_compute_params * params, + const ggml_tensor * src, + const ggml_tensor * kernel, + ggml_tensor * dst, + const ggml_conv_2d_dw_params & p) { + const int64_t c = p.channels; + const float * knl_data = (const float *) kernel->data; + + const int64_t rows_total = p.dst_h * p.batch; + const int64_t rows_per_thread = (rows_total + params->nth - 1) / params->nth; + const int64_t row_start = params->ith * rows_per_thread; + const int64_t row_end = MIN(row_start + rows_per_thread, rows_total); + +#ifdef GGML_SIMD +# if defined(__ARM_FEATURE_SVE) + const int64_t pkg_size = svcntw(); +# else + const int64_t pkg_size = GGML_F32_EPR; +# endif + const int64_t pkg_count = c / pkg_size; + const int64_t c_pkg_end = pkg_count * pkg_size; +#else + const int64_t c_pkg_end = 0; +#endif + + for (int64_t row = row_start; row < row_end; ++row) { + const int64_t dst_y = row % p.dst_h; + const float * src_data = (const float *) src->data + (row / p.dst_h) * p.src_w * p.src_h * c; + for (int64_t dst_x = 0; dst_x < p.dst_w; ++dst_x) { + float * dst_data = (float *) dst->data + (row * p.dst_w + dst_x) * c; + const int64_t src_y_base = dst_y * p.stride_y - p.pad_y; + const int64_t src_x_base = dst_x * p.stride_x - p.pad_x; + +#ifdef GGML_SIMD + // Vectorized loop + for (int64_t c_i = 0; c_i < c_pkg_end; c_i += pkg_size) { + GGML_F32_VEC sum = GGML_F32_VEC_ZERO; + for (int64_t knl_y = 0; knl_y < p.knl_h; ++knl_y) { + const int64_t src_y = src_y_base + knl_y * p.dilation_y; + if (src_y < 0 || src_y >= p.src_h) { + continue; + } + for (int64_t knl_x = 0; knl_x < p.knl_w; ++knl_x) { + const int64_t src_x = src_x_base + knl_x * p.dilation_x; + if (src_x < 0 || src_x >= p.src_w) { + continue; + } + GGML_F32_VEC k = GGML_F32_VEC_LOAD(knl_data + (knl_y * p.knl_w + knl_x) * c + c_i); + GGML_F32_VEC s = GGML_F32_VEC_LOAD(src_data + (src_y * p.src_w + src_x) * c + c_i); + sum = GGML_F32_VEC_FMA(sum, k, s); + } + } + GGML_F32_VEC_STORE(dst_data + c_i, sum); + } +#endif + 
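+            // In the CWHN layout the channel index is innermost and contiguous, so each SIMD
+            // vector above covers pkg_size neighbouring channels of the same (x, y) position,
+            // and out-of-bounds taps are simply skipped, which is equivalent to zero padding.
+            // Rough illustration (assuming GGML_F32_EPR == 8): with c == 19, pkg_count == 2 and
+            // c_pkg_end == 16, so channels 0..15 take the vector path and the scalar tail below
+            // handles channels 16..18.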
// Scalar loop + for (int64_t c_i = c_pkg_end; c_i < c; ++c_i) { + float sum = 0.0f; + for (int64_t knl_y = 0; knl_y < p.knl_h; ++knl_y) { + const int64_t src_y = src_y_base + knl_y * p.dilation_y; + if (src_y < 0 || src_y >= p.src_h) { + continue; + } + for (int64_t knl_x = 0; knl_x < p.knl_w; ++knl_x) { + const int64_t src_x = src_x_base + knl_x * p.dilation_x; + if (src_x < 0 || src_x >= p.src_w) { + continue; + } + sum += knl_data[(knl_y * p.knl_w + knl_x) * c + c_i] * + src_data[(src_y * p.src_w + src_x) * c + c_i]; + } + } + dst_data[c_i] = sum; + } + } + } +} + +static void ggml_compute_forward_conv_2d_dw_whcn(const ggml_compute_params * params, + const ggml_tensor * src, + const ggml_tensor * kernel, + ggml_tensor * dst, + const ggml_conv_2d_dw_params & p) { + const int64_t n = p.channels * p.batch; + const int64_t per_thread = (n + params->nth - 1) / params->nth; + const int64_t start = params->ith * per_thread; + const int64_t end = MIN(start + per_thread, n); + + for (int64_t i = start; i < end; ++i) { + const float * knl_data = (const float *) kernel->data + (i % p.channels) * p.knl_w * p.knl_h; + const float * src_data = (const float *) src->data + i * p.src_w * p.src_h; + float * dst_data = (float *) dst->data + i * p.dst_w * p.dst_h; + + for (int64_t dst_y = 0; dst_y < p.dst_h; ++dst_y) { + for (int64_t dst_x = 0; dst_x < p.dst_w; ++dst_x) { + float sum = 0.0f; + for (int64_t knl_y = 0; knl_y < p.knl_h; ++knl_y) { + const int64_t src_y = dst_y * p.stride_y + knl_y * p.dilation_y - p.pad_y; + if (src_y < 0 || src_y >= p.src_h) { + continue; + } + for (int64_t knl_x = 0; knl_x < p.knl_w; ++knl_x) { + const int64_t src_x = dst_x * p.stride_x + knl_x * p.dilation_x - p.pad_x; + if (src_x < 0 || src_x >= p.src_w) { + continue; + } + sum += knl_data[knl_y * p.knl_w + knl_x] * src_data[src_y * p.src_w + src_x]; + } + } + dst_data[dst_y * p.dst_w + dst_x] = sum; + } + } + } +} + +void ggml_compute_forward_conv_2d_dw(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * kernel = dst->src[0]; + const ggml_tensor * src = dst->src[1]; + ggml_conv_2d_dw_params p; + p.channels = src->ne[2]; + p.batch = src->ne[3]; + p.src_w = src->ne[0]; + p.src_h = src->ne[1]; + p.dst_w = dst->ne[0]; + p.dst_h = dst->ne[1]; + p.knl_w = kernel->ne[0]; + p.knl_h = kernel->ne[1]; + p.stride_x = dst->op_params[0]; + p.stride_y = dst->op_params[1]; + p.pad_x = dst->op_params[2]; + p.pad_y = dst->op_params[3]; + p.dilation_x = dst->op_params[4]; + p.dilation_y = dst->op_params[5]; + + GGML_ASSERT(kernel->ne[3] == p.channels); + GGML_ASSERT(dst->ne[3] == p.batch); + + if (ggml_is_contiguous(src)) { + ggml_compute_forward_conv_2d_dw_whcn(params, src, kernel, dst, p); + } else if (ggml_is_contiguous_channels(src)) { + // kernel should also have channels most contiguous in memory + GGML_ASSERT(kernel->nb[0] >= kernel->nb[2] && kernel->nb[1] >= kernel->nb[0]); + ggml_compute_forward_conv_2d_dw_cwhn(params, src, kernel, dst, p); + } else { + GGML_ABORT("non-contiguous memory layout not supported"); + } +} + +// ggml_compute_forward_pool_1d_sk_p0 + +static void ggml_compute_forward_pool_1d_sk_p0(const ggml_compute_params * params, + const ggml_op_pool op, + const int k, + ggml_tensor * dst) { + const ggml_tensor * src = dst->src[0]; + + assert(src->type == GGML_TYPE_F32 || src->type == GGML_TYPE_F16); + + if (params->ith != 0) { + return; + } + + const char * cdata = (const char *) src->data; + const char * const data_end = cdata + ggml_nbytes(src); + float * drow = (float *) dst->data; + + 
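+    // Only thread 0 does the work here (all other threads returned above). Because the caller
+    // asserts k == stride and no padding, output element i of a row consumes exactly the k
+    // source elements [i*k, i*k + k); cdata advances by src->nb[1] to walk the rows.
+    // Small example with k == 2 and a source row {1, 3, 2, 8, 5, 7}:
+    //   GGML_OP_POOL_AVG -> {2, 5, 6},  GGML_OP_POOL_MAX -> {3, 8, 7}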
const int64_t rs = dst->ne[0]; + + while (cdata < data_end) { + const void * srow = (const void *) cdata; + int j = 0; + for (int64_t i = 0; i < rs; ++i) { + switch (op) { + case GGML_OP_POOL_AVG: + drow[i] = 0; + break; + case GGML_OP_POOL_MAX: + drow[i] = -FLT_MAX; + break; + case GGML_OP_POOL_COUNT: + GGML_ABORT("fatal error"); + } + for (int ki = 0; ki < k; ++ki) { + const float srow_j = (src->type == GGML_TYPE_F32) ? + ((const float *) srow)[j] : + GGML_CPU_FP16_TO_FP32(((const ggml_fp16_t *) srow)[j]); + switch (op) { + case GGML_OP_POOL_AVG: + drow[i] += srow_j; + break; + case GGML_OP_POOL_MAX: + if (srow_j > drow[i]) { + drow[i] = srow_j; + } + break; + case GGML_OP_POOL_COUNT: + GGML_ABORT("fatal error"); + } + ++j; + } + switch (op) { + case GGML_OP_POOL_AVG: + drow[i] /= k; + break; + case GGML_OP_POOL_MAX: + break; + case GGML_OP_POOL_COUNT: + GGML_ABORT("fatal error"); + } + } + + cdata += src->nb[1]; + drow += rs; + } +} + +// ggml_compute_forward_pool_1d + +void ggml_compute_forward_pool_1d(const ggml_compute_params * params, ggml_tensor * dst) { + const int32_t * opts = (const int32_t *) dst->op_params; + ggml_op_pool op = static_cast(opts[0]); + const int k0 = opts[1]; + const int s0 = opts[2]; + const int p0 = opts[3]; + GGML_ASSERT(p0 == 0); // padding not supported + GGML_ASSERT(k0 == s0); // only s = k supported + + ggml_compute_forward_pool_1d_sk_p0(params, op, k0, dst); +} + +// ggml_compute_forward_pool_2d + +void ggml_compute_forward_pool_2d(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src = dst->src[0]; + + assert(src->type == GGML_TYPE_F32 || src->type == GGML_TYPE_F16); + + if (params->ith != 0) { + return; + } + + const int32_t * opts = (const int32_t *) dst->op_params; + ggml_op_pool op = static_cast(opts[0]); + const int k0 = opts[1]; + const int k1 = opts[2]; + const int s0 = opts[3]; + const int s1 = opts[4]; + const int p0 = opts[5]; + const int p1 = opts[6]; + const char * cdata = (const char *) src->data; + const char * const data_end = cdata + ggml_nbytes(src); + + const int64_t px = dst->ne[0]; + const int64_t py = dst->ne[1]; + const int64_t pa = px * py; + + float * dplane = (float *) dst->data; + + const int ka = k0 * k1; + const int offset0 = -p0; + const int offset1 = -p1; + + while (cdata < data_end) { + for (int oy = 0; oy < py; ++oy) { + float * const drow = dplane + oy * px; + for (int ox = 0; ox < px; ++ox) { + float * const out = drow + ox; + switch (op) { + case GGML_OP_POOL_AVG: + *out = 0; + break; + case GGML_OP_POOL_MAX: + *out = -FLT_MAX; + break; + case GGML_OP_POOL_COUNT: + GGML_ABORT("fatal error"); + } + + const int ix = offset0 + ox * s0; + const int iy = offset1 + oy * s1; + + for (int ky = 0; ky < k1; ++ky) { + if (iy + ky < 0 || iy + ky >= src->ne[1]) { + continue; + } + const void * srow = (const void *) (cdata + src->nb[1] * (iy + ky)); + for (int kx = 0; kx < k0; ++kx) { + int j = ix + kx; + if (j < 0 || j >= src->ne[0]) { + continue; + } + const float srow_j = (src->type == GGML_TYPE_F32) ? 
+ ((const float *) srow)[j] : + GGML_CPU_FP16_TO_FP32(((const ggml_fp16_t *) srow)[j]); + switch (op) { + case GGML_OP_POOL_AVG: + *out += srow_j; + break; + case GGML_OP_POOL_MAX: + if (srow_j > *out) { + *out = srow_j; + } + break; + case GGML_OP_POOL_COUNT: + GGML_ABORT("fatal error"); + } + } + } + switch (op) { + case GGML_OP_POOL_AVG: + *out /= ka; + break; + case GGML_OP_POOL_MAX: + break; + case GGML_OP_POOL_COUNT: + GGML_ABORT("fatal error"); + } + } + } + + cdata += src->nb[2]; + dplane += pa; + } +} + +// ggml_compute_forward_pool_2d_back + +void ggml_compute_forward_pool_2d_back(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src = dst->src[0]; + const ggml_tensor * dstf = dst->src[1]; // forward tensor of dst + + assert(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); + + if (params->ith != 0) { + return; + } + + const int32_t * opts = (const int32_t *) dst->op_params; + ggml_op_pool op = static_cast(opts[0]); + const int k0 = opts[1]; + const int k1 = opts[2]; + const int s0 = opts[3]; + const int s1 = opts[4]; + const int p0 = opts[5]; + const int p1 = opts[6]; + + char * cdata = (char *) dst->data; + const char * cdataf = (const char *) dstf->data; + const char * const data_end = cdata + ggml_nbytes(dst); + + GGML_ASSERT(params->ith == 0); + memset(cdata, 0, ggml_nbytes(dst)); + + const int64_t px = src->ne[0]; + const int64_t py = src->ne[1]; + const int64_t pa = px * py; + + const float * splane = (const float *) src->data; + + const int ka = k0 * k1; + const int offset0 = -p0; + const int offset1 = -p1; + + while (cdata < data_end) { + for (int oy = 0; oy < py; ++oy) { + const float * const srow = splane + oy * px; + for (int ox = 0; ox < px; ++ox) { + const float grad0 = srow[ox]; + + const int ix = offset0 + ox * s0; + const int iy = offset1 + oy * s1; + + if (op == GGML_OP_POOL_MAX) { + float maxval = -FLT_MAX; + int kxmax = -1; + int kymax = -1; + + for (int ky = 0; ky < k1; ++ky) { + if (iy + ky < 0 || iy + ky >= dst->ne[1]) { + continue; + } + const void * drowf = (const void *) (cdataf + dst->nb[1] * (iy + ky)); + for (int kx = 0; kx < k0; ++kx) { + int j = ix + kx; + if (j < 0 || j >= dst->ne[0]) { + continue; + } + + const float val = dst->type == GGML_TYPE_F32 ? 
+ ((const float *) drowf)[j] : + GGML_CPU_FP16_TO_FP32(((const ggml_fp16_t *) drowf)[j]); + if (val <= maxval) { + continue; + } + + maxval = val; + kxmax = kx; + kymax = ky; + } + } + + if (kxmax == -1 || kymax == -1) { + continue; + } + + void * drow = (void *) (cdata + dst->nb[1] * (iy + kymax)); + const int j = ix + kxmax; + if (dst->type == GGML_TYPE_F32) { + ((float *) drow)[j] += grad0; + } else { + ((ggml_fp16_t *) drow)[j] = + GGML_CPU_FP32_TO_FP16(grad0 + GGML_CPU_FP16_TO_FP32(((const ggml_fp16_t *) drow)[j])); + } + } else if (op == GGML_OP_POOL_AVG) { + const float grad = grad0 / ka; + + for (int ky = 0; ky < k1; ++ky) { + if (iy + ky < 0 || iy + ky >= dst->ne[1]) { + continue; + } + void * drow = (void *) (cdata + dst->nb[1] * (iy + ky)); + for (int kx = 0; kx < k0; ++kx) { + int j = ix + kx; + if (j < 0 || j >= dst->ne[0]) { + continue; + } + + if (dst->type == GGML_TYPE_F32) { + ((float *) drow)[j] += grad; + } else { + ((ggml_fp16_t *) drow)[j] += GGML_CPU_FP32_TO_FP16(grad); + } + } + } + } else { + GGML_ASSERT(false); + } + } + } + + cdata += dst->nb[2]; + cdataf += dst->nb[2]; + splane += pa; + } +} + +// ggml_compute_forward_upscale + +static void ggml_compute_forward_upscale_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + + const int ith = params->ith; + const int nth = params->nth; + + GGML_TENSOR_UNARY_OP_LOCALS + + float sf0 = (float) ne0 / src0->ne[0]; + float sf1 = (float) ne1 / src0->ne[1]; + float sf2 = (float) ne2 / src0->ne[2]; + float sf3 = (float) ne3 / src0->ne[3]; + float pixel_offset = 0.5f; + + const int32_t mode_flags = ggml_get_op_params_i32(dst, 0); + const ggml_scale_mode mode = (ggml_scale_mode) (mode_flags & 0xFF); + + if (mode_flags & GGML_SCALE_FLAG_ALIGN_CORNERS) { + pixel_offset = 0.0f; + sf0 = ne0 > 1 && ne00 > 1 ? (float) (ne0 - 1) / (ne00 - 1) : sf0; + sf1 = ne1 > 1 && ne01 > 1 ? 
(float) (ne1 - 1) / (ne01 - 1) : sf1; + } + + if (mode == GGML_SCALE_MODE_NEAREST) { + for (int64_t i3 = 0; i3 < ne3; i3++) { + const int64_t i03 = i3 / sf3; + for (int64_t i2 = ith; i2 < ne2; i2 += nth) { + const int64_t i02 = i2 / sf2; + for (int64_t i1 = 0; i1 < ne1; i1++) { + const int64_t i01 = i1 / sf1; + for (int64_t i0 = 0; i0 < ne0; i0++) { + const int64_t i00 = i0 / sf0; + + const float * x = + (float *) ((char *) src0->data + i00 * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03); + float * y = (float *) ((char *) dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3); + + *y = *x; + } + } + } + } + } else if (mode == GGML_SCALE_MODE_BILINEAR) { + for (int64_t i3 = 0; i3 < ne3; i3++) { + const int64_t i03 = i3 / sf3; + for (int64_t i2 = ith; i2 < ne2; i2 += nth) { + const int64_t i02 = i2 / sf2; + for (int64_t i1 = 0; i1 < ne1; i1++) { + const float y = ((float) i1 + pixel_offset) / sf1 - pixel_offset; + int64_t y0 = (int64_t) floorf(y); + int64_t y1 = y0 + 1; + + y0 = std::max(int64_t(0), std::min(y0, ne01 - 1)); + y1 = std::max(int64_t(0), std::min(y1, ne01 - 1)); + + float dy = y - (float) y0; + dy = std::max(0.0f, std::min(dy, 1.0f)); + + for (int64_t i0 = 0; i0 < ne0; i0++) { + const float x = ((float) i0 + pixel_offset) / sf0 - pixel_offset; + int64_t x0 = (int64_t) floorf(x); + int64_t x1 = x0 + 1; + + x0 = std::max(int64_t(0), std::min(x0, ne00 - 1)); + x1 = std::max(int64_t(0), std::min(x1, ne00 - 1)); + + float dx = x - (float) x0; + dx = std::max(0.0f, std::min(dx, 1.0f)); + + // fetch the four surrounding pixel values and interpolate + const float a = *(const float *) ((const char *) src0->data + x0 * nb00 + y0 * nb01 + + i02 * nb02 + i03 * nb03); + const float b = *(const float *) ((const char *) src0->data + x1 * nb00 + y0 * nb01 + + i02 * nb02 + i03 * nb03); + const float c = *(const float *) ((const char *) src0->data + x0 * nb00 + y1 * nb01 + + i02 * nb02 + i03 * nb03); + const float d = *(const float *) ((const char *) src0->data + x1 * nb00 + y1 * nb01 + + i02 * nb02 + i03 * nb03); + + const float val = a * (1 - dx) * (1 - dy) + b * dx * (1 - dy) + c * (1 - dx) * dy + d * dx * dy; + + float * y_dst = (float *) ((char *) dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3); + *y_dst = val; + } + } + } + } + } else if (mode == GGML_SCALE_MODE_BICUBIC) { + // https://en.wikipedia.org/wiki/Bicubic_interpolation#Bicubic_convolution_algorithm + const float a = -0.75f; // use alpha = -0.75 (same as PyTorch) + auto weight1 = [a](float x) { + return ((a + 2) * x - (a + 3)) * x * x + 1; + }; + auto weight2 = [a](float x) { + return ((a * x - 5 * a) * x + 8 * a) * x - 4 * a; + }; + auto bicubic = [=](float p0, float p1, float p2, float p3, float x) { + const float w0 = weight2(x + 1); + const float w1 = weight1(x + 0); + const float w2 = weight1(1 - x); + const float w3 = weight2(2 - x); + return p0 * w0 + p1 * w1 + p2 * w2 + p3 * w3; + }; + + for (int64_t i3 = 0; i3 < ne3; i3++) { + const int64_t i03 = i3 / sf3; + for (int64_t i2 = ith; i2 < ne2; i2 += nth) { + const int64_t i02 = i2 / sf2; + for (int64_t i1 = 0; i1 < ne1; i1++) { + const float y = ((float) i1 + pixel_offset) / sf1 - pixel_offset; + const int64_t y0 = (int64_t) floorf(y); + const float dy = y - (float) y0; + + for (int64_t i0 = 0; i0 < ne0; i0++) { + const float x = ((float) i0 + pixel_offset) / sf0 - pixel_offset; + const int64_t x0 = (int64_t) floorf(x); + const float dx = x - (float) x0; + + auto p = [=](int64_t x_off, int64_t y_off) -> float { + int64_t i00 = std::max(int64_t(0), std::min(x0 + x_off, 
ne00 - 1)); + int64_t i01 = std::max(int64_t(0), std::min(y0 + y_off, ne01 - 1)); + return *(const float *) ((const char *) src0->data + i00 * nb00 + i01 * nb01 + i02 * nb02 + + i03 * nb03); + }; + + const float val = bicubic(bicubic(p(-1, -1), p(0, -1), p(1, -1), p(2, -1), dx), + bicubic(p(-1, 0), p(0, 0), p(1, 0), p(2, 0), dx), + bicubic(p(-1, 1), p(0, 1), p(1, 1), p(2, 1), dx), + bicubic(p(-1, 2), p(0, 2), p(1, 2), p(2, 2), dx), dy); + + float * y_dst = (float *) ((char *) dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3); + *y_dst = val; + } + } + } + } + } else { + GGML_ABORT("unsupported upscale mode"); + } +} + +void ggml_compute_forward_upscale(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_upscale_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_pad + +static void ggml_compute_forward_pad_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + GGML_ASSERT(src0->nb[0] == sizeof(float)); + GGML_ASSERT(dst->nb[0] == sizeof(float)); + + const int ith = params->ith; + const int nth = params->nth; + + GGML_TENSOR_UNARY_OP_LOCALS + + float * dst_ptr = (float *) dst->data; + const int32_t lp0 = ggml_get_op_params_i32(dst, 0); + const int32_t rp0 = ggml_get_op_params_i32(dst, 1); + const int32_t lp1 = ggml_get_op_params_i32(dst, 2); + const int32_t rp1 = ggml_get_op_params_i32(dst, 3); + const int32_t lp2 = ggml_get_op_params_i32(dst, 4); + const int32_t rp2 = ggml_get_op_params_i32(dst, 5); + const int32_t lp3 = ggml_get_op_params_i32(dst, 6); + const int32_t rp3 = ggml_get_op_params_i32(dst, 7); + const int32_t circular = ggml_get_op_params_i32(dst, 8); + + // TODO: optimize + + if (circular == 0) { + for (int64_t i2 = 0; i2 < ne2; ++i2) { + for (int64_t i1 = ith; i1 < ne1; i1 += nth) { + for (int64_t i0 = 0; i0 < ne0; ++i0) { + for (int64_t i3 = 0; i3 < ne3; ++i3) { + const int64_t dst_idx = i3 * (ne0 * ne1 * ne2) + i2 * (ne0 * ne1) + i1 * ne0 + i0; + if ((i0 >= lp0 && i0 < ne0 - rp0) && (i1 >= lp1 && i1 < ne1 - rp1) && + (i2 >= lp2 && i2 < ne2 - rp2) && (i3 >= lp3 && i3 < ne3 - rp3)) { + const int64_t src_idx = + (i3 - lp3) * nb03 + (i2 - lp2) * nb02 + (i1 - lp1) * nb01 + (i0 - lp0) * nb00; + const float * src_ptr = (const float *) ((char *) src0->data + src_idx); + dst_ptr[dst_idx] = *src_ptr; + } else { + dst_ptr[dst_idx] = 0; + } + } + } + } + } + } else { + for (int64_t i2 = 0; i2 < ne2; ++i2) { + for (int64_t i1 = ith; i1 < ne1; i1 += nth) { + for (int64_t i0 = 0; i0 < ne0; ++i0) { + for (int64_t i3 = 0; i3 < ne3; ++i3) { + const int64_t dst_idx = i3 * (ne0 * ne1 * ne2) + i2 * (ne0 * ne1) + i1 * ne0 + i0; + const int64_t src_i0 = ggml_wrap_around(i0 - lp0, ne00); + const int64_t src_i1 = ggml_wrap_around(i1 - lp1, ne01); + const int64_t src_i2 = ggml_wrap_around(i2 - lp2, ne02); + const int64_t src_i3 = ggml_wrap_around(i3 - lp3, ne03); + + const int64_t src_idx = src_i3 * nb03 + src_i2 * nb02 + src_i1 * nb01 + src_i0 * nb00; + + const float * src_ptr = (const float *) ((char *) src0->data + src_idx); + dst_ptr[dst_idx] = *src_ptr; + } + } + } + } + } +} + +void ggml_compute_forward_pad(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_pad_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } 
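+    // Note on the circular branch of ggml_compute_forward_pad_f32 above: every destination
+    // coordinate is wrapped back into the source with ggml_wrap_around, so the pad is filled
+    // with the opposite edge of the tensor (torus behaviour) instead of zeros.
+    // For example, with ne00 == 5, lp0 == 2 and rp0 == 2 the 9 output columns read source
+    // columns {3, 4, 0, 1, 2, 3, 4, 0, 1}.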
+ } +} + +// ggml_compute_forward_pad_reflect_1d + +void ggml_compute_forward_pad_reflect_1d(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_F32); + + const int ith = params->ith; + const int nth = params->nth; + + const int32_t * opts = (const int32_t *) dst->op_params; + const int p0 = opts[0]; + const int p1 = opts[1]; + + GGML_TENSOR_UNARY_OP_LOCALS + + for (int64_t i3 = 0; i3 < ne3; i3++) { + for (int64_t i2 = 0; i2 < ne2; i2++) { + for (int64_t i1 = ith; i1 < ne1; i1 += nth) { + float * left = (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1 + p0 * nb0); + float * right = (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1 + (ne0 - p1 - 1) * nb0); + + ggml_vec_cpy_f32(ne00, left, (float *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01)); + + for (int i0 = 1; i0 <= p0; i0++) { + left[-i0] = left[i0]; + } + for (int i0 = 1; i0 <= p1; i0++) { + right[i0] = right[-i0]; + } + } + } + } +} + +// ggml_compute_forward_roll + +static int64_t ggml_wrap_index(int64_t i, int64_t ne) { + if (i < 0) { + return i + ne; + } else if (i >= ne) { + return i - ne; + } + return i; +} + +static void ggml_compute_forward_roll_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const float * src_data = (const float *) src0->data; + float * dst_data = (float *) dst->data; + + GGML_TENSOR_UNARY_OP_LOCALS + + const int s0 = ggml_get_op_params_i32(dst, 0); + const int s1 = ggml_get_op_params_i32(dst, 1); + const int s2 = ggml_get_op_params_i32(dst, 2); + const int s3 = ggml_get_op_params_i32(dst, 3); + + const int64_t total = ne1 * ne2 * ne3; + const int64_t per_thread = (total + params->nth) / params->nth; + const int64_t start = params->ith * per_thread; + const int64_t end = std::min(start + per_thread, total); + + for (int64_t i = start; i < end; ++i) { + const int64_t i1 = i % ne1; + const int64_t i2 = (i / ne1) % ne2; + const int64_t i3 = i / (ne2 * ne1); + float * dst_row = dst_data + (i3 * nb3 + i2 * nb2 + i1 * nb1) / sizeof(float); + + const int64_t i01 = ggml_wrap_index(i1 - s1, ne01); + const int64_t i02 = ggml_wrap_index(i2 - s2, ne02); + const int64_t i03 = ggml_wrap_index(i3 - s3, ne03); + const float * src_row = src_data + (i03 * nb03 + i02 * nb02 + i01 * nb01) / sizeof(float); + + const int64_t s = ggml_wrap_index(-s0, ne00); + const int64_t n = ne00 - s; + ggml_vec_cpy_f32(n, dst_row, src_row + s); + ggml_vec_cpy_f32(s, dst_row + n, src_row); + } +} + +void ggml_compute_forward_roll(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_roll_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_arange + +static void ggml_compute_forward_arange_f32(const ggml_compute_params * params, ggml_tensor * dst) { + GGML_ASSERT(dst->nb[0] == sizeof(float)); + + const int ith = params->ith; + const int nth = params->nth; + + const float start = ggml_get_op_params_f32(dst, 0); + const float stop = ggml_get_op_params_f32(dst, 1); + const float step = ggml_get_op_params_f32(dst, 2); + + const int64_t steps = (int64_t) ceilf((stop - start) / step); + + GGML_ASSERT(ggml_nelements(dst) == steps); + + for (int64_t i = ith; i < steps; i += nth) { + float value = start + step * i; + ((float *) dst->data)[i] = value; + } 
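+    // The element count is ceil((stop - start) / step) and threads simply stride over the
+    // indices, so thread ith writes i = ith, ith + nth, ... For example, start == 2, stop == 7,
+    // step == 1.5 gives 4 elements: {2.0, 3.5, 5.0, 6.5}; with nth == 2, thread 0 writes
+    // indices {0, 2} and thread 1 writes {1, 3}.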
+} + +void ggml_compute_forward_arange(const ggml_compute_params * params, ggml_tensor * dst) { + switch (dst->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_arange_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +static void ggml_compute_forward_timestep_embedding_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + GGML_ASSERT(src0->nb[0] == sizeof(float)); + + const int ith = params->ith; + const int nth = params->nth; + + GGML_TENSOR_UNARY_OP_LOCALS + + const int dim = ggml_get_op_params_i32(dst, 0); + const int max_period = ggml_get_op_params_i32(dst, 1); + + int half = dim / 2; + + for (int64_t i = 0; i < ne00; i++) { + float * embed_data = (float *) ((char *) dst->data + i * nb1); + for (int64_t j = ith; j < half; j += nth) { + float timestep = ((float *) src0->data)[i]; + float freq = (float) expf(-logf(max_period) * j / half); + float arg = timestep * freq; + embed_data[j] = cosf(arg); + embed_data[j + half] = sinf(arg); + } + if (dim % 2 != 0 && ith == 0) { + embed_data[2 * half] = 0.f; + } + } +} + +void ggml_compute_forward_timestep_embedding(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_timestep_embedding_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_argsort + +template struct cmp_argsort { + const float * data; + + bool operator()(int32_t a, int32_t b) const { + if constexpr (order == GGML_SORT_ORDER_ASC) { + return data[a] < data[b]; + } else { + return data[a] > data[b]; + } + } +}; + +static void ggml_compute_forward_argsort_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + GGML_TENSOR_UNARY_OP_LOCALS + + GGML_ASSERT(nb0 == sizeof(float)); + + const int ith = params->ith; + const int nth = params->nth; + + const int64_t nr = ggml_nrows(src0); + + ggml_sort_order order = (ggml_sort_order) ggml_get_op_params_i32(dst, 0); + + for (int64_t i = ith; i < nr; i += nth) { + const float * src_data = (float *) ((char *) src0->data + i * nb01); + + int32_t * dst_data = (int32_t *) ((char *) dst->data + i * nb1); + + for (int64_t j = 0; j < ne0; j++) { + dst_data[j] = j; + } + + switch (order) { + case GGML_SORT_ORDER_ASC: + std::sort(dst_data, dst_data + ne0, cmp_argsort{ src_data }); + break; + + case GGML_SORT_ORDER_DESC: + std::sort(dst_data, dst_data + ne0, cmp_argsort{ src_data }); + break; + + default: + GGML_ABORT("invalid sort order"); + } + } +} + +void ggml_compute_forward_argsort(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_argsort_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_top_k + +struct cmp_top_k { + const float * data; + + bool operator()(int32_t a, int32_t b) const { return data[a] > data[b]; } +}; + +static void ggml_compute_forward_top_k_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + GGML_TENSOR_UNARY_OP_LOCALS + + GGML_ASSERT(nb0 == sizeof(float)); + + const int ith = params->ith; + const int nth = params->nth; + + const int64_t nr = ggml_nrows(src0); + + const int top_k = ne0; + + int32_t * tmp = (int32_t *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith; + 
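+    // Each thread gets its own scratch index buffer, spaced (ne00 + CACHE_LINE_SIZE_F32)
+    // int32 entries apart so threads do not touch the same cache line. cmp_top_k orders
+    // indices by descending value, so after std::partial_sort the first top_k entries of tmp
+    // are the indices of the row's top_k largest values (in no particular order, as the swap
+    // below emphasizes). E.g. for a row {0.1f, 0.7f, 0.3f, 0.9f} and top_k == 2, tmp starts
+    // with {3, 1} and dst receives {1, 3} after the swap.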
+ for (int64_t i = ith; i < nr; i += nth) { + const float * src_data = (float *) ((char *) src0->data + i * nb01); + + for (int64_t j = 0; j < ne00; j++) { + tmp[j] = j; + } + + std::partial_sort(tmp, tmp + top_k, tmp + ne00, cmp_top_k{ src_data }); + + int32_t * dst_data = (int32_t *) ((char *) dst->data + i * nb1); + + std::copy(tmp, tmp + top_k, dst_data); + + // emphasize that the order is not important + if (top_k > 1) { + std::swap(dst_data[0], dst_data[1]); + } + } +} + +void ggml_compute_forward_top_k(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_top_k_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_flash_attn_ext + +static void ggml_compute_forward_flash_attn_ext_f16_one_chunk(const ggml_compute_params * params, + ggml_tensor * dst, + int ir0, + int ir1) { + const ggml_tensor * q = dst->src[0]; + const ggml_tensor * k = dst->src[1]; + const ggml_tensor * v = dst->src[2]; + const ggml_tensor * mask = dst->src[3]; + const ggml_tensor * sinks = dst->src[4]; + + GGML_TENSOR_LOCALS(int64_t, neq, q, ne) + GGML_TENSOR_LOCALS(size_t, nbq, q, nb) + GGML_TENSOR_LOCALS(int64_t, nek, k, ne) + GGML_TENSOR_LOCALS(size_t, nbk, k, nb) + GGML_TENSOR_LOCALS(int64_t, nev, v, ne) + GGML_TENSOR_LOCALS(size_t, nbv, v, nb) + GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) + GGML_TENSOR_LOCALS(size_t, nb, dst, nb) + + const int64_t DK = nek0; + const int64_t DV = nev0; + const int64_t N = neq1; + + GGML_ASSERT(ne0 == DV); + GGML_ASSERT(ne2 == N); + + // input tensor rows must be contiguous + GGML_ASSERT(nbq0 == ggml_type_size(q->type)); + GGML_ASSERT(nbk0 == ggml_type_size(k->type)); + GGML_ASSERT(nbv0 == ggml_type_size(v->type)); + + GGML_ASSERT(neq0 == DK); + GGML_ASSERT(nek0 == DK); + GGML_ASSERT(nev0 == DV); + + GGML_ASSERT(neq1 == N); + + // dst cannot be transposed or permuted + GGML_ASSERT(nb0 == sizeof(float)); + GGML_ASSERT(nb0 <= nb1); + GGML_ASSERT(nb1 <= nb2); + GGML_ASSERT(nb2 <= nb3); + + // broadcast factors + const int64_t rk2 = neq2 / nek2; + const int64_t rk3 = neq3 / nek3; + + const int64_t rv2 = neq2 / nev2; + const int64_t rv3 = neq3 / nev3; + + // parallelize by q rows using ggml_vec_dot_f32 + + float scale = 1.0f; + float max_bias = 0.0f; + float logit_softcap = 0.0f; + + memcpy(&scale, (float *) dst->op_params + 0, sizeof(float)); + memcpy(&max_bias, (float *) dst->op_params + 1, sizeof(float)); + memcpy(&logit_softcap, (float *) dst->op_params + 2, sizeof(float)); + + if (logit_softcap != 0) { + scale /= logit_softcap; + } + + const uint32_t n_head = neq2; + const uint32_t n_head_log2 = 1u << (uint32_t) floor(log2(n_head)); + + const float m0 = powf(2.0f, -(max_bias) / n_head_log2); + const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2); + + const ggml_type k_vec_dot_type = ggml_get_type_traits_cpu(k->type)->vec_dot_type; + const ggml_from_float_t q_to_vec_dot = ggml_get_type_traits_cpu(k_vec_dot_type)->from_float; + const ggml_vec_dot_t kq_vec_dot = ggml_get_type_traits_cpu(k->type)->vec_dot; + const ggml_to_float_t v_to_float = ggml_get_type_traits(v->type)->to_float; + + GGML_ASSERT((q_to_vec_dot) && "fattn: unsupported K-type"); + GGML_ASSERT((v->type == GGML_TYPE_F32 || v_to_float) && "fattn: unsupported V-type"); + + int ith = params->ith; + + // loop over n_batch and n_head + for (int ir = ir0; ir < ir1; ++ir) { + // q indices + const int iq3 = ir / (neq2 * neq1); + const int iq2 = 
(ir - iq3 * neq2 * neq1) / neq1; + const int iq1 = (ir - iq3 * neq2 * neq1 - iq2 * neq1); + + const uint32_t h = iq2; // head index + const float slope = + (max_bias > 0.0f) ? h < n_head_log2 ? powf(m0, h + 1) : powf(m1, 2 * (h - n_head_log2) + 1) : 1.0f; + + float S = 0.0f; // sum + float M = -INFINITY; // maximum KQ value + + float * VKQ32 = + (float *) params->wdata + ith * (1 * DK + 2 * DV + CACHE_LINE_SIZE_F32); // FP32 VKQ accumulator + float * V32 = (VKQ32 + 1 * DV); // (temporary) FP32 V buffer + ggml_fp16_t * VKQ16 = (ggml_fp16_t *) (VKQ32 + 1 * DV); // (temporary) FP16 VKQ accumulator + ggml_fp16_t * Q_q = (ggml_fp16_t *) (VKQ32 + 2 * DV); // (temporary) buffer for Q converted to quantized/FP16 + + if (v->type == GGML_TYPE_F16) { + memset(VKQ16, 0, DV * sizeof(ggml_fp16_t)); + } else { + memset(VKQ32, 0, DV * sizeof(float)); + } + + const ggml_fp16_t * mp = + mask ? (ggml_fp16_t *) ((char *) mask->data + iq1 * mask->nb[1] + (iq2 % mask->ne[2]) * mask->nb[2] + + (iq3 % mask->ne[3]) * mask->nb[3]) : + NULL; + + // k indices + const int ik3 = iq3 / rk3; + const int ik2 = iq2 / rk2; + + // v indices + const int iv3 = iq3 / rv3; + const int iv2 = iq2 / rv2; + + const float * pq = (const float *) ((char *) q->data + (iq1 * nbq1 + iq2 * nbq2 + iq3 * nbq3)); + q_to_vec_dot(pq, Q_q, DK); + + // online softmax / attention + // loop over n_kv and n_head_kv + // ref: https://arxiv.org/pdf/2112.05682.pdf + for (int64_t ic = 0; ic < nek1; ++ic) { + const float mv = mp ? slope * GGML_CPU_FP16_TO_FP32(mp[ic]) : 0.0f; + if (mv == -INFINITY) { + continue; + } + + float s; // KQ value + + const char * k_data = (const char *) k->data + (ic * nbk1 + ik2 * nbk2 + ik3 * nbk3); + kq_vec_dot(DK, &s, 0, k_data, 0, Q_q, 0, 1); + + s = s * scale; // scale KQ value + + if (logit_softcap != 0.0f) { + s = logit_softcap * tanhf(s); + } + + s += mv; // apply mask + + const float Mold = M; + + float ms = 1.0f; // upon new higher max val, scale VKQ and KQ sum with this value + float vs = 1.0f; // post-softmax KQ value, expf(s - M) + + const char * v_data = ((const char *) v->data + (ic * nbv1 + iv2 * nbv2 + iv3 * nbv3)); + + if (v->type == GGML_TYPE_F16) { + if (s > M) { + // s is new maximum, ms < 1.0f, vs == expf(s - s) == 1.0f + M = s; + ms = expf(Mold - M); + + // V = V*expf(Mold - M) + ggml_vec_scale_f16(DV, VKQ16, ms); + } else { + // no new maximum, ms == 1.0f, vs != 1.0f + vs = expf(s - M); + } + + // V += v*expf(s - M) + ggml_vec_mad_f16(DV, VKQ16, (const ggml_fp16_t *) v_data, vs); + } else { + if (s > M) { + // s is new maximum, ms < 1.0f, vs == expf(s - s) == 1.0f + M = s; + ms = expf(Mold - M); + + // V = V*expf(Mold - M) + ggml_vec_scale_f32(DV, VKQ32, ms); + } else { + // no new maximum, ms == 1.0f, vs != 1.0f + vs = expf(s - M); + } + + // V += v*expf(s - M) + if (v_to_float) { + v_to_float(v_data, V32, DV); + ggml_vec_mad_f32(DV, VKQ32, V32, vs); + } else { + // V is F32 + ggml_vec_mad_f32(DV, VKQ32, (const float *) v_data, vs); + } + } + + S = S * ms + vs; // scale and increment sum with partial sum + } + + if (v->type == GGML_TYPE_F16) { + for (int64_t d = 0; d < DV; ++d) { + VKQ32[d] = GGML_CPU_FP16_TO_FP32(VKQ16[d]); + } + } + + // sinks + if (sinks) { + const float s = ((float *) ((char *) sinks->data))[h]; + + float ms = 1.0f; + float vs = 1.0f; + + if (s > M) { + ms = expf(M - s); + ggml_vec_scale_f32(DV, VKQ32, ms); + } else { + vs = expf(s - M); + } + + S = S * ms + vs; + } + + // V /= S + const float S_inv = S == 0.0f ? 
0.0f : 1.0f / S; + ggml_vec_scale_f32(DV, VKQ32, S_inv); + + // dst indices + const int i1 = iq1; + const int i2 = iq2; + const int i3 = iq3; + + // original + //memcpy((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3), V, nev0*sizeof(float)); + + // permute(0, 2, 1, 3) + memcpy((char *) dst->data + (i3 * ne2 * ne1 + i2 + i1 * ne1) * nb1, VKQ32, nb1); + } +} + +static void ggml_compute_forward_flash_attn_ext_f16(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * q = dst->src[0]; + const ggml_tensor * k = dst->src[1]; + const ggml_tensor * v = dst->src[2]; + + GGML_TENSOR_LOCALS(int64_t, neq, q, ne) + GGML_TENSOR_LOCALS(size_t, nbq, q, nb) + GGML_TENSOR_LOCALS(int64_t, nek, k, ne) + GGML_TENSOR_LOCALS(size_t, nbk, k, nb) + GGML_TENSOR_LOCALS(int64_t, nev, v, ne) + GGML_TENSOR_LOCALS(size_t, nbv, v, nb) + GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) + GGML_TENSOR_LOCALS(size_t, nb, dst, nb) + + const int64_t DK = nek0; + const int64_t DV = nev0; + const int64_t N = neq1; + + GGML_ASSERT(ne0 == DV); + GGML_ASSERT(ne2 == N); + + // input tensor rows must be contiguous + GGML_ASSERT(nbq0 == ggml_type_size(q->type)); + GGML_ASSERT(nbk0 == ggml_type_size(k->type)); + GGML_ASSERT(nbv0 == ggml_type_size(v->type)); + + GGML_ASSERT(neq0 == DK); + GGML_ASSERT(nek0 == DK); + GGML_ASSERT(nev0 == DV); + + GGML_ASSERT(neq1 == N); + + // dst cannot be transposed or permuted + GGML_ASSERT(nb0 == sizeof(float)); + GGML_ASSERT(nb0 <= nb1); + GGML_ASSERT(nb1 <= nb2); + GGML_ASSERT(nb2 <= nb3); + + // parallelize by q rows using ggml_vec_dot_f32 + + // total rows in q + const int64_t nr = neq1 * neq2 * neq3; + + // rows per thread + const int ith = params->ith; + const int nth = params->nth; + + // disable for NUMA + const bool disable_chunking = ggml_is_numa(); + + // 4x chunks per thread + int nth_scaled = nth * 4; + int64_t chunk_size = (nr + nth_scaled - 1) / nth_scaled; + int64_t nchunk = (nr + chunk_size - 1) / chunk_size; + + if (nth == 1 || nchunk < nth || disable_chunking) { + nchunk = nth; + } + + if (ith == 0) { + // Every thread starts at ith, so the first unprocessed chunk is nth. This save a bit of coordination right at the start. + ggml_threadpool_chunk_set(params->threadpool, nth); + } + + ggml_barrier(params->threadpool); + + // The number of elements in each chunk + const int64_t dr = (nr + nchunk - 1) / nchunk; + + // The first chunk comes from our thread_id, the rest will get auto-assigned. 
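+    // Work-stealing over chunks: each thread starts on the chunk equal to its own ith, and
+    // once finished it grabs the next unprocessed chunk from the shared counter (which was
+    // primed to nth above, so chunks 0..nth-1 are never handed out twice), assuming
+    // ggml_threadpool_chunk_add returns the counter value before the increment.
+    // Rough example with nth == 4 and nr == 100 rows: nchunk == 15 and dr == 7, so thread 0
+    // first processes rows 0..6 and its next fetched chunk is chunk 4, i.e. rows 28..34.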
+ int current_chunk = ith; + + while (current_chunk < nchunk) { + const int64_t ir0 = dr * current_chunk; + const int64_t ir1 = MIN(ir0 + dr, nr); + + ggml_compute_forward_flash_attn_ext_f16_one_chunk(params, dst, ir0, ir1); + + current_chunk = ggml_threadpool_chunk_add(params->threadpool, 1); + } +} + +void ggml_compute_forward_flash_attn_ext(const ggml_compute_params * params, ggml_tensor * dst) { + switch (dst->op_params[3]) { + case GGML_PREC_DEFAULT: + case GGML_PREC_F32: + { + // uses F32 accumulators + ggml_compute_forward_flash_attn_ext_f16(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_flash_attn_back + +static void ggml_compute_forward_flash_attn_back_f32(const ggml_compute_params * params, + const bool masked, + ggml_tensor * dst) { + const ggml_tensor * q = dst->src[0]; + const ggml_tensor * k = dst->src[1]; + const ggml_tensor * v = dst->src[2]; + const ggml_tensor * d = dst->src[3]; + + GGML_TENSOR_LOCALS(int64_t, neq, q, ne) + GGML_TENSOR_LOCALS(size_t, nbq, q, nb) + GGML_TENSOR_LOCALS(int64_t, nek, k, ne) + GGML_TENSOR_LOCALS(size_t, nbk, k, nb) + GGML_TENSOR_LOCALS(int64_t, nev, v, ne) + GGML_TENSOR_LOCALS(size_t, nbv, v, nb) + GGML_TENSOR_LOCALS(int64_t, ned, d, ne) + GGML_TENSOR_LOCALS(size_t, nbd, d, nb) + GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) + GGML_TENSOR_LOCALS(size_t, nb, dst, nb) + + const int ith = params->ith; + const int nth = params->nth; + + const int64_t D = neq0; + const int64_t N = neq1; + const int64_t P = nek1 - N; + const int64_t M = P + N; + + const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL); + const int mxDM = MAX(D, Mup); + + // GGML_ASSERT(ne0 == D); + // GGML_ASSERT(ne1 == N); + GGML_ASSERT(P >= 0); + + GGML_ASSERT(nbq0 == sizeof(float)); + GGML_ASSERT(nbk0 == sizeof(float)); + GGML_ASSERT(nbv0 == sizeof(float)); + + GGML_ASSERT(neq0 == D); + GGML_ASSERT(nek0 == D); + GGML_ASSERT(nev1 == D); + GGML_ASSERT(ned0 == D); + + GGML_ASSERT(neq1 == N); + GGML_ASSERT(nek1 == N + P); + GGML_ASSERT(nev1 == D); + GGML_ASSERT(ned1 == N); + + // dst cannot be transposed or permuted + GGML_ASSERT(nb0 == sizeof(float)); + GGML_ASSERT(nb0 <= nb1); + GGML_ASSERT(nb1 <= nb2); + GGML_ASSERT(nb2 <= nb3); + + if (ith == 0) { + memset(dst->data, 0, nb0 * ne0 * ne1 * ne2 * ne3); + } + ggml_barrier(params->threadpool); + + const int64_t elem_q = ggml_nelements(q); + const int64_t elem_k = ggml_nelements(k); + + ggml_type result_type = dst->type; + GGML_ASSERT(ggml_blck_size(result_type) == 1); + const size_t tsize = ggml_type_size(result_type); + + const size_t offs_q = 0; + const size_t offs_k = offs_q + GGML_PAD(elem_q * tsize, GGML_MEM_ALIGN); + const size_t offs_v = offs_k + GGML_PAD(elem_k * tsize, GGML_MEM_ALIGN); + + void * grad_q = (char *) dst->data; + void * grad_k = (char *) dst->data + offs_k; + void * grad_v = (char *) dst->data + offs_v; + + const size_t nbgq1 = nb0 * neq0; + const size_t nbgq2 = nb0 * neq0 * neq1; + const size_t nbgq3 = nb0 * neq0 * neq1 * neq2; + + const size_t nbgk1 = nb0 * nek0; + const size_t nbgk2 = nb0 * nek0 * nek1; + const size_t nbgk3 = nb0 * nek0 * nek1 * neq2; + + const size_t nbgv1 = nb0 * nev0; + const size_t nbgv2 = nb0 * nev0 * nev1; + const size_t nbgv3 = nb0 * nev0 * nev1 * neq2; + + // parallelize by k rows using ggml_vec_dot_f32 + + // total rows in k + const int nr = nek2 * nek3; + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + const float scale 
= 1.0f / sqrtf(D); + + //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale); + + // how often k2 (and v2) is repeated in q2 + int nrep = neq2 / nek2; + + for (int ir = ir0; ir < ir1; ++ir) { + // q indices + const int ik3 = ir / (nek2); + const int ik2 = ir - ik3 * nek2; + + const int iq3 = ik3; + const int id3 = ik3; + const int iv3 = ik3; + const int iv2 = ik2; + + for (int irep = 0; irep < nrep; ++irep) { + const int iq2 = ik2 + irep * nek2; + const int id2 = iq2; + + // (ik2 + irep*nek2) % nek2 == ik2 + for (int iq1 = 0; iq1 < neq1; ++iq1) { + const int id1 = iq1; + + // not sure about CACHE_LINE_SIZE_F32.. + // - maybe it must not be multiplied by 2 and excluded from .. in SM 1*(..) offset? + float * S = + (float *) params->wdata + ith * 2 * (mxDM + CACHE_LINE_SIZE_F32) + 0 * (mxDM + CACHE_LINE_SIZE_F32); + float * SM = + (float *) params->wdata + ith * 2 * (mxDM + CACHE_LINE_SIZE_F32) + 1 * (mxDM + CACHE_LINE_SIZE_F32); + + for (int i = M; i < Mup; ++i) { + S[i] = -INFINITY; + } + + const int64_t masked_begin = masked ? (P + iq1 + 1) : M; + for (int64_t ic = 0; ic < masked_begin; ++ic) { + // k indices + const int ik1 = ic; + + // S indices + const int i1 = ik1; + + ggml_vec_dot_f32(neq0, S + i1, 0, + (float *) ((char *) k->data + (ik1 * nbk1 + ik2 * nbk2 + ik3 * nbk3)), 0, + (float *) ((char *) q->data + (iq1 * nbq1 + iq2 * nbq2 + iq3 * nbq3)), 0, 1); + } + + // scale + ggml_vec_scale_f32(masked_begin, S, scale); + + for (int64_t i = masked_begin; i < M; i++) { + S[i] = -INFINITY; + } + + // softmax + // exclude known -INF S[..] values from max and loop + // dont forget to set their SM values to zero + { + float max = -INFINITY; + ggml_vec_max_f32(masked_begin, &max, S); + + ggml_float sum = 0.0; + { +#ifdef GGML_SOFT_MAX_ACCELERATE + max = -max; + vDSP_vsadd(SM, 1, &max, SM, 1, Mup); + vvexpf(SM, SM, &Mup); + ggml_vec_sum_f32(Mup, &sum, SM); +#else + sum = ggml_vec_soft_max_f32(Mup, SM, S, max); +#endif + } + + assert(sum > 0.0); + + sum = 1.0 / sum; + ggml_vec_scale_f32(masked_begin, SM, sum); + } + + // step-by-step explanation + { + // forward-process shape grads from backward process + // parallel_for ik2,ik3: + // for irep: + // iq2 = ik2 + irep*nek2 + // k[:D,:M,:,:] [D,M,:,:] grad[k][:D,:M,ik2,ik3] += grad[kcur] + // q[:D,:N,:,:] [D,N,:,:] grad[q][:D,iq1,iq2,iq3] += grad[qcur] + // v[:M,:D,:,:] [M,D,:,:] grad[v][:M,:D,iv2,iv3] += grad[vcur] + // for iq1: + // kcur = k[:D,:M,ik2,ik3] [D,M,1,1] grad[kcur] = grad[S1].T @ qcur + // qcur = q[:D,iq1,iq2,iq3] [D,1,1,1] grad[qcur] = grad[S1] @ kcur + // vcur = v[:M,:D,iv2,iv3] [M,D,1,1] grad[vcur] = grad[S5].T @ S4 + // S0 = -Inf [D,1,1,1] + // ~S1[i] = dot(kcur[:D,i], qcur) + // S1 = qcur @ kcur.T [M,1,1,1] grad[S1] = grad[S2] * scale + // S2 = S1 * scale [M,1,1,1] grad[S2] = diag_mask_zero(grad[S3], P) + // S3 = diag_mask_inf(S2, P) [M,1,1,1] grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4])) + // S4 = softmax(S3) [M,1,1,1] grad[S4] = grad[S5] @ vcur + // ~S5[i] = dot(vcur[:,i], S4) + // S5 = S4 @ vcur.T [D,1,1,1] grad[S5] = d[:D,id1,id2,id3] + // ~dst[i,iq1,iq2,iq3] = S5[i] ^ + // dst[:D,iq1,iq2,iq3] = S5 | grad[dst[:D,iq1,iq2,iq3]] = d[:D,id1,id2,id3] + // dst backward-/ grad[dst] = d + // + // output gradients with their dependencies: + // + // grad[kcur] = grad[S1].T @ qcur + // grad[S1] = diag_mask_zero(grad[S3], P) * scale + // grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4])) + // grad[S4] = grad[S5] @ vcur + // grad[S4] = d[:D,id1,id2,id3] @ vcur + // grad[qcur] = grad[S1] @ kcur + // grad[vcur] = 
grad[S5].T @ S4 + // grad[vcur] = d[:D,id1,id2,id3].T @ S4 + // + // in post-order: + // + // S1 = qcur @ kcur.T + // S2 = S1 * scale + // S3 = diag_mask_inf(S2, P) + // S4 = softmax(S3) + // grad[S4] = d[:D,id1,id2,id3] @ vcur + // grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4])) + // grad[S1] = diag_mask_zero(grad[S3], P) * scale + // grad[qcur] = grad[S1] @ kcur + // grad[kcur] = grad[S1].T @ qcur + // grad[vcur] = d[:D,id1,id2,id3].T @ S4 + // + // using less variables (SM=S4): + // + // S = diag_mask_inf(qcur @ kcur.T * scale, P) + // SM = softmax(S) + // S = d[:D,iq1,iq2,iq3] @ vcur + // dot_SM_gradSM = dot(SM, S) + // S = SM * (S - dot(SM, S)) + // S = diag_mask_zero(S, P) * scale + // + // grad[q][:D,iq1,iq2,iq3] += S @ kcur + // grad[k][:D,:M,ik2,ik3] += S.T @ qcur + // grad[v][:M,:D,iv2,iv3] += d[:D,id1,id2,id3].T @ SM + } + + // S = gradSM = d[:D,id1,id2,id3] @ vcur[:,:,iv2,iv3] + // S = d[:D,id1,id2,id3] @ vcur[:,:,iv2,iv3] + // for ic: + // S[:M] += vcur[:M,ic,iv2,iv3] * d[ic,id1,id2,id3] + // exclude known future zero S[..] values from operation + ggml_vec_set_f32(masked_begin, S, 0); + for (int64_t ic = 0; ic < D; ++ic) { + ggml_vec_mad_f32( + masked_begin, S, (float *) ((char *) v->data + (ic * nbv1 + iv2 * nbv2 + iv3 * nbv3)), + *(float *) ((char *) d->data + (ic * nbd0 + id1 * nbd1 + id2 * nbd2 + id3 * nbd3))); + } + + // S = SM * (S - dot(SM, S)) + float dot_SM_gradSM = 0; + ggml_vec_dot_f32(masked_begin, &dot_SM_gradSM, 0, SM, 0, S, 0, 1); + ggml_vec_acc1_f32(M, S, -dot_SM_gradSM); + ggml_vec_mul_f32(masked_begin, S, S, SM); + + // S = diag_mask_zero(S, P) * scale + // already done by above ggml_vec_set_f32 + + // exclude known zero S[..] values from operation + ggml_vec_scale_f32(masked_begin, S, scale); + + // S shape [M,1] + // SM shape [M,1] + // kcur shape [D,M] + // qcur shape [D,1] + // vcur shape [M,D] + + // grad[q][:D,iq1,iq2,iq3] += S @ kcur + // grad[q][:D,iq1,iq2,iq3] += shape[M,1] @ shape[D,M] + // for ic: + // grad[q][:D,iq1,iq2,iq3] += S[ic] * kcur[:D,ic,ik2,ik3] + // exclude known zero S[..] values from loop + for (int64_t ic = 0; ic < masked_begin; ++ic) { + ggml_vec_mad_f32(D, (float *) ((char *) grad_q + (iq1 * nbgq1 + iq2 * nbgq2 + iq3 * nbgq3)), + (float *) ((char *) k->data + (ic * nbk1 + ik2 * nbk2 + ik3 * nbk3)), S[ic]); + } + + // grad[k][:D,:M,iq2,iq3] += S.T @ qcur + // for ic: + // grad[k][:D,ic,iq2,iq3] += S.T[0,ic] * qcur[:D,0] + // grad[k][:D,ic,iq2,iq3] += S[ic] * qcur[:D,0] + // exclude known zero S[..] values from loop + for (int64_t ic = 0; ic < masked_begin; ++ic) { + ggml_vec_mad_f32(D, (float *) ((char *) grad_k + (ic * nbgk1 + ik2 * nbgk2 + ik3 * nbgk3)), + (float *) ((char *) q->data + (iq1 * nbq1 + iq2 * nbq2 + iq3 * nbq3)), S[ic]); + } + + // grad[v][:M,:D,iv2,iv3] += d[:D,id1,id2,id3].T @ SM + // for ic: + // grad[v][:M,ic,iv2,iv3] += d[:D,id1,id2,id3].T[0,ic] * SM[:M] + // grad[v][:M,ic,iv2,iv3] += d[ic,id1,id2,id3] * SM[:M] + // exclude known zero SM[..] 
values from mad + for (int64_t ic = 0; ic < D; ++ic) { + ggml_vec_mad_f32( + masked_begin, (float *) ((char *) grad_v + (ic * nbgv1 + iv2 * nbgv2 + iv3 * nbgv3)), SM, + *(float *) ((char *) d->data + (ic * nbd0 + id1 * nbd1 + id2 * nbd2 + id3 * nbd3))); + } + } + } + } +} + +void ggml_compute_forward_flash_attn_back(const ggml_compute_params * params, const bool masked, ggml_tensor * dst) { + const ggml_tensor * q = dst->src[0]; + + switch (q->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_flash_attn_back_f32(params, masked, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_ssm_conv + +static void ggml_compute_forward_ssm_conv_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; // conv_x + const ggml_tensor * src1 = dst->src[1]; // conv1d.weight + + const int ith = params->ith; + const int nth = params->nth; + + const int nc = src1->ne[0]; // d_conv + const int ncs = src0->ne[0]; // d_conv - 1 + n_t + const int nr = src0->ne[1]; // d_inner + const int n_t = dst->ne[1]; // tokens per sequence + const int n_s = dst->ne[2]; // number of sequences in the batch + + GGML_ASSERT(dst->ne[0] == nr); + GGML_ASSERT(src0->nb[0] == sizeof(float)); + GGML_ASSERT(src1->nb[0] == sizeof(float)); + GGML_ASSERT(src0->nb[1] == src0->ne[0] * sizeof(float)); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + const int ir = ir1 - ir0; + + for (int i3 = 0; i3 < n_s; ++i3) { + for (int i2 = 0; i2 < n_t; ++i2) { + // {d_conv - 1 + n_t, d_inner, n_seqs} + // sliding window + const float * s = (const float *) ((const char *) src0->data + ir0 * (src0->nb[1]) + i2 * (src0->nb[0]) + + i3 * (src0->nb[2])); // {d_conv, d_inner, n_s} + const float * c = (const float *) ((const char *) src1->data + ir0 * (src1->nb[1])); // {d_conv, d_inner} + float * x = (float *) ((char *) dst->data + ir0 * (dst->nb[0]) + i2 * (dst->nb[1]) + + i3 * (dst->nb[2])); // {d_inner, n_t, n_s} + + // TODO: transpose the output for smaller strides for big batches? 
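+            // For token i2, s points at column i2 of the {d_conv - 1 + n_t, d_inner} buffer, so
+            // each row's window of nc == d_conv values covers columns i2 .. i2 + d_conv - 1:
+            // the current token plus the d_conv - 1 previous ones kept in the conv state.
+            // E.g. with d_conv == 4 and n_t == 3 the buffer has 6 columns per row and token
+            // i2 == 2 reads columns 2..5; the per-row dot with the {d_conv, d_inner} weights
+            // below produces x[i1].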
+ // d_inner + for (int i1 = 0; i1 < ir; ++i1) { + // rowwise dot product + // NOTE: not using ggml_vec_dot_f32, because its sum is in double precision + float sumf = 0.0f; + + // d_conv + for (int i0 = 0; i0 < nc; ++i0) { + sumf += s[i0 + i1 * ncs] * c[i0 + i1 * nc]; + } + x[i1] = sumf; + } + } + } +} + +void ggml_compute_forward_ssm_conv(const ggml_compute_params * params, ggml_tensor * dst) { + switch (dst->src[0]->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_ssm_conv_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_ssm_scan + +static void ggml_compute_forward_ssm_scan_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; // s {d_state, dim, n_head, n_seqs+} + const ggml_tensor * src1 = dst->src[1]; // x {dim, n_head, n_seq_tokens, n_seqs} + const ggml_tensor * src2 = dst->src[2]; // dt {n_head, n_seq_tokens, n_seqs} + const ggml_tensor * src3 = dst->src[3]; // A {d_state, n_head} or {1, n_head} + const ggml_tensor * src4 = dst->src[4]; // B {d_state, n_group, n_seq_tokens, n_seqs} + const ggml_tensor * src5 = dst->src[5]; // C {d_state, n_group, n_seq_tokens, n_seqs} + const ggml_tensor * src6 = dst->src[6]; // ids {n_seqs} + + const int ith = params->ith; + const int nth = params->nth; + + const int64_t nc = src0->ne[0]; // d_state + const int64_t nr = src0->ne[1]; // dim + const int64_t nh = src1->ne[1]; // n_head + const int64_t ng = src4->ne[1]; + const int64_t nt = src1->ne[2]; // number of tokens per sequence + const int64_t ns = src1->ne[3]; // number of sequences in the batch + + // can't use ggml_nbytes because src1 is not necessarily contiguous + const int64_t s_off = ggml_nelements(src1) * ggml_element_size(src1); + + GGML_ASSERT(ggml_nelements(src1) + nc * nr * nh * ns == ggml_nelements(dst)); + GGML_ASSERT(src0->nb[0] == sizeof(float)); + GGML_ASSERT(src1->nb[0] == sizeof(float)); + GGML_ASSERT(src2->nb[0] == sizeof(float)); + GGML_ASSERT(src3->nb[0] == sizeof(float)); + GGML_ASSERT(src4->nb[0] == sizeof(float)); + GGML_ASSERT(src5->nb[0] == sizeof(float)); + GGML_ASSERT(src6->nb[0] == sizeof(int32_t)); + GGML_ASSERT(nh % ng == 0); + + // heads per thread + const int dh = (nh + nth - 1) / nth; + + // head range for this thread + const int ih0 = dh * ith; + const int ih1 = MIN(ih0 + dh, nh); + + const int32_t * ids = (const int32_t *) src6->data; + + for (int i3 = 0; i3 < ns; ++i3) { + const float * s0 = + (const float *) ((const char *) src0->data + ids[i3] * (src0->nb[3])); // {d_state, dim, nh, ns} + float * s = (float *) ((char *) dst->data + i3 * (src0->nb[3]) + s_off); // {d_state, dim, nh, ns} + + for (int i2 = 0; i2 < nt; ++i2) { + const float * x = (const float *) ((const char *) src1->data + i2 * (src1->nb[2]) + + i3 * (src1->nb[3])); // {dim, nh, nt, ns} + const float * dt = + (const float *) ((const char *) src2->data + i2 * (src2->nb[1]) + i3 * (src2->nb[2])); // {nh, nt, ns} + const float * A = (const float *) ((const char *) src3->data); // {d_state, nh} or {1, nh} + const float * B = (const float *) ((const char *) src4->data + i2 * (src4->nb[2]) + + i3 * (src4->nb[3])); // {d_state, ng, nt, ns} + const float * C = (const float *) ((const char *) src5->data + i2 * (src5->nb[2]) + + i3 * (src5->nb[3])); // {d_state, ng, nt, ns} + float * y = (float *) ((char *) dst->data + i2 * (nh * nr * sizeof(float)) + + i3 * (nt * nh * nr * sizeof(float))); // {dim, nh, nt, ns} + + if (src3->ne[0] == 1) { + // Mamba-2 has a scalar decay factor per 
head; dA can be outside the state-wise loop + + // n_head + for (int h = ih0; h < ih1; ++h) { + // ref: https://github.com/state-spaces/mamba/blob/62db608da60f6fc790b8ed9f4b3225e95ca15fde/mamba_ssm/ops/triton/softplus.py#L16 + const float dt_soft_plus = ggml_compute_softplus_f32(dt[h]); + const float dA = expf(dt_soft_plus * A[h]); + const int g = h / (nh / ng); // repeat_interleave + + // dim + for (int i1 = 0; i1 < nr; ++i1) { + const int ii = i1 + h * nr; + const float x_dt = x[ii] * dt_soft_plus; + float sumf = 0.0f; +#if defined(GGML_SIMD) +# if defined(__ARM_FEATURE_SVE) + const int ggml_f32_epr = svcntw(); + const int ggml_f32_step = 1 * ggml_f32_epr; + + const int np = (nc & ~(ggml_f32_step - 1)); + + GGML_F32_VEC sum = GGML_F32_VEC_ZERO; + + GGML_F32_VEC adA = GGML_F32_VEC_SET1(dA); + GGML_F32_VEC axdt = GGML_F32_VEC_SET1(x_dt); + + for (int i = 0; i < np; i += ggml_f32_step) { + // TODO: maybe unroll more? + for (int j = 0; j < 1; j++) { + GGML_F32_VEC t0 = GGML_F32_VEC_LOAD(s0 + i + j * ggml_f32_epr + ii * nc); + GGML_F32_VEC t1 = GGML_F32_VEC_LOAD(B + i + j * ggml_f32_epr + g * nc); + GGML_F32_VEC t2 = GGML_F32_VEC_LOAD(C + i + j * ggml_f32_epr + g * nc); + + t0 = GGML_F32_VEC_MUL(t0, adA); + t1 = GGML_F32_VEC_MUL(t1, axdt); + + t0 = GGML_F32_VEC_ADD(t0, t1); + + sum = GGML_F32_VEC_FMA(sum, t0, t2); + + GGML_F32_VEC_STORE(s + i + j * ggml_f32_epr + ii * nc, t0); + } + } + + sumf = GGML_F32xt_REDUCE_ONE(sum); +# elif defined(__riscv_v_intrinsic) + // todo: RVV implementation + const int np = 0; +# else + const int np = (nc & ~(GGML_F32_STEP - 1)); + + GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; + + GGML_F32_VEC adA = GGML_F32_VEC_SET1(dA); + GGML_F32_VEC axdt = GGML_F32_VEC_SET1(x_dt); + + GGML_F32_VEC ax[GGML_F32_ARR]; + GGML_F32_VEC ay[GGML_F32_ARR]; + GGML_F32_VEC az[GGML_F32_ARR]; + + for (int i = 0; i < np; i += GGML_F32_STEP) { + for (int j = 0; j < GGML_F32_ARR; j++) { + ax[j] = GGML_F32_VEC_LOAD(s0 + i + j * GGML_F32_EPR + ii * nc); + ay[j] = GGML_F32_VEC_LOAD(B + i + j * GGML_F32_EPR + g * nc); + az[j] = GGML_F32_VEC_LOAD(C + i + j * GGML_F32_EPR + g * nc); + + ax[j] = GGML_F32_VEC_MUL(ax[j], adA); + ay[j] = GGML_F32_VEC_MUL(ay[j], axdt); + + ax[j] = GGML_F32_VEC_ADD(ax[j], ay[j]); + + sum[j] = GGML_F32_VEC_FMA(sum[j], ax[j], az[j]); + + GGML_F32_VEC_STORE(s + i + j * GGML_F32_EPR + ii * nc, ax[j]); + } + } + + // reduce sum0..sum3 to sum0 + GGML_F32_VEC_REDUCE(sumf, sum); +# endif +#else + const int np = 0; +#endif + // d_state + for (int i0 = np; i0 < nc; ++i0) { + const int i = i0 + ii * nc; + const int ig = i0 + g * nc; + // state = prev_state * dA + dB * x + const float state = (s0[i] * dA) + (B[ig] * x_dt); + // y = rowwise_dotprod(state, C) + sumf += state * C[ig]; + s[i] = state; + } + y[ii] = sumf; + } + } + } else { + // Mamba-1 has an element-wise decay factor for the states + + // n_head + for (int h = ih0; h < ih1; ++h) { + // ref: https://github.com/state-spaces/mamba/blob/62db608da60f6fc790b8ed9f4b3225e95ca15fde/mamba_ssm/ops/triton/softplus.py#L16 + const float dt_soft_plus = ggml_compute_softplus_f32(dt[h]); + const int g = h / (nh / ng); // repeat_interleave + + // dim + for (int i1 = 0; i1 < nr; ++i1) { + const int ii = i1 + h * nr; + const float x_dt = x[ii] * dt_soft_plus; +#if defined(__ARM_FEATURE_SVE) + svfloat32_t vx_dt = GGML_F32_VEC_SET1(x_dt); + svfloat32_t vdt_soft_plus = GGML_F32_VEC_SET1(dt_soft_plus); + svfloat32_t r1_vector = GGML_F32_VEC_ZERO; + + // d_state + // TODO: what happens when (d_state % svcntw()) != 0? 
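As a plain-C reference for the SVE block and the scalar fallback around this point, this is the per-head Mamba-1 state update being computed (a sketch with illustrative names, not part of the patch). In the Mamba-2 branch above, the only difference is that the decay exp(dt_soft_plus * A[h]) is a single scalar per head, which is why dA is hoisted out of the state-wise loop there.

#include <math.h>

// Sketch (not part of the patch): one (head, channel) Mamba-1 update over d_state.
// s0/s: previous/new state row, A_h: row h of A, B/C: the group slices for this head,
// x_dt = x * dt_soft_plus as in the surrounding code.
static float ssm_scan_mamba1_ref(const float * s0, float * s, const float * A_h,
                                 const float * B, const float * C,
                                 float x_dt, float dt_soft_plus, int d_state) {
    float y = 0.0f;
    for (int i0 = 0; i0 < d_state; ++i0) {
        const float state = s0[i0] * expf(dt_soft_plus * A_h[i0]) + B[i0] * x_dt;  // decay + input
        y += state * C[i0];                                                        // readout vs. C
        s[i0] = state;
    }
    return y;  // becomes y[ii] in the code around this sketch
}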
+ for (int64_t k = 0; k < nc; k += svcntw()) { + svfloat32_t vA = GGML_F32_VEC_LOAD(&A[h * nc + k]); + svfloat32_t vB = GGML_F32_VEC_LOAD(&B[k + g * nc]); + svfloat32_t vC = GGML_F32_VEC_LOAD(&C[k + g * nc]); + svfloat32_t vs0 = GGML_F32_VEC_LOAD(&s0[ii * nc + k]); + + svfloat32_t t1 = GGML_F32_VEC_MUL(vdt_soft_plus, vA); + t1 = exp_ps_sve(svptrue_b32(), t1); + svfloat32_t t2 = GGML_F32_VEC_MUL(vx_dt, vB); + + vs0 = GGML_F32_VEC_FMA(t2, vs0, t1); + r1_vector = GGML_F32_VEC_ADD(GGML_F32_VEC_MUL(vs0, vC), r1_vector); + + GGML_F32_VEC_STORE(&s[ii * nc + k], vs0); + } + y[ii] = GGML_F32xt_REDUCE_ONE(r1_vector); +#else + float sumf = 0.0f; + // NOTE: can't really use GGML_SIMD here because d_state is usually 16 + // and also because expf is used within the loop. + // d_state + for (int i0 = 0; i0 < nc; ++i0) { + const int i = i0 + ii * nc; + const int ig = i0 + g * nc; + // state = prev_state * dA + dB * x + const float state = (s0[i] * expf(dt_soft_plus * A[i0 + h * nc])) + (B[ig] * x_dt); + // y = rowwise_dotprod(state, C) + sumf += state * C[ig]; + s[i] = state; + } + y[ii] = sumf; +#endif + } + } + } + // use the output as the source when it's not the first token-wise iteration + s0 = s; + } + } +} + +void ggml_compute_forward_ssm_scan(const ggml_compute_params * params, ggml_tensor * dst) { + switch (dst->src[0]->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_ssm_scan_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_win_part + +static void ggml_compute_forward_win_part_f32(const ggml_compute_params * params, ggml_tensor * dst) { + GGML_UNUSED(params); + + const ggml_tensor * src0 = dst->src[0]; + + GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) + GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) + + const int32_t nep0 = ((const int32_t *) (dst->op_params))[0]; + const int32_t nep1 = ((const int32_t *) (dst->op_params))[1]; + const int32_t w = ((const int32_t *) (dst->op_params))[2]; + + assert(ne00 == ne0); + assert(ne3 == nep0 * nep1); + + // TODO: optimize / multi-thread + for (int py = 0; py < nep1; ++py) { + for (int px = 0; px < nep0; ++px) { + const int64_t i3 = py * nep0 + px; + for (int64_t i2 = 0; i2 < ne2; ++i2) { + for (int64_t i1 = 0; i1 < ne1; ++i1) { + for (int64_t i0 = 0; i0 < ne0; ++i0) { + const int64_t i02 = py * w + i2; + const int64_t i01 = px * w + i1; + const int64_t i00 = i0; + + const int64_t i = i3 * ne2 * ne1 * ne0 + i2 * ne1 * ne0 + i1 * ne0 + i0; + const int64_t j = i02 * ne01 * ne00 + i01 * ne00 + i00; + + if (py * w + i2 >= ne02 || px * w + i1 >= ne01) { + ((float *) dst->data)[i] = 0.0f; + } else { + ((float *) dst->data)[i] = ((float *) src0->data)[j]; + } + } + } + } + } + } +} + +void ggml_compute_forward_win_part(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_win_part_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_win_unpart + +static void ggml_compute_forward_win_unpart_f32(const ggml_compute_params * params, ggml_tensor * dst) { + GGML_UNUSED(params); + + const ggml_tensor * src0 = dst->src[0]; + + GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) + GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) + + const int32_t w = ((const int32_t *) (dst->op_params))[0]; + + // padding + const int px = (w - ne1 % w) % w; + //const int py = (w - ne2%w)%w; + + const int npx = (px + ne1) / w; + //const int npy = (py + ne2)/w; + + 
assert(ne0 == ne00); + + // TODO: optimize / multi-thread + for (int64_t i2 = 0; i2 < ne2; ++i2) { + for (int64_t i1 = 0; i1 < ne1; ++i1) { + for (int64_t i0 = 0; i0 < ne0; ++i0) { + const int ip2 = i2 / w; + const int ip1 = i1 / w; + + const int64_t i02 = i2 % w; + const int64_t i01 = i1 % w; + const int64_t i00 = i0; + + const int64_t i = (ip2 * npx + ip1) * ne02 * ne01 * ne00 + i02 * ne01 * ne00 + i01 * ne00 + i00; + const int64_t j = i2 * ne1 * ne0 + i1 * ne0 + i0; + + ((float *) dst->data)[j] = ((float *) src0->data)[i]; + } + } + } +} + +void ggml_compute_forward_win_unpart(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_win_unpart_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +//gmml_compute_forward_unary + +void ggml_compute_forward_unary(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_unary_op op = ggml_get_unary_op(dst); + + switch (op) { + case GGML_UNARY_OP_ABS: + { + ggml_compute_forward_abs(params, dst); + } + break; + case GGML_UNARY_OP_SGN: + { + ggml_compute_forward_sgn(params, dst); + } + break; + case GGML_UNARY_OP_NEG: + { + ggml_compute_forward_neg(params, dst); + } + break; + case GGML_UNARY_OP_STEP: + { + ggml_compute_forward_step(params, dst); + } + break; + case GGML_UNARY_OP_TANH: + { + ggml_compute_forward_tanh(params, dst); + } + break; + case GGML_UNARY_OP_ELU: + { + ggml_compute_forward_elu(params, dst); + } + break; + case GGML_UNARY_OP_RELU: + { + ggml_compute_forward_relu(params, dst); + } + break; + case GGML_UNARY_OP_SIGMOID: + { + ggml_compute_forward_sigmoid(params, dst); + } + break; + case GGML_UNARY_OP_GELU: + { + ggml_compute_forward_gelu(params, dst); + } + break; + case GGML_UNARY_OP_GELU_ERF: + { + ggml_compute_forward_gelu_erf(params, dst); + } + break; + case GGML_UNARY_OP_GELU_QUICK: + { + ggml_compute_forward_gelu_quick(params, dst); + } + break; + case GGML_UNARY_OP_SILU: + { + ggml_compute_forward_silu(params, dst); + } + break; + case GGML_UNARY_OP_HARDSWISH: + { + ggml_compute_forward_hardswish(params, dst); + } + break; + case GGML_UNARY_OP_HARDSIGMOID: + { + ggml_compute_forward_hardsigmoid(params, dst); + } + break; + case GGML_UNARY_OP_EXP: + { + ggml_compute_forward_exp(params, dst); + } + break; + case GGML_UNARY_OP_FLOOR: + { + ggml_compute_forward_floor(params, dst); + } + break; + case GGML_UNARY_OP_CEIL: + { + ggml_compute_forward_ceil(params, dst); + } + break; + case GGML_UNARY_OP_ROUND: + { + ggml_compute_forward_round(params, dst); + } + break; + case GGML_UNARY_OP_TRUNC: + { + ggml_compute_forward_trunc(params, dst); + } + break; + case GGML_UNARY_OP_XIELU: + { + ggml_compute_forward_xielu(params, dst); + } + break; + case GGML_UNARY_OP_EXPM1: + { + ggml_compute_forward_expm1(params, dst); + } + break; + case GGML_UNARY_OP_SOFTPLUS: + { + ggml_compute_forward_softplus(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +//ggml_compute_forward_glu + +void ggml_compute_forward_glu(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_glu_op op = ggml_get_glu_op(dst); + + switch (op) { + case GGML_GLU_OP_REGLU: + { + ggml_compute_forward_reglu(params, dst); + } + break; + case GGML_GLU_OP_GEGLU: + { + ggml_compute_forward_geglu(params, dst); + } + break; + case GGML_GLU_OP_SWIGLU: + { + ggml_compute_forward_swiglu(params, dst); + } + break; + case GGML_GLU_OP_SWIGLU_OAI: + { + 
ggml_compute_forward_swiglu_oai(params, dst); + } + break; + case GGML_GLU_OP_GEGLU_ERF: + { + ggml_compute_forward_geglu_erf(params, dst); + } + break; + case GGML_GLU_OP_GEGLU_QUICK: + { + ggml_compute_forward_geglu_quick(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_get_rel_pos + +static void ggml_compute_forward_get_rel_pos_f16(const ggml_compute_params * params, ggml_tensor * dst) { + GGML_UNUSED(params); + + const ggml_tensor * src0 = dst->src[0]; + + // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/image_encoder.py#L292-L322 + + GGML_TENSOR_UNARY_OP_LOCALS + + const int64_t w = ne1; + + ggml_fp16_t * src0_data = (ggml_fp16_t *) src0->data; + ggml_fp16_t * dst_data = (ggml_fp16_t *) dst->data; + + for (int64_t i2 = 0; i2 < ne2; ++i2) { + for (int64_t i1 = 0; i1 < ne1; ++i1) { + const int64_t pos = (w - i1 - 1) + i2; + for (int64_t i0 = 0; i0 < ne0; ++i0) { + dst_data[i2 * ne1 * ne0 + i1 * ne0 + i0] = src0_data[pos * ne00 + i0]; + } + } + } +} + +void ggml_compute_forward_get_rel_pos(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F16: + case GGML_TYPE_BF16: + { + ggml_compute_forward_get_rel_pos_f16(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_add_rel_pos + +static void ggml_compute_forward_add_rel_pos_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + const ggml_tensor * src2 = dst->src[2]; + + const bool inplace = (bool) ((int32_t *) dst->op_params)[0]; + if (!inplace) { + if (params->ith == 0) { + memcpy((char *) dst->data, (char *) src0->data, ggml_nbytes(dst)); + } + ggml_barrier(params->threadpool); + } + // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/image_encoder.py#L357-L359 + + float * src1_data = (float *) src1->data; + float * src2_data = (float *) src2->data; + float * dst_data = (float *) dst->data; + + const int64_t ne10 = src1->ne[0]; + const int64_t ne11 = src1->ne[1]; + const int64_t ne12 = src1->ne[2]; + const int64_t ne13 = src1->ne[3]; + + const int ith = params->ith; + const int nth = params->nth; + + // total patches in dst + const int np = ne13; + + // patches per thread + const int dp = (np + nth - 1) / nth; + + // patch range for this thread + const int ip0 = dp * ith; + const int ip1 = MIN(ip0 + dp, np); + + for (int64_t i13 = ip0; i13 < ip1; ++i13) { + for (int64_t i12 = 0; i12 < ne12; ++i12) { + for (int64_t i11 = 0; i11 < ne11; ++i11) { + const int64_t jp1 = i13 * ne12 * ne11 * ne10 + i12 * ne11 * ne10 + i11 * ne10; + for (int64_t i10 = 0; i10 < ne10; ++i10) { + const int64_t jp0 = jp1 + i10; + const float src1_e = src1_data[jp0]; + const float src2_e = src2_data[jp0]; + + const int64_t jdh = jp0 * ne10; + const int64_t jdw = jdh - (ne10 - 1) * i10; + + for (int64_t j = 0; j < ne10; ++j) { + dst_data[jdh + j] += src2_e; + dst_data[jdw + j * ne10] += src1_e; + } + } + } + } + } +} + +void ggml_compute_forward_add_rel_pos(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_add_rel_pos_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_rwkv_wkv6 + +static void 
ggml_compute_forward_rwkv_wkv6_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const int64_t T = dst->src[1]->ne[2]; + const int64_t C = dst->ne[0]; + const int64_t HEADS = dst->src[1]->ne[1]; + const int64_t n_seqs = dst->src[5]->ne[1]; + const int64_t head_size = C / HEADS; + + float * dst_data = (float *) dst->data; + float * state = ((float *) dst->data) + C * T; + + const int ith = params->ith; + const int nth = params->nth; + + if (ith >= HEADS) { + return; + } + + const int h_start = (HEADS * ith) / nth; + const int h_end = ((HEADS * (ith + 1)) / nth < HEADS) ? (HEADS * (ith + 1)) / nth : HEADS; + + float * k = (float *) dst->src[0]->data; + float * v = (float *) dst->src[1]->data; + float * r = (float *) dst->src[2]->data; + float * time_faaaa = (float *) dst->src[3]->data; + float * time_decay = (float *) dst->src[4]->data; + + size_t t_stride = HEADS * head_size; // Same to C + + size_t h_stride = C / HEADS; + GGML_ASSERT(C % HEADS == 0); // C must be divisible by HEADS + size_t h_stride_2d = head_size * head_size; + + if (ith == 0) { + memset(dst_data, 0, T * C * sizeof(float)); + } + ggml_barrier(params->threadpool); + +#if defined(__AVX__) && !defined(__AVX512F__) +# define GGML_F32X GGML_F32x8 +# define GGML_F32X_SET1 GGML_F32x8_SET1 +# define GGML_F32X_LOAD GGML_F32x8_LOAD +# define GGML_F32X_STORE GGML_F32x8_STORE +# define GGML_F32X_MUL GGML_F32x8_MUL +# define GGML_F32X_FMA GGML_F32x8_FMA +# define WKV_VECTOR_SIZE 8 +#elif defined(__AVX512F__) +# define GGML_F32X GGML_F32x16 +# define GGML_F32X_SET1 GGML_F32x16_SET1 +# define GGML_F32X_LOAD GGML_F32x16_LOAD +# define GGML_F32X_STORE GGML_F32x16_STORE +# define GGML_F32X_MUL GGML_F32x16_MUL +# define GGML_F32X_FMA GGML_F32x16_FMA +# define WKV_VECTOR_SIZE 16 +#elif defined(__ARM_FEATURE_SVE) && defined(__aarch64__) +# define GGML_F32X GGML_F32xt +# define GGML_F32X_SET1 GGML_F32xt_SET1 +# define GGML_F32X_LOAD GGML_F32xt_LOAD +# define GGML_F32X_STORE GGML_F32xt_STORE +# define GGML_F32X_MUL GGML_F32xt_MUL +# define GGML_F32X_FMA GGML_F32xt_FMA +# define WKV_VECTOR_SIZE 8 +#elif defined(__ARM_NEON) && defined(__aarch64__) +# define GGML_F32X GGML_F32x4 +# define GGML_F32X_SET1 GGML_F32x4_SET1 +# define GGML_F32X_LOAD GGML_F32x4_LOAD +# define GGML_F32X_STORE GGML_F32x4_STORE +# define GGML_F32X_MUL GGML_F32x4_MUL +# define GGML_F32X_FMA GGML_F32x4_FMA +# define WKV_VECTOR_SIZE 4 +#endif + +#ifdef WKV_VECTOR_SIZE + int wkv_vector_size; +# if defined(__ARM_FEATURE_SVE) + wkv_vector_size = svcntw(); +# else + wkv_vector_size = WKV_VECTOR_SIZE; +# endif + const int64_t vec_count = head_size / wkv_vector_size; + + for (int64_t t = 0; t < T; t++) { + size_t t_offset = t * t_stride; + size_t state_offset = head_size * C * (t / (T / n_seqs)); + float * state_cur = state + state_offset; + float * state_prev = t % (T / n_seqs) ? 
state_cur : (float *) dst->src[5]->data + state_offset; + + for (int64_t h = h_start; h < h_end; h++) { + size_t h_offset = h * h_stride; + size_t t_h_offset = t_offset + h_offset; + size_t h_2d_offset = h * h_stride_2d; + + for (int64_t i = 0; i < head_size; i++) { + size_t t_h_i_offset = t_h_offset + i; + size_t h_i_offset = h_offset + i; + size_t h_2d_i_offset = h_2d_offset + i * h_stride; + + float k_val = k[t_h_i_offset]; + float r_val = r[t_h_i_offset]; + float time_faaaa_val = time_faaaa[h_i_offset]; + float time_decay_val = time_decay[t_h_i_offset]; + + // Broadcast scalar values to vectors + GGML_F32X k_vec = GGML_F32X_SET1(k_val); + GGML_F32X r_vec = GGML_F32X_SET1(r_val); + GGML_F32X time_faaaa_vec = GGML_F32X_SET1(time_faaaa_val); + GGML_F32X time_decay_vec = GGML_F32X_SET1(time_decay_val); + + for (int64_t j = 0; j < vec_count; j++) { + size_t base_j = j * wkv_vector_size; + size_t t_h_j_offset = t_h_offset + base_j; + size_t h_2d_i_j_offset = h_2d_i_offset + base_j; + + // Load x elements at once + GGML_F32X v_vec = GGML_F32X_LOAD(&v[t_h_j_offset]); + GGML_F32X prev_state_vec = GGML_F32X_LOAD(&state_prev[h_2d_i_j_offset]); + GGML_F32X dst_vec = GGML_F32X_LOAD(&dst_data[t_h_j_offset]); + + // Compute kv = v * k + GGML_F32X kv_vec = GGML_F32X_MUL(v_vec, k_vec); + + // Compute temp = kv * time_faaaa + prev_state + GGML_F32X temp_vec = GGML_F32X_FMA(prev_state_vec, kv_vec, time_faaaa_vec); + + // Update dst: dst += temp * r + dst_vec = GGML_F32X_FMA(dst_vec, temp_vec, r_vec); + GGML_F32X_STORE(&dst_data[t_h_j_offset], dst_vec); + + // Update state: state = prev_state * time_decay + kv + GGML_F32X new_state_vec = GGML_F32X_FMA(kv_vec, prev_state_vec, time_decay_vec); + GGML_F32X_STORE(&state_cur[h_2d_i_j_offset], new_state_vec); + } + + // Handle remaining elements, this will not be used. + for (int64_t j = vec_count * wkv_vector_size; j < head_size; j++) { + size_t t_h_j_offset = t_h_offset + j; + size_t h_2d_i_j_offset = h_2d_i_offset + j; + float v_val = v[t_h_j_offset]; + float kv_val = v_val * k_val; + float prev_state_val = state_prev[h_2d_i_j_offset]; + float temp_val = kv_val * time_faaaa_val + prev_state_val; + dst_data[t_h_j_offset] += temp_val * r_val; + state_cur[h_2d_i_j_offset] = prev_state_val * time_decay_val + kv_val; + } + } + } + } + +#else + // basically fused operations: + // dst = r @ (time_faaaa * (k @ v) + state), + // state = time_decay * state + (k @ v), + // recursive through each token + for (int64_t t = 0; t < T; t++) { + size_t t_offset = t * t_stride; + size_t state_offset = head_size * C * (t / (T / n_seqs)); + float * state_cur = state + state_offset; + float * state_prev = t % (T / n_seqs) ? state_cur : (float *) dst->src[5]->data + state_offset; + + for (int64_t h = h_start; h < h_end; h++) { + size_t h_offset = h * h_stride; + size_t t_h_offset = t_offset + h_offset; + size_t h_2d_offset = h * h_stride_2d; + + for (int64_t i = 0; i < head_size; i++) { + size_t t_h_i_offset = t_h_offset + i; + size_t h_i_offset = h_offset + i; + size_t h_2d_i_offset = h_2d_offset + i * h_stride; + + float k_val = k[t_h_i_offset]; + float r_val = r[t_h_i_offset]; + float time_faaaa_val = time_faaaa[h_i_offset]; + // RWKV v6: different time_decay for each token. 
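To make the fused operation described above concrete, here is the WKV6 update for a single head at a single token written out per (i, j); it mirrors the surrounding scalar loops. Names are illustrative (u stands for time_faaaa, w for time_decay) and this sketch is not part of the patch.

// Sketch (not part of the patch): per-head, per-token WKV6 update.
// state is the head_size x head_size per-head state, out the per-token output row.
static void wkv6_head_token_ref(const float * r, const float * w, const float * k,
                                const float * v, const float * u,
                                float * state, float * out, int head_size) {
    for (int i = 0; i < head_size; ++i) {
        for (int j = 0; j < head_size; ++j) {
            const float kv   = k[i] * v[j];                      // (k v^T)[i][j]
            const float prev = state[i * head_size + j];
            out[j] += (kv * u[i] + prev) * r[i];                 // dst = r @ (u*(k v^T) + state)
            state[i * head_size + j] = prev * w[i] + kv;         // state = w*state + k v^T
        }
    }
}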
+ float time_decay_val = time_decay[t_h_i_offset]; + + for (int64_t j = 0; j < head_size; j++) { + size_t t_h_j_offset = t_h_offset + j; + size_t h_2d_i_j_offset = h_2d_i_offset + j; + + float v_val = v[t_h_j_offset]; + float kv_val = v_val * k_val; + float prev_state_val = state_prev[h_2d_i_j_offset]; + float temp_val = kv_val * time_faaaa_val + prev_state_val; + dst_data[t_h_j_offset] += temp_val * r_val; + state_cur[h_2d_i_j_offset] = prev_state_val * time_decay_val + kv_val; + } + } + } + } +#endif +} + +void ggml_compute_forward_rwkv_wkv6(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_rwkv_wkv6_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_gla + +static void ggml_compute_forward_gla_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const int64_t T = dst->src[1]->ne[2]; + const int64_t C = dst->ne[0]; + const int64_t HEADS = dst->src[1]->ne[1]; + const int64_t n_seqs = dst->src[4]->ne[1]; + const int64_t head_size = C / HEADS; + const float scale = ggml_get_op_params_f32(dst, 0); + + float * dst_data = (float *) dst->data; + float * state = ((float *) dst->data) + C * T; + + const int ith = params->ith; + const int nth = params->nth; + + if (ith >= HEADS) { + return; + } + + const int h_start = (HEADS * ith) / nth; + const int h_end = ((HEADS * (ith + 1)) / nth < HEADS) ? (HEADS * (ith + 1)) / nth : HEADS; + + float * k = (float *) dst->src[0]->data; + float * v = (float *) dst->src[1]->data; + float * q = (float *) dst->src[2]->data; + float * g = (float *) dst->src[3]->data; + + size_t t_stride = HEADS * head_size; // Same to C + + size_t h_stride = C / HEADS; + GGML_ASSERT(C % HEADS == 0); // C must be divisible by HEADS + size_t h_stride_2d = head_size * head_size; + + if (ith == 0) { + memset(dst_data, 0, T * C * sizeof(float)); + } + ggml_barrier(params->threadpool); + +#if defined(__AVX__) && !defined(__AVX512F__) +# define GGML_F32X GGML_F32x8 +# define GGML_F32X_SET1 GGML_F32x8_SET1 +# define GGML_F32X_LOAD GGML_F32x8_LOAD +# define GGML_F32X_STORE GGML_F32x8_STORE +# define GGML_F32X_MUL GGML_F32x8_MUL +# define GGML_F32X_FMA GGML_F32x8_FMA +# define GLA_VECTOR_SIZE 8 +#elif defined(__AVX512F__) +# define GGML_F32X GGML_F32x16 +# define GGML_F32X_SET1 GGML_F32x16_SET1 +# define GGML_F32X_LOAD GGML_F32x16_LOAD +# define GGML_F32X_STORE GGML_F32x16_STORE +# define GGML_F32X_MUL GGML_F32x16_MUL +# define GGML_F32X_FMA GGML_F32x16_FMA +# define GLA_VECTOR_SIZE 16 +#elif defined(__ARM_FEATURE_SVE) && defined(__aarch64__) +# define GGML_F32X GGML_F32xt +# define GGML_F32X_SET1 GGML_F32xt_SET1 +# define GGML_F32X_LOAD GGML_F32xt_LOAD +# define GGML_F32X_STORE GGML_F32xt_STORE +# define GGML_F32X_MUL GGML_F32xt_MUL +# define GGML_F32X_FMA GGML_F32xt_FMA +# define GLA_VECTOR_SIZE 8 +#elif defined(__ARM_NEON) && defined(__aarch64__) +# define GGML_F32X GGML_F32x4 +# define GGML_F32X_SET1 GGML_F32x4_SET1 +# define GGML_F32X_LOAD GGML_F32x4_LOAD +# define GGML_F32X_STORE GGML_F32x4_STORE +# define GGML_F32X_MUL GGML_F32x4_MUL +# define GGML_F32X_FMA GGML_F32x4_FMA +# define GLA_VECTOR_SIZE 4 +#endif + +#ifdef GLA_VECTOR_SIZE + int gla_vector_size; +# if defined(__ARM_FEATURE_SVE) + gla_vector_size = svcntw(); +# else + gla_vector_size = GLA_VECTOR_SIZE; +# endif + const int64_t vec_count = head_size / gla_vector_size; + + for (int64_t t = 0; t < T; t++) { + size_t t_offset = t * 
t_stride; + size_t state_offset = head_size * C * (t / (T / n_seqs)); + float * state_cur = state + state_offset; + float * state_prev = t % (T / n_seqs) ? state_cur : (float *) dst->src[4]->data + state_offset; + + for (int64_t h = h_start; h < h_end; h++) { + size_t h_offset = h * h_stride; + size_t t_h_offset = t_offset + h_offset; + size_t h_2d_offset = h * h_stride_2d; + + for (int64_t i = 0; i < head_size; i++) { + size_t t_h_i_offset = t_h_offset + i; + size_t h_2d_i_offset = h_2d_offset + i * h_stride; + + float k_val = k[t_h_i_offset]; + float q_val = q[t_h_i_offset] * scale; + float g_val = g[t_h_i_offset]; + + // Broadcast scalar values to vectors + GGML_F32X k_vec = GGML_F32X_SET1(k_val); + GGML_F32X q_vec = GGML_F32X_SET1(q_val); + GGML_F32X g_vec = GGML_F32X_SET1(g_val); + + for (int64_t j = 0; j < vec_count; j++) { + size_t base_j = j * gla_vector_size; + size_t t_h_j_offset = t_h_offset + base_j; + size_t h_2d_i_j_offset = h_2d_i_offset + base_j; + + // Load x elements at once + GGML_F32X v_vec = GGML_F32X_LOAD(&v[t_h_j_offset]); + GGML_F32X prev_state_vec = GGML_F32X_LOAD(&state_prev[h_2d_i_j_offset]); + GGML_F32X dst_vec = GGML_F32X_LOAD(&dst_data[t_h_j_offset]); + + // Compute kv = v * k + GGML_F32X kv_vec = GGML_F32X_MUL(v_vec, k_vec); + + // Compute temp = prev_state * g + kv + GGML_F32X temp_vec = GGML_F32X_FMA(kv_vec, prev_state_vec, g_vec); + + // Update dst: dst += temp * q + dst_vec = GGML_F32X_FMA(dst_vec, temp_vec, q_vec); + GGML_F32X_STORE(&dst_data[t_h_j_offset], dst_vec); + + // Update state + GGML_F32X_STORE(&state_cur[h_2d_i_j_offset], temp_vec); + } + + // Handle remaining elements, this will not be used. + for (int64_t j = vec_count * gla_vector_size; j < head_size; j++) { + size_t t_h_j_offset = t_h_offset + j; + size_t h_2d_i_j_offset = h_2d_i_offset + j; + float v_val = v[t_h_j_offset]; + float kv_val = v_val * k_val; + float prev_state_val = state_prev[h_2d_i_j_offset]; + float temp_val = kv_val + prev_state_val * g_val; + dst_data[t_h_j_offset] += temp_val * q_val; + state_cur[h_2d_i_j_offset] = temp_val; + } + } + } + } + +#else + for (int64_t t = 0; t < T; t++) { + size_t t_offset = t * t_stride; + size_t state_offset = head_size * C * (t / (T / n_seqs)); + float * state_cur = state + state_offset; + float * state_prev = t % (T / n_seqs) ? 
state_cur : (float *) dst->src[4]->data + state_offset; + + for (int64_t h = h_start; h < h_end; h++) { + size_t h_offset = h * h_stride; + size_t t_h_offset = t_offset + h_offset; + size_t h_2d_offset = h * h_stride_2d; + + for (int64_t i = 0; i < head_size; i++) { + size_t t_h_i_offset = t_h_offset + i; + size_t h_2d_i_offset = h_2d_offset + i * h_stride; + + float k_val = k[t_h_i_offset]; + float q_val = q[t_h_i_offset] * scale; + float g_val = g[t_h_i_offset]; + + for (int64_t j = 0; j < head_size; j++) { + size_t t_h_j_offset = t_h_offset + j; + size_t h_2d_i_j_offset = h_2d_i_offset + j; + + float v_val = v[t_h_j_offset]; + float kv_val = v_val * k_val; + float prev_state_val = state_prev[h_2d_i_j_offset]; + float temp_val = prev_state_val * g_val + kv_val; + dst_data[t_h_j_offset] += temp_val * q_val; + state_cur[h_2d_i_j_offset] = temp_val; + } + } + } + } +#endif +} + +void ggml_compute_forward_gla(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_gla_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +static void ggml_compute_forward_solve_tri_f32(const struct ggml_compute_params * params, struct ggml_tensor * dst) { + const struct ggml_tensor * src0 = dst->src[0]; // A (lower triangular) + const struct ggml_tensor * src1 = dst->src[1]; // B (RHS) + + GGML_TENSOR_BINARY_OP_LOCALS; + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_F32); + + GGML_ASSERT(ne00 == ne01); // A must be square + GGML_ASSERT(ne0 == ne10); // solution cols == B cols + GGML_ASSERT(ne1 == ne11); // solution rows == B rows + + GGML_ASSERT(ne02 == ne12 && ne12 == ne2); + GGML_ASSERT(ne03 == ne13 && ne13 == ne3); + + const int ith = params->ith; + const int nth = params->nth; + + const int64_t k = ne10; // number of RHS columns + const int64_t n = ne11; // A is n×n + const int64_t nr = + ne02 * ne03 * k; // we're parallelizing on columns here, so seq x token x column will be the unit + + // chunks per thread + const int64_t dr = (nr + nth - 1) / nth; + + // chunk range for this thread + const int64_t ir0 = dr * ith; + const int64_t ir1 = MIN(ir0 + dr, nr); + + const float * A = (const float *) src0->data; // [n, n, B1, B2] + const float * B = (const float *) src1->data; // [n, k, B1, B2] + float * X = (float *) dst->data; // [n, k, B1, B2] + + for (int64_t ir = ir0; ir < ir1; ++ir) { + const int64_t i03 = ir / (ne02 * k); + const int64_t i02 = (ir - i03 * ne02 * k) / k; + const int64_t i01 = (ir - i03 * ne02 * k - i02 * k); + + const float * A_batch = A + i02 * nb02 / sizeof(float) + i03 * nb03 / sizeof(float); + const float * B_batch = B + i02 * nb12 / sizeof(float) + i03 * nb13 / sizeof(float); + + float * X_batch = X + i02 * nb2 / sizeof(float) + i03 * nb3 / sizeof(float); + + for (int64_t i00 = 0; i00 < n; ++i00) { + float sum = 0.0f; + for (int64_t t = 0; t < i00; ++t) { + sum += A_batch[i00 * n + t] * X_batch[t * k + i01]; + } + + const float diag = A_batch[i00 * n + i00]; + GGML_ASSERT(diag != 0.0f && "Zero diagonal in triangular matrix"); + X_batch[i00 * k + i01] = (B_batch[i00 * k + i01] - sum) / diag; + } + } +} + +void ggml_compute_forward_solve_tri(const struct ggml_compute_params * params, struct ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + if (src0->type == GGML_TYPE_F32 && src1->type == 
GGML_TYPE_F32) { + ggml_compute_forward_solve_tri_f32(params, dst); + } else { + GGML_ABORT("fatal error"); + } +} + +// ggml_compute_forward_rwkv_wkv7 + +static void ggml_compute_forward_rwkv_wkv7_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const int64_t T = dst->src[1]->ne[2]; + const int64_t C = dst->ne[0]; + const int64_t HEADS = dst->src[1]->ne[1]; + const int64_t n_seqs = dst->src[6]->ne[1]; + const int64_t head_size = C / HEADS; + + float * dst_data = (float *) dst->data; + float * state = ((float *) dst->data) + C * T; + + const int ith = params->ith; + const int nth = params->nth; + + if (ith >= HEADS) { + return; + } + + const int h_start = (HEADS * ith) / nth; + const int h_end = ((HEADS * (ith + 1)) / nth < HEADS) ? (HEADS * (ith + 1)) / nth : HEADS; + + float * r = (float *) dst->src[0]->data; + float * w = (float *) dst->src[1]->data; + float * k = (float *) dst->src[2]->data; + float * v = (float *) dst->src[3]->data; + float * a = (float *) dst->src[4]->data; + float * b = (float *) dst->src[5]->data; + + int64_t t_stride = HEADS * head_size; // Same to C + + int64_t h_stride = C / HEADS; + GGML_ASSERT(C % HEADS == 0); // C must be divisible by HEADS + int64_t h_stride_2d = head_size * head_size; + +#if defined(GGML_SIMD) +# if defined(__ARM_FEATURE_SVE) || defined(__riscv_v_intrinsic) + // scalar Route to scalar implementation //TODO: Write SVE code and RVV code + for (int64_t t = 0; t < T; t++) { + int64_t t_offset = t * t_stride; + int64_t state_offset = head_size * C * (t / (T / n_seqs)); + float * state_cur = state + state_offset; + float * state_prev = t % (T / n_seqs) ? state_cur : (float *) dst->src[6]->data + state_offset; + + for (int64_t h = h_start; h < h_end; h++) { + int64_t h_offset = h * h_stride; + int64_t t_h_offset = t_offset + h_offset; + int64_t h_2d_offset = h * h_stride_2d; + + for (int64_t i = 0; i < head_size; i++) { + int64_t t_h_i_offset = t_h_offset + i; + int64_t h_2d_i_offset = h_2d_offset + i * h_stride; + + float v_val = v[t_h_i_offset]; + + float sa = 0, result = 0; + for (int64_t j = 0; j < head_size; j++) { + sa += a[t_h_offset + j] * state_prev[h_2d_i_offset + j]; + } + + for (int64_t j = 0; j < head_size; j++) { + int64_t t_h_j_offset = t_h_offset + j; + int64_t h_2d_i_j_offset = h_2d_i_offset + j; + + float r_val = r[t_h_j_offset]; + float w_val = w[t_h_j_offset]; + float k_val = k[t_h_j_offset]; + float b_val = b[t_h_j_offset]; + float kv_val = v_val * k_val; + float prev_state_val = state_prev[h_2d_i_j_offset]; + state_cur[h_2d_i_j_offset] = prev_state_val * w_val + kv_val + sa * b_val; + result += state_cur[h_2d_i_j_offset] * r_val; + } + dst_data[t_h_i_offset] = result; + } + } + } +# else + for (int64_t t = 0; t < T; t++) { + int64_t t_offset = t * t_stride; + int64_t state_offset = head_size * C * (t / (T / n_seqs)); + float * state_cur = state + state_offset; + float * state_prev = t % (T / n_seqs) ? 
state_cur : (float *) dst->src[6]->data + state_offset; + + for (int64_t h = h_start; h < h_end; h++) { + int64_t h_offset = h * h_stride; + int64_t t_h_offset = t_offset + h_offset; + int64_t h_2d_offset = h * h_stride_2d; + + for (int64_t ii = 0; ii < head_size; ii++) { + int64_t t_h_i_offset = t_h_offset + ii; + int64_t h_2d_i_offset = h_2d_offset + ii * h_stride; + + GGML_F32_VEC v_vec = GGML_F32_VEC_SET1(v[t_h_i_offset]); + + float sa = 0; + { + GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; + GGML_F32_VEC ax[GGML_F32_ARR]; + GGML_F32_VEC ay[GGML_F32_ARR]; + for (int64_t j = 0; j < head_size; j += GGML_F32_STEP) { + for (int64_t kk = 0; kk < GGML_F32_ARR; kk++) { + ax[kk] = GGML_F32_VEC_LOAD(&a[t_h_offset + j + kk * GGML_F32_EPR]); + ay[kk] = GGML_F32_VEC_LOAD(&state_prev[h_2d_i_offset + j + kk * GGML_F32_EPR]); + sum[kk] = GGML_F32_VEC_FMA(sum[kk], ax[kk], ay[kk]); + } + } + GGML_F32_VEC_REDUCE(sa, sum); + } + + GGML_F32_VEC sa_vec = GGML_F32_VEC_SET1(sa); + + int64_t j = 0; + GGML_F32_VEC result_vec[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; + for (; j < head_size; j += GGML_F32_STEP) { + for (int64_t kk = 0; kk < GGML_F32_ARR; kk++) { + int64_t t_h_j_offset = t_h_offset + j + kk * GGML_F32_EPR; + int64_t h_2d_i_j_offset = h_2d_i_offset + j + kk * GGML_F32_EPR; + + GGML_F32_VEC r_vec = GGML_F32_VEC_LOAD(&r[t_h_j_offset]); + GGML_F32_VEC w_vec = GGML_F32_VEC_LOAD(&w[t_h_j_offset]); + GGML_F32_VEC k_vec = GGML_F32_VEC_LOAD(&k[t_h_j_offset]); + GGML_F32_VEC b_vec = GGML_F32_VEC_LOAD(&b[t_h_j_offset]); + + k_vec = GGML_F32_VEC_MUL(v_vec, k_vec); + + GGML_F32_VEC state_vec = GGML_F32_VEC_LOAD(&state_prev[h_2d_i_j_offset]); + // kv + s * decay + sa * b + state_vec = GGML_F32_VEC_FMA(k_vec, state_vec, w_vec); + state_vec = GGML_F32_VEC_FMA(state_vec, sa_vec, b_vec); + GGML_F32_VEC_STORE(&state_cur[h_2d_i_j_offset], state_vec); + + result_vec[kk] = GGML_F32_VEC_FMA(result_vec[kk], state_vec, r_vec); + } + } + GGML_F32_VEC_REDUCE(dst_data[t_h_i_offset], result_vec); + + // There shouldn't be left-overs though. + for (; j < head_size; j++) { + int64_t t_h_j_offset = t_h_offset + j; + int64_t h_2d_i_j_offset = h_2d_i_offset + j; + + float r_val = r[t_h_j_offset]; + float w_val = w[t_h_j_offset]; + float k_val = k[t_h_j_offset]; + float b_val = b[t_h_j_offset]; + float kv_val = v[t_h_i_offset] * k_val; + + float prev_state_val = state_prev[h_2d_i_j_offset]; + state_cur[h_2d_i_j_offset] = prev_state_val * w_val + kv_val + sa * b_val; + dst_data[t_h_i_offset] += state_cur[h_2d_i_j_offset] * r_val; + } + } + } + } +# endif +#else + for (int64_t t = 0; t < T; t++) { + int64_t t_offset = t * t_stride; + int64_t state_offset = head_size * C * (t / (T / n_seqs)); + float * state_cur = state + state_offset; + float * state_prev = t % (T / n_seqs) ? 
state_cur : (float *) dst->src[6]->data + state_offset; + + for (int64_t h = h_start; h < h_end; h++) { + int64_t h_offset = h * h_stride; + int64_t t_h_offset = t_offset + h_offset; + int64_t h_2d_offset = h * h_stride_2d; + + for (int64_t i = 0; i < head_size; i++) { + int64_t t_h_i_offset = t_h_offset + i; + int64_t h_2d_i_offset = h_2d_offset + i * h_stride; + + float v_val = v[t_h_i_offset]; + + float sa = 0, result = 0; + for (int64_t j = 0; j < head_size; j++) { + sa += a[t_h_offset + j] * state_prev[h_2d_i_offset + j]; + } + + for (int64_t j = 0; j < head_size; j++) { + int64_t t_h_j_offset = t_h_offset + j; + int64_t h_2d_i_j_offset = h_2d_i_offset + j; + + float r_val = r[t_h_j_offset]; + float w_val = w[t_h_j_offset]; + float k_val = k[t_h_j_offset]; + float b_val = b[t_h_j_offset]; + float kv_val = v_val * k_val; + float prev_state_val = state_prev[h_2d_i_j_offset]; + state_cur[h_2d_i_j_offset] = prev_state_val * w_val + kv_val + sa * b_val; + result += state_cur[h_2d_i_j_offset] * r_val; + } + dst_data[t_h_i_offset] = result; + } + } + } +#endif +} + +void ggml_compute_forward_rwkv_wkv7(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_rwkv_wkv7_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_map_custom1 + +void ggml_compute_forward_map_custom1(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * a = dst->src[0]; + + struct ggml_map_custom1_op_params p; + memcpy(&p, dst->op_params, sizeof(p)); + + p.fun(dst, a, params->ith, params->nth, p.userdata); +} + +// ggml_compute_forward_map_custom2 + +void ggml_compute_forward_map_custom2(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * a = dst->src[0]; + const ggml_tensor * b = dst->src[1]; + + struct ggml_map_custom2_op_params p; + memcpy(&p, dst->op_params, sizeof(p)); + + p.fun(dst, a, b, params->ith, params->nth, p.userdata); +} + +// ggml_compute_forward_map_custom3 + +void ggml_compute_forward_map_custom3(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * a = dst->src[0]; + const ggml_tensor * b = dst->src[1]; + const ggml_tensor * c = dst->src[2]; + + struct ggml_map_custom3_op_params p; + memcpy(&p, dst->op_params, sizeof(p)); + + p.fun(dst, a, b, c, params->ith, params->nth, p.userdata); +} + +// ggml_compute_forward_custom + +void ggml_compute_forward_custom(const struct ggml_compute_params * params, struct ggml_tensor * dst) { + struct ggml_custom_op_params p; + memcpy(&p, dst->op_params, sizeof(p)); + + p.fun(dst, params->ith, params->nth, p.userdata); +} + +// ggml_compute_forward_cross_entropy_loss + +static void ggml_compute_forward_cross_entropy_loss_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src1 = dst->src[1]; + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT(src1->type == GGML_TYPE_F32); + GGML_ASSERT(src0->nb[0] == ggml_type_size(src0->type)); + GGML_ASSERT(src1->nb[0] == ggml_type_size(src1->type)); + GGML_ASSERT(ggml_are_same_shape(src0, src1)); + GGML_ASSERT(ggml_is_scalar(dst)); + GGML_ASSERT(dst->type == GGML_TYPE_F32); + + // TODO: handle transposed/permuted matrices + const int64_t nc = src0->ne[0]; + const int64_t nr = ggml_nrows(src0); + + const int ith = params->ith; + const int nth = params->nth; + + float * sums = (float *) params->wdata; + 
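In formula terms, the threaded loop below accumulates, per row of logits s0 against a target distribution s1 of length nc, the term s1 · log_softmax(s0); the final reduction then scales the total by -1/nr. A minimal single-row reference follows (illustrative only, not the ggml API).

#include <math.h>

// Sketch (not part of the patch): one row's contribution to the loss; the caller
// sums this over all rows and multiplies by -1/nr, as the code below does.
static float cross_entropy_row_ref(const float * s0, const float * s1, int nc) {
    float max = -INFINITY;
    for (int i = 0; i < nc; ++i) { if (s0[i] > max) max = s0[i]; }
    float sum = 0.0f;
    for (int i = 0; i < nc; ++i) { sum += expf(s0[i] - max); }
    const float log_z = max + logf(sum);                                 // log softmax normalizer
    float acc = 0.0f;
    for (int i = 0; i < nc; ++i) { acc += s1[i] * (s0[i] - log_z); }     // s1 . log_softmax(s0)
    return acc;
}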
float * st = ((float *) params->wdata) + nth + ith * nc; + float sum_thread = 0.0f; + + GGML_ASSERT(params->wsize >= sizeof(float) * (nth + nth * nc)); + + // rows per thread + const int64_t dr = (nr + nth - 1) / nth; + + // row range for this thread + const int64_t ir0 = dr * ith; + const int64_t ir1 = MIN(ir0 + dr, nr); + + for (int64_t i1 = ir0; i1 < ir1; ++i1) { + const float * s0 = (const float *) ((const char *) src0->data + i1 * src0->nb[1]); + const float * s1 = (const float *) ((const char *) src1->data + i1 * src1->nb[1]); + +#ifndef NDEBUG + for (int64_t i = 0; i < nc; ++i) { + //printf("p[%d] = %f\n", i, p[i]); + assert(!isnan(s0[i])); + assert(!isnan(s1[i])); + } +#endif + + float max = -INFINITY; + ggml_vec_max_f32(nc, &max, s0); + const ggml_float sum_softmax = ggml_vec_log_soft_max_f32(nc, st, s0, max); + assert(sum_softmax >= 0.0); + + ggml_vec_add1_f32(nc, st, st, -sum_softmax); + ggml_vec_mul_f32(nc, st, st, s1); + + float sum_st = 0.0f; + ggml_vec_sum_f32(nc, &sum_st, st); + sum_thread += sum_st; + +#ifndef NDEBUG + for (int64_t i = 0; i < nc; ++i) { + assert(!isnan(st[i])); + assert(!isinf(st[i])); + } +#endif + } + sums[ith] = sum_thread; + ggml_barrier(params->threadpool); + + if (ith == 0) { + float * dp = (float *) dst->data; + ggml_vec_sum_f32(nth, dp, sums); + dp[0] *= -1.0f / (float) nr; + } +} + +void ggml_compute_forward_cross_entropy_loss(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_cross_entropy_loss_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +// ggml_compute_forward_cross_entropy_loss_back + +static void ggml_compute_forward_cross_entropy_loss_back_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * grad = dst->src[0]; // gradient of forward pass output + const ggml_tensor * src0f = dst->src[1]; // src0 of forward pass + const ggml_tensor * src1f = dst->src[2]; // src1 of forward pass + + GGML_ASSERT(ggml_is_contiguous(dst)); + GGML_ASSERT(ggml_is_contiguous(src0f)); + GGML_ASSERT(ggml_is_contiguous(src1f)); + GGML_ASSERT(ggml_is_contiguous(grad)); + GGML_ASSERT(ggml_are_same_shape(src0f, src1f) && ggml_are_same_shape(src0f, dst)); + + const int64_t ith = params->ith; + const int64_t nth = params->nth; + + // TODO: handle transposed/permuted matrices + const int64_t nc = src0f->ne[0]; + const int64_t nr = ggml_nrows(src0f); + + // rows per thread + const int64_t dr = (nr + nth - 1) / nth; + + // row range for this thread + const int64_t ir0 = dr * ith; + const int64_t ir1 = MIN(ir0 + dr, nr); + + const float d_by_nr = ((const float *) grad->data)[0] / (float) nr; + + for (int64_t i1 = ir0; i1 < ir1; i1++) { + float * ds0 = (float *) ((char *) dst->data + i1 * dst->nb[1]); + const float * s0 = (const float *) ((const char *) src0f->data + i1 * src0f->nb[1]); + const float * s1 = (const float *) ((const char *) src1f->data + i1 * src1f->nb[1]); + +#ifndef NDEBUG + for (int64_t i = 0; i < nc; ++i) { + //printf("p[%d] = %f\n", i, p[i]); + assert(!isnan(s0[i])); + assert(!isnan(s1[i])); + } +#endif + + // soft_max + float max = -INFINITY; + ggml_vec_max_f32(nc, &max, s0); + const ggml_float sum = ggml_vec_soft_max_f32(nc, ds0, s0, max); + assert(sum > 0.0); + ggml_vec_scale_f32(nc, ds0, 1.0 / sum); + + // grad(src0f) = (softmax(src0f) - src1f) * grad(cross_entropy_loss(src0f, src1f)) / nr + ggml_vec_sub_f32(nc, ds0, ds0, s1); + ggml_vec_scale_f32(nc, ds0, 
d_by_nr); + +#ifndef NDEBUG + for (int64_t i = 0; i < nc; ++i) { + assert(!isnan(ds0[i])); + assert(!isinf(ds0[i])); + } +#endif + } +} + +void ggml_compute_forward_cross_entropy_loss_back(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_cross_entropy_loss_back_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +static void ggml_compute_forward_opt_step_adamw_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src0_grad = dst->src[1]; + const ggml_tensor * src0_grad_m = dst->src[2]; + const ggml_tensor * src0_grad_v = dst->src[3]; + const ggml_tensor * adamw_params = dst->src[4]; + + GGML_ASSERT(ggml_are_same_shape(src0, src0_grad)); + GGML_ASSERT(ggml_are_same_shape(src0, src0_grad_m)); + GGML_ASSERT(ggml_are_same_shape(src0, src0_grad_v)); + GGML_ASSERT(ggml_nelements(adamw_params) == 7); + + const int ith = params->ith; + const int nth = params->nth; + + const int nr = ggml_nrows(src0); + + GGML_TENSOR_UNARY_OP_LOCALS + GGML_ASSERT(nb00 == sizeof(float)); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + const float * adamw_params_ptr = ggml_get_data_f32(adamw_params); + + const float alpha = adamw_params_ptr[0]; + const float beta1 = adamw_params_ptr[1]; + const float beta2 = adamw_params_ptr[2]; + const float eps = adamw_params_ptr[3]; + const float wd = adamw_params_ptr[4]; + const float beta1h = adamw_params_ptr[5]; + const float beta2h = adamw_params_ptr[6]; + const float keep = 1.f - alpha * wd; + for (int ir = ir0; ir < ir1; ++ir) { + const int64_t i03 = ir / (ne02 * ne01); + const int64_t i02 = (ir - i03 * ne02 * ne01) / ne01; + const int64_t i01 = (ir - i03 * ne02 * ne01 - i02 * ne01); + + const size_t offset = i03 * nb03 + i02 * nb02 + i01 * nb01; + + float * w = (float *) ((char *) src0->data + offset); // weight + const float * g = (const float *) ((const char *) src0_grad->data + offset); // grad + float * m = (float *) ((char *) src0_grad_m->data + offset); + float * v = (float *) ((char *) src0_grad_v->data + offset); + + for (int i00 = 0; i00 < ne00; ++i00) { + m[i00] = m[i00] * beta1 + g[i00] * (1.0f - beta1); + v[i00] = v[i00] * beta2 + g[i00] * g[i00] * (1.0f - beta2); + + const float mh = m[i00] * beta1h; + const float vh = sqrtf(v[i00] * beta2h) + eps; + + // The weight decay is applied independently of the Adam momenta m and v. + // This is NOT equivalent to l2 regularization that adds w[i00]*w[i00] to the loss. 
+ // See: https://arxiv.org/pdf/1711.05101v3.pdf + w[i00] = w[i00] * keep - alpha * mh / vh; + } + } +} + +void ggml_compute_forward_opt_step_adamw(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_opt_step_adamw_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error"); + } + } +} + +static void ggml_compute_forward_opt_step_sgd_f32(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const ggml_tensor * src0_grad = dst->src[1]; + const ggml_tensor * sgd_params = dst->src[2]; + + GGML_ASSERT(ggml_are_same_shape(src0, src0_grad)); + GGML_ASSERT(ggml_nelements(sgd_params) == 2); + + const int ith = params->ith; + const int nth = params->nth; + + const int nr = ggml_nrows(src0); + + GGML_TENSOR_UNARY_OP_LOCALS + GGML_ASSERT(nb00 == sizeof(float)); + + // rows per thread + const int dr = (nr + nth - 1) / nth; + + // row range for this thread + const int ir0 = dr * ith; + const int ir1 = MIN(ir0 + dr, nr); + + // using adamw param subset we care about - alpha, wd - could have a separate struct + const float * sgd_params_ptr = ggml_get_data_f32(sgd_params); + const float alpha = sgd_params_ptr[0]; + const float keep = 1.f - alpha * sgd_params_ptr[1]; + + for (int ir = ir0; ir < ir1; ++ir) { + const int64_t i03 = ir / (ne02 * ne01); + const int64_t i02 = (ir - i03 * ne02 * ne01) / ne01; + const int64_t i01 = (ir - i03 * ne02 * ne01 - i02 * ne01); + + const size_t offset = i03 * nb03 + i02 * nb02 + i01 * nb01; + + float * w = (float *) ((char *) src0->data + offset); // weight + const float * g = (const float *) ((const char *) src0_grad->data + offset); // grad + + for (int i00 = 0; i00 < ne00; ++i00) { + w[i00] = w[i00] * keep - alpha * g[i00]; + } + } +} + +void ggml_compute_forward_opt_step_sgd(const ggml_compute_params * params, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + + switch (src0->type) { + case GGML_TYPE_F32: + { + ggml_compute_forward_opt_step_sgd_f32(params, dst); + } + break; + default: + { + GGML_ABORT("fatal error - sgd is F32 only"); + } + } +} From d383a2cb6c3ec2a55d75a8fe67dd49583fd51862 Mon Sep 17 00:00:00 2001 From: Phylliida Date: Sat, 29 Nov 2025 18:58:19 -0800 Subject: [PATCH 22/39] don't change so much stuff --- ggml/src/ggml-cpu/ops.cpp | 30195 +----------------------------------- 1 file changed, 1 insertion(+), 30194 deletions(-) diff --git a/ggml/src/ggml-cpu/ops.cpp b/ggml/src/ggml-cpu/ops.cpp index f9fbe0cb607..c72789b10ec 100644 --- a/ggml/src/ggml-cpu/ops.cpp +++ b/ggml/src/ggml-cpu/ops.cpp @@ -7605,6 +7605,7 @@ static void ggml_compute_forward_pad_f32( } } + void ggml_compute_forward_pad( const ggml_compute_params * params, ggml_tensor * dst) { @@ -10409,30197 +10410,3 @@ void ggml_compute_forward_opt_step_sgd(const ggml_compute_params * params, ggml_ } } } -#include "ops.h" - -#include "binary-ops.h" -#include "ggml-cpu.h" -#include "ggml-impl.h" -#include "ggml.h" -#include "unary-ops.h" -#include "vec.h" - -#include -#include -#include -#include - -// ggml_compute_forward_dup - -static void ggml_compute_forward_dup_same_cont(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); - GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0)); - GGML_ASSERT(src0->type == dst->type); - - const size_t nb0 = ggml_type_size(src0->type); - - const int 
ith = params->ith; // thread index - const int nth = params->nth; // number of threads - - // parallelize by blocks - const int nk = ggml_nelements(src0) / ggml_blck_size(src0->type); - const int dr = (nk + nth - 1) / nth; - const int k0 = dr * ith; - const int k1 = MIN(k0 + dr, nk); - - if (k0 < k1) { - memcpy(((char *) dst->data + k0 * nb0), ((char *) src0->data + k0 * nb0), (k1 - k0) * nb0); - } -} - -template -static void ggml_compute_forward_dup_flt(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); - GGML_ASSERT(!ggml_is_quantized(src0->type) && !ggml_is_quantized(dst->type)); - - GGML_TENSOR_UNARY_OP_LOCALS - - const int ith = params->ith; // thread index - const int nth = params->nth; // number of threads - - // parallelize by rows - const int nr = ne01; - // number of rows per thread - const int dr = (nr + nth - 1) / nth; - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - // case: type & row size equal - if (src0->type == dst->type && ne00 == ne0 && nb00 == ggml_type_size(src0->type) && - nb0 == ggml_type_size(dst->type)) { - // copy by rows - const size_t rs = ne00 * nb00; - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = ir0; i01 < ir1; i01++) { - memcpy(((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3), - ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03), rs); - } - } - } - return; - } - - // case: dst tensor is contiguous - if (ggml_is_contiguous(dst)) { - if (nb00 == sizeof(src_t)) { - if constexpr (std::is_same_v) { - // same type - size_t id = 0; - const size_t rs = ne00 * nb00; - char * dst_ptr = (char *) dst->data; - - for (int i03 = 0; i03 < ne03; i03++) { - for (int i02 = 0; i02 < ne02; i02++) { - id += rs * ir0; - for (int i01 = ir0; i01 < ir1; i01++) { - const char * src0_ptr = (char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03; - memcpy(dst_ptr + id, src0_ptr, rs); - id += rs; - } - id += rs * (ne01 - ir1); - } - } - } else { - // casting between non-quantized types - size_t id = 0; - dst_t * dst_ptr = (dst_t *) dst->data; - - for (int i03 = 0; i03 < ne03; i03++) { - for (int i02 = 0; i02 < ne02; i02++) { - id += ne00 * ir0; - for (int i01 = ir0; i01 < ir1; i01++) { - const src_t * src0_ptr = - (src_t *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); - for (int i00 = 0; i00 < ne00; i00++) { - float tmp = type_conversion_table::to_f32(src0_ptr[i00]); - dst_ptr[id] = type_conversion_table::from_f32(tmp); - id++; - } - } - id += ne00 * (ne01 - ir1); - } - } - } - } else { - //printf("%s: this is not optimal - fix me\n", __func__); - - size_t id = 0; - dst_t * dst_ptr = (dst_t *) dst->data; - - for (int i03 = 0; i03 < ne03; i03++) { - for (int i02 = 0; i02 < ne02; i02++) { - id += ne00 * ir0; - for (int i01 = ir0; i01 < ir1; i01++) { - for (int i00 = 0; i00 < ne00; i00++) { - const src_t * src0_ptr = - (src_t *) ((char *) src0->data + i00 * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03); - - float tmp = type_conversion_table::to_f32(*src0_ptr); - dst_ptr[id] = type_conversion_table::from_f32(tmp); - id++; - } - } - id += ne00 * (ne01 - ir1); - } - } - } - return; - } - - // dst counters - int64_t i10 = 0; - int64_t i11 = 0; - int64_t i12 = 0; - int64_t i13 = 0; - - if constexpr (std::is_same_v) { - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - i10 += ne00 * ir0; - while 
(i10 >= ne0) { - i10 -= ne0; - if (++i11 == ne1) { - i11 = 0; - if (++i12 == ne2) { - i12 = 0; - if (++i13 == ne3) { - i13 = 0; - } - } - } - } - for (int64_t i01 = ir0; i01 < ir1; i01++) { - for (int64_t i00 = 0; i00 < ne00; i00++) { - const char * src0_ptr = - ((char *) src0->data + i00 * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03); - char * dst_ptr = ((char *) dst->data + i10 * nb0 + i11 * nb1 + i12 * nb2 + i13 * nb3); - - memcpy(dst_ptr, src0_ptr, sizeof(dst_t)); - - if (++i10 == ne00) { - i10 = 0; - if (++i11 == ne01) { - i11 = 0; - if (++i12 == ne02) { - i12 = 0; - if (++i13 == ne03) { - i13 = 0; - } - } - } - } - } - } - i10 += ne00 * (ne01 - ir1); - while (i10 >= ne0) { - i10 -= ne0; - if (++i11 == ne1) { - i11 = 0; - if (++i12 == ne2) { - i12 = 0; - if (++i13 == ne3) { - i13 = 0; - } - } - } - } - } - } - - } else { - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - i10 += ne00 * ir0; - while (i10 >= ne0) { - i10 -= ne0; - if (++i11 == ne1) { - i11 = 0; - if (++i12 == ne2) { - i12 = 0; - if (++i13 == ne3) { - i13 = 0; - } - } - } - } - for (int64_t i01 = ir0; i01 < ir1; i01++) { - for (int64_t i00 = 0; i00 < ne00; i00++) { - const char * src0_ptr = - ((char *) src0->data + i00 * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03); - char * dst_ptr = ((char *) dst->data + i10 * nb0 + i11 * nb1 + i12 * nb2 + i13 * nb3); - - float tmp = type_conversion_table::to_f32(*(const src_t *) src0_ptr); - *(dst_t *) dst_ptr = type_conversion_table::from_f32(tmp); - - if (++i10 == ne0) { - i10 = 0; - if (++i11 == ne1) { - i11 = 0; - if (++i12 == ne2) { - i12 = 0; - if (++i13 == ne3) { - i13 = 0; - } - } - } - } - } - } - i10 += ne00 * (ne01 - ir1); - while (i10 >= ne0) { - i10 -= ne0; - if (++i11 == ne1) { - i11 = 0; - if (++i12 == ne2) { - i12 = 0; - if (++i13 == ne3) { - i13 = 0; - } - } - } - } - } - } - } -} - -template -static void ggml_compute_forward_dup_to_q(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); - GGML_ASSERT(!ggml_is_quantized(src0->type)); - - GGML_TENSOR_UNARY_OP_LOCALS - - const int ith = params->ith; // thread index - const int nth = params->nth; // number of threads - - // parallelize by rows - const int nr = ne01; - // number of rows per thread - const int dr = (nr + nth - 1) / nth; - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - if (ggml_is_contiguous(dst) && nb00 == sizeof(src_t) && ggml_get_type_traits_cpu(dst->type)->from_float) { - // casting non-quantized types --> intermediate f32 --> quantized - const ggml_from_float_t quantize_row_q = ggml_get_type_traits_cpu(dst->type)->from_float; - float * src0_f32 = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith; - - size_t id = 0; - size_t rs = nb0 * (ne00 / ggml_blck_size(dst->type)); - char * dst_ptr = (char *) dst->data; - - for (int i03 = 0; i03 < ne03; i03++) { - for (int i02 = 0; i02 < ne02; i02++) { - id += rs * ir0; - for (int i01 = ir0; i01 < ir1; i01++) { - const src_t * src0_ptr = (src_t *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); - - for (int i00 = 0; i00 < ne00; i00++) { - src0_f32[i00] = type_conversion_table::to_f32(src0_ptr[i00]); - } - - quantize_row_q(src0_f32, dst_ptr + id, ne00); - id += rs; - } - id += rs * (ne01 - ir1); - } - } - } else { - // printf("%s %s\n", ggml_type_name(src0->type), ggml_type_name(dst->type)); - GGML_ABORT("not implemented"); - } -} - -// A 
simplified version of ggml_compute_forward_dup that doesn't do float upcasting, and just plain old memcpy. -static void ggml_compute_forward_dup_bytes(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); - GGML_ASSERT(src0->type == dst->type); - - GGML_TENSOR_UNARY_OP_LOCALS; - - if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst)) { - ggml_compute_forward_dup_same_cont(params, dst); - return; - } - - const size_t type_size = ggml_type_size(src0->type); - - const int ith = params->ith; // thread index - const int nth = params->nth; // number of threads - - // parallelize by rows - const int nr = ne01; - // number of rows per thread - const int dr = (nr + nth - 1) / nth; - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - if (src0->type == dst->type && ggml_are_same_shape(src0, dst) && nb00 == type_size && nb0 == type_size) { - // copy by rows - const size_t rs = ggml_row_size(src0->type, ne00); - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = ir0; i01 < ir1; i01++) { - memcpy(((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3), - ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03), rs); - } - } - } - return; - } - - if (ggml_is_contiguous(dst)) { - size_t id = 0; - char * dst_ptr = (char *) dst->data; - const size_t rs = ne00 * type_size; - - if (nb00 == type_size) { - // src0 is contigous on first dimension, copy by rows - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - id += rs * ir0; - for (int64_t i01 = ir0; i01 < ir1; i01++) { - const char * src0_ptr = (char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03; - memcpy(dst_ptr + id, src0_ptr, rs); - id += rs; - } - id += rs * (ne01 - ir1); - } - } - } else { - //printf("%s: this is not optimal - fix me\n", __func__); - - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - id += rs * ir0; - for (int64_t i01 = ir0; i01 < ir1; i01++) { - for (int64_t i00 = 0; i00 < ne00; i00++) { - const char * src0_ptr = - (char *) src0->data + i00 * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03; - memcpy(dst_ptr + id, src0_ptr, type_size); - - id += type_size; - } - } - id += rs * (ne01 - ir1); - } - } - } - - return; - } - - // dst counters - int64_t k10 = 0; - int64_t i11 = 0; - int64_t i12 = 0; - int64_t i13 = 0; - - // number of blocks in a row - const int64_t nk00 = ne00 / ggml_blck_size(src0->type); - const int64_t nk0 = ne0 / ggml_blck_size(dst->type); - - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - k10 += nk00 * ir0; - while (k10 >= nk0) { - k10 -= nk0; - if (++i11 == ne1) { - i11 = 0; - if (++i12 == ne2) { - i12 = 0; - if (++i13 == ne3) { - i13 = 0; - } - } - } - } - for (int64_t i01 = ir0; i01 < ir1; i01++) { - for (int64_t k00 = 0; k00 < nk00; k00++) { - const char * src0_ptr = ((char *) src0->data + k00 * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03); - char * dst_ptr = ((char *) dst->data + k10 * nb0 + i11 * nb1 + i12 * nb2 + i13 * nb3); - - memcpy(dst_ptr, src0_ptr, type_size); - - if (++k10 == nk0) { - k10 = 0; - if (++i11 == ne1) { - i11 = 0; - if (++i12 == ne2) { - i12 = 0; - if (++i13 == ne3) { - i13 = 0; - } - } - } - } - } - } - k10 += nk00 * (ne01 - ir1); - while (k10 >= nk0) { - k10 -= nk0; - if (++i11 == ne1) { - i11 = 0; - if (++i12 == ne2) { - i12 = 0; - if (++i13 == ne3) { 
- i13 = 0; - } - } - } - } - } - } -} - -static void ggml_compute_forward_dup_from_q(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_TENSOR_BINARY_OP_LOCALS - - const ggml_type type = src0->type; - const ggml_to_float_t dequantize_row_q = ggml_get_type_traits(type)->to_float; - - size_t qk = ggml_blck_size(type); - const int64_t nr = ggml_nelements(src1) / qk; - - // destination must be contiguous in the first dimension - GGML_ASSERT(nb10 == ggml_type_size(dst->type)); - // must either have first dimension large enough to hold a row, or fully contiguous - GGML_ASSERT((ne10 % qk) == 0 || ggml_is_contiguous(dst)); - - const int ith = params->ith; - const int nth = params->nth; - - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int64_t ir = ir0; ir < ir1; ++ir) { - uint32_t i = ir * qk; - - const int64_t i03 = i / (ne00 * ne01 * ne02); - const int64_t i02 = (i - i03 * ne00 * ne01 * ne02) / (ne00 * ne01); - const int64_t i01 = (i - i03 * ne00 * ne01 * ne02 - i02 * ne01 * ne00) / ne00; - const int64_t i00 = i - i03 * ne00 * ne01 * ne02 - i02 * ne01 * ne00 - i01 * ne00; - const int64_t x_offset = (i00 / qk) * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03; - - const int64_t i13 = i / (ne10 * ne11 * ne12); - const int64_t i12 = (i - i13 * ne10 * ne11 * ne12) / (ne10 * ne11); - const int64_t i11 = (i - i13 * ne10 * ne11 * ne12 - i12 * ne10 * ne11) / ne10; - const int64_t i10 = i - i13 * ne10 * ne11 * ne12 - i12 * ne10 * ne11 - i11 * ne10; - const int64_t dst_offset = i10 * nb10 + i11 * nb11 + i12 * nb12 + i13 * nb13; - - dequantize_row_q((const void *) ((char *) src0->data + x_offset), (float *) ((char *) dst->data + dst_offset), - qk); - } -} - -void ggml_compute_forward_dup(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - if (src0->type == dst->type) { - ggml_compute_forward_dup_bytes(params, dst); - return; - } - - switch (src0->type) { - case GGML_TYPE_F16: - { - /**/ if (dst->type == GGML_TYPE_F16) { - ggml_compute_forward_dup_flt(params, dst); - } else if (dst->type == GGML_TYPE_BF16) { - ggml_compute_forward_dup_flt(params, dst); - } else if (dst->type == GGML_TYPE_F32) { - ggml_compute_forward_dup_flt(params, dst); - } else { - ggml_compute_forward_dup_to_q(params, dst); - } - } - break; - case GGML_TYPE_BF16: - { - /**/ if (dst->type == GGML_TYPE_F16) { - ggml_compute_forward_dup_flt(params, dst); - } else if (dst->type == GGML_TYPE_BF16) { - ggml_compute_forward_dup_flt(params, dst); - } else if (dst->type == GGML_TYPE_F32) { - ggml_compute_forward_dup_flt(params, dst); - } else { - ggml_compute_forward_dup_to_q(params, dst); - } - } - break; - case GGML_TYPE_F32: - { - /**/ if (dst->type == GGML_TYPE_F16) { - ggml_compute_forward_dup_flt(params, dst); - } else if (dst->type == GGML_TYPE_BF16) { - ggml_compute_forward_dup_flt(params, dst); - } else if (dst->type == GGML_TYPE_F32) { - ggml_compute_forward_dup_flt(params, dst); - } else if (dst->type == GGML_TYPE_I32) { - ggml_compute_forward_dup_flt(params, dst); - } else { - ggml_compute_forward_dup_to_q(params, dst); - } - } - break; - case GGML_TYPE_I32: - { - if (dst->type == GGML_TYPE_F32) { - ggml_compute_forward_dup_flt(params, dst); - } else { - GGML_ABORT("not implemented"); - } - } - break; - default: - { - if (ggml_is_quantized(src0->type) && dst->type == GGML_TYPE_F32) { - 
ggml_compute_forward_dup_from_q(params, dst); - break; - } - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_add - -static void ggml_compute_forward_add_q_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst)); - - const int nr = ggml_nrows(src0); - - GGML_TENSOR_BINARY_OP_LOCALS - - const int ith = params->ith; - const int nth = params->nth; - - const ggml_type type = src0->type; - const ggml_type dtype = dst->type; - const ggml_to_float_t dequantize_row_q = ggml_get_type_traits(type)->to_float; - const ggml_from_float_t quantize_row_q = ggml_get_type_traits_cpu(dtype)->from_float; - - // we don't support permuted src0 or src1 - GGML_ASSERT(nb00 == ggml_type_size(type)); - GGML_ASSERT(nb10 == sizeof(float)); - - // dst cannot be transposed or permuted - GGML_ASSERT(nb0 <= nb1); - GGML_ASSERT(nb1 <= nb2); - GGML_ASSERT(nb2 <= nb3); - - GGML_ASSERT(ggml_is_quantized(src0->type)); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - float * wdata = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith; - - for (int ir = ir0; ir < ir1; ++ir) { - // src0 indices - const int i03 = ir / (ne02 * ne01); - const int i02 = (ir - i03 * ne02 * ne01) / ne01; - const int i01 = (ir - i03 * ne02 * ne01 - i02 * ne01); - - // src1 and dst are same shape as src0 => same indices - const int i13 = i03; - const int i12 = i02; - const int i11 = i01; - - const int i3 = i03; - const int i2 = i02; - const int i1 = i01; - - void * src0_row = (void *) ((char *) src0->data + (i01 * nb01 + i02 * nb02 + i03 * nb03)); - float * src1_row = (float *) ((char *) src1->data + (i11 * nb11 + i12 * nb12 + i13 * nb13)); - void * dst_row = (void *) ((char *) dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb3)); - - assert(ne00 % 32 == 0); - - // unquantize row from src0 to temp buffer - dequantize_row_q(src0_row, wdata, ne00); - // add src1 - ggml_vec_acc_f32(ne00, wdata, src1_row); - // quantize row to dst - if (quantize_row_q != NULL) { - quantize_row_q(wdata, dst_row, ne00); - } else { - memcpy(dst_row, wdata, ne0 * nb0); - } - } -} - -void ggml_compute_forward_add(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - case GGML_TYPE_F16: - case GGML_TYPE_BF16: - { - ggml_compute_forward_add_non_quantized(params, dst); - } - break; - case GGML_TYPE_Q4_0: - case GGML_TYPE_Q4_1: - case GGML_TYPE_Q5_0: - case GGML_TYPE_Q5_1: - case GGML_TYPE_Q8_0: - case GGML_TYPE_MXFP4: - case GGML_TYPE_Q2_K: - case GGML_TYPE_Q3_K: - case GGML_TYPE_Q4_K: - case GGML_TYPE_Q5_K: - case GGML_TYPE_Q6_K: - case GGML_TYPE_TQ1_0: - case GGML_TYPE_TQ2_0: - case GGML_TYPE_IQ2_XXS: - case GGML_TYPE_IQ2_XS: - case GGML_TYPE_IQ3_XXS: - case GGML_TYPE_IQ1_S: - case GGML_TYPE_IQ1_M: - case GGML_TYPE_IQ4_NL: - case GGML_TYPE_IQ4_XS: - case GGML_TYPE_IQ3_S: - case GGML_TYPE_IQ2_S: - { - ggml_compute_forward_add_q_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_add_id - -static void ggml_compute_forward_add_id_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - const 
ggml_tensor * src2 = dst->src[2]; - - GGML_ASSERT(dst->type == GGML_TYPE_F32); - GGML_ASSERT(src0->type == GGML_TYPE_F32); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT(src2->type == GGML_TYPE_I32); - - GGML_ASSERT(src0->nb[0] == sizeof(float)); - GGML_ASSERT(src1->nb[0] == sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nr = ggml_nrows(src0); - - GGML_TENSOR_TERNARY_OP_LOCALS - - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb10 == sizeof(float)); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int ir = ir0; ir < ir1; ++ir) { - // src0 indices - const int i3 = ir / (ne2 * ne1); - const int i2 = (ir - i3 * ne2 * ne1) / ne1; - const int i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); - - // src1 indices - const int i11 = *(int32_t *) ((char *) src2->data + i1 * nb20 + i2 * nb21); - - GGML_ASSERT(i11 >= 0 && i11 < ne11); - - ggml_vec_add_f32(ne0, (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1), - (float *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01), - (float *) ((char *) src1->data + i11 * nb11)); - } -} - -void ggml_compute_forward_add_id(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_add_id_f32(params, dst); - } - break; - default: - { - GGML_ABORT("unsupported type for ggml_compute_forward_add_id: %s", ggml_type_name(src0->type)); - } - } -} - -// ggml_compute_forward_add1 - -static void ggml_compute_forward_add1_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - GGML_ASSERT(ggml_is_scalar(src1)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nr = ggml_nrows(src0); - - GGML_TENSOR_UNARY_OP_LOCALS - - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb00 == sizeof(float)); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int ir = ir0; ir < ir1; ++ir) { - // src0 and dst are same shape => same indices - const int i3 = ir / (ne2 * ne1); - const int i2 = (ir - i3 * ne2 * ne1) / ne1; - const int i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); - -#ifdef GGML_USE_ACCELERATE - GGML_UNUSED(ggml_vec_add1_f32); - - vDSP_vadd((float *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01), 1, - (float *) ((char *) src1->data), 0, (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1), - 1, ne0); -#else - ggml_vec_add1_f32(ne0, (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1), - (float *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01), *(float *) src1->data); -#endif - } -} - -static void ggml_compute_forward_add1_f16_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - GGML_ASSERT(ggml_is_scalar(src1)); - - // scalar to add - const float v = *(float *) src1->data; - - const int ith = params->ith; - const int nth = params->nth; - - const int nr = ggml_nrows(src0); - - GGML_TENSOR_UNARY_OP_LOCALS - - GGML_ASSERT(src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == 
GGML_TYPE_F16); - - GGML_ASSERT(nb0 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int ir = ir0; ir < ir1; ++ir) { - // src0 and dst are same shape => same indices - const int i3 = ir / (ne2 * ne1); - const int i2 = (ir - i3 * ne2 * ne1) / ne1; - const int i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); - - ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1); - ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01); - for (int i = 0; i < ne0; i++) { - dst_ptr[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(src0_ptr[i]) + v); - } - } -} - -static void ggml_compute_forward_add1_f16_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - GGML_ASSERT(ggml_is_scalar(src1)); - - // scalar to add - const float v = GGML_CPU_FP16_TO_FP32(*(ggml_fp16_t *) src1->data); - - const int ith = params->ith; - const int nth = params->nth; - - const int nr = ggml_nrows(src0); - - GGML_TENSOR_UNARY_OP_LOCALS - - GGML_ASSERT(src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_F16); - GGML_ASSERT(dst->type == GGML_TYPE_F16); - - GGML_ASSERT(nb0 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int ir = ir0; ir < ir1; ++ir) { - // src0 and dst are same shape => same indices - const int i3 = ir / (ne2 * ne1); - const int i2 = (ir - i3 * ne2 * ne1) / ne1; - const int i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); - - ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1); - ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01); - for (int i = 0; i < ne0; i++) { - dst_ptr[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(src0_ptr[i]) + v); - } - } -} - -static void ggml_compute_forward_add1_q_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - GGML_ASSERT(ggml_is_scalar(src1)); - - // scalar to add - const float v = *(float *) src1->data; - - const int ith = params->ith; - const int nth = params->nth; - - const int nr = ggml_nrows(src0); - - GGML_TENSOR_UNARY_OP_LOCALS - - const ggml_type type = src0->type; - const ggml_to_float_t dequantize_row_q = ggml_get_type_traits(type)->to_float; - const ggml_from_float_t quantize_row_q = ggml_get_type_traits_cpu(type)->from_float; - - // we don't support permuted src0 - GGML_ASSERT(nb00 == ggml_type_size(type)); - - // dst cannot be transposed or permuted - GGML_ASSERT(nb0 <= nb1); - GGML_ASSERT(nb1 <= nb2); - GGML_ASSERT(nb2 <= nb3); - - GGML_ASSERT(ggml_is_quantized(src0->type)); - GGML_ASSERT(dst->type == src0->type); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - float * wdata = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32) * ith; - - for (int ir = ir0; ir < ir1; ++ir) { - // src0 and dst 
are same shape => same indices - const int i3 = ir / (ne2 * ne1); - const int i2 = (ir - i3 * ne2 * ne1) / ne1; - const int i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); - - void * src0_row = (void *) ((char *) src0->data + (i1 * nb01 + i2 * nb02 + i3 * nb03)); - void * dst_row = (void *) ((char *) dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb0)); - - assert(ne0 % 32 == 0); - - // unquantize row from src0 to temp buffer - dequantize_row_q(src0_row, wdata, ne0); - // add src1 - ggml_vec_acc1_f32(ne0, wdata, v); - // quantize row to dst - quantize_row_q(wdata, dst_row, ne0); - } -} - -static void ggml_compute_forward_add1_bf16_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - GGML_ASSERT(ggml_is_scalar(src1)); - - // scalar to add - const float v = *(float *) src1->data; - - const int ith = params->ith; - const int nth = params->nth; - - const int nr = ggml_nrows(src0); - - GGML_TENSOR_UNARY_OP_LOCALS - - GGML_ASSERT(src0->type == GGML_TYPE_BF16); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_BF16); - - GGML_ASSERT(nb0 == sizeof(ggml_bf16_t)); - GGML_ASSERT(nb00 == sizeof(ggml_bf16_t)); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int ir = ir0; ir < ir1; ++ir) { - // src0 and dst are same shape => same indices - const int i3 = ir / (ne2 * ne1); - const int i2 = (ir - i3 * ne2 * ne1) / ne1; - const int i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); - - ggml_bf16_t * dst_ptr = (ggml_bf16_t *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1); - ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01); - for (int i = 0; i < ne0; i++) { - dst_ptr[i] = GGML_FP32_TO_BF16(GGML_BF16_TO_FP32(src0_ptr[i]) + v); - } - } -} - -static void ggml_compute_forward_add1_bf16_bf16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - GGML_ASSERT(ggml_is_scalar(src1)); - - // scalar to add - const float v = GGML_BF16_TO_FP32(*(ggml_bf16_t *) src1->data); - - const int ith = params->ith; - const int nth = params->nth; - - const int nr = ggml_nrows(src0); - - GGML_TENSOR_UNARY_OP_LOCALS - - GGML_ASSERT(src0->type == GGML_TYPE_BF16); - GGML_ASSERT(src1->type == GGML_TYPE_BF16); - GGML_ASSERT(dst->type == GGML_TYPE_BF16); - - GGML_ASSERT(nb0 == sizeof(ggml_bf16_t)); - GGML_ASSERT(nb00 == sizeof(ggml_bf16_t)); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int ir = ir0; ir < ir1; ++ir) { - // src0 and dst are same shape => same indices - const int i3 = ir / (ne2 * ne1); - const int i2 = (ir - i3 * ne2 * ne1) / ne1; - const int i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); - - ggml_bf16_t * dst_ptr = (ggml_bf16_t *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1); - ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01); - for (int i = 0; i < ne0; i++) { - dst_ptr[i] = GGML_FP32_TO_BF16(GGML_BF16_TO_FP32(src0_ptr[i]) + v); - } - } -} - -void ggml_compute_forward_add1(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = 
dst->src[1]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_add1_f32(params, dst); - } - break; - case GGML_TYPE_F16: - { - if (src1->type == GGML_TYPE_F16) { - ggml_compute_forward_add1_f16_f16(params, dst); - } else if (src1->type == GGML_TYPE_F32) { - ggml_compute_forward_add1_f16_f32(params, dst); - } else { - GGML_ABORT("fatal error"); - } - } - break; - case GGML_TYPE_BF16: - { - if (src1->type == GGML_TYPE_BF16) { - ggml_compute_forward_add1_bf16_bf16(params, dst); - } else if (src1->type == GGML_TYPE_F32) { - ggml_compute_forward_add1_bf16_f32(params, dst); - } else { - GGML_ABORT("fatal error"); - } - } - break; - case GGML_TYPE_Q4_0: - case GGML_TYPE_Q4_1: - case GGML_TYPE_Q5_0: - case GGML_TYPE_Q5_1: - case GGML_TYPE_Q8_0: - case GGML_TYPE_Q8_1: - case GGML_TYPE_MXFP4: - case GGML_TYPE_Q2_K: - case GGML_TYPE_Q3_K: - case GGML_TYPE_Q4_K: - case GGML_TYPE_Q5_K: - case GGML_TYPE_Q6_K: - case GGML_TYPE_TQ1_0: - case GGML_TYPE_TQ2_0: - case GGML_TYPE_IQ2_XXS: - case GGML_TYPE_IQ2_XS: - case GGML_TYPE_IQ3_XXS: - case GGML_TYPE_IQ1_S: - case GGML_TYPE_IQ1_M: - case GGML_TYPE_IQ4_NL: - case GGML_TYPE_IQ4_XS: - case GGML_TYPE_IQ3_S: - case GGML_TYPE_IQ2_S: - { - ggml_compute_forward_add1_q_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_acc - -static void ggml_compute_forward_acc_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0)); - - // view src0 and dst with these strides and data offset inbytes during acc - // nb0 is implicitly element_size because src0 and dst are contiguous - size_t nb1 = ((int32_t *) dst->op_params)[0]; - size_t nb2 = ((int32_t *) dst->op_params)[1]; - size_t nb3 = ((int32_t *) dst->op_params)[2]; - size_t offset = ((int32_t *) dst->op_params)[3]; - bool inplace = (bool) ((int32_t *) dst->op_params)[4]; - - if (!inplace) { - if (params->ith == 0) { - // memcpy needs to be synchronized across threads to avoid race conditions. - // => do it in INIT phase - memcpy(((char *) dst->data), ((char *) src0->data), ggml_nbytes(dst)); - } - ggml_barrier(params->threadpool); - } - - const int ith = params->ith; - const int nth = params->nth; - - const int nr = ggml_nrows(src1); - const int nc = src1->ne[0]; - - GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) - GGML_TENSOR_LOCALS(size_t, nb1, src1, nb) - - // src0 and dst as viewed during acc - const size_t nb0 = ggml_element_size(src0); - - const size_t nb00 = nb0; - const size_t nb01 = nb1; - const size_t nb02 = nb2; - const size_t nb03 = nb3; - - GGML_ASSERT(offset + (ne10 == 0 ? 0 : ne10 - 1) * nb0 + (ne11 == 0 ? 0 : ne11 - 1) * nb1 + - (ne12 == 0 ? 0 : ne12 - 1) * nb2 + (ne13 == 0 ? 0 : ne13 - 1) * nb3 < - ggml_nbytes(dst)); - GGML_ASSERT(offset + (ne10 == 0 ? 0 : ne10 - 1) * nb00 + (ne11 == 0 ? 0 : ne11 - 1) * nb01 + - (ne12 == 0 ? 0 : ne12 - 1) * nb02 + (ne13 == 0 ? 
0 : ne13 - 1) * nb03 < - ggml_nbytes(src0)); - - GGML_ASSERT(nb10 == sizeof(float)); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int ir = ir0; ir < ir1; ++ir) { - // src0 and dst are viewed with shape of src1 and offset - // => same indices - const int i3 = ir / (ne12 * ne11); - const int i2 = (ir - i3 * ne12 * ne11) / ne11; - const int i1 = (ir - i3 * ne12 * ne11 - i2 * ne11); - -#ifdef GGML_USE_ACCELERATE - vDSP_vadd((float *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01 + offset), 1, - (float *) ((char *) src1->data + i3 * nb13 + i2 * nb12 + i1 * nb11), 1, - (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1 + offset), 1, nc); -#else - ggml_vec_add_f32(nc, (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1 + offset), - (float *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01 + offset), - (float *) ((char *) src1->data + i3 * nb13 + i2 * nb12 + i1 * nb11)); -#endif - } -} - -void ggml_compute_forward_acc(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_acc_f32(params, dst); - } - break; - case GGML_TYPE_F16: - case GGML_TYPE_BF16: - case GGML_TYPE_Q4_0: - case GGML_TYPE_Q4_1: - case GGML_TYPE_Q5_0: - case GGML_TYPE_Q5_1: - case GGML_TYPE_Q8_0: - case GGML_TYPE_Q8_1: - case GGML_TYPE_MXFP4: - case GGML_TYPE_Q2_K: - case GGML_TYPE_Q3_K: - case GGML_TYPE_Q4_K: - case GGML_TYPE_Q5_K: - case GGML_TYPE_Q6_K: - case GGML_TYPE_TQ1_0: - case GGML_TYPE_TQ2_0: - case GGML_TYPE_IQ2_XXS: - case GGML_TYPE_IQ2_XS: - case GGML_TYPE_IQ3_XXS: - case GGML_TYPE_IQ1_S: - case GGML_TYPE_IQ1_M: - case GGML_TYPE_IQ4_NL: - case GGML_TYPE_IQ4_XS: - case GGML_TYPE_IQ3_S: - case GGML_TYPE_IQ2_S: - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_sum - -static void ggml_compute_forward_sum_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - if (params->ith != 0) { - return; - } - - assert(ggml_is_scalar(dst)); - assert(src0->nb[0] == sizeof(float)); - - GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) - GGML_TENSOR_LOCALS(size_t, nb0, src0, nb) - - ggml_float sum = 0; - ggml_float row_sum = 0; - - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = 0; i01 < ne01; i01++) { - ggml_vec_sum_f32_ggf(ne00, &row_sum, - (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03)); - sum += row_sum; - } - } - } - ((float *) dst->data)[0] = sum; -} - -static void ggml_compute_forward_sum_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - if (params->ith != 0) { - return; - } - - assert(ggml_is_scalar(dst)); - - assert(src0->nb[0] == sizeof(ggml_fp16_t)); - - GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) - GGML_TENSOR_LOCALS(size_t, nb0, src0, nb) - - float sum = 0; - float row_sum = 0; - - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = 0; i01 < ne01; i01++) { - ggml_vec_sum_f16_ggf(ne00, &row_sum, - (ggml_fp16_t *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03)); - sum += row_sum; - } - } - } - ((ggml_fp16_t *) dst->data)[0] = GGML_CPU_FP32_TO_FP16(sum); -} - -static void ggml_compute_forward_sum_bf16(const ggml_compute_params * params, ggml_tensor * dst) { - 
const ggml_tensor * src0 = dst->src[0]; - - if (params->ith != 0) { - return; - } - - assert(ggml_is_scalar(dst)); - - assert(src0->nb[0] == sizeof(ggml_bf16_t)); - - GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) - GGML_TENSOR_LOCALS(size_t, nb0, src0, nb) - - float sum = 0; - float row_sum = 0; - - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = 0; i01 < ne01; i01++) { - ggml_vec_sum_bf16_ggf(ne00, &row_sum, - (ggml_bf16_t *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03)); - sum += row_sum; - } - } - } - ((ggml_bf16_t *) dst->data)[0] = GGML_FP32_TO_BF16(sum); -} - -void ggml_compute_forward_sum(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_sum_f32(params, dst); - } - break; - case GGML_TYPE_F16: - { - ggml_compute_forward_sum_f16(params, dst); - } - break; - case GGML_TYPE_BF16: - { - ggml_compute_forward_sum_bf16(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_cumsum - -static void ggml_compute_forward_cumsum_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_ASSERT(src0->nb[0] == sizeof(float)); - GGML_ASSERT(dst->nb[0] == sizeof(float)); - - GGML_TENSOR_UNARY_OP_LOCALS - - GGML_ASSERT(ne0 == ne00); - GGML_ASSERT(ne1 == ne01); - GGML_ASSERT(ne2 == ne02); - GGML_ASSERT(ne3 == ne03); - - const auto [ir0, ir1] = get_thread_range(params, src0); - - for (int64_t ir = ir0; ir < ir1; ++ir) { - const int64_t i03 = ir / (ne02 * ne01); - const int64_t i02 = (ir - i03 * ne02 * ne01) / ne01; - const int64_t i01 = (ir - i03 * ne02 * ne01 - i02 * ne01); - - float * src_row = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); - float * dst_row = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); - - ggml_vec_cumsum_f32(ne00, dst_row, src_row); - } -} - -void ggml_compute_forward_cumsum(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_cumsum_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_sum_rows - -static void ggml_compute_forward_sum_rows_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - if (params->ith != 0) { - return; - } - - GGML_ASSERT(src0->nb[0] == sizeof(float)); - GGML_ASSERT(dst->nb[0] == sizeof(float)); - - GGML_TENSOR_UNARY_OP_LOCALS - - GGML_ASSERT(ne0 == 1); - GGML_ASSERT(ne1 == ne01); - GGML_ASSERT(ne2 == ne02); - GGML_ASSERT(ne3 == ne03); - - for (int64_t i3 = 0; i3 < ne03; i3++) { - for (int64_t i2 = 0; i2 < ne02; i2++) { - for (int64_t i1 = 0; i1 < ne01; i1++) { - float * src_row = (float *) ((char *) src0->data + i1 * nb01 + i2 * nb02 + i3 * nb03); - float * dst_row = (float *) ((char *) dst->data + i1 * nb1 + i2 * nb2 + i3 * nb3); - float row_sum = 0; - ggml_vec_sum_f32(ne00, &row_sum, src_row); - dst_row[0] = row_sum; - } - } - } -} - -void ggml_compute_forward_sum_rows(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_sum_rows_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_mean - -static void 
ggml_compute_forward_mean_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - if (params->ith != 0) { - return; - } - - assert(src0->nb[0] == sizeof(float)); - - GGML_TENSOR_UNARY_OP_LOCALS - - assert(ne0 == 1); - assert(ne1 == ne01); - assert(ne2 == ne02); - assert(ne3 == ne03); - - GGML_UNUSED(ne0); - GGML_UNUSED(ne1); - GGML_UNUSED(ne2); - GGML_UNUSED(ne3); - - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = 0; i01 < ne01; i01++) { - ggml_vec_sum_f32(ne00, (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3), - (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03)); - - *(float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3) /= (float) ne00; - } - } - } -} - -void ggml_compute_forward_mean(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_mean_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_argmax - -static void ggml_compute_forward_argmax_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - if (params->ith != 0) { - return; - } - - assert(src0->nb[0] == sizeof(float)); - assert(dst->nb[0] == sizeof(float)); - - const int64_t ne00 = src0->ne[0]; - const int64_t ne01 = src0->ne[1]; - - const size_t nb01 = src0->nb[1]; - const size_t nb0 = dst->nb[0]; - - for (int64_t i1 = 0; i1 < ne01; i1++) { - float * src = (float *) ((char *) src0->data + i1 * nb01); - int32_t * dst_ = (int32_t *) ((char *) dst->data + i1 * nb0); - int v = 0; - ggml_vec_argmax_f32(ne00, &v, src); - dst_[0] = v; - } -} - -void ggml_compute_forward_argmax(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_argmax_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_count_equal - -static void ggml_compute_forward_count_equal_i32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_TENSOR_BINARY_OP_LOCALS; - - GGML_ASSERT(src0->type == GGML_TYPE_I32); - GGML_ASSERT(src1->type == GGML_TYPE_I32); - GGML_ASSERT(ggml_are_same_shape(src0, src1)); - GGML_ASSERT(ggml_is_scalar(dst)); - GGML_ASSERT(dst->type == GGML_TYPE_I64); - - const int64_t nr = ggml_nrows(src0); - - const int ith = params->ith; - const int nth = params->nth; - - int64_t * sums = (int64_t *) params->wdata; - int64_t sum_thread = 0; - - // rows per thread - const int64_t dr = (nr + nth - 1) / nth; - - // row range for this thread - const int64_t ir0 = dr * ith; - const int64_t ir1 = MIN(ir0 + dr, nr); - - for (int64_t ir = ir0; ir < ir1; ++ir) { - const int64_t i03 = ir / (ne02 * ne01); - const int64_t i02 = (ir - i03 * ne03) / ne01; - const int64_t i01 = ir - i03 * ne03 - i02 * ne02; - - const char * data0 = (const char *) src0->data + i03 * nb03 + i02 * nb02 + i01 * nb01; - const char * data1 = (const char *) src1->data + i03 * nb13 + i02 * nb12 + i01 * nb11; - - for (int64_t i00 = 0; i00 < ne00; ++i00) { - const int32_t val0 = *((const int32_t *) (data0 + i00 * nb00)); - const int32_t val1 = *((const int32_t *) (data1 + i00 * nb10)); - - sum_thread += val0 == val1; - } - } - if (ith != 0) 
{ - sums[ith] = sum_thread; - } - ggml_barrier(params->threadpool); - - if (ith != 0) { - return; - } - - for (int ith_other = 1; ith_other < nth; ++ith_other) { - sum_thread += sums[ith_other]; - } - *((int64_t *) dst->data) = sum_thread; -} - -void ggml_compute_forward_count_equal(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_I32: - { - ggml_compute_forward_count_equal_i32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_repeat - -static void ggml_compute_forward_repeat_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - if (params->ith != 0) { - return; - } - - GGML_ASSERT(ggml_can_repeat(src0, dst)); - - GGML_TENSOR_UNARY_OP_LOCALS - - // guaranteed to be an integer due to the check in ggml_can_repeat - const int nr0 = (int) (ne0 / ne00); - const int nr1 = (int) (ne1 / ne01); - const int nr2 = (int) (ne2 / ne02); - const int nr3 = (int) (ne3 / ne03); - - // TODO: support for transposed / permuted tensors - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb00 == sizeof(float)); - - // TODO: maybe this is not optimal? - for (int i3 = 0; i3 < nr3; i3++) { - for (int k3 = 0; k3 < ne03; k3++) { - for (int i2 = 0; i2 < nr2; i2++) { - for (int k2 = 0; k2 < ne02; k2++) { - for (int i1 = 0; i1 < nr1; i1++) { - for (int k1 = 0; k1 < ne01; k1++) { - for (int i0 = 0; i0 < nr0; i0++) { - ggml_vec_cpy_f32( - ne00, - (float *) ((char *) dst->data + (i3 * ne03 + k3) * nb3 + (i2 * ne02 + k2) * nb2 + - (i1 * ne01 + k1) * nb1 + (i0 * ne00) * nb0), - (float *) ((char *) src0->data + (k3) *nb03 + (k2) *nb02 + (k1) *nb01)); - } - } - } - } - } - } - } -} - -static void ggml_compute_forward_repeat_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - if (params->ith != 0) { - return; - } - - GGML_ASSERT(ggml_can_repeat(src0, dst)); - - GGML_TENSOR_UNARY_OP_LOCALS - - // guaranteed to be an integer due to the check in ggml_can_repeat - const int nr0 = (int) (ne0 / ne00); - const int nr1 = (int) (ne1 / ne01); - const int nr2 = (int) (ne2 / ne02); - const int nr3 = (int) (ne3 / ne03); - - // TODO: support for transposed / permuted tensors - GGML_ASSERT(nb0 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); - - // TODO: maybe this is not optimal? 
- for (int i3 = 0; i3 < nr3; i3++) { - for (int k3 = 0; k3 < ne03; k3++) { - for (int i2 = 0; i2 < nr2; i2++) { - for (int k2 = 0; k2 < ne02; k2++) { - for (int i1 = 0; i1 < nr1; i1++) { - for (int k1 = 0; k1 < ne01; k1++) { - for (int i0 = 0; i0 < nr0; i0++) { - ggml_fp16_t * y = (ggml_fp16_t *) ((char *) dst->data + (i3 * ne03 + k3) * nb3 + - (i2 * ne02 + k2) * nb2 + (i1 * ne01 + k1) * nb1 + - (i0 * ne00) * nb0); - ggml_fp16_t * x = - (ggml_fp16_t *) ((char *) src0->data + (k3) *nb03 + (k2) *nb02 + (k1) *nb01); - // ggml_vec_cpy_f16(ne00, y, x) - for (int i = 0; i < ne00; ++i) { - y[i] = x[i]; - } - } - } - } - } - } - } - } -} - -void ggml_compute_forward_repeat(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F16: - case GGML_TYPE_BF16: - case GGML_TYPE_I16: - { - ggml_compute_forward_repeat_f16(params, dst); - } - break; - case GGML_TYPE_F32: - case GGML_TYPE_I32: - { - ggml_compute_forward_repeat_f32(params, dst); - } - break; - // TODO: templateify the implemenation and support for I64 - // ref https://github.com/ggml-org/llama.cpp/pull/14274#discussion_r2169492225 - //case GGML_TYPE_I64: - // { - // ggml_compute_forward_repeat_i64(params, dst); - // } break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_repeat_back - -static void ggml_compute_forward_repeat_back_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - if (params->ith != 0) { - return; - } - - GGML_ASSERT(ggml_can_repeat(dst, src0)); - - GGML_TENSOR_UNARY_OP_LOCALS - - // guaranteed to be an integer due to the check in ggml_can_repeat - const int nr0 = (int) (ne00 / ne0); - const int nr1 = (int) (ne01 / ne1); - const int nr2 = (int) (ne02 / ne2); - const int nr3 = (int) (ne03 / ne3); - - // TODO: support for transposed / permuted tensors - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb00 == sizeof(float)); - - if (ggml_is_contiguous(dst)) { - ggml_vec_set_f32(ne0 * ne1 * ne2 * ne3, (float *) dst->data, 0); - } else { - for (int k3 = 0; k3 < ne3; k3++) { - for (int k2 = 0; k2 < ne2; k2++) { - for (int k1 = 0; k1 < ne1; k1++) { - ggml_vec_set_f32(ne0, (float *) ((char *) dst->data + k1 * nb1 + k2 * nb2 + k3 * nb3), 0); - } - } - } - } - - // TODO: maybe this is not optimal? 
- for (int i3 = 0; i3 < nr3; i3++) { - for (int k3 = 0; k3 < ne3; k3++) { - for (int i2 = 0; i2 < nr2; i2++) { - for (int k2 = 0; k2 < ne2; k2++) { - for (int i1 = 0; i1 < nr1; i1++) { - for (int k1 = 0; k1 < ne1; k1++) { - for (int i0 = 0; i0 < nr0; i0++) { - ggml_vec_acc_f32( - ne0, (float *) ((char *) dst->data + (k3) *nb3 + (k2) *nb2 + (k1) *nb1), - (float *) ((char *) src0->data + (i3 * ne3 + k3) * nb03 + (i2 * ne2 + k2) * nb02 + - (i1 * ne1 + k1) * nb01 + (i0 * ne0) * nb00)); - } - } - } - } - } - } - } -} - -void ggml_compute_forward_repeat_back(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_repeat_back_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_concat - -static void ggml_compute_forward_concat_any(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - const size_t len = ggml_type_size(src0->type); - - const int ith = params->ith; - const int nth = params->nth; - - GGML_TENSOR_BINARY_OP_LOCALS - - const int32_t dim = ggml_get_op_params_i32(dst, 0); - - GGML_ASSERT(dim >= 0 && dim < 4); - - int64_t o[4] = { 0, 0, 0, 0 }; - o[dim] = src0->ne[dim]; - - const char * x; - - // TODO: smarter multi-theading - for (int i3 = 0; i3 < ne3; i3++) { - for (int i2 = ith; i2 < ne2; i2 += nth) { - for (int i1 = 0; i1 < ne1; i1++) { - for (int i0 = 0; i0 < ne0; i0++) { - if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 < ne03) { - x = (const char *) src0->data + (i0) *nb00 + (i1) *nb01 + (i2) *nb02 + (i3) *nb03; - } else { - x = (const char *) src1->data + (i0 - o[0]) * nb10 + (i1 - o[1]) * nb11 + (i2 - o[2]) * nb12 + - (i3 - o[3]) * nb13; - } - - char * y = (char *) dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3; - - memcpy(y, x, len); - } - } - } - } -} - -static void ggml_compute_forward_concat_i8(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(ggml_type_size(src0->type) == sizeof(int8_t)); - - const int ith = params->ith; - const int nth = params->nth; - - GGML_TENSOR_BINARY_OP_LOCALS - - const int32_t dim = ggml_get_op_params_i32(dst, 0); - - GGML_ASSERT(dim >= 0 && dim < 4); - - int64_t o[4] = { 0, 0, 0, 0 }; - o[dim] = src0->ne[dim]; - - const int8_t * x; - - // TODO: smarter multi-theading - for (int i3 = 0; i3 < ne3; i3++) { - for (int i2 = ith; i2 < ne2; i2 += nth) { - for (int i1 = 0; i1 < ne1; i1++) { - for (int i0 = 0; i0 < ne0; i0++) { - if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 < ne03) { - x = (const int8_t *) ((const char *) src0->data + (i0) *nb00 + (i1) *nb01 + (i2) *nb02 + - (i3) *nb03); - } else { - x = (const int8_t *) ((const char *) src1->data + (i0 - o[0]) * nb10 + (i1 - o[1]) * nb11 + - (i2 - o[2]) * nb12 + (i3 - o[3]) * nb13); - } - - int8_t * y = (int8_t *) ((char *) dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3); - - *y = *x; - } - } - } - } -} - -static void ggml_compute_forward_concat_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(ggml_type_size(src0->type) == sizeof(ggml_fp16_t)); - - const int ith = params->ith; - const int nth = params->nth; - - GGML_TENSOR_BINARY_OP_LOCALS - - const int32_t dim = ggml_get_op_params_i32(dst, 0); - - GGML_ASSERT(dim >= 
0 && dim < 4); - - int64_t o[4] = { 0, 0, 0, 0 }; - o[dim] = src0->ne[dim]; - - const ggml_fp16_t * x; - - // TODO: smarter multi-theading - for (int i3 = 0; i3 < ne3; i3++) { - for (int i2 = ith; i2 < ne2; i2 += nth) { - for (int i1 = 0; i1 < ne1; i1++) { - for (int i0 = 0; i0 < ne0; i0++) { - if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 < ne03) { - x = (const ggml_fp16_t *) ((const char *) src0->data + (i0) *nb00 + (i1) *nb01 + (i2) *nb02 + - (i3) *nb03); - } else { - x = (const ggml_fp16_t *) ((const char *) src1->data + (i0 - o[0]) * nb10 + (i1 - o[1]) * nb11 + - (i2 - o[2]) * nb12 + (i3 - o[3]) * nb13); - } - - ggml_fp16_t * y = (ggml_fp16_t *) ((char *) dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3); - - *y = *x; - } - } - } - } -} - -static void ggml_compute_forward_concat_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(ggml_type_size(src0->type) == sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - GGML_TENSOR_BINARY_OP_LOCALS - - const int32_t dim = ggml_get_op_params_i32(dst, 0); - - GGML_ASSERT(dim >= 0 && dim < 4); - - int64_t o[4] = { 0, 0, 0, 0 }; - o[dim] = src0->ne[dim]; - - const float * x; - - // TODO: smarter multi-theading - for (int i3 = 0; i3 < ne3; i3++) { - for (int i2 = ith; i2 < ne2; i2 += nth) { - for (int i1 = 0; i1 < ne1; i1++) { - for (int i0 = 0; i0 < ne0; i0++) { - if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 < ne03) { - x = (const float *) ((const char *) src0->data + (i0) *nb00 + (i1) *nb01 + (i2) *nb02 + - (i3) *nb03); - } else { - x = (const float *) ((const char *) src1->data + (i0 - o[0]) * nb10 + (i1 - o[1]) * nb11 + - (i2 - o[2]) * nb12 + (i3 - o[3]) * nb13); - } - - float * y = (float *) ((char *) dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3); - - *y = *x; - } - } - } - } -} - -void ggml_compute_forward_concat(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F16: - case GGML_TYPE_BF16: - case GGML_TYPE_I16: - { - ggml_compute_forward_concat_f16(params, dst); - } - break; - case GGML_TYPE_I8: - { - ggml_compute_forward_concat_i8(params, dst); - } - break; - case GGML_TYPE_F32: - case GGML_TYPE_I32: - { - ggml_compute_forward_concat_f32(params, dst); - } - break; - default: - { - ggml_compute_forward_concat_any(params, dst); - } - } -} - -// ggml_compute_forward_gelu - -static void ggml_compute_forward_gelu_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - assert(ggml_is_contiguous_1(src0)); - assert(ggml_is_contiguous_1(dst)); - assert(ggml_are_same_shape(src0, dst)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_vec_gelu_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), - (float *) ((char *) src0->data + i1 * (src0->nb[1]))); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - GGML_UNUSED(x); - assert(!isnan(x)); - assert(!isinf(x)); - } -#endif - } -} - -static void ggml_compute_forward_gelu_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const 
ggml_tensor * src0 = dst->src[0]; - - assert(ggml_is_contiguous_1(src0)); - assert(ggml_is_contiguous_1(dst)); - assert(ggml_are_same_shape(src0, dst)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_vec_gelu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), - (ggml_fp16_t *) ((char *) src0->data + i1 * (src0->nb[1]))); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - const float v = GGML_CPU_FP16_TO_FP32(x); - GGML_UNUSED(v); - assert(!isnan(v)); - assert(!isinf(v)); - } -#endif - } -} - -static void ggml_compute_forward_gelu(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_gelu_f32(params, dst); - } - break; - case GGML_TYPE_F16: - { - ggml_compute_forward_gelu_f16(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_fill - -static void ggml_compute_forward_fill_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const float c = ggml_get_op_params_f32(dst, 0); - - GGML_TENSOR_LOCALS(int64_t, ne, dst, ne); - GGML_TENSOR_LOCALS(size_t, nb, dst, nb); - - const auto [ir0, ir1] = get_thread_range(params, dst); - - for (int64_t ir = ir0; ir < ir1; ++ir) { - const int64_t i03 = ir / (ne2 * ne1); - const int64_t i02 = (ir - i03 * ne2 * ne1) / ne1; - const int64_t i01 = (ir - i03 * ne2 * ne1 - i02 * ne1); - - float * dst_ptr = (float *) ((char *) dst->data + i03 * nb3 + i02 * nb2 + i01 * nb1); - - ggml_vec_set_f32(ne0, dst_ptr, c); - } -} - -void ggml_compute_forward_fill(const ggml_compute_params * params, ggml_tensor * dst) { - ggml_compute_forward_fill_f32(params, dst); -} - -// ggml_compute_tri - -static void ggml_compute_forward_tri_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - const ggml_tri_type ttype = (ggml_tri_type) ggml_get_op_params_i32(dst, 0); - - GGML_ASSERT(ggml_is_contiguous(src0)); - - GGML_TENSOR_UNARY_OP_LOCALS - - const auto [ir0, ir1] = get_thread_range(params, src0); - - bool (*bipred)(int, int); - - switch (ttype) { - case GGML_TRI_TYPE_LOWER: - bipred = [](int i, int r) { - return i < r; - }; - break; - case GGML_TRI_TYPE_LOWER_DIAG: - bipred = [](int i, int r) { - return i <= r; - }; - break; - case GGML_TRI_TYPE_UPPER: - bipred = [](int i, int r) { - return i > r; - }; - break; - case GGML_TRI_TYPE_UPPER_DIAG: - bipred = [](int i, int r) { - return i >= r; - }; - break; - default: - GGML_ABORT("invalid tri type"); - } - - for (int64_t ir = ir0; ir < ir1; ++ir) { - const int64_t i03 = ir / (ne02 * ne01); - const int64_t i02 = (ir - i03 * ne02 * ne01) / ne01; - const int64_t i01 = (ir - i03 * ne02 * ne01 - i02 * ne01); - - const float * src_ptr = (const float *) ((const char *) src0->data + i03 * nb03 + i02 * nb02 + i01 * nb01); - float * dst_ptr = (float *) ((char *) dst->data + i03 * nb3 + i02 * nb2 + i01 * nb1); - - for (int i0 = 0; i0 < ne0; ++i0) { - dst_ptr[i0] = bipred(i0, i01) ? 
src_ptr[i0] : 0.0f; - } - } -} - -void ggml_compute_forward_tri(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_tri_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_gelu_erf - -static void ggml_compute_forward_gelu_erf_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - assert(ggml_is_contiguous_1(src0)); - assert(ggml_is_contiguous_1(dst)); - assert(ggml_are_same_shape(src0, dst)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_vec_gelu_erf_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), - (float *) ((char *) src0->data + i1 * (src0->nb[1]))); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - GGML_UNUSED(x); - assert(!isnan(x)); - assert(!isinf(x)); - } -#endif - } -} - -static void ggml_compute_forward_gelu_erf_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - assert(ggml_is_contiguous_1(src0)); - assert(ggml_is_contiguous_1(dst)); - assert(ggml_are_same_shape(src0, dst)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_vec_gelu_erf_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), - (ggml_fp16_t *) ((char *) src0->data + i1 * (src0->nb[1]))); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - const float v = GGML_CPU_FP16_TO_FP32(x); - GGML_UNUSED(v); - assert(!isnan(v)); - assert(!isinf(v)); - } -#endif - } -} - -static void ggml_compute_forward_gelu_erf(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_gelu_erf_f32(params, dst); - } - break; - case GGML_TYPE_F16: - { - ggml_compute_forward_gelu_erf_f16(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_gelu_quick - -static void ggml_compute_forward_gelu_quick_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - assert(ggml_is_contiguous_1(src0)); - assert(ggml_is_contiguous_1(dst)); - assert(ggml_are_same_shape(src0, dst)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_vec_gelu_quick_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), - (float *) ((char *) src0->data + i1 * (src0->nb[1]))); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const 
float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - GGML_UNUSED(x); - assert(!isnan(x)); - assert(!isinf(x)); - } -#endif - } -} - -static void ggml_compute_forward_gelu_quick_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - assert(ggml_is_contiguous_1(src0)); - assert(ggml_is_contiguous_1(dst)); - assert(ggml_are_same_shape(src0, dst)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_vec_gelu_quick_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), - (ggml_fp16_t *) ((char *) src0->data + i1 * (src0->nb[1]))); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - const float v = GGML_CPU_FP16_TO_FP32(x); - GGML_UNUSED(v); - assert(!isnan(v)); - assert(!isinf(v)); - } -#endif - } -} - -static void ggml_compute_forward_gelu_quick(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_gelu_quick_f32(params, dst); - } - break; - case GGML_TYPE_F16: - { - ggml_compute_forward_gelu_quick_f16(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_silu - -static void ggml_compute_forward_silu_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - assert(ggml_is_contiguous_1(src0)); - assert(ggml_is_contiguous_1(dst)); - assert(ggml_are_same_shape(src0, dst)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_vec_silu_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), - (float *) ((char *) src0->data + i1 * (src0->nb[1]))); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - GGML_UNUSED(x); - assert(!isnan(x)); - assert(!isinf(x)); - } -#endif - } -} - -static void ggml_compute_forward_silu_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - assert(ggml_is_contiguous_1(src0)); - assert(ggml_is_contiguous_1(dst)); - assert(ggml_are_same_shape(src0, dst)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_vec_silu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), - (ggml_fp16_t *) ((char *) src0->data + i1 * (src0->nb[1]))); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - const float v = GGML_CPU_FP16_TO_FP32(x); - GGML_UNUSED(v); - assert(!isnan(v)); - assert(!isinf(v)); - } -#endif - } -} - -static void 
ggml_compute_forward_silu(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_silu_f32(params, dst); - } - break; - case GGML_TYPE_F16: - { - ggml_compute_forward_silu_f16(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_leaky_relu - -static void ggml_compute_forward_leaky_relu_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - if (params->ith != 0) { - return; - } - - assert(ggml_is_contiguous_1(src0)); - assert(ggml_is_contiguous_1(dst)); - assert(ggml_are_same_shape(src0, dst)); - - const int n = ggml_nrows(src0); - const int nc = src0->ne[0]; - - float negative_slope; - memcpy(&negative_slope, dst->op_params, sizeof(float)); - - assert(dst->nb[0] == sizeof(float)); - assert(src0->nb[0] == sizeof(float)); - - for (int i = 0; i < n; i++) { - ggml_vec_leaky_relu_f32(nc, (float *) ((char *) dst->data + i * (dst->nb[1])), - (float *) ((char *) src0->data + i * (src0->nb[1])), negative_slope); - } -} - -static void ggml_compute_forward_leaky_relu_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - if (params->ith != 0) { - return; - } - - assert(ggml_is_contiguous_1(src0)); - assert(ggml_is_contiguous_1(dst)); - assert(ggml_are_same_shape(src0, dst)); - - const int n = ggml_nrows(src0); - const int nc = src0->ne[0]; - - float negative_slope; - memcpy(&negative_slope, dst->op_params, sizeof(float)); - - assert(dst->nb[0] == sizeof(ggml_fp16_t)); - assert(src0->nb[0] == sizeof(ggml_fp16_t)); - - for (int i = 0; i < n; i++) { - ggml_vec_leaky_relu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i * (dst->nb[1])), - (ggml_fp16_t *) ((char *) src0->data + i * (src0->nb[1])), negative_slope); - } -} - -void ggml_compute_forward_leaky_relu(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_leaky_relu_f32(params, dst); - } - break; - case GGML_TYPE_F16: - { - ggml_compute_forward_leaky_relu_f16(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_silu_back - -static void ggml_compute_forward_silu_back_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * grad = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - assert(ggml_is_contiguous_1(grad)); - assert(ggml_is_contiguous_1(src1)); - assert(ggml_is_contiguous_1(dst)); - assert(ggml_are_same_shape(src1, dst)); - assert(ggml_are_same_shape(src1, grad)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src1->ne[0]; - const int nr = ggml_nrows(src1); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_vec_silu_backward_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), - (float *) ((char *) src1->data + i1 * (src1->nb[1])), - (float *) ((char *) grad->data + i1 * (grad->nb[1]))); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - GGML_UNUSED(x); - assert(!isnan(x)); - assert(!isinf(x)); - } -#endif - } -} - -static void ggml_compute_forward_silu_back_f16(const ggml_compute_params * 
params, ggml_tensor * dst) { - const ggml_tensor * grad = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - assert(ggml_is_contiguous_1(grad)); - assert(ggml_is_contiguous_1(src1)); - assert(ggml_is_contiguous_1(dst)); - assert(ggml_are_same_shape(src1, dst)); - assert(ggml_are_same_shape(src1, grad)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src1->ne[0]; - const int nr = ggml_nrows(src1); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_vec_silu_backward_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), - (ggml_fp16_t *) ((char *) src1->data + i1 * (src1->nb[1])), - (ggml_fp16_t *) ((char *) grad->data + i1 * (grad->nb[1]))); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const float x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - const float v = GGML_CPU_FP16_TO_FP32(x); - GGML_UNUSED(v); - assert(!isnan(v)); - assert(!isinf(v)); - } -#endif - } -} - -void ggml_compute_forward_silu_back(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_silu_back_f32(params, dst); - } - break; - case GGML_TYPE_F16: - { - ggml_compute_forward_silu_back_f16(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_reglu - -static void ggml_compute_forward_reglu_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - char * src0_d = (char *) src0->data; - char * src1_d = (char *) (src1 ? src1->data : src0->data); - const size_t src0_o = src0->nb[1]; - const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; - - GGML_ASSERT(ggml_is_contiguous_1(src0)); - GGML_ASSERT(ggml_is_contiguous_1(dst)); - - if (src1) { - GGML_ASSERT(ggml_is_contiguous_1(src1)); - GGML_ASSERT(src0->type == src1->type); - } - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; - const int nr = ggml_nrows(src0); - - GGML_ASSERT(dst->ne[0] == nc); - GGML_ASSERT(ggml_nrows(dst) == nr); - - const int32_t swapped = ggml_get_op_params_i32(dst, 1); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - float * src0_p = (float *) (src0_d + i1 * src0_o); - float * src1_p = (float *) (src1_d + i1 * src1_o); - - if (!src1) { - src0_p += swapped ? nc : 0; - src1_p += swapped ? 0 : nc; - } - - ggml_vec_reglu_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - GGML_UNUSED(x); - assert(!isnan(x)); - assert(!isinf(x)); - } -#endif - } -} - -static void ggml_compute_forward_reglu_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - char * src0_d = (char *) src0->data; - char * src1_d = (char *) (src1 ? src1->data : src0->data); - const size_t src0_o = src0->nb[1]; - const size_t src1_o = src1 ? 
src1->nb[1] : src0->nb[1]; - - GGML_ASSERT(ggml_is_contiguous_1(src0)); - GGML_ASSERT(ggml_is_contiguous_1(dst)); - - if (src1) { - GGML_ASSERT(ggml_is_contiguous_1(src1)); - GGML_ASSERT(src0->type == src1->type); - } - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; - const int nr = ggml_nrows(src0); - - GGML_ASSERT(dst->ne[0] == nc); - GGML_ASSERT(ggml_nrows(dst) == nr); - - const int32_t swapped = ggml_get_op_params_i32(dst, 1); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_fp16_t * src0_p = (ggml_fp16_t *) (src0_d + i1 * src0_o); - ggml_fp16_t * src1_p = (ggml_fp16_t *) (src1_d + i1 * src1_o); - - if (!src1) { - src0_p += swapped ? nc : 0; - src1_p += swapped ? 0 : nc; - } - - ggml_vec_reglu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - const float v = GGML_FP16_TO_FP32(x); - GGML_UNUSED(v); - assert(!isnan(v)); - assert(!isinf(v)); - } -#endif - } -} - -static void ggml_compute_forward_reglu(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_reglu_f32(params, dst); - } - break; - case GGML_TYPE_F16: - { - ggml_compute_forward_reglu_f16(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_geglu - -static void ggml_compute_forward_geglu_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - char * src0_d = (char *) src0->data; - char * src1_d = (char *) (src1 ? src1->data : src0->data); - const size_t src0_o = src0->nb[1]; - const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; - - GGML_ASSERT(ggml_is_contiguous_1(src0)); - GGML_ASSERT(ggml_is_contiguous_1(dst)); - - if (src1) { - GGML_ASSERT(ggml_is_contiguous_1(src1)); - GGML_ASSERT(src0->type == src1->type); - } - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; - const int nr = ggml_nrows(src0); - - GGML_ASSERT(dst->ne[0] == nc); - GGML_ASSERT(ggml_nrows(dst) == nr); - - const int32_t swapped = ggml_get_op_params_i32(dst, 1); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - float * src0_p = (float *) (src0_d + i1 * src0_o); - float * src1_p = (float *) (src1_d + i1 * src1_o); - - if (!src1) { - src0_p += swapped ? nc : 0; - src1_p += swapped ? 0 : nc; - } - - ggml_vec_geglu_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - GGML_UNUSED(x); - assert(!isnan(x)); - assert(!isinf(x)); - } -#endif - } -} - -static void ggml_compute_forward_geglu_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - char * src0_d = (char *) src0->data; - char * src1_d = (char *) (src1 ? 
src1->data : src0->data); - const size_t src0_o = src0->nb[1]; - const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; - - GGML_ASSERT(ggml_is_contiguous_1(src0)); - GGML_ASSERT(ggml_is_contiguous_1(dst)); - - if (src1) { - GGML_ASSERT(ggml_is_contiguous_1(src1)); - GGML_ASSERT(src0->type == src1->type); - } - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; - const int nr = ggml_nrows(src0); - - GGML_ASSERT(dst->ne[0] == nc); - GGML_ASSERT(ggml_nrows(dst) == nr); - - const int32_t swapped = ggml_get_op_params_i32(dst, 1); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_fp16_t * src0_p = (ggml_fp16_t *) (src0_d + i1 * src0_o); - ggml_fp16_t * src1_p = (ggml_fp16_t *) (src1_d + i1 * src1_o); - - if (!src1) { - src0_p += swapped ? nc : 0; - src1_p += swapped ? 0 : nc; - } - - ggml_vec_geglu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - const float v = GGML_FP16_TO_FP32(x); - GGML_UNUSED(v); - assert(!isnan(v)); - assert(!isinf(v)); - } -#endif - } -} - -static void ggml_compute_forward_geglu(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_geglu_f32(params, dst); - } - break; - case GGML_TYPE_F16: - { - ggml_compute_forward_geglu_f16(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_swiglu - -static void ggml_compute_forward_swiglu_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - char * src0_d = (char *) src0->data; - char * src1_d = (char *) (src1 ? src1->data : src0->data); - const size_t src0_o = src0->nb[1]; - const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; - - GGML_ASSERT(ggml_is_contiguous_1(src0)); - GGML_ASSERT(ggml_is_contiguous_1(dst)); - - if (src1) { - GGML_ASSERT(ggml_is_contiguous_1(src1)); - GGML_ASSERT(src0->type == src1->type); - } - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; - const int nr = ggml_nrows(src0); - - GGML_ASSERT(dst->ne[0] == nc); - GGML_ASSERT(ggml_nrows(dst) == nr); - - const int32_t swapped = ggml_get_op_params_i32(dst, 1); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - float * src0_p = (float *) (src0_d + i1 * src0_o); - float * src1_p = (float *) (src1_d + i1 * src1_o); - - if (!src1) { - src0_p += swapped ? nc : 0; - src1_p += swapped ? 
0 : nc; - } - - ggml_vec_swiglu_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - GGML_UNUSED(x); - assert(!isnan(x)); - assert(!isinf(x)); - } -#endif - } -} - -static void ggml_compute_forward_swiglu_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - char * src0_d = (char *) src0->data; - char * src1_d = (char *) (src1 ? src1->data : src0->data); - const size_t src0_o = src0->nb[1]; - const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; - - GGML_ASSERT(ggml_is_contiguous_1(src0)); - GGML_ASSERT(ggml_is_contiguous_1(dst)); - - if (src1) { - GGML_ASSERT(ggml_is_contiguous_1(src1)); - GGML_ASSERT(src0->type == src1->type); - } - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; - const int nr = ggml_nrows(src0); - - GGML_ASSERT(dst->ne[0] == nc); - GGML_ASSERT(ggml_nrows(dst) == nr); - - const int32_t swapped = ggml_get_op_params_i32(dst, 1); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_fp16_t * src0_p = (ggml_fp16_t *) (src0_d + i1 * src0_o); - ggml_fp16_t * src1_p = (ggml_fp16_t *) (src1_d + i1 * src1_o); - - if (!src1) { - src0_p += swapped ? nc : 0; - src1_p += swapped ? 0 : nc; - } - - ggml_vec_swiglu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - const float v = GGML_FP16_TO_FP32(x); - GGML_UNUSED(v); - assert(!isnan(v)); - assert(!isinf(v)); - } -#endif - } -} - -static void ggml_compute_forward_swiglu(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_swiglu_f32(params, dst); - } - break; - case GGML_TYPE_F16: - { - ggml_compute_forward_swiglu_f16(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_swiglu_oai - -static void ggml_compute_forward_swiglu_oai_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - char * src0_d = (char *) src0->data; - char * src1_d = (char *) (src1 ? src1->data : src0->data); - const size_t src0_o = src0->nb[1]; - const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; - - GGML_ASSERT(ggml_is_contiguous_1(src0)); - GGML_ASSERT(ggml_is_contiguous_1(dst)); - - if (src1) { - GGML_ASSERT(ggml_is_contiguous_1(src1)); - GGML_ASSERT(src0->type == src1->type); - } - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src1 ? 
src0->ne[0] : src0->ne[0] / 2; - const int nr = ggml_nrows(src0); - - GGML_ASSERT(dst->ne[0] == nc); - GGML_ASSERT(ggml_nrows(dst) == nr); - - const int32_t swapped = ggml_get_op_params_i32(dst, 1); - const float alpha = ggml_get_op_params_f32(dst, 2); - const float limit = ggml_get_op_params_f32(dst, 3); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - float * src0_p = (float *) (src0_d + i1 * src0_o); - float * src1_p = (float *) (src1_d + i1 * src1_o); - float * dst_p = (float *) ((char *) dst->data + i1 * (dst->nb[1])); - - if (!src1) { - src0_p += swapped ? nc : 0; - src1_p += swapped ? 0 : nc; - } - - for (int k = 0; k < nc; k++) { - const float x = std::min(src0_p[k], limit); - const float y = std::clamp(src1_p[k], -limit, limit); - const float out_glu = x / (1.f + expf(alpha * (-x))); - dst_p[k] = out_glu * (y + 1.f); - } - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const float x = dst_p[k]; - GGML_UNUSED(x); - assert(!isnan(x)); - assert(!isinf(x)); - } -#endif - } -} - -static void ggml_compute_forward_swiglu_oai(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_swiglu_oai_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_geglu_erf - -static void ggml_compute_forward_geglu_erf_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - char * src0_d = (char *) src0->data; - char * src1_d = (char *) (src1 ? src1->data : src0->data); - const size_t src0_o = src0->nb[1]; - const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; - - GGML_ASSERT(ggml_is_contiguous_1(src0)); - GGML_ASSERT(ggml_is_contiguous_1(dst)); - - if (src1) { - GGML_ASSERT(ggml_is_contiguous_1(src1)); - GGML_ASSERT(src0->type == src1->type); - } - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; - const int nr = ggml_nrows(src0); - - GGML_ASSERT(dst->ne[0] == nc); - GGML_ASSERT(ggml_nrows(dst) == nr); - - const int32_t swapped = ggml_get_op_params_i32(dst, 1); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - float * src0_p = (float *) (src0_d + i1 * src0_o); - float * src1_p = (float *) (src1_d + i1 * src1_o); - - if (!src1) { - src0_p += swapped ? nc : 0; - src1_p += swapped ? 0 : nc; - } - - ggml_vec_geglu_erf_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - GGML_UNUSED(x); - assert(!isnan(x)); - assert(!isinf(x)); - } -#endif - } -} - -static void ggml_compute_forward_geglu_erf_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - char * src0_d = (char *) src0->data; - char * src1_d = (char *) (src1 ? src1->data : src0->data); - const size_t src0_o = src0->nb[1]; - const size_t src1_o = src1 ? 
src1->nb[1] : src0->nb[1]; - - GGML_ASSERT(ggml_is_contiguous_1(src0)); - GGML_ASSERT(ggml_is_contiguous_1(dst)); - - if (src1) { - GGML_ASSERT(ggml_is_contiguous_1(src1)); - GGML_ASSERT(src0->type == src1->type); - } - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; - const int nr = ggml_nrows(src0); - - GGML_ASSERT(dst->ne[0] == nc); - GGML_ASSERT(ggml_nrows(dst) == nr); - - const int32_t swapped = ggml_get_op_params_i32(dst, 1); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_fp16_t * src0_p = (ggml_fp16_t *) (src0_d + i1 * src0_o); - ggml_fp16_t * src1_p = (ggml_fp16_t *) (src1_d + i1 * src1_o); - - if (!src1) { - src0_p += swapped ? nc : 0; - src1_p += swapped ? 0 : nc; - } - - ggml_vec_geglu_erf_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - const float v = GGML_FP16_TO_FP32(x); - GGML_UNUSED(v); - assert(!isnan(v)); - assert(!isinf(v)); - } -#endif - } -} - -static void ggml_compute_forward_geglu_erf(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_geglu_erf_f32(params, dst); - } - break; - case GGML_TYPE_F16: - { - ggml_compute_forward_geglu_erf_f16(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_geglu_quick - -static void ggml_compute_forward_geglu_quick_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - char * src0_d = (char *) src0->data; - char * src1_d = (char *) (src1 ? src1->data : src0->data); - const size_t src0_o = src0->nb[1]; - const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; - - GGML_ASSERT(ggml_is_contiguous_1(src0)); - GGML_ASSERT(ggml_is_contiguous_1(dst)); - - if (src1) { - GGML_ASSERT(ggml_is_contiguous_1(src1)); - GGML_ASSERT(src0->type == src1->type); - } - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; - const int nr = ggml_nrows(src0); - - GGML_ASSERT(dst->ne[0] == nc); - GGML_ASSERT(ggml_nrows(dst) == nr); - - const int32_t swapped = ggml_get_op_params_i32(dst, 1); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - float * src0_p = (float *) (src0_d + i1 * src0_o); - float * src1_p = (float *) (src1_d + i1 * src1_o); - - if (!src1) { - src0_p += swapped ? nc : 0; - src1_p += swapped ? 
0 : nc; - } - - ggml_vec_geglu_quick_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - GGML_UNUSED(x); - assert(!isnan(x)); - assert(!isinf(x)); - } -#endif - } -} - -static void ggml_compute_forward_geglu_quick_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - char * src0_d = (char *) src0->data; - char * src1_d = (char *) (src1 ? src1->data : src0->data); - const size_t src0_o = src0->nb[1]; - const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; - - GGML_ASSERT(ggml_is_contiguous_1(src0)); - GGML_ASSERT(ggml_is_contiguous_1(dst)); - - if (src1) { - GGML_ASSERT(ggml_is_contiguous_1(src1)); - GGML_ASSERT(src0->type == src1->type); - } - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; - const int nr = ggml_nrows(src0); - - GGML_ASSERT(dst->ne[0] == nc); - GGML_ASSERT(ggml_nrows(dst) == nr); - - const int32_t swapped = ggml_get_op_params_i32(dst, 1); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_fp16_t * src0_p = (ggml_fp16_t *) (src0_d + i1 * src0_o); - ggml_fp16_t * src1_p = (ggml_fp16_t *) (src1_d + i1 * src1_o); - - if (!src1) { - src0_p += swapped ? nc : 0; - src1_p += swapped ? 0 : nc; - } - - ggml_vec_geglu_quick_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - const float v = GGML_FP16_TO_FP32(x); - GGML_UNUSED(v); - assert(!isnan(v)); - assert(!isinf(v)); - } -#endif - } -} - -static void ggml_compute_forward_geglu_quick(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_geglu_quick_f32(params, dst); - } - break; - case GGML_TYPE_F16: - { - ggml_compute_forward_geglu_quick_f16(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_norm - -static void ggml_compute_forward_norm_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - - GGML_ASSERT(src0->nb[0] == sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - GGML_TENSOR_UNARY_OP_LOCALS - - float eps; - memcpy(&eps, dst->op_params, sizeof(float)); - - GGML_ASSERT(eps >= 0.0f); - - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = ith; i01 < ne01; i01 += nth) { - const float * x = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); - - float sum = 0.0; - ggml_vec_sum_f32(ne00, &sum, x); - float mean = sum / ne00; - - float * y = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); - float variance = 0; - -#ifdef GGML_USE_ACCELERATE - mean = -mean; - vDSP_vsadd(x, 1, &mean, y, 1, ne00); - vDSP_measqv(y, 1, &variance, ne00); -#else - variance = ggml_vec_cvar_f32(ne00, y, x, mean); -#endif //GGML_USE_ACCELERATE - - const float scale = 1.0f / sqrtf(variance + eps); - ggml_vec_scale_f32(ne00, y, 
scale); - } - } - } -} - -void ggml_compute_forward_norm(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_norm_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_group_rms_norm - -static void ggml_compute_forward_rms_norm_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - - GGML_ASSERT(src0->nb[0] == sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - GGML_TENSOR_UNARY_OP_LOCALS - - float eps; - memcpy(&eps, dst->op_params, sizeof(float)); - - GGML_ASSERT(eps >= 0.0f); - - // TODO: optimize - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = ith; i01 < ne01; i01 += nth) { - const float * x = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); - - ggml_float sum = 0.0; - for (int64_t i00 = 0; i00 < ne00; i00++) { - sum += (ggml_float) (x[i00] * x[i00]); - } - - const float mean = sum / ne00; - - float * y = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); - - memcpy(y, x, ne00 * sizeof(float)); - // for (int i00 = 0; i00 < ne00; i00++) { - // y[i00] = x[i00]; - // } - - const float scale = 1.0f / sqrtf(mean + eps); - - // if you hit this, likely you got an inf somewhere earlier - assert(scale > 0.0f); - - ggml_vec_scale_f32(ne00, y, scale); - } - } - } -} - -void ggml_compute_forward_rms_norm(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_rms_norm_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -static void ggml_compute_forward_rms_norm_back_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; // gradients from forward pass output - const ggml_tensor * src1 = dst->src[1]; // src1 from forward pass - - GGML_ASSERT(ggml_are_same_shape(src0, dst) && ggml_are_same_shape(src0, src1)); - - GGML_ASSERT(src0->nb[0] == sizeof(float)); - GGML_ASSERT(src1->nb[0] == sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - GGML_TENSOR_BINARY_OP_LOCALS - - float eps; - memcpy(&eps, dst->op_params, sizeof(float)); - - // TODO: optimize - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = ith; i01 < ne01; i01 += nth) { - // src1 is same shape as src0 => same indices - const int64_t i11 = i01; - const int64_t i12 = i02; - const int64_t i13 = i03; - - const float * dz = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); - const float * x = (float *) ((char *) src1->data + i11 * nb11 + i12 * nb12 + i13 * nb13); - - ggml_float sum_xx = 0.0; - ggml_float sum_xdz = 0.0; - - for (int64_t i00 = 0; i00 < ne00; i00++) { - sum_xx += (ggml_float) (x[i00] * x[i00]); - sum_xdz += (ggml_float) (x[i00] * dz[i00]); - } - - //const float mean = (float)(sum_xx)/ne00; - const float mean_eps = (float) (sum_xx) / ne00 + eps; - const float sum_eps = (float) (sum_xx) + eps * ne00; - //const float mean_xdz = (float)(sum_xdz)/ne00; - // we could cache rms from forward pass to improve performance. - // to do this implement ggml_rms and compose ggml_rms_norm using ggml_rms. 
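[Editor's note: illustrative sketch, not part of the patch.] The long comment derivation that follows collapses to dx = (dz - x * sum_xdz / sum_eps) * rrms, which is exactly what the ggml_vec_cpy_f32 / ggml_vec_scale_f32 / ggml_vec_acc_f32 / ggml_vec_scale_f32 sequence at the end of this loop computes. A minimal scalar reference for one row, using the same quantities (sum_xx, sum_xdz, eps) and a hypothetical helper name, could look like this:

    #include <math.h>
    #include <stdint.h>

    // reference RMS-norm backward for a single row of length n (editorial sketch):
    //   x  - forward-pass input row
    //   dz - gradient flowing in from the forward output
    //   dx - resulting gradient w.r.t. x
    static void rms_norm_back_row_ref(int64_t n, float * dx, const float * x, const float * dz, float eps) {
        double sum_xx  = 0.0; // sum of squares of x
        double sum_xdz = 0.0; // correlation of x with dz
        for (int64_t i = 0; i < n; i++) {
            sum_xx  += (double) x[i] * x[i];
            sum_xdz += (double) x[i] * dz[i];
        }
        const float mean_eps = (float) (sum_xx / n) + eps; // rms^2 + eps
        const float sum_eps  = (float) sum_xx + eps * n;   // n * mean_eps
        const float rrms     = 1.0f / sqrtf(mean_eps);
        for (int64_t i = 0; i < n; i++) {
            // dx = (dz - x * sum_xdz / (n * mean_eps)) * rrms
            dx[i] = (dz[i] - x[i] * (float) (sum_xdz / sum_eps)) * rrms;
        }
    }
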
- //const float rms = sqrtf(mean_eps); - const float rrms = 1.0f / sqrtf(mean_eps); - //const float scale = -rrms/(ne00 * mean_eps); // -1/(n*rms**3) - - { - // z = rms_norm(x) - // - // rms_norm(src1) = - // scale( - // src1, - // div( - // 1, - // sqrt( - // add( - // scale( - // sum( - // sqr( - // src1)), - // (1.0/N)), - // eps)))); - - // postorder: - // ## op args grad - // 00 param src1 grad[#00] - // 01 const 1 - // 02 sqr (#00) grad[#02] - // 03 sum (#02) grad[#03] - // 04 const 1/N - // 05 scale (#03, #04) grad[#05] - // 06 const eps - // 07 add (#05, #06) grad[#07] - // 08 sqrt (#07) grad[#08] - // 09 div (#01,#08) grad[#09] - // 10 scale (#00,#09) grad[#10] - // - // backward pass, given grad[#10] - // #10: scale - // grad[#00] += scale(grad[#10],#09) - // grad[#09] += sum(mul(grad[#10],#00)) - // #09: div - // grad[#08] += neg(mul(grad[#09], div(#09,#08))) - // #08: sqrt - // grad[#07] += mul(grad[#08], div(0.5, #08)) - // #07: add - // grad[#05] += grad[#07] - // #05: scale - // grad[#03] += scale(grad[#05],#04) - // #03: sum - // grad[#02] += repeat(grad[#03], #02) - // #02: - // grad[#00] += scale(mul(#00, grad[#02]), 2.0) - // - // substitute and simplify: - // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, grad[#02]), 2.0) - // grad[#02] = repeat(grad[#03], #02) - // grad[#02] = repeat(scale(grad[#05],#04), #02) - // grad[#02] = repeat(scale(grad[#07],#04), #02) - // grad[#02] = repeat(scale(mul(grad[#08], div(0.5, #08)),#04), #02) - // grad[#02] = repeat(scale(mul(neg(mul(grad[#09], div(#09,#08))), div(0.5, #08)),#04), #02) - // grad[#02] = repeat(scale(mul(neg(mul(sum(mul(grad[#10],#00)), div(#09,#08))), div(0.5, #08)),#04), #02) - // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(#09,#08) * div(0.5, #08) * (1/N)), #02) - // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(div(#01,#08),#08) * div(0.5, #08) * (1/N)), #02) - // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(1,#08*#08) * div(0.5, #08) * (1/N)), #02) - // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N)), #02) - // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, grad[#02]), 2.0) - // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, repeat(-(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N)), #02)), 2.0) - // grad[#00] = scale(grad(#10), #09) + scale(scale(#00, -(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N))), 2.0) - // grad[#00] = scale(grad(#10), #09) + scale(#00, -(sum(mul(grad[#10],#00)) * div(1,#07) * div(1,#08) * (1/N))) - // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,#07*#08) * (-1/N)) - // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,#07*#08) * (-1/N)) - // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,mean_eps*rms) * (-1/N)) - // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*mean_eps)) - // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*(sum_xx/N+eps))) - // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*sum_xx+rms*N*eps)) - // grad[#00] = scale(dz, rrms) + scale(x, sum(mul(dz,x)) * div(-1,rms*N*mean_eps)) - // grad[#00] = scale(dz, rrms) + scale(x, sum_xdz * div(-1,rms*N*mean_eps)) - // a = b*c + d*e - // a = b*c*f/f + d*e*f/f - // a = (b*c*f + d*e*f)*(1/f) - // a = (b*c*(1/c) + d*e*(1/c))*(1/(1/c)) - // a = (b + d*e/c)*c - // b = dz, c = rrms, d = x, e = sum_xdz * div(-1,rms*N*mean_eps) - // a = (dz + 
x*sum_xdz * div(-1,rms*N*mean_eps)/rrms)*rrms - // a = (dz + x*sum_xdz * div(-1,rms*N*mean_eps)*rms)*rrms - // a = (dz + x*sum_xdz * div(-rms,rms*N*mean_eps))*rrms - // a = (dz + x*sum_xdz * div(-1,N*mean_eps))*rrms - // a = (dz + x*div(-sum_xdz,N*mean_eps))*rrms - // a = (dz + x*div(-mean_xdz,mean_eps))*rrms - // grad[#00] = scale(dz + scale(x, div(-mean_xdz,mean_eps)),rrms) - // grad[#00] = scale(dz + scale(x, -mean_xdz/mean_eps),rrms) - // dx = scale(dz + scale(x, -mean_xdz/mean_eps),rrms) - } - // dx = scale(dz + scale(x, -mean_xdz/mean_eps),rrms) - // post-order: - // dx := x - // dx := scale(dx,-mean_xdz/mean_eps) - // dx := add(dx, dz) - // dx := scale(dx, rrms) - float * dx = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); - - // dx[i00] = (x*(-sum_xdz/sum_eps) + dz) / sqrtf(mean_eps) - ggml_vec_cpy_f32(ne00, dx, x); - // ggml_vec_scale_f32(ne00, dx, -mean_xdz/mean_eps); - ggml_vec_scale_f32(ne00, dx, (float) (-sum_xdz) / sum_eps); - ggml_vec_acc_f32(ne00, dx, dz); - ggml_vec_scale_f32(ne00, dx, rrms); - } - } - } -} - -void ggml_compute_forward_rms_norm_back(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_rms_norm_back_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_group_norm - -static void ggml_compute_forward_group_norm_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - - GGML_ASSERT(src0->nb[0] == sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - GGML_TENSOR_UNARY_OP_LOCALS - - // TODO: optimize - - float eps; - memcpy(&eps, dst->op_params + 1, sizeof(float)); - - int n_channels = src0->ne[2]; - int n_groups = dst->op_params[0]; - int n_channels_per_group = (n_channels + n_groups - 1) / n_groups; - for (int i = ith; i < n_groups; i += nth) { - int start = i * n_channels_per_group; - int end = start + n_channels_per_group; - if (end > n_channels) { - end = n_channels; - } - int step = end - start; - - for (int64_t i03 = 0; i03 < ne03; i03++) { - ggml_float sum = 0.0; - for (int64_t i02 = start; i02 < end; i02++) { - for (int64_t i01 = 0; i01 < ne01; i01++) { - const float * x = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); - - ggml_float sumr = 0.0; - for (int64_t i00 = 0; i00 < ne00; i00++) { - sumr += (ggml_float) x[i00]; - } - sum += sumr; - } - } - const float mean = sum / (ne00 * ne01 * step); - - ggml_float sum2 = 0.0; - for (int64_t i02 = start; i02 < end; i02++) { - for (int64_t i01 = 0; i01 < ne01; i01++) { - const float * x = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); - - float * y = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); - - ggml_float sumr = 0.0; - for (int64_t i00 = 0; i00 < ne00; i00++) { - float v = x[i00] - mean; - y[i00] = v; - sumr += (ggml_float) (v * v); - } - sum2 += sumr; - } - } - const float variance = sum2 / (ne00 * ne01 * step); - const float scale = 1.0f / sqrtf(variance + eps); - - for (int64_t i02 = start; i02 < end; i02++) { - for (int64_t i01 = 0; i01 < ne01; i01++) { - float * y = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); - ggml_vec_scale_f32(ne00, y, scale); - } - } - } - } -} - -void ggml_compute_forward_group_norm(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * 
src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_group_norm_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_l2_norm - -static void ggml_compute_forward_l2_norm_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - - GGML_ASSERT(src0->nb[0] == sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - GGML_TENSOR_UNARY_OP_LOCALS - - float eps; - memcpy(&eps, dst->op_params, sizeof(float)); - - GGML_ASSERT(eps >= 0.0f); - - // TODO: optimize - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = ith; i01 < ne01; i01 += nth) { - const float * x = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); - - ggml_float sum = 0.0; - for (int64_t i00 = 0; i00 < ne00; i00++) { - sum += (ggml_float) (x[i00] * x[i00]); - } - - float * y = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); - - memcpy(y, x, ne00 * sizeof(float)); - - const float scale = 1.0f / fmaxf(sqrtf(sum), eps); - - ggml_vec_scale_f32(ne00, y, scale); - } - } - } -} - -void ggml_compute_forward_l2_norm(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_l2_norm_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_out_prod - -static void ggml_compute_forward_out_prod_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_TENSOR_BINARY_OP_LOCALS - - GGML_ASSERT(dst->type == GGML_TYPE_F32); - GGML_ASSERT(src0->type == GGML_TYPE_F32); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - - const int ith = params->ith; - const int nth = params->nth; - - GGML_ASSERT(ne0 == ne00); - GGML_ASSERT(ne1 == ne10); - GGML_ASSERT(ne2 == ne12); - GGML_ASSERT(ne3 == ne13); - - GGML_ASSERT(ne2 % ne02 == 0); - GGML_ASSERT(ne3 % ne03 == 0); - - // we don't support permuted src0 or src1 - GGML_ASSERT(nb00 == sizeof(float)); - - // dst cannot be transposed or permuted - GGML_ASSERT(nb0 == sizeof(float)); - // GGML_ASSERT(nb0 <= nb1); - // GGML_ASSERT(nb1 <= nb2); - // GGML_ASSERT(nb2 <= nb3); - - // nb01 >= nb00 - src0 is not transposed - // compute by src0 rows - - if (ith == 0) { - ggml_vec_set_f32(ne0 * ne1 * ne2 * ne3, (float *) dst->data, 0); - } - ggml_barrier(params->threadpool); - - // dst[:,:,:,:] = 0 - // for i2,i3: - // for i1: - // for i01: - // for i0: - // dst[i0,i1,i2,i3] += src0[i0,i01,i2,i3] * src1[i1,i01,i2,i3] - - // parallelize by last three dimensions - - // total rows in dst - const int64_t nr = ne1 * ne2 * ne3; - - // rows per thread - const int64_t dr = (nr + nth - 1) / nth; - - // row range for this thread - const int64_t ir0 = dr * ith; - const int64_t ir1 = MIN(ir0 + dr, nr); - - // block-tiling attempt - const int64_t blck_0 = MAX(GGML_VEC_MAD_UNROLL, 32); - const int64_t blck_1 = 16; - - // dps == dst per src0, used for group query attention - const int64_t dps2 = ne2 / ne02; - const int64_t dps3 = ne3 / ne03; - - for (int64_t bir = ir0; bir < ir1; bir += blck_1) { - const int64_t bir1 = MIN(bir + blck_1, ir1); - for (int64_t bi01 = 0; bi01 < ne01; bi01 += blck_0) { - const int64_t bne01 = MIN(bi01 + blck_0, ne01); - for (int64_t ir = 
bir; ir < bir1; ++ir) { - // dst indices - const int64_t i3 = ir / (ne2 * ne1); - const int64_t i2 = (ir - i3 * ne2 * ne1) / ne1; - const int64_t i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); - - const int64_t i02 = i2 / dps2; - const int64_t i03 = i3 / dps3; - - //const int64_t i10 = i1; - const int64_t i12 = i2; - const int64_t i13 = i3; - -#if GGML_VEC_MAD_UNROLL > 2 - const int64_t bne01_unroll = bne01 - (bne01 % GGML_VEC_MAD_UNROLL); - for (int64_t i01 = bi01; i01 < bne01_unroll; i01 += GGML_VEC_MAD_UNROLL) { - const int64_t i11 = i01; - - float * s0 = (float *) ((char *) src0->data + (i01 * nb01 + i02 * nb02 + i03 * nb03)); - float * s1 = (float *) ((char *) src1->data + (i1 * nb10 + i11 * nb11 + i12 * nb12 + i13 * nb13)); - float * d = (float *) ((char *) dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb3)); - - ggml_vec_mad_f32_unroll(ne0, nb01, nb11, d, s0, s1); - } - for (int64_t i01 = bne01_unroll; i01 < bne01; ++i01) { - const int64_t i11 = i01; - - float * s0 = (float *) ((char *) src0->data + (i01 * nb01 + i02 * nb02 + i03 * nb03)); - float * s1 = (float *) ((char *) src1->data + (i1 * nb10 + i11 * nb11 + i12 * nb12 + i13 * nb13)); - float * d = (float *) ((char *) dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb3)); - - ggml_vec_mad_f32(ne0, d, s0, *s1); - } -#else - for (int64_t i01 = bi01; i01 < bne01; ++i01) { - const int64_t i11 = i01; - - float * s0 = (float *) ((char *) src0->data + (i01 * nb01 + i02 * nb02 + i03 * nb03)); - float * s1 = (float *) ((char *) src1->data + (i1 * nb10 + i11 * nb11 + i12 * nb12 + i13 * nb13)); - float * d = (float *) ((char *) dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb3)); - - ggml_vec_mad_f32(ne0, d, s0, *s1); - } -#endif - } - } - } -} - -static void ggml_compute_forward_out_prod_q_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_TENSOR_BINARY_OP_LOCALS; - - const int ith = params->ith; - const int nth = params->nth; - - const ggml_type type = src0->type; - const ggml_to_float_t dequantize_row_q = ggml_get_type_traits(type)->to_float; - - GGML_ASSERT(ne02 == ne12); - GGML_ASSERT(ne03 == ne13); - GGML_ASSERT(ne2 == ne12); - GGML_ASSERT(ne3 == ne13); - - // we don't support permuted src0 dim0 - GGML_ASSERT(nb00 == ggml_type_size(type)); - - // dst dim0 cannot be transposed or permuted - GGML_ASSERT(nb0 == sizeof(float)); - // GGML_ASSERT(nb0 <= nb1); - // GGML_ASSERT(nb1 <= nb2); - // GGML_ASSERT(nb2 <= nb3); - - GGML_ASSERT(ne0 == ne00); - GGML_ASSERT(ne1 == ne10); - GGML_ASSERT(ne2 == ne02); - GGML_ASSERT(ne3 == ne03); - - // nb01 >= nb00 - src0 is not transposed - // compute by src0 rows - - if (ith == 0) { - ggml_vec_set_f32(ne0 * ne1 * ne2 * ne3, (float *) dst->data, 0); - } - ggml_barrier(params->threadpool); - - // parallelize by last three dimensions - - // total rows in dst - const int64_t nr = ne1 * ne2 * ne3; - - // rows per thread - const int64_t dr = (nr + nth - 1) / nth; - - // row range for this thread - const int64_t ir0 = dr * ith; - const int64_t ir1 = MIN(ir0 + dr, nr); - - // dst[:,:,:,:] = 0 - // for i2,i3: - // for i1: - // for i01: - // for i0: - // dst[i0,i1,i2,i3] += src0[i0,i01,i2,i3] * src1[i1,i01,i2,i3] - - float * wdata = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32) * ith; - - for (int64_t ir = ir0; ir < ir1; ++ir) { - // dst indices - const int64_t i3 = ir / (ne2 * ne1); - const int64_t i2 = (ir - i3 * ne2 * ne1) / ne1; - const int64_t i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); - - const int64_t i02 = i2; - const int64_t 
i03 = i3; - - //const int64_t i10 = i1; - const int64_t i12 = i2; - const int64_t i13 = i3; - - for (int64_t i01 = 0; i01 < ne01; ++i01) { - const int64_t i11 = i01; - - float * s0 = (float *) ((char *) src0->data + (i01 * nb01 + i02 * nb02 + i03 * nb03)); - float * s1 = (float *) ((char *) src1->data + (i1 * nb10 + i11 * nb11 + i12 * nb12 + i13 * nb13)); - float * d = (float *) ((char *) dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb3)); - - dequantize_row_q(s0, wdata, ne0); - ggml_vec_mad_f32(ne0, d, wdata, *s1); - } - } -} - -void ggml_compute_forward_out_prod(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_Q4_0: - case GGML_TYPE_Q4_1: - case GGML_TYPE_Q5_0: - case GGML_TYPE_Q5_1: - case GGML_TYPE_Q8_0: - case GGML_TYPE_MXFP4: - case GGML_TYPE_Q2_K: - case GGML_TYPE_Q3_K: - case GGML_TYPE_Q4_K: - case GGML_TYPE_Q5_K: - case GGML_TYPE_Q6_K: - case GGML_TYPE_TQ1_0: - case GGML_TYPE_TQ2_0: - case GGML_TYPE_IQ2_XXS: - case GGML_TYPE_IQ2_XS: - case GGML_TYPE_IQ3_XXS: - case GGML_TYPE_IQ1_S: - case GGML_TYPE_IQ1_M: - case GGML_TYPE_IQ4_NL: - case GGML_TYPE_IQ4_XS: - case GGML_TYPE_IQ3_S: - case GGML_TYPE_IQ2_S: - { - ggml_compute_forward_out_prod_q_f32(params, dst); - } - break; - case GGML_TYPE_F16: - { - GGML_ABORT("fatal error"); // todo - // ggml_compute_forward_out_prod_f16_f32(params, dst); - } - case GGML_TYPE_F32: - { - ggml_compute_forward_out_prod_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_scale - -static void ggml_compute_forward_scale_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_ASSERT(ggml_is_contiguous(src0)); - GGML_ASSERT(ggml_is_contiguous(dst)); - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - - float s; // scale factor - float b; // bias - - memcpy(&s, (float *) dst->op_params + 0, sizeof(float)); - memcpy(&b, (float *) dst->op_params + 1, sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - const size_t nb01 = src0->nb[1]; - - const size_t nb1 = dst->nb[1]; - - if (b == 0.0f) { - for (int i1 = ir0; i1 < ir1; i1++) { - if (dst->data != src0->data) { - // src0 is same shape as dst => same indices - // TODO: add x parameter to ggml_vec_scale_f32 and remove this memcpy - memcpy((char *) dst->data + i1 * nb1, (char *) src0->data + i1 * nb01, nc * sizeof(float)); - } - ggml_vec_scale_f32(nc, (float *) ((char *) dst->data + i1 * nb1), s); - } - } else { - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_vec_mad1_f32(nc, (float *) ((char *) dst->data + i1 * nb1), (float *) ((char *) src0->data + i1 * nb1), - s, b); - } - } -} - -void ggml_compute_forward_scale(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_scale_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_set - -static void ggml_compute_forward_set_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - 
GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0)); - - // view src0 and dst with these strides and data offset inbytes during set - // nb0 is implicitly element_size because src0 and dst are contiguous - size_t nb1 = ((int32_t *) dst->op_params)[0]; - size_t nb2 = ((int32_t *) dst->op_params)[1]; - size_t nb3 = ((int32_t *) dst->op_params)[2]; - size_t offset = ((int32_t *) dst->op_params)[3]; - bool inplace = (bool) ((int32_t *) dst->op_params)[4]; - - if (!inplace) { - if (params->ith == 0) { - // memcpy needs to be synchronized across threads to avoid race conditions. - // => do it in INIT phase - memcpy(((char *) dst->data), ((char *) src0->data), ggml_nbytes(dst)); - } - ggml_barrier(params->threadpool); - } - - const int ith = params->ith; - const int nth = params->nth; - - const int nr = ggml_nrows(src1); - const int nc = src1->ne[0]; - - GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) - GGML_TENSOR_LOCALS(size_t, nb1, src1, nb) - - // src0 and dst as viewed during set - const size_t nb0 = ggml_element_size(src0); - - const int im0 = (ne10 == 0 ? 0 : ne10 - 1); - const int im1 = (ne11 == 0 ? 0 : ne11 - 1); - const int im2 = (ne12 == 0 ? 0 : ne12 - 1); - const int im3 = (ne13 == 0 ? 0 : ne13 - 1); - - GGML_ASSERT(offset + im0 * nb0 + im1 * nb1 + im2 * nb2 + im3 * nb3 <= ggml_nbytes(dst)); - - GGML_ASSERT(nb10 == sizeof(float)); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int ir = ir0; ir < ir1; ++ir) { - // src0 and dst are viewed with shape of src1 and offset - // => same indices - const int i3 = ir / (ne12 * ne11); - const int i2 = (ir - i3 * ne12 * ne11) / ne11; - const int i1 = (ir - i3 * ne12 * ne11 - i2 * ne11); - - ggml_vec_cpy_f32(nc, (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1 + offset), - (float *) ((char *) src1->data + i3 * nb13 + i2 * nb12 + i1 * nb11)); - } -} - -static void ggml_compute_forward_set_i32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0)); - - // view src0 and dst with these strides and data offset inbytes during set - // nb0 is implicitly element_size because src0 and dst are contiguous - size_t nb1 = ((int32_t *) dst->op_params)[0]; - size_t nb2 = ((int32_t *) dst->op_params)[1]; - size_t nb3 = ((int32_t *) dst->op_params)[2]; - size_t offset = ((int32_t *) dst->op_params)[3]; - bool inplace = (bool) ((int32_t *) dst->op_params)[4]; - - if (!inplace) { - if (params->ith == 0) { - // memcpy needs to be synchronized across threads to avoid race conditions. - // => do it in INIT phase - memcpy(((char *) dst->data), ((char *) src0->data), ggml_nbytes(dst)); - } - ggml_barrier(params->threadpool); - } - - const int ith = params->ith; - const int nth = params->nth; - - const int nr = ggml_nrows(src1); - const int nc = src1->ne[0]; - - GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) - GGML_TENSOR_LOCALS(size_t, nb1, src1, nb) - - // src0 and dst as viewed during set - const size_t nb0 = ggml_element_size(src0); - - const int im0 = (ne10 == 0 ? 0 : ne10 - 1); - const int im1 = (ne11 == 0 ? 0 : ne11 - 1); - const int im2 = (ne12 == 0 ? 0 : ne12 - 1); - const int im3 = (ne13 == 0 ? 
0 : ne13 - 1); - - GGML_ASSERT(offset + im0 * nb0 + im1 * nb1 + im2 * nb2 + im3 * nb3 <= ggml_nbytes(dst)); - - GGML_ASSERT(nb10 == sizeof(int32_t)); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int ir = ir0; ir < ir1; ++ir) { - // src0 and dst are viewed with shape of src1 and offset - // => same indices - const int i3 = ir / (ne12 * ne11); - const int i2 = (ir - i3 * ne12 * ne11) / ne11; - const int i1 = (ir - i3 * ne12 * ne11 - i2 * ne11); - - ggml_vec_cpy_i32(nc, (int32_t *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1 + offset), - (int32_t *) ((char *) src1->data + i3 * nb13 + i2 * nb12 + i1 * nb11)); - } -} - -void ggml_compute_forward_set(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_set_f32(params, dst); - } - break; - case GGML_TYPE_I32: - { - ggml_compute_forward_set_i32(params, dst); - } - break; - case GGML_TYPE_F16: - case GGML_TYPE_BF16: - case GGML_TYPE_Q4_0: - case GGML_TYPE_Q4_1: - case GGML_TYPE_Q5_0: - case GGML_TYPE_Q5_1: - case GGML_TYPE_Q8_0: - case GGML_TYPE_Q8_1: - case GGML_TYPE_MXFP4: - case GGML_TYPE_Q2_K: - case GGML_TYPE_Q3_K: - case GGML_TYPE_Q4_K: - case GGML_TYPE_Q5_K: - case GGML_TYPE_Q6_K: - case GGML_TYPE_TQ1_0: - case GGML_TYPE_TQ2_0: - case GGML_TYPE_IQ2_XXS: - case GGML_TYPE_IQ2_XS: - case GGML_TYPE_IQ3_XXS: - case GGML_TYPE_IQ1_S: - case GGML_TYPE_IQ1_M: - case GGML_TYPE_IQ4_NL: - case GGML_TYPE_IQ4_XS: - case GGML_TYPE_IQ3_S: - case GGML_TYPE_IQ2_S: - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_cpy - -void ggml_compute_forward_cpy(const ggml_compute_params * params, ggml_tensor * dst) { - ggml_compute_forward_dup(params, dst); -} - -// ggml_compute_forward_cont - -void ggml_compute_forward_cont(const ggml_compute_params * params, ggml_tensor * dst) { - ggml_compute_forward_dup(params, dst); -} - -// ggml_compute_forward_get_rows - -static void ggml_compute_forward_get_rows_q(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_TENSOR_BINARY_OP_LOCALS - - const int64_t nc = ne00; - const int64_t nr = ggml_nelements(src1); - - const ggml_type type = src0->type; - const ggml_to_float_t dequantize_row_q = ggml_get_type_traits(type)->to_float; - - assert(ne0 == nc); - assert(ne02 == ne11); - assert(nb00 == ggml_type_size(type)); - assert(ggml_nrows(dst) == nr); - - const int ith = params->ith; - const int nth = params->nth; - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int64_t i = ir0; i < ir1; ++i) { - const int64_t i12 = i / (ne11 * ne10); - const int64_t i11 = (i - i12 * ne11 * ne10) / ne10; - const int64_t i10 = (i - i12 * ne11 * ne10 - i11 * ne10); - const int64_t i01 = *(int32_t *) ((char *) src1->data + i10 * nb10 + i11 * nb11 + i12 * nb12); - - GGML_ASSERT(i01 >= 0 && i01 < ne01); - - dequantize_row_q((const void *) ((char *) src0->data + i01 * nb01 + i11 * nb02 + i12 * nb03), - (float *) ((char *) dst->data + i10 * nb1 + i11 * nb2 + i12 * nb3), nc); - } -} - -static void ggml_compute_forward_get_rows_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = 
dst->src[1]; - - GGML_TENSOR_BINARY_OP_LOCALS - - const int64_t nc = ne00; - const int64_t nr = ggml_nelements(src1); - - assert(ne0 == nc); - assert(ne02 == ne11); - assert(nb00 == sizeof(ggml_fp16_t)); - assert(ggml_nrows(dst) == nr); - - const int ith = params->ith; - const int nth = params->nth; - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int64_t i = ir0; i < ir1; ++i) { - const int64_t i12 = i / (ne11 * ne10); - const int64_t i11 = (i - i12 * ne11 * ne10) / ne10; - const int64_t i10 = (i - i12 * ne11 * ne10 - i11 * ne10); - const int64_t i01 = *(int32_t *) ((char *) src1->data + i10 * nb10 + i11 * nb11 + i12 * nb12); - - GGML_ASSERT(i01 >= 0 && i01 < ne01); - - ggml_cpu_fp16_to_fp32((const ggml_fp16_t *) ((char *) src0->data + i01 * nb01 + i11 * nb02 + i12 * nb03), - (float *) ((char *) dst->data + i10 * nb1 + i11 * nb2 + i12 * nb3), nc); - } -} - -static void ggml_compute_forward_get_rows_bf16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_TENSOR_BINARY_OP_LOCALS - - const int64_t nc = ne00; - const int64_t nr = ggml_nelements(src1); - - assert(ne0 == nc); - assert(ne02 == ne11); - assert(nb00 == sizeof(ggml_bf16_t)); - assert(ggml_nrows(dst) == nr); - - const int ith = params->ith; - const int nth = params->nth; - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int64_t i = ir0; i < ir1; ++i) { - const int64_t i12 = i / (ne11 * ne10); - const int64_t i11 = (i - i12 * ne11 * ne10) / ne10; - const int64_t i10 = (i - i12 * ne11 * ne10 - i11 * ne10); - const int64_t i01 = *(int32_t *) ((char *) src1->data + i10 * nb10 + i11 * nb11 + i12 * nb12); - - GGML_ASSERT(i01 >= 0 && i01 < ne01); - - ggml_cpu_bf16_to_fp32((const ggml_bf16_t *) ((char *) src0->data + i01 * nb01 + i11 * nb02 + i12 * nb03), - (float *) ((char *) dst->data + i10 * nb1 + i11 * nb2 + i12 * nb3), nc); - } -} - -static void ggml_compute_forward_get_rows_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_TENSOR_BINARY_OP_LOCALS - - const int64_t nc = ne00; - const int64_t nr = ggml_nelements(src1); - - assert(ne0 == nc); - assert(ne02 == ne11); - assert(nb00 == sizeof(float)); - assert(ggml_nrows(dst) == nr); - - const int ith = params->ith; - const int nth = params->nth; - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int64_t i = ir0; i < ir1; ++i) { - const int64_t i12 = i / (ne11 * ne10); - const int64_t i11 = (i - i12 * ne11 * ne10) / ne10; - const int64_t i10 = (i - i12 * ne11 * ne10 - i11 * ne10); - const int64_t i01 = *(int32_t *) ((char *) src1->data + i10 * nb10 + i11 * nb11 + i12 * nb12); - - GGML_ASSERT(i01 >= 0 && i01 < ne01); - - ggml_vec_cpy_f32(nc, (float *) ((char *) dst->data + i10 * nb1 + i11 * nb2 + i12 * nb3), - (float *) ((char *) src0->data + i01 * nb01 + i11 * nb02 + i12 * nb03)); - } -} - -void ggml_compute_forward_get_rows(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_Q4_0: - case GGML_TYPE_Q4_1: - case GGML_TYPE_Q5_0: - case GGML_TYPE_Q5_1: - 
case GGML_TYPE_Q8_0: - case GGML_TYPE_Q8_1: - case GGML_TYPE_MXFP4: - case GGML_TYPE_Q2_K: - case GGML_TYPE_Q3_K: - case GGML_TYPE_Q4_K: - case GGML_TYPE_Q5_K: - case GGML_TYPE_Q6_K: - case GGML_TYPE_TQ1_0: - case GGML_TYPE_TQ2_0: - case GGML_TYPE_IQ2_XXS: - case GGML_TYPE_IQ2_XS: - case GGML_TYPE_IQ3_XXS: - case GGML_TYPE_IQ1_S: - case GGML_TYPE_IQ1_M: - case GGML_TYPE_IQ4_NL: - case GGML_TYPE_IQ4_XS: - case GGML_TYPE_IQ3_S: - case GGML_TYPE_IQ2_S: - { - ggml_compute_forward_get_rows_q(params, dst); - } - break; - case GGML_TYPE_F16: - { - ggml_compute_forward_get_rows_f16(params, dst); - } - break; - case GGML_TYPE_BF16: - { - ggml_compute_forward_get_rows_bf16(params, dst); - } - break; - case GGML_TYPE_F32: - case GGML_TYPE_I32: - { - ggml_compute_forward_get_rows_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } - - //static bool first = true; - //printf("ne0 = %d, ne1 = %d, ne2 = %d\n", dst->ne[0], dst->ne[1], dst->ne[2]); - //if (first) { - // first = false; - //} else { - // for (int k = 0; k < dst->ne[1]; ++k) { - // for (int j = 0; j < dst->ne[0]/16; ++j) { - // for (int i = 0; i < 16; ++i) { - // printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]); - // } - // printf("\n"); - // } - // printf("\n"); - // } - // printf("\n"); - // exit(0); - //} -} - -template -static void ggml_compute_forward_set_rows_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_TENSOR_BINARY_OP_LOCALS - - const int64_t nc = ne00; - const int64_t nr = ne01; - - assert(ne0 == nc); - assert(ne2 == ne02); - assert(ne3 == ne03); - assert(src0->type == GGML_TYPE_F32); - assert(ne02 % ne11 == 0); - assert(ne03 % ne12 == 0); - - const int ith = params->ith; - const int nth = params->nth; - - // rows per thread - const int64_t dr = (nr + nth - 1) / nth; - - // row range for this thread - const int64_t ir0 = dr * ith; - const int64_t ir1 = std::min(ir0 + dr, nr); - - const ggml_from_float_t from_float = ggml_get_type_traits_cpu(dst->type)->from_float; - - for (int64_t i03 = 0; i03 < ne03; ++i03) { - for (int64_t i02 = 0; i02 < ne02; ++i02) { - for (int64_t i = ir0; i < ir1; ++i) { - const int64_t i12 = i03 % ne12; - const int64_t i11 = i02 % ne11; - const int64_t i10 = i; - - const int64_t i1 = *(idx_t *) ((char *) src1->data + i10 * nb10 + i11 * nb11 + i12 * nb12); - - GGML_ASSERT(i1 >= 0 && i1 < ne1); - - from_float((const float *) ((char *) src0->data + i * nb01 + i02 * nb02 + i03 * nb03), - ((char *) dst->data + i1 * nb1 + i02 * nb2 + i03 * nb3), nc); - } - } - } -} - -void ggml_compute_forward_set_rows(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - if (src1->type == GGML_TYPE_I64) { - ggml_compute_forward_set_rows_f32(params, dst); - } else if (src1->type == GGML_TYPE_I32) { - ggml_compute_forward_set_rows_f32(params, dst); - } else { - GGML_ABORT("src1->type = %d (%s) not supported", src1->type, ggml_type_name(src1->type)); - } - } - break; - default: - { - GGML_ABORT("src0->type = %d (%s) not supported", src0->type, ggml_type_name(src0->type)); - } - } -} - -// ggml_compute_forward_get_rows_back - -static void ggml_compute_forward_get_rows_back_f32_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - if (params->ith != 0) 
{ - return; - } - - GGML_ASSERT(ggml_is_contiguous(dst)); - - // ggml_compute_forward_dup_same_cont(params, opt0, dst); - - memset(dst->data, 0, ggml_nbytes(dst)); - - const int nc = src0->ne[0]; - const int nr = ggml_nelements(src1); - - GGML_ASSERT(dst->ne[0] == nc); - GGML_ASSERT(src0->nb[0] == sizeof(ggml_fp16_t)); - - for (int i = 0; i < nr; ++i) { - const int r = ((int32_t *) src1->data)[i]; - - for (int j = 0; j < nc; ++j) { - ggml_fp16_t v = ((ggml_fp16_t *) ((char *) src0->data + i * src0->nb[1]))[j]; - ((float *) ((char *) dst->data + r * dst->nb[1]))[j] += GGML_CPU_FP16_TO_FP32(v); - } - } -} - -static void ggml_compute_forward_get_rows_back_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - if (params->ith != 0) { - return; - } - - GGML_ASSERT(ggml_is_contiguous(dst)); - - // ggml_compute_forward_dup_same_cont(params, opt0, dst); - - memset(dst->data, 0, ggml_nbytes(dst)); - - const int nc = src0->ne[0]; - const int nr = ggml_nelements(src1); - - GGML_ASSERT(dst->ne[0] == nc); - GGML_ASSERT(src0->nb[0] == sizeof(float)); - - for (int i = 0; i < nr; ++i) { - const int r = ((int32_t *) src1->data)[i]; - - ggml_vec_add_f32(nc, (float *) ((char *) dst->data + r * dst->nb[1]), - (float *) ((char *) dst->data + r * dst->nb[1]), - (float *) ((char *) src0->data + i * src0->nb[1])); - } -} - -void ggml_compute_forward_get_rows_back(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F16: - { - ggml_compute_forward_get_rows_back_f32_f16(params, dst); - } - break; - case GGML_TYPE_F32: - { - ggml_compute_forward_get_rows_back_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } - - //static bool first = true; - //printf("ne0 = %d, ne1 = %d, ne2 = %d\n", dst->ne[0], dst->ne[1], dst->ne[2]); - //if (first) { - // first = false; - //} else { - // for (int k = 0; k < dst->ne[1]; ++k) { - // for (int j = 0; j < dst->ne[0]/16; ++j) { - // for (int i = 0; i < 16; ++i) { - // printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]); - // } - // printf("\n"); - // } - // printf("\n"); - // } - // printf("\n"); - // exit(0); - //} -} - -// ggml_compute_forward_diag - -static void ggml_compute_forward_diag_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - if (params->ith != 0) { - return; - } - - // TODO: handle transposed/permuted matrices - - GGML_TENSOR_UNARY_OP_LOCALS - - GGML_ASSERT(ne00 == ne0); - GGML_ASSERT(ne00 == ne1); - GGML_ASSERT(ne01 == 1); - GGML_ASSERT(ne02 == ne2); - GGML_ASSERT(ne03 == ne3); - - GGML_ASSERT(nb00 == sizeof(float)); - GGML_ASSERT(nb0 == sizeof(float)); - - for (int i3 = 0; i3 < ne3; i3++) { - for (int i2 = 0; i2 < ne2; i2++) { - for (int i1 = 0; i1 < ne1; i1++) { - float * d = (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1); - float * s = (float *) ((char *) src0->data + i3 * nb03 + i2 * nb02); - for (int i0 = 0; i0 < i1; i0++) { - d[i0] = 0; - } - d[i1] = s[i1]; - for (int i0 = i1 + 1; i0 < ne0; i0++) { - d[i0] = 0; - } - } - } - } -} - -void ggml_compute_forward_diag(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_diag_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_diag_mask_inf - 
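/* Editorial sketch (not part of the original source): the function below writes `value`
 * (-INFINITY for diag_mask_inf, 0 for diag_mask_zero) to every element whose column
 * index i satisfies i > n_past + j for row j, i.e. the strictly upper triangle shifted
 * right by n_past. A minimal single-matrix illustration with a hypothetical helper,
 * assuming a contiguous row-major float buffer of shape [nr, nc] and <stdint.h>: */
static void sketch_causal_mask(float * x, int64_t nr, int64_t nc, int64_t n_past, float value) {
    for (int64_t j = 0; j < nr; ++j) {
        for (int64_t i = n_past + j + 1; i < nc; ++i) { // same set of i as "i > n_past + j"
            x[j*nc + i] = value; // positions "in the future" of row j are masked
        }
    }
}
/* e.g. with nr = nc = 3 and n_past = 0, only the elements at (row 0, col 1), (0, 2)
 * and (1, 2) are overwritten; everything on or below the diagonal is left untouched. */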
-static void ggml_compute_forward_diag_mask_f32(const ggml_compute_params * params, - ggml_tensor * dst, - const float value) { - const ggml_tensor * src0 = dst->src[0]; - - const int ith = params->ith; - const int nth = params->nth; - - const int n_past = ((int32_t *) dst->op_params)[0]; - const bool inplace = src0->data == dst->data; - - GGML_ASSERT(n_past >= 0); - - if (!inplace) { - if (ith == 0) { - // memcpy needs to be synchronized across threads to avoid race conditions. - // => do it in INIT phase - GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); - GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0)); - memcpy(((char *) dst->data), ((char *) src0->data), ggml_nbytes(dst)); - } - ggml_barrier(params->threadpool); - } - - // TODO: handle transposed/permuted matrices - - const int n = ggml_nrows(src0); - const int nc = src0->ne[0]; - const int nr = src0->ne[1]; - const int nz = n / nr; - - GGML_ASSERT(dst->nb[0] == sizeof(float)); - GGML_ASSERT(src0->nb[0] == sizeof(float)); - - for (int k = 0; k < nz; k++) { - for (int j = ith; j < nr; j += nth) { - for (int i = n_past; i < nc; i++) { - if (i > n_past + j) { - *(float *) ((char *) dst->data + k * dst->nb[2] + j * dst->nb[1] + i * dst->nb[0]) = value; - } - } - } - } -} - -void ggml_compute_forward_diag_mask_inf(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_diag_mask_f32(params, dst, -INFINITY); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -void ggml_compute_forward_diag_mask_zero(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_diag_mask_f32(params, dst, 0); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_soft_max - -static void ggml_compute_forward_soft_max_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - const ggml_tensor * src2 = dst->src[2]; - - assert(ggml_is_contiguous(dst)); - assert(ggml_are_same_shape(src0, dst)); - - float scale = 1.0f; - float max_bias = 0.0f; - - memcpy(&scale, (float *) dst->op_params + 0, sizeof(float)); - memcpy(&max_bias, (float *) dst->op_params + 1, sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - GGML_TENSOR_UNARY_OP_LOCALS - - const int64_t nb11 = src1 ? src1->nb[1] : 1; - const int64_t nb12 = src1 ? src1->nb[2] : 1; - const int64_t nb13 = src1 ? src1->nb[3] : 1; - - const int64_t ne12 = src1 ? src1->ne[2] : 1; - const int64_t ne13 = src1 ? src1->ne[3] : 1; - - // TODO: is this supposed to be ceil instead of floor? - // https://huggingface.co/mosaicml/mpt-7b/blob/main/attention.py#L370 - const uint32_t n_head = ne02; - const uint32_t n_head_log2 = 1u << (uint32_t) floor(log2(n_head)); - - const float m0 = powf(2.0f, -(max_bias) / n_head_log2); - const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2); - - float * wp = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith; - - const bool use_f16 = (src1 && src1->type == GGML_TYPE_F16); - - // sinks - const float * sk = src2 ? 
(float *) ((char *) src2->data) : nullptr; - - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = ith; i01 < ne01; i01 += nth) { - const int64_t i11 = i01; - const int64_t i12 = i02 % ne12; - const int64_t i13 = i03 % ne13; - - // ALiBi - const uint32_t h = i02; // head - const float slope = - (max_bias > 0.0f) ? h < n_head_log2 ? powf(m0, h + 1) : powf(m1, 2 * (h - n_head_log2) + 1) : 1.0f; - - float * sp = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); - float * dp = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); - - // broadcast the mask across rows - ggml_fp16_t * mp_f16 = - src1 ? (ggml_fp16_t *) ((char *) src1->data + i11 * nb11 + i12 * nb12 + i13 * nb13) : NULL; - float * mp_f32 = src1 ? (float *) ((char *) src1->data + i11 * nb11 + i12 * nb12 + i13 * nb13) : NULL; - - ggml_vec_cpy_f32(ne00, wp, sp); - ggml_vec_scale_f32(ne00, wp, scale); - if (mp_f32) { - if (use_f16) { - for (int i = 0; i < ne00; ++i) { - wp[i] += slope * GGML_CPU_FP16_TO_FP32(mp_f16[i]); - } - } else { - for (int i = 0; i < ne00; ++i) { - wp[i] += slope * mp_f32[i]; - } - } - } - -#ifndef NDEBUG - for (int i = 0; i < ne00; ++i) { - //printf("p[%d] = %f\n", i, p[i]); - assert(!isnan(wp[i])); - } -#endif - - float max = -INFINITY; - ggml_vec_max_f32(ne00, &max, wp); - - // if we have sinks, make a correction as if they were included in the softmax - if (sk) { - max = MAX(max, sk[i02]); - } - - ggml_float sum = ggml_vec_soft_max_f32(ne00, dp, wp, max); - assert(sum > 0.0); - - if (sk) { - sum += (ggml_float) expf(sk[i02] - max); - } - - sum = 1.0 / sum; - ggml_vec_scale_f32(ne00, dp, sum); - -#ifndef NDEBUG - for (int i = 0; i < ne00; ++i) { - assert(!isnan(dp[i])); - assert(!isinf(dp[i])); - } -#endif - } - } - } -} - -void ggml_compute_forward_soft_max(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_soft_max_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_soft_max_ext_back - -static void ggml_compute_forward_soft_max_ext_back_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(ggml_is_contiguous(src0)); - GGML_ASSERT(ggml_is_contiguous(src1)); - GGML_ASSERT(ggml_is_contiguous(dst)); - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - GGML_ASSERT(ggml_are_same_shape(src1, dst)); - - float scale = 1.0f; - float max_bias = 0.0f; - - memcpy(&scale, (const float *) dst->op_params + 0, sizeof(float)); - memcpy(&max_bias, (const float *) dst->op_params + 1, sizeof(float)); - - GGML_ASSERT(max_bias == 0.0f); - - // TODO: handle transposed/permuted matrices - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - float * dy = (float *) ((char *) src0->data + i1 * src0->nb[1]); - float * y = (float *) ((char *) src1->data + i1 * src1->nb[1]); - float * dx = (float *) ((char *) dst->data + i1 * dst->nb[1]); - -#ifndef NDEBUG - for (int i = 0; i < nc; ++i) { - //printf("p[%d] = %f\n", i, p[i]); - assert(!isnan(dy[i])); - assert(!isnan(y[i])); - } -#endif - // 
Jii = yi - yi*yi - // Jij = -yi*yj - // J = diag(y)-y.T*y - // dx = J * dy - // dxk = sum_i(Jki * dyi) - // dxk = sum_i(-yk*yi * dyi) - (-yk*yk)*dyk + (yk - yk*yk)*dyk - // dxk = sum_i(-yk*yi * dyi) + yk*yk*dyk + yk*dyk - yk*yk*dyk - // dxk = sum_i(-yk*yi * dyi) + yk*dyk - // dxk = -yk * sum_i(yi * dyi) + yk*dyk - // dxk = -yk * dot(y, dy) + yk*dyk - // dxk = yk * (- dot(y, dy) + dyk) - // dxk = yk * (dyk - dot(y, dy)) - // - // post-order: - // dot_y_dy := dot(y, dy) - // dx := dy - // dx := dx - dot_y_dy - // dx := dx * y - - // linear runtime, no additional memory - float dot_y_dy = 0; - ggml_vec_dot_f32(nc, &dot_y_dy, 0, y, 0, dy, 0, 1); - ggml_vec_cpy_f32(nc, dx, dy); - ggml_vec_acc1_f32(nc, dx, -dot_y_dy); - ggml_vec_mul_f32(nc, dx, dx, y); - ggml_vec_scale_f32(nc, dx, scale); - -#ifndef NDEBUG - for (int i = 0; i < nc; ++i) { - assert(!isnan(dx[i])); - assert(!isinf(dx[i])); - } -#endif - } -} - -void ggml_compute_forward_soft_max_ext_back(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_soft_max_ext_back_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_clamp - -static void ggml_compute_forward_clamp_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - float min; - float max; - memcpy(&min, (float *) dst->op_params + 0, sizeof(float)); - memcpy(&max, (float *) dst->op_params + 1, sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - const int n = ggml_nrows(src0); - const int nc = src0->ne[0]; - - const size_t nb00 = src0->nb[0]; - const size_t nb01 = src0->nb[1]; - - const size_t nb0 = dst->nb[0]; - const size_t nb1 = dst->nb[1]; - - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb00 == sizeof(float)); - - for (int j = ith; j < n; j += nth) { - float * dst_ptr = (float *) ((char *) dst->data + j * nb1); - float * src0_ptr = (float *) ((char *) src0->data + j * nb01); - - for (int i = 0; i < nc; i++) { - dst_ptr[i] = MAX(MIN(src0_ptr[i], max), min); - } - } -} - -static void ggml_compute_forward_clamp_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - float min; - float max; - memcpy(&min, (float *) dst->op_params + 0, sizeof(float)); - memcpy(&max, (float *) dst->op_params + 1, sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - const int n = ggml_nrows(src0); - const int nc = src0->ne[0]; - - const size_t nb00 = src0->nb[0]; - const size_t nb01 = src0->nb[1]; - - const size_t nb0 = dst->nb[0]; - const size_t nb1 = dst->nb[1]; - - GGML_ASSERT(nb0 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); - - for (int j = ith; j < n; j += nth) { - ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + j * nb1); - ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + j * nb01); - - for (int i = 0; i < nc; i++) { - float v = GGML_CPU_FP16_TO_FP32(src0_ptr[i]); - dst_ptr[i] = GGML_CPU_FP32_TO_FP16(MAX(MIN(v, max), min)); - } - } -} - -void ggml_compute_forward_clamp(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_clamp_f32(params, dst); - } - break; - case GGML_TYPE_F16: - { - ggml_compute_forward_clamp_f16(params, dst); - } - break; - case GGML_TYPE_BF16: - case GGML_TYPE_Q4_0: 
- case GGML_TYPE_Q4_1: - case GGML_TYPE_Q5_0: - case GGML_TYPE_Q5_1: - case GGML_TYPE_Q8_0: - case GGML_TYPE_Q8_1: - case GGML_TYPE_MXFP4: - case GGML_TYPE_Q2_K: - case GGML_TYPE_Q3_K: - case GGML_TYPE_Q4_K: - case GGML_TYPE_Q5_K: - case GGML_TYPE_Q6_K: - case GGML_TYPE_TQ1_0: - case GGML_TYPE_TQ2_0: - case GGML_TYPE_IQ2_XXS: - case GGML_TYPE_IQ2_XS: - case GGML_TYPE_IQ3_XXS: - case GGML_TYPE_IQ1_S: - case GGML_TYPE_IQ1_M: - case GGML_TYPE_IQ4_NL: - case GGML_TYPE_IQ4_XS: - case GGML_TYPE_IQ3_S: - case GGML_TYPE_IQ2_S: - case GGML_TYPE_Q8_K: - case GGML_TYPE_I8: - case GGML_TYPE_I16: - case GGML_TYPE_I32: - case GGML_TYPE_I64: - case GGML_TYPE_F64: - case GGML_TYPE_COUNT: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_rope - -static float rope_yarn_ramp(const float low, const float high, const int i0) { - const float y = (i0 / 2 - low) / MAX(0.001f, high - low); - return 1 - MIN(1, MAX(0, y)); -} - -// YaRN algorithm based on LlamaYaRNScaledRotaryEmbedding.py from https://github.com/jquesnelle/yarn -// MIT licensed. Copyright (c) 2023 Jeffrey Quesnelle and Bowen Peng. -static void rope_yarn(float theta_extrap, - float freq_scale, - float corr_dims[2], - int64_t i0, - float ext_factor, - float mscale, - float * cos_theta, - float * sin_theta) { - // Get n-d rotational scaling corrected for extrapolation - float theta_interp = freq_scale * theta_extrap; - float theta = theta_interp; - if (ext_factor != 0.0f) { - float ramp_mix = rope_yarn_ramp(corr_dims[0], corr_dims[1], i0) * ext_factor; - theta = theta_interp * (1 - ramp_mix) + theta_extrap * ramp_mix; - - // Get n-d magnitude scaling corrected for interpolation - mscale *= 1.0f + 0.1f * logf(1.0f / freq_scale); - } - *cos_theta = cosf(theta) * mscale; - *sin_theta = sinf(theta) * mscale; -} - -static void ggml_rope_cache_init(float theta_base, - float freq_scale, - const float * freq_factors, - float corr_dims[2], - int64_t ne0, - float ext_factor, - float mscale, - float * cache, - float sin_sign, - float theta_scale) { - // ref: https://github.com/jquesnelle/yarn/blob/master/scaled_rope/LlamaYaRNScaledRotaryEmbedding.py - float theta = theta_base; - for (int64_t i0 = 0; i0 < ne0; i0 += 2) { - const float ff = freq_factors ? freq_factors[i0 / 2] : 1.0f; - rope_yarn(theta / ff, freq_scale, corr_dims, i0, ext_factor, mscale, &cache[i0 + 0], &cache[i0 + 1]); - cache[i0 + 1] *= sin_sign; - - theta *= theta_scale; - } -} - -static void ggml_mrope_cache_init(float theta_base_t, - float theta_base_h, - float theta_base_w, - float theta_base_e, - int sections[4], - bool is_imrope, - bool indep_sects, - float freq_scale, - const float * freq_factors, - float corr_dims[2], - int64_t ne0, - float ext_factor, - float mscale, - float * cache, - float sin_sign, - float theta_scale) { - // ref: https://github.com/jquesnelle/yarn/blob/master/scaled_rope/LlamaYaRNScaledRotaryEmbedding.py - float theta_t = theta_base_t; - float theta_h = theta_base_h; - float theta_w = theta_base_w; - float theta_e = theta_base_e; // extra position id for vision encoder - int sect_dims = sections[0] + sections[1] + sections[2] + sections[3]; - int sec_w = sections[1] + sections[0]; - int sec_e = sections[2] + sec_w; - GGML_ASSERT(sect_dims <= ne0); - - for (int64_t i0 = 0; i0 < ne0; i0 += 2) { - const float ff = freq_factors ? freq_factors[i0 / 2] : 1.0f; - - int sector = (i0 / 2) % sect_dims; - if (indep_sects) { - // compute theta independently for each dim sections - // (i.e. 
reset corresponding theta when `i0` go from one section to another) - if (sector == 0) { - theta_t = theta_base_t; - } else if (sector == sections[0]) { - theta_h = theta_base_h; - ; - } else if (sector == sec_w) { - theta_w = theta_base_w; - } else if (sector == sec_e) { - theta_e = theta_base_e; - } - } - - float theta = theta_t; - if (is_imrope) { // qwen3vl apply interleaved mrope - if (sector % 3 == 1 && sector < 3 * sections[1]) { - theta = theta_h; - } else if (sector % 3 == 2 && sector < 3 * sections[2]) { - theta = theta_w; - } else if (sector % 3 == 0 && sector < 3 * sections[0]) { - theta = theta_t; - } else { - theta = theta_e; - } - } else { - if (sector >= sections[0] && sector < sec_w) { - theta = theta_h; - } else if (sector >= sec_w && sector < sec_w + sections[2]) { - theta = theta_w; - } else if (sector >= sec_w + sections[2]) { - theta = theta_e; - } - } - - rope_yarn(theta / ff, freq_scale, corr_dims, i0, ext_factor, mscale, &cache[i0 + 0], &cache[i0 + 1]); - cache[i0 + 1] *= sin_sign; - - theta_t *= theta_scale; - theta_w *= theta_scale; - theta_h *= theta_scale; - theta_e *= theta_scale; - } -} - -template -static void rotate_pairs(const int64_t n, - const int64_t n_offset, - const float * cache, - const T * src_data, - T * dst_data, - const int scale = 2) { - for (int64_t i0 = 0; i0 < n; i0 += 2) { - const int64_t ic = - i0 / scale; // hack for GGML_ROPE_TYPE_NORMAL, where we need ic = i0; for all other cases, ic = i0/2 - - const float cos_theta = cache[i0 + 0]; - const float sin_theta = cache[i0 + 1]; - - const T * const src = src_data + ic; - T * dst = dst_data + ic; - - const float x0 = type_conversion_table::to_f32(src[0]); - const float x1 = type_conversion_table::to_f32(src[n_offset]); - - dst[0] = type_conversion_table::from_f32(x0 * cos_theta - x1 * sin_theta); - dst[n_offset] = type_conversion_table::from_f32(x0 * sin_theta + x1 * cos_theta); - } -} - -template //float or ggml_fp16_t -static void ggml_compute_forward_rope_flt(const ggml_compute_params * params, ggml_tensor * dst, const bool forward) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - const ggml_tensor * src2 = dst->src[2]; - - GGML_ASSERT(src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_I32); - - float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow; - int sections[4]; - - //const int n_past = ((int32_t *) dst->op_params)[0]; - const int n_dims = ((int32_t *) dst->op_params)[1]; - const int mode = ((int32_t *) dst->op_params)[2]; - //const int n_ctx = ((int32_t *) dst->op_params)[3]; - const int n_ctx_orig = ((int32_t *) dst->op_params)[4]; - - memcpy(&freq_base, (int32_t *) dst->op_params + 5, sizeof(float)); - memcpy(&freq_scale, (int32_t *) dst->op_params + 6, sizeof(float)); - memcpy(&ext_factor, (int32_t *) dst->op_params + 7, sizeof(float)); - memcpy(&attn_factor, (int32_t *) dst->op_params + 8, sizeof(float)); - memcpy(&beta_fast, (int32_t *) dst->op_params + 9, sizeof(float)); - memcpy(&beta_slow, (int32_t *) dst->op_params + 10, sizeof(float)); - memcpy(§ions, (int32_t *) dst->op_params + 11, sizeof(int) * 4); - - GGML_TENSOR_UNARY_OP_LOCALS - - //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3); - //printf("n_past = %d, ne2 = %d\n", n_past, ne2); - - GGML_ASSERT(nb0 == nb00); - GGML_ASSERT(nb0 == sizeof(T)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nr = ggml_nrows(dst); - - GGML_ASSERT(n_dims <= ne0); - GGML_ASSERT(n_dims 
% 2 == 0); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - // row index used to determine which thread to use - int ir = 0; - - const float theta_scale = powf(freq_base, -2.0f / n_dims); - - float corr_dims[2]; - ggml_rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow, corr_dims); - - const bool is_imrope = mode == GGML_ROPE_TYPE_IMROPE; // qwen3vl apply interleaved mrope - const bool mrope_used = - mode & GGML_ROPE_TYPE_MROPE; // ggml_rope_multi, note: also true for vision (24 & 8 == true) and for imrope - const bool is_vision = mode == GGML_ROPE_TYPE_VISION; - - if (mrope_used) { - GGML_ASSERT(sections[0] > 0 || sections[1] > 0 || sections[2] > 0); - } - - if (is_vision) { - GGML_ASSERT(n_dims == ne0 / 2); - } - - const float * freq_factors = NULL; - if (src2 != NULL) { - GGML_ASSERT(src2->type == GGML_TYPE_F32); - GGML_ASSERT(src2->ne[0] >= n_dims / 2); - freq_factors = (const float *) src2->data; - } - - // backward process uses inverse rotation by cos and sin. - // cos and sin build a rotation matrix, where the inverse is the transpose. - // this essentially just switches the sign of sin. - const float sin_sign = forward ? 1.0f : -1.0f; - - const int32_t * pos = (const int32_t *) src1->data; - - for (int64_t i3 = 0; i3 < ne3; i3++) { // batch - for (int64_t i2 = 0; i2 < ne2; i2++) { // seq-len - - float * cache = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32) * ith; - if (!mrope_used) { - const int64_t p = pos[i2]; - ggml_rope_cache_init(p, freq_scale, freq_factors, corr_dims, ne0, ext_factor, attn_factor, cache, - sin_sign, theta_scale); - } else { - const int64_t p_t = pos[i2]; - const int64_t p_h = pos[i2 + ne2]; - const int64_t p_w = pos[i2 + ne2 * 2]; - const int64_t p_e = pos[i2 + ne2 * 3]; - ggml_mrope_cache_init(p_t, p_h, p_w, p_e, sections, is_imrope, is_vision, freq_scale, freq_factors, - corr_dims, ne0, ext_factor, attn_factor, cache, sin_sign, theta_scale); - } - - for (int64_t i1 = 0; i1 < ne1; i1++) { // attn-heads - if (ir++ < ir0) { - continue; - } - if (ir > ir1) { - break; - } - - T * src = (T *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01); - T * dst_data = (T *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1); - - switch (mode) { - case GGML_ROPE_TYPE_NORMAL: - rotate_pairs(n_dims, 1, cache, src, dst_data, 1); - break; - case GGML_ROPE_TYPE_NEOX: - case GGML_ROPE_TYPE_MROPE: - case GGML_ROPE_TYPE_IMROPE: - rotate_pairs(n_dims, n_dims / 2, cache, src, dst_data); - break; - case GGML_ROPE_TYPE_VISION: - rotate_pairs(ne0, n_dims, cache, src, dst_data); - break; - default: - GGML_ABORT("rope type not supported"); - } - - if (!is_vision) { - // fill the remain channels with data from src tensor - for (int64_t i0 = n_dims; i0 < ne0; i0 += 2) { - const T * const src = - (T *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01 + i0 * nb00); - T * dst_data = (T *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1 + i0 * nb0); - - dst_data[0] = src[0]; - dst_data[1] = src[1]; - } - } - } //attn-heads - } - } -} - -void ggml_compute_forward_rope(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F16: - { - ggml_compute_forward_rope_flt(params, dst, true); - } - break; - case GGML_TYPE_F32: - { - ggml_compute_forward_rope_flt(params, dst, true); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// 
ggml_compute_forward_rope_back - -void ggml_compute_forward_rope_back(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F16: - { - ggml_compute_forward_rope_flt(params, dst, false); - } - break; - case GGML_TYPE_F32: - { - ggml_compute_forward_rope_flt(params, dst, false); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_conv_transpose_1d - -static void ggml_compute_forward_conv_transpose_1d_f16_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); - - GGML_TENSOR_BINARY_OP_LOCALS - - const int ith = params->ith; - const int nth = params->nth; - - const int nk = ne00 * ne01 * ne02; - - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb10 == sizeof(float)); - - if (ith == 0) { - memset(params->wdata, 0, params->wsize); - - // permute kernel data (src0) from (K x Cout x Cin) to (Cin x K x Cout) - { - ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; - - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = 0; i01 < ne01; i01++) { - const ggml_fp16_t * const src = (ggml_fp16_t *) ((char *) src0->data + i02 * nb02 + i01 * nb01); - ggml_fp16_t * dst_data = wdata + i01 * ne00 * ne02; - for (int64_t i00 = 0; i00 < ne00; i00++) { - dst_data[i00 * ne02 + i02] = src[i00]; - } - } - } - } - - // permute source data (src1) from (L x Cin) to (Cin x L) - { - ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + nk; - ggml_fp16_t * dst_data = wdata; - - for (int64_t i11 = 0; i11 < ne11; i11++) { - const float * const src = (float *) ((char *) src1->data + i11 * nb11); - for (int64_t i10 = 0; i10 < ne10; i10++) { - dst_data[i10 * ne11 + i11] = GGML_CPU_FP32_TO_FP16(src[i10]); - } - } - } - - // need to zero dst since we are accumulating into it - memset(dst->data, 0, ggml_nbytes(dst)); - } - ggml_barrier(params->threadpool); - - const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; - - // total rows in dst - const int nr = ne1; - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; - ggml_fp16_t * const wdata_src = wdata + nk; - - for (int i1 = ir0; i1 < ir1; i1++) { - float * dst_data = (float *) ((char *) dst->data + i1 * nb1); - ggml_fp16_t * wdata_kernel = wdata + i1 * ne02 * ne00; - for (int i10 = 0; i10 < ne10; i10++) { - const int i1n = i10 * ne11; - for (int i00 = 0; i00 < ne00; i00++) { - float v = 0; - ggml_vec_dot_f16(ne02, &v, 0, (ggml_fp16_t *) wdata_src + i1n, 0, - (ggml_fp16_t *) wdata_kernel + i00 * ne02, 0, 1); - dst_data[i10 * s0 + i00] += v; - } - } - } -} - -static void ggml_compute_forward_conv_transpose_1d_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(src0->type == GGML_TYPE_F32); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); - - GGML_TENSOR_BINARY_OP_LOCALS - - const int ith = params->ith; - const int nth = params->nth; - - const int nk = ne00 * ne01 * ne02; - - GGML_ASSERT(nb00 == sizeof(float)); - GGML_ASSERT(nb10 == sizeof(float)); - - if (ith == 0) { - 
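/* Editorial sketch (not part of the original source): after the permutations performed in
 * this init block, every (input position i10, kernel tap i00) pair reduces to a single dot
 * product over Cin that is scatter-accumulated into the output at index i10*s0 + i00,
 * which is what the main loop further down does once per output channel. A hypothetical,
 * self-contained f32 illustration for one output channel (assumes <stdint.h>; both
 * buffers use the permuted, Cin-fastest layout prepared below): */
static void sketch_conv_transpose_1d_row(const float * src, // [L][Cin], Cin fastest
                                         const float * knl, // [K][Cin], Cin fastest
                                         float * out,       // [(L-1)*s0 + K], pre-zeroed
                                         int64_t Cin, int64_t L, int64_t K, int64_t s0) {
    for (int64_t l = 0; l < L; ++l) {
        for (int64_t k = 0; k < K; ++k) {
            float v = 0.0f;
            for (int64_t c = 0; c < Cin; ++c) {
                v += src[l*Cin + c] * knl[k*Cin + c]; // dot product over input channels
            }
            out[l*s0 + k] += v; // scatter-accumulate into the stride-upsampled output
        }
    }
}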
memset(params->wdata, 0, params->wsize); - - // prepare kernel data (src0) from (K x Cout x Cin) to (Cin x K x Cout) - { - float * const wdata = (float *) params->wdata + 0; - - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = 0; i01 < ne01; i01++) { - const float * const src = (float *) ((char *) src0->data + i02 * nb02 + i01 * nb01); - float * dst_data = wdata + i01 * ne00 * ne02; - for (int64_t i00 = 0; i00 < ne00; i00++) { - dst_data[i00 * ne02 + i02] = src[i00]; - } - } - } - } - - // prepare source data (src1) - { - float * const wdata = (float *) params->wdata + nk; - float * dst_data = wdata; - - for (int64_t i11 = 0; i11 < ne11; i11++) { - const float * const src = (float *) ((char *) src1->data + i11 * nb11); - for (int64_t i10 = 0; i10 < ne10; i10++) { - dst_data[i10 * ne11 + i11] = src[i10]; - } - } - } - - // need to zero dst since we are accumulating into it - memset(dst->data, 0, ggml_nbytes(dst)); - } - ggml_barrier(params->threadpool); - - const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; - - // total rows in dst - const int nr = ne1; - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - float * const wdata = (float *) params->wdata + 0; - float * const wdata_src = wdata + nk; - - for (int i1 = ir0; i1 < ir1; i1++) { - float * dst_data = (float *) ((char *) dst->data + i1 * nb1); - float * wdata_kernel = wdata + i1 * ne02 * ne00; - for (int i10 = 0; i10 < ne10; i10++) { - const int i1n = i10 * ne11; - for (int i00 = 0; i00 < ne00; i00++) { - float v = 0; - ggml_vec_dot_f32(ne02, &v, 0, wdata_src + i1n, 0, wdata_kernel + i00 * ne02, 0, 1); - dst_data[i10 * s0 + i00] += v; - } - } - } -} - -void ggml_compute_forward_conv_transpose_1d(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F16: - { - ggml_compute_forward_conv_transpose_1d_f16_f32(params, dst); - } - break; - case GGML_TYPE_F32: - { - ggml_compute_forward_conv_transpose_1d_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_im2col_f32 -// src0: kernel [OC, IC, KH, KW] -// src1: image [N, IC, IH, IW] -// dst: result [N, OH, OW, IC*KH*KW] -static void ggml_compute_forward_im2col_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); - - GGML_TENSOR_BINARY_OP_LOCALS; - - const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; - const int32_t s1 = ((const int32_t *) (dst->op_params))[1]; - const int32_t p0 = ((const int32_t *) (dst->op_params))[2]; - const int32_t p1 = ((const int32_t *) (dst->op_params))[3]; - const int32_t d0 = ((const int32_t *) (dst->op_params))[4]; - const int32_t d1 = ((const int32_t *) (dst->op_params))[5]; - const bool is_2D = ((const int32_t *) (dst->op_params))[6] == 1; - - const int ith = params->ith; - const int nth = params->nth; - - const int64_t N = is_2D ? ne13 : ne12; - const int64_t IC = is_2D ? ne12 : ne11; - const int64_t IH = is_2D ? ne11 : 1; - const int64_t IW = ne10; - - const int64_t KH = is_2D ? ne01 : 1; - const int64_t KW = ne00; - - const int64_t OH = is_2D ? ne2 : 1; - const int64_t OW = ne1; - - int ofs0 = is_2D ? nb13 : nb12; - int ofs1 = is_2D ? 
nb12 : nb11; - - GGML_ASSERT(nb10 == sizeof(float)); - - // im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW] - { - float * const wdata = (float *) dst->data; - - for (int64_t in = 0; in < N; in++) { - for (int64_t ioh = 0; ioh < OH; ioh++) { // 1 - for (int64_t iow = 0; iow < OW; iow++) { - for (int64_t iic = ith; iic < IC; iic += nth) { - // micro kernel - float * dst_data = wdata + (in * OH * OW + ioh * OW + iow) * (IC * KH * KW); // [IC, KH, KW] - const float * const src_data = - (float *) ((char *) src1->data + in * ofs0 + iic * ofs1); // [IH, IW] - - for (int64_t ikh = 0; ikh < KH; ikh++) { // 1 - for (int64_t ikw = 0; ikw < KW; ikw++) { - const int64_t iiw = iow * s0 + ikw * d0 - p0; - const int64_t iih = ioh * s1 + ikh * d1 - p1; - - if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) { - dst_data[iic * (KH * KW) + ikh * KW + ikw] = 0; - } else { - dst_data[iic * (KH * KW) + ikh * KW + ikw] = (src_data[iih * IW + iiw]); - } - } - } - } - } - } - } - } -} - -// ggml_compute_forward_im2col_f16 -// src0: kernel [OC, IC, KH, KW] -// src1: image [N, IC, IH, IW] -// dst: result [N, OH, OW, IC*KH*KW] -static void ggml_compute_forward_im2col_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F16); - - GGML_TENSOR_BINARY_OP_LOCALS; - - const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; - const int32_t s1 = ((const int32_t *) (dst->op_params))[1]; - const int32_t p0 = ((const int32_t *) (dst->op_params))[2]; - const int32_t p1 = ((const int32_t *) (dst->op_params))[3]; - const int32_t d0 = ((const int32_t *) (dst->op_params))[4]; - const int32_t d1 = ((const int32_t *) (dst->op_params))[5]; - const bool is_2D = ((const int32_t *) (dst->op_params))[6] == 1; - - const int ith = params->ith; - const int nth = params->nth; - - const int64_t N = is_2D ? ne13 : ne12; - const int64_t IC = is_2D ? ne12 : ne11; - const int64_t IH = is_2D ? ne11 : 1; - const int64_t IW = ne10; - - const int64_t KH = is_2D ? ne01 : 1; - const int64_t KW = ne00; - - const int64_t OH = is_2D ? ne2 : 1; - const int64_t OW = ne1; - - int ofs0 = is_2D ? nb13 : nb12; - int ofs1 = is_2D ? 
nb12 : nb11; - - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb10 == sizeof(float)); - - // im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW] - { - ggml_fp16_t * const wdata = (ggml_fp16_t *) dst->data; - - for (int64_t in = 0; in < N; in++) { - for (int64_t ioh = 0; ioh < OH; ioh++) { // 1 - for (int64_t iow = 0; iow < OW; iow++) { - for (int64_t iic = ith; iic < IC; iic += nth) { - // micro kernel - ggml_fp16_t * dst_data = - wdata + (in * OH * OW + ioh * OW + iow) * (IC * KH * KW); // [IC, KH, KW] - const float * const src_data = - (float *) ((char *) src1->data + in * ofs0 + iic * ofs1); // [IH, IW] - - for (int64_t ikh = 0; ikh < KH; ikh++) { // 1 - for (int64_t ikw = 0; ikw < KW; ikw++) { - const int64_t iiw = iow * s0 + ikw * d0 - p0; - const int64_t iih = ioh * s1 + ikh * d1 - p1; - - if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) { - dst_data[iic * (KH * KW) + ikh * KW + ikw] = 0; - } else { - dst_data[iic * (KH * KW) + ikh * KW + ikw] = - GGML_CPU_FP32_TO_FP16(src_data[iih * IW + iiw]); - } - } - } - } - } - } - } - } -} - -void ggml_compute_forward_im2col(const ggml_compute_params * params, ggml_tensor * dst) { - switch (dst->type) { - case GGML_TYPE_F16: - { - ggml_compute_forward_im2col_f16(params, dst); - } - break; - case GGML_TYPE_F32: - { - ggml_compute_forward_im2col_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_im2col_back_f32 - -void ggml_compute_forward_im2col_back_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; // gradients of forward pass output - const ggml_tensor * src1 = dst->src[1]; // convolution kernel - - GGML_ASSERT(src0->type == GGML_TYPE_F32); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); - - GGML_TENSOR_BINARY_OP_LOCALS; - - const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; - const int32_t s1 = ((const int32_t *) (dst->op_params))[1]; - const int32_t p0 = ((const int32_t *) (dst->op_params))[2]; - const int32_t p1 = ((const int32_t *) (dst->op_params))[3]; - const int32_t d0 = ((const int32_t *) (dst->op_params))[4]; - const int32_t d1 = ((const int32_t *) (dst->op_params))[5]; - const bool is_2D = ((const int32_t *) (dst->op_params))[6] == 1; - - const int ith = params->ith; - const int nth = params->nth; - - const int64_t N = is_2D ? ne3 : ne2; - const int64_t IC = is_2D ? ne2 : ne1; - const int64_t IH = is_2D ? ne1 : 1; - const int64_t IW = ne0; - - const int64_t KH = is_2D ? ne11 : 1; - const int64_t KW = ne10; - - const int64_t OH = is_2D ? ne02 : 1; - const int64_t OW = ne01; - - int ofs0 = is_2D ? nb3 : nb2; - int ofs1 = is_2D ? nb2 : nb1; - - GGML_ASSERT(nb0 == sizeof(float)); - - // im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW] - { - float * const wdata = (float *) dst->data; - - for (int64_t in = 0; in < N; in++) { - for (int64_t iic = ith; iic < IC; iic += nth) { - for (int64_t iih = 0; iih < IH; iih++) { - for (int64_t iiw = 0; iiw < IW; iiw++) { - // micro kernel - float grad = 0.0f; - for (int64_t ikh = 0; ikh < KH; ikh++) { - for (int64_t ikw = 0; ikw < KW; ikw++) { - // For s0 > 1 some values were skipped over in the forward pass. - // These values have tmpw % s0 != 0 and need to be skipped in the backwards pass as well. - const int64_t tmpw = (iiw + p0 - ikw * d0); - if (tmpw % s0 != 0) { - continue; - } - const int64_t iow = tmpw / s0; - - // Equivalent logic as above except for s1. 
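                        /* Editorial note (not part of the original source), with hypothetical
                         * values to illustrate the stride check above: with s0 = 2, d0 = 1,
                         * p0 = 0 and ikw = 1, an input column iiw = 4 gives tmpw = 3, and
                         * 3 % 2 != 0, so no forward-pass output ever read that (iiw, ikw)
                         * pair and its gradient contribution is skipped; iiw = 5 gives
                         * tmpw = 4 and iow = 2, which is accumulated if 0 <= iow < OW.
                         * The block below applies the same divisibility test to the
                         * height dimension when the op is 2D. */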
- int64_t ioh; - if (is_2D) { - const int64_t tmph = iih + p1 - ikh * d1; - - if (tmph % s1 != 0) { - continue; - } - - ioh = tmph / s1; - } else { - ioh = 0; - } - - if (iow < 0 || iow >= OW || ioh < 0 || ioh >= OH) { - continue; - } - - const float * const grad_in = - (const float *) src0->data + - (in * OH * OW + ioh * OW + iow) * (IC * KH * KW); // [IC, KH, KW] - grad += grad_in[iic * (KH * KW) + ikh * KW + ikw]; - } - } - float * dst_data = (float *) ((char *) wdata + (in * ofs0 + iic * ofs1)); // [IH, IW] - dst_data[iih * IW + iiw] = grad; - } - } - } - } - } -} - -// ggml_compute_forward_im2col_3d_f16 -// src0: kernel [OC*IC, KD, KH, KW] -// src1: image [N*IC, ID, IH, IW] -// dst: result [N*OD, OH, OW, IC * KD * KH * KW] -static void ggml_compute_forward_im2col_3d_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F16); - - GGML_TENSOR_BINARY_OP_LOCALS; - - const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; - const int32_t s1 = ((const int32_t *) (dst->op_params))[1]; - const int32_t s2 = ((const int32_t *) (dst->op_params))[2]; - const int32_t p0 = ((const int32_t *) (dst->op_params))[3]; - const int32_t p1 = ((const int32_t *) (dst->op_params))[4]; - const int32_t p2 = ((const int32_t *) (dst->op_params))[5]; - const int32_t d0 = ((const int32_t *) (dst->op_params))[6]; - const int32_t d1 = ((const int32_t *) (dst->op_params))[7]; - const int32_t d2 = ((const int32_t *) (dst->op_params))[8]; - const int32_t IC = ((const int32_t *) (dst->op_params))[9]; - - const int ith = params->ith; - const int nth = params->nth; - - const int64_t N = ne13 / IC; - const int64_t ID = ne12; - const int64_t IH = ne11; - const int64_t IW = ne10; - - const int64_t OC = ne03 / IC; - GGML_UNUSED(OC); - const int64_t KD = ne02; - const int64_t KH = ne01; - const int64_t KW = ne00; - - const int64_t OD = ne3 / N; - const int64_t OH = ne2; - const int64_t OW = ne1; - const int64_t OH_OW = OH * OW; - const int64_t KD_KH_KW = KD * KH * KW; - const int64_t KH_KW = KH * KW; - const int64_t IC_KD_KH_KW = IC * KD * KH * KW; - - GGML_ASSERT(nb10 == sizeof(float)); - - // im2col: [N*IC, ID, IH, IW] => [N*OD, OH, OW, IC * KD * KH * KW] - { - ggml_fp16_t * const wdata = (ggml_fp16_t *) dst->data; - - for (int64_t in = 0; in < N; in++) { - for (int64_t iod = 0; iod < OD; iod++) { - for (int64_t ioh = 0; ioh < OH; ioh++) { - for (int64_t iow = 0; iow < OW; iow++) { - for (int64_t iic = ith; iic < IC; iic += nth) { - // micro kernel - ggml_fp16_t * dst_data = wdata + (in * OD * OH_OW + iod * OH_OW + ioh * OW + iow) * - IC_KD_KH_KW; // [IC, KD, KH, KW] - const float * const src_data = - (const float *) ((const char *) src1->data + (in * IC + iic) * nb13); // [ID, IH, IW] - - for (int64_t ikd = 0; ikd < KD; ikd++) { - for (int64_t ikh = 0; ikh < KH; ikh++) { - for (int64_t ikw = 0; ikw < KW; ikw++) { - const int64_t iiw = iow * s0 + ikw * d0 - p0; - const int64_t iih = ioh * s1 + ikh * d1 - p1; - const int64_t iid = iod * s2 + ikd * d2 - p2; - - if (iid < 0 || iid >= ID || iih < 0 || iih >= IH || iiw < 0 || iiw >= IW || - iid < 0 || iid >= ID) { - dst_data[iic * KD_KH_KW + ikd * KH_KW + ikh * KW + ikw] = 0; - } else { - const float * const s = - (const float *) ((const char *) src_data + iid * nb12 + iih * nb11 + - iiw * nb10); // [ID, IH, IW] - dst_data[iic * KD_KH_KW + ikd * KH_KW + ikh * KW 
+ ikw] = - GGML_CPU_FP32_TO_FP16(*s); - } - } - } - } - } - } - } - } - } - } -} - -// ggml_compute_forward_im2col_3d_f32 -// src0: kernel [OC*IC, KD, KH, KW] -// src1: image [N*IC, ID, IH, IW] -// dst: result [N*OD, OH, OW, IC * KD * KH * KW] -static void ggml_compute_forward_im2col_3d_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); - - GGML_TENSOR_BINARY_OP_LOCALS; - - const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; - const int32_t s1 = ((const int32_t *) (dst->op_params))[1]; - const int32_t s2 = ((const int32_t *) (dst->op_params))[2]; - const int32_t p0 = ((const int32_t *) (dst->op_params))[3]; - const int32_t p1 = ((const int32_t *) (dst->op_params))[4]; - const int32_t p2 = ((const int32_t *) (dst->op_params))[5]; - const int32_t d0 = ((const int32_t *) (dst->op_params))[6]; - const int32_t d1 = ((const int32_t *) (dst->op_params))[7]; - const int32_t d2 = ((const int32_t *) (dst->op_params))[8]; - const int32_t IC = ((const int32_t *) (dst->op_params))[9]; - - const int ith = params->ith; - const int nth = params->nth; - - const int64_t N = ne13 / IC; - const int64_t ID = ne12; - const int64_t IH = ne11; - const int64_t IW = ne10; - - const int64_t OC = ne03 / IC; - GGML_UNUSED(OC); - const int64_t KD = ne02; - const int64_t KH = ne01; - const int64_t KW = ne00; - - const int64_t OD = ne3 / N; - const int64_t OH = ne2; - const int64_t OW = ne1; - - const int64_t OH_OW = OH * OW; - const int64_t KD_KH_KW = KD * KH * KW; - const int64_t KH_KW = KH * KW; - const int64_t IC_KD_KH_KW = IC * KD * KH * KW; - - GGML_ASSERT(nb10 == sizeof(float)); - - // im2col: [N*IC, ID, IH, IW] => [N*OD, OH, OW, IC * KD * KH * KW] - { - float * const wdata = (float *) dst->data; - - for (int64_t in = 0; in < N; in++) { - for (int64_t iod = 0; iod < OD; iod++) { - for (int64_t ioh = 0; ioh < OH; ioh++) { - for (int64_t iow = 0; iow < OW; iow++) { - for (int64_t iic = ith; iic < IC; iic += nth) { - // micro kernel - float * dst_data = wdata + (in * OD * OH_OW + iod * OH_OW + ioh * OW + iow) * - IC_KD_KH_KW; // [IC, KD, KH, KW] - const float * const src_data = - (const float *) ((const char *) src1->data + (in * IC + iic) * nb13); // [ID, IH, IW] - - for (int64_t ikd = 0; ikd < KD; ikd++) { - for (int64_t ikh = 0; ikh < KH; ikh++) { - for (int64_t ikw = 0; ikw < KW; ikw++) { - const int64_t iiw = iow * s0 + ikw * d0 - p0; - const int64_t iih = ioh * s1 + ikh * d1 - p1; - const int64_t iid = iod * s2 + ikd * d2 - p2; - - if (iid < 0 || iid >= ID || iih < 0 || iih >= IH || iiw < 0 || iiw >= IW || - iid < 0 || iid >= ID) { - dst_data[iic * KD_KH_KW + ikd * KH_KW + ikh * KW + ikw] = 0; - } else { - const float * const s = - (const float *) ((const char *) src_data + iid * nb12 + iih * nb11 + - iiw * nb10); // [ID, IH, IW] - dst_data[iic * KD_KH_KW + ikd * KH_KW + ikh * KW + ikw] = *s; - } - } - } - } - } - } - } - } - } - } -} - -void ggml_compute_forward_im2col_3d(const ggml_compute_params * params, ggml_tensor * dst) { - switch (dst->type) { - case GGML_TYPE_F16: - { - ggml_compute_forward_im2col_3d_f16(params, dst); - } - break; - case GGML_TYPE_F32: - { - ggml_compute_forward_im2col_3d_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -static void ggml_call_mul_mat(ggml_type type, - const ggml_compute_params * params, - int64_t m, - int64_t n, - int64_t k, - void * a, - void 
* b, - float * c) { - const ggml_type_traits * traits = ggml_get_type_traits(type); - struct ggml_tensor src1 = {}; - src1.type = type; - src1.ne[0] = k; - src1.ne[1] = m; - src1.ne[2] = 1; - src1.ne[3] = 1; - src1.nb[0] = traits->type_size; - src1.nb[1] = k * traits->type_size; - src1.nb[2] = src1.nb[1]; - src1.nb[3] = src1.nb[2]; - src1.data = a; - - struct ggml_tensor src0 = {}; - src0.type = type; - src0.ne[0] = k; - src0.ne[1] = n; - src0.ne[2] = 1; - src0.ne[3] = 1; - src0.nb[0] = traits->type_size; - src0.nb[1] = k * traits->type_size; - src0.nb[2] = src0.nb[1]; - src0.nb[3] = src0.nb[2]; - src0.data = b; - - struct ggml_tensor dst = {}; - dst.ne[0] = n; - dst.ne[1] = m; - dst.ne[2] = 1; - dst.ne[3] = 1; - dst.nb[0] = sizeof(float); - dst.nb[1] = n * sizeof(float); - dst.nb[2] = dst.nb[1]; - dst.nb[3] = dst.nb[2]; - dst.data = c; - dst.src[0] = &src0; - dst.src[1] = &src1; - - ggml_compute_forward_mul_mat(params, &dst); -} - -static inline int64_t ggml_wrap_around(int64_t coord, int64_t size) { - return (coord + size) % size; // adding size avoids negative number weirdness -} - -// ggml_compute_forward_conv_2d - -static void ggml_compute_forward_conv_2d_impl(const ggml_compute_params * params, - const ggml_tensor * kernel, // [KW, KH, IC, OC] - const ggml_tensor * src, // [W, H, C, N] - ggml_tensor * dst, // [OW, OH, OC, N] - ggml_type kernel_type) { - GGML_ASSERT(ggml_is_contiguous(kernel)); - GGML_ASSERT(kernel_type == GGML_TYPE_F16 || kernel_type == GGML_TYPE_F32); - GGML_ASSERT(kernel->type == kernel_type); - - const ggml_type_traits * traits = ggml_get_type_traits(kernel_type); - - const int32_t stride_x = dst->op_params[0]; - const int32_t stride_y = dst->op_params[1]; - const int32_t pad_x = dst->op_params[2]; - const int32_t pad_y = dst->op_params[3]; - const int32_t dilation_x = dst->op_params[4]; - const int32_t dilation_y = dst->op_params[5]; - - const int64_t c_in = src->ne[2]; - const int64_t c_out = kernel->ne[3]; - GGML_ASSERT(c_in == kernel->ne[2]); - - const int64_t src_w = src->ne[0]; - const int64_t src_h = src->ne[1]; - const int64_t knl_w = kernel->ne[0]; - const int64_t knl_h = kernel->ne[1]; - const int64_t dst_w = dst->ne[0]; - const int64_t dst_h = dst->ne[1]; - - const float * src_data = (float *) src->data; - void * knl_data = kernel->data; - float * dst_data = (float *) dst->data; - - const int64_t knl_n = knl_w * knl_h * c_in; - const int64_t patch_total = dst->ne[3] * dst_w * dst_h; - - const int64_t space_per_patch = knl_n * traits->type_size + c_out * sizeof(float); - const int64_t batch_size = params->wsize / space_per_patch; - const int64_t patches_per_batch = batch_size > 8 ? 
(batch_size / 8) * 8 : batch_size; - const int64_t batch_n = (patch_total + patches_per_batch - 1) / patches_per_batch; - - GGML_ASSERT(patches_per_batch > 0 && batch_size >= 1); - - void * tmp = params->wdata; - - for (int64_t batch_i = 0; batch_i < batch_n; ++batch_i) { - const int64_t patch_start_batch = batch_i * patches_per_batch; - const int64_t patch_end_batch = std::min(patch_start_batch + patches_per_batch, patch_total); - const int64_t patch_n = patch_end_batch - patch_start_batch; - - const int64_t patch_per_thread = (patch_n + params->nth - 1) / params->nth; - const int64_t patch_start = patch_start_batch + params->ith * patch_per_thread; - const int64_t patch_end = std::min(patch_start + patch_per_thread, patch_end_batch); - - //im2col for a patch - for (int64_t p = patch_start; p < patch_end; ++p) { - const int64_t batch_n = p / (dst_w * dst_h); - const int64_t src_x = (p / dst_w) % dst_h; - const int64_t src_y = p % dst_w; - - const float * src_base = (const float *) ((const char *) src_data + batch_n * src->nb[3]); - char * dst_row = (char *) tmp + (p % patches_per_batch) * knl_n * traits->type_size; - - for (int64_t ic = 0; ic < c_in; ++ic) { - for (int64_t ky = 0; ky < knl_h; ++ky) { - for (int64_t kx = 0; kx < knl_w; ++kx) { - const int64_t sy = src_x * stride_y + ky * dilation_y - pad_y; - const int64_t sx = src_y * stride_x + kx * dilation_x - pad_x; - - int64_t dst_idx = ic * (knl_h * knl_w) + ky * knl_w + kx; - - float src_val; - if (sy < 0 || sy >= src_h || sx < 0 || sx >= src_w) { - src_val = 0.0f; - } else { - const float * src_ptr = (const float *) ((const char *) src_base + sx * src->nb[0] + - sy * src->nb[1] + ic * src->nb[2]); - src_val = *src_ptr; - } - - char * element_ptr = dst_row + dst_idx * traits->type_size; - if (kernel_type == GGML_TYPE_F32) { - *(float *) element_ptr = src_val; - } else if (kernel_type == GGML_TYPE_F16) { - *(ggml_fp16_t *) element_ptr = GGML_CPU_FP32_TO_FP16(src_val); - } - } - } - } - } // patches handled by this thread - - ggml_barrier(params->threadpool); - - float * gemm_output = (float *) ((char *) tmp + patches_per_batch * knl_n * traits->type_size); - - GGML_ASSERT(gemm_output + patch_n * c_out <= (float *) tmp + params->wsize); - - // GEMM: patches[patch_n, knl_n] × kernel[knl_n, c_out] = output[patch_n, c_out] - ggml_call_mul_mat(kernel_type, params, patch_n, c_out, knl_n, tmp, knl_data, gemm_output); - - ggml_barrier(params->threadpool); - - //permute back [OC, N, OH, OW] to [N, OC, OH, OW] - const int64_t permute_per_thread = (patch_n + params->nth - 1) / params->nth; - const int64_t permute_start = params->ith * permute_per_thread; - const int64_t permute_end = std::min(permute_start + permute_per_thread, patch_n); - - for (int64_t i = permute_start; i < permute_end; ++i) { - const int64_t p = patch_start_batch + i; - const int64_t batch_n = p / (dst_w * dst_h); - const int64_t dst_y = (p / dst_w) % dst_h; - const int64_t dst_x = p % dst_w; - - for (int64_t oc = 0; oc < c_out; ++oc) { - const float value = gemm_output[i * c_out + oc]; - float * dst_ptr = (float *) ((char *) dst_data + dst_x * dst->nb[0] + dst_y * dst->nb[1] + - oc * dst->nb[2] + batch_n * dst->nb[3]); - *dst_ptr = value; - } - } - } -} - -void ggml_compute_forward_conv_2d(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - ggml_compute_forward_conv_2d_impl(params, src0, src1, dst, src0->type); -} - -// ggml_compute_forward_conv_3d - -static void 
ggml_compute_forward_conv_3d_impl(const ggml_compute_params * params, - const ggml_tensor * kernel, - const ggml_tensor * src, - ggml_tensor * dst, - ggml_type kernel_type) { - GGML_ASSERT(ggml_is_contiguous(kernel)); - GGML_ASSERT(kernel_type == GGML_TYPE_F16 || kernel_type == GGML_TYPE_F32); - GGML_ASSERT(kernel->type == kernel_type); - - const ggml_type_traits * traits = ggml_get_type_traits(kernel_type); - - const int32_t s0 = dst->op_params[0]; - const int32_t s1 = dst->op_params[1]; - const int32_t s2 = dst->op_params[2]; - const int32_t p0 = dst->op_params[3]; - const int32_t p1 = dst->op_params[4]; - const int32_t p2 = dst->op_params[5]; - const int32_t d0 = dst->op_params[6]; - const int32_t d1 = dst->op_params[7]; - const int32_t d2 = dst->op_params[8]; - const int32_t c = dst->op_params[9]; - const int32_t n = dst->op_params[10]; - const int32_t oc = dst->op_params[11]; - - const int64_t src_w = src->ne[0]; - const int64_t src_h = src->ne[1]; - const int64_t src_d = src->ne[2]; - const int64_t knl_w = kernel->ne[0]; - const int64_t knl_h = kernel->ne[1]; - const int64_t knl_d = kernel->ne[2]; - const int64_t dst_w = dst->ne[0]; - const int64_t dst_h = dst->ne[1]; - const int64_t dst_d = dst->ne[2]; - - const float * src_data = (float *) src->data; - void * knl_data = kernel->data; - float * dst_data = (float *) dst->data; - - const int64_t knl_n_per_channel = knl_w * knl_h * knl_d; - const int64_t knl_n_total = knl_n_per_channel * c; - const int64_t patch_total = n * dst_w * dst_h * dst_d; - - const int64_t space_per_patch = knl_n_total * traits->type_size + oc * sizeof(float); - const int64_t batch_size = params->wsize / space_per_patch; - const int64_t patches_per_batch = batch_size > 8 ? (batch_size / 8) * 8 : batch_size; - const int64_t batch_n = (patch_total + patches_per_batch - 1) / patches_per_batch; - - GGML_ASSERT(patches_per_batch > 0 && batch_size >= 1); - - void * tmp = params->wdata; - - for (int64_t batch_i = 0; batch_i < batch_n; ++batch_i) { - const int64_t patch_start_batch = batch_i * patches_per_batch; - const int64_t patch_end_batch = std::min(patch_start_batch + patches_per_batch, patch_total); - const int64_t patch_n_in_batch = patch_end_batch - patch_start_batch; - - const int64_t patch_per_thread = (patch_n_in_batch + params->nth - 1) / params->nth; - const int64_t patch_start = patch_start_batch + params->ith * patch_per_thread; - const int64_t patch_end = std::min(patch_start + patch_per_thread, patch_end_batch); - - for (int64_t p = patch_start; p < patch_end; ++p) { - const int64_t p_in_batch = p % (dst_w * dst_h * dst_d); - const int64_t p_in_depth = p_in_batch % (dst_w * dst_h); - const int64_t batch_idx = p / (dst_w * dst_h * dst_d); - const int64_t dst_z = p_in_batch / (dst_w * dst_h); - const int64_t dst_y = p_in_depth / dst_w; - const int64_t dst_x = p_in_depth % dst_w; - - char * dst_row = (char *) tmp + (p % patches_per_batch) * knl_n_total * traits->type_size; - - for (int64_t ic = 0; ic < c; ++ic) { - for (int64_t kz = 0; kz < knl_d; ++kz) { - for (int64_t ky = 0; ky < knl_h; ++ky) { - for (int64_t kx = 0; kx < knl_w; ++kx) { - const int64_t sz = dst_z * s2 + kz * d2 - p2; - const int64_t sy = dst_y * s1 + ky * d1 - p1; - const int64_t sx = dst_x * s0 + kx * d0 - p0; - - int64_t dst_idx = ic * knl_n_per_channel + kz * (knl_h * knl_w) + ky * knl_w + kx; - - float src_val; - if (sz < 0 || sz >= src_d || sy < 0 || sy >= src_h || sx < 0 || sx >= src_w) { - src_val = 0.0f; - } else { - const int64_t cn_idx = batch_idx * c + ic; - const float * 
src_ptr = - (const float *) ((const char *) src_data + sx * src->nb[0] + sy * src->nb[1] + - sz * src->nb[2] + cn_idx * src->nb[3]); - src_val = *src_ptr; - } - - char * element_ptr = dst_row + dst_idx * traits->type_size; - if (kernel_type == GGML_TYPE_F32) { - *(float *) element_ptr = src_val; - } else if (kernel_type == GGML_TYPE_F16) { - *(ggml_fp16_t *) element_ptr = GGML_CPU_FP32_TO_FP16(src_val); - } - } - } - } - } - } - - ggml_barrier(params->threadpool); - - float * gemm_output = (float *) ((char *) tmp + patches_per_batch * knl_n_total * traits->type_size); - ggml_call_mul_mat(kernel_type, params, patch_n_in_batch, oc, knl_n_total, tmp, knl_data, gemm_output); - - ggml_barrier(params->threadpool); - - const int64_t permute_per_thread = (patch_n_in_batch + params->nth - 1) / params->nth; - const int64_t permute_start = params->ith * permute_per_thread; - const int64_t permute_end = std::min(permute_start + permute_per_thread, patch_n_in_batch); - - for (int64_t i = permute_start; i < permute_end; ++i) { - const int64_t p = patch_start_batch + i; - const int64_t p_in_batch = p % (dst_w * dst_h * dst_d); - const int64_t p_in_depth = p_in_batch % (dst_w * dst_h); - const int64_t batch_idx = p / (dst_w * dst_h * dst_d); - const int64_t dst_z = p_in_batch / (dst_w * dst_h); - const int64_t dst_y = p_in_depth / dst_w; - const int64_t dst_x = p_in_depth % dst_w; - - for (int64_t ioc = 0; ioc < oc; ++ioc) { - const float value = gemm_output[i * oc + ioc]; - const int64_t ocn_idx = batch_idx * oc + ioc; - float * dst_ptr = (float *) ((char *) dst_data + dst_x * dst->nb[0] + dst_y * dst->nb[1] + - dst_z * dst->nb[2] + ocn_idx * dst->nb[3]); - *dst_ptr = value; - } - } - } -} - -void ggml_compute_forward_conv_3d(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - ggml_compute_forward_conv_3d_impl(params, src0, src1, dst, src0->type); -} - -// ggml_compute_forward_conv_transpose_2d - -void ggml_compute_forward_conv_transpose_2d(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); - - GGML_TENSOR_BINARY_OP_LOCALS - - const int ith = params->ith; - const int nth = params->nth; - - const int nk = ne00 * ne01 * ne02 * ne03; - - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb10 == sizeof(float)); - - if (ith == 0) { - memset(params->wdata, 0, params->wsize); - - // permute kernel data (src0) from (Kw x Kh x Cout x Cin) to (Cin x Kw x Kh x Cout) - { - ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; - - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - const ggml_fp16_t * const src = (ggml_fp16_t *) ((char *) src0->data + i03 * nb03 + i02 * nb02); - ggml_fp16_t * dst_data = wdata + i02 * ne01 * ne00 * ne03; - for (int64_t i01 = 0; i01 < ne01; i01++) { - for (int64_t i00 = 0; i00 < ne00; i00++) { - dst_data[i01 * ne00 * ne03 + i00 * ne03 + i03] = src[i01 * ne00 + i00]; - } - } - } - } - } - - // permute source data (src1) from (Sw x Sh x Cin) to (Cin x Sw x Sh) - { - ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + nk; - for (int i12 = 0; i12 < ne12; i12++) { - for (int i11 = 0; i11 < ne11; i11++) { - const float * const src = (float *) ((char *) src1->data + i12 * nb12 + i11 * nb11); - ggml_fp16_t * dst_data = wdata 
+ i11 * ne10 * ne12; - for (int i10 = 0; i10 < ne10; i10++) { - dst_data[i10 * ne12 + i12] = GGML_CPU_FP32_TO_FP16(src[i10]); - } - } - } - } - - memset(dst->data, 0, ggml_nbytes(dst)); - } - ggml_barrier(params->threadpool); - - const int32_t stride = ggml_get_op_params_i32(dst, 0); - - // total patches in dst - const int np = ne2; - - // patches per thread - const int dp = (np + nth - 1) / nth; - - // patch range for this thread - const int ip0 = dp * ith; - const int ip1 = MIN(ip0 + dp, np); - - ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; - ggml_fp16_t * const wdata_src = wdata + nk; - - for (int i2 = ip0; i2 < ip1; i2++) { // Cout - float * dst_data = (float *) ((char *) dst->data + i2 * nb2); - ggml_fp16_t * wdata_kernel = wdata + i2 * ne01 * ne00 * ne03; - for (int i11 = 0; i11 < ne11; i11++) { - for (int i10 = 0; i10 < ne10; i10++) { - const int i1n = i11 * ne10 * ne12 + i10 * ne12; - for (int i01 = 0; i01 < ne01; i01++) { - for (int i00 = 0; i00 < ne00; i00++) { - float v = 0; - ggml_vec_dot_f16(ne03, &v, 0, wdata_src + i1n, 0, wdata_kernel + i01 * ne00 * ne03 + i00 * ne03, - 0, 1); - dst_data[(i11 * stride + i01) * ne0 + i10 * stride + i00] += v; - } - } - } - } - } -} - -// ggml_compute_forward_conv_2d_dw - -struct ggml_conv_2d_dw_params { - int64_t channels; - int64_t batch; - int64_t src_w; - int64_t src_h; - int64_t dst_w; - int64_t dst_h; - int64_t knl_w; - int64_t knl_h; - int stride_x; - int stride_y; - int pad_x; - int pad_y; - int dilation_x; - int dilation_y; -}; - -static void ggml_compute_forward_conv_2d_dw_cwhn(const ggml_compute_params * params, - const ggml_tensor * src, - const ggml_tensor * kernel, - ggml_tensor * dst, - const ggml_conv_2d_dw_params & p) { - const int64_t c = p.channels; - const float * knl_data = (const float *) kernel->data; - - const int64_t rows_total = p.dst_h * p.batch; - const int64_t rows_per_thread = (rows_total + params->nth - 1) / params->nth; - const int64_t row_start = params->ith * rows_per_thread; - const int64_t row_end = MIN(row_start + rows_per_thread, rows_total); - -#ifdef GGML_SIMD -# if defined(__ARM_FEATURE_SVE) - const int64_t pkg_size = svcntw(); -# else - const int64_t pkg_size = GGML_F32_EPR; -# endif - const int64_t pkg_count = c / pkg_size; - const int64_t c_pkg_end = pkg_count * pkg_size; -#else - const int64_t c_pkg_end = 0; -#endif - - for (int64_t row = row_start; row < row_end; ++row) { - const int64_t dst_y = row % p.dst_h; - const float * src_data = (const float *) src->data + (row / p.dst_h) * p.src_w * p.src_h * c; - for (int64_t dst_x = 0; dst_x < p.dst_w; ++dst_x) { - float * dst_data = (float *) dst->data + (row * p.dst_w + dst_x) * c; - const int64_t src_y_base = dst_y * p.stride_y - p.pad_y; - const int64_t src_x_base = dst_x * p.stride_x - p.pad_x; - -#ifdef GGML_SIMD - // Vectorized loop - for (int64_t c_i = 0; c_i < c_pkg_end; c_i += pkg_size) { - GGML_F32_VEC sum = GGML_F32_VEC_ZERO; - for (int64_t knl_y = 0; knl_y < p.knl_h; ++knl_y) { - const int64_t src_y = src_y_base + knl_y * p.dilation_y; - if (src_y < 0 || src_y >= p.src_h) { - continue; - } - for (int64_t knl_x = 0; knl_x < p.knl_w; ++knl_x) { - const int64_t src_x = src_x_base + knl_x * p.dilation_x; - if (src_x < 0 || src_x >= p.src_w) { - continue; - } - GGML_F32_VEC k = GGML_F32_VEC_LOAD(knl_data + (knl_y * p.knl_w + knl_x) * c + c_i); - GGML_F32_VEC s = GGML_F32_VEC_LOAD(src_data + (src_y * p.src_w + src_x) * c + c_i); - sum = GGML_F32_VEC_FMA(sum, k, s); - } - } - GGML_F32_VEC_STORE(dst_data + c_i, sum); - } -#endif - 
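// Illustrative sketch (not part of this patch): the conv_transpose_2d loop above scatters
// each input pixel, scaled by every kernel tap, into dst at (i11*stride + i01, i10*stride + i00).
// The single-channel reference below mirrors that scatter-add; all names here are hypothetical.
#include <vector>

static std::vector<float> conv_transpose_2d_ref(const std::vector<float> & src, int W, int H,
                                                const std::vector<float> & knl, int KW, int KH,
                                                int stride) {
    const int OW = (W - 1) * stride + KW;   // output width with no padding
    const int OH = (H - 1) * stride + KH;   // output height with no padding
    std::vector<float> out(OW * OH, 0.0f);
    for (int y = 0; y < H; ++y) {
        for (int x = 0; x < W; ++x) {
            for (int ky = 0; ky < KH; ++ky) {
                for (int kx = 0; kx < KW; ++kx) {
                    // every input pixel adds a scaled copy of the kernel into the output
                    out[(y * stride + ky) * OW + (x * stride + kx)] += src[y * W + x] * knl[ky * KW + kx];
                }
            }
        }
    }
    return out;
}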
// Scalar loop - for (int64_t c_i = c_pkg_end; c_i < c; ++c_i) { - float sum = 0.0f; - for (int64_t knl_y = 0; knl_y < p.knl_h; ++knl_y) { - const int64_t src_y = src_y_base + knl_y * p.dilation_y; - if (src_y < 0 || src_y >= p.src_h) { - continue; - } - for (int64_t knl_x = 0; knl_x < p.knl_w; ++knl_x) { - const int64_t src_x = src_x_base + knl_x * p.dilation_x; - if (src_x < 0 || src_x >= p.src_w) { - continue; - } - sum += knl_data[(knl_y * p.knl_w + knl_x) * c + c_i] * - src_data[(src_y * p.src_w + src_x) * c + c_i]; - } - } - dst_data[c_i] = sum; - } - } - } -} - -static void ggml_compute_forward_conv_2d_dw_whcn(const ggml_compute_params * params, - const ggml_tensor * src, - const ggml_tensor * kernel, - ggml_tensor * dst, - const ggml_conv_2d_dw_params & p) { - const int64_t n = p.channels * p.batch; - const int64_t per_thread = (n + params->nth - 1) / params->nth; - const int64_t start = params->ith * per_thread; - const int64_t end = MIN(start + per_thread, n); - - for (int64_t i = start; i < end; ++i) { - const float * knl_data = (const float *) kernel->data + (i % p.channels) * p.knl_w * p.knl_h; - const float * src_data = (const float *) src->data + i * p.src_w * p.src_h; - float * dst_data = (float *) dst->data + i * p.dst_w * p.dst_h; - - for (int64_t dst_y = 0; dst_y < p.dst_h; ++dst_y) { - for (int64_t dst_x = 0; dst_x < p.dst_w; ++dst_x) { - float sum = 0.0f; - for (int64_t knl_y = 0; knl_y < p.knl_h; ++knl_y) { - const int64_t src_y = dst_y * p.stride_y + knl_y * p.dilation_y - p.pad_y; - if (src_y < 0 || src_y >= p.src_h) { - continue; - } - for (int64_t knl_x = 0; knl_x < p.knl_w; ++knl_x) { - const int64_t src_x = dst_x * p.stride_x + knl_x * p.dilation_x - p.pad_x; - if (src_x < 0 || src_x >= p.src_w) { - continue; - } - sum += knl_data[knl_y * p.knl_w + knl_x] * src_data[src_y * p.src_w + src_x]; - } - } - dst_data[dst_y * p.dst_w + dst_x] = sum; - } - } - } -} - -void ggml_compute_forward_conv_2d_dw(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * kernel = dst->src[0]; - const ggml_tensor * src = dst->src[1]; - ggml_conv_2d_dw_params p; - p.channels = src->ne[2]; - p.batch = src->ne[3]; - p.src_w = src->ne[0]; - p.src_h = src->ne[1]; - p.dst_w = dst->ne[0]; - p.dst_h = dst->ne[1]; - p.knl_w = kernel->ne[0]; - p.knl_h = kernel->ne[1]; - p.stride_x = dst->op_params[0]; - p.stride_y = dst->op_params[1]; - p.pad_x = dst->op_params[2]; - p.pad_y = dst->op_params[3]; - p.dilation_x = dst->op_params[4]; - p.dilation_y = dst->op_params[5]; - - GGML_ASSERT(kernel->ne[3] == p.channels); - GGML_ASSERT(dst->ne[3] == p.batch); - - if (ggml_is_contiguous(src)) { - ggml_compute_forward_conv_2d_dw_whcn(params, src, kernel, dst, p); - } else if (ggml_is_contiguous_channels(src)) { - // kernel should also have channels most contiguous in memory - GGML_ASSERT(kernel->nb[0] >= kernel->nb[2] && kernel->nb[1] >= kernel->nb[0]); - ggml_compute_forward_conv_2d_dw_cwhn(params, src, kernel, dst, p); - } else { - GGML_ABORT("non-contiguous memory layout not supported"); - } -} - -// ggml_compute_forward_pool_1d_sk_p0 - -static void ggml_compute_forward_pool_1d_sk_p0(const ggml_compute_params * params, - const ggml_op_pool op, - const int k, - ggml_tensor * dst) { - const ggml_tensor * src = dst->src[0]; - - assert(src->type == GGML_TYPE_F32 || src->type == GGML_TYPE_F16); - - if (params->ith != 0) { - return; - } - - const char * cdata = (const char *) src->data; - const char * const data_end = cdata + ggml_nbytes(src); - float * drow = (float *) dst->data; - - 
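// Illustrative sketch (not part of this patch): ggml_compute_forward_pool_1d below only
// supports k0 == s0 and p0 == 0, so every output element reduces a disjoint window of k
// inputs. pool_avg_1d is a hypothetical helper showing the average case.
#include <vector>

static std::vector<float> pool_avg_1d(const std::vector<float> & src, int k) {
    const int n_out = (int) src.size() / k;   // disjoint windows of size k
    std::vector<float> out(n_out);
    for (int i = 0; i < n_out; ++i) {
        float acc = 0.0f;
        for (int j = 0; j < k; ++j) {
            acc += src[i * k + j];
        }
        out[i] = acc / k;                     // GGML_OP_POOL_MAX keeps the running maximum instead
    }
    return out;
}
// e.g. pool_avg_1d({1, 3, 2, 6}, 2) -> {2, 4}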
const int64_t rs = dst->ne[0]; - - while (cdata < data_end) { - const void * srow = (const void *) cdata; - int j = 0; - for (int64_t i = 0; i < rs; ++i) { - switch (op) { - case GGML_OP_POOL_AVG: - drow[i] = 0; - break; - case GGML_OP_POOL_MAX: - drow[i] = -FLT_MAX; - break; - case GGML_OP_POOL_COUNT: - GGML_ABORT("fatal error"); - } - for (int ki = 0; ki < k; ++ki) { - const float srow_j = (src->type == GGML_TYPE_F32) ? - ((const float *) srow)[j] : - GGML_CPU_FP16_TO_FP32(((const ggml_fp16_t *) srow)[j]); - switch (op) { - case GGML_OP_POOL_AVG: - drow[i] += srow_j; - break; - case GGML_OP_POOL_MAX: - if (srow_j > drow[i]) { - drow[i] = srow_j; - } - break; - case GGML_OP_POOL_COUNT: - GGML_ABORT("fatal error"); - } - ++j; - } - switch (op) { - case GGML_OP_POOL_AVG: - drow[i] /= k; - break; - case GGML_OP_POOL_MAX: - break; - case GGML_OP_POOL_COUNT: - GGML_ABORT("fatal error"); - } - } - - cdata += src->nb[1]; - drow += rs; - } -} - -// ggml_compute_forward_pool_1d - -void ggml_compute_forward_pool_1d(const ggml_compute_params * params, ggml_tensor * dst) { - const int32_t * opts = (const int32_t *) dst->op_params; - ggml_op_pool op = static_cast(opts[0]); - const int k0 = opts[1]; - const int s0 = opts[2]; - const int p0 = opts[3]; - GGML_ASSERT(p0 == 0); // padding not supported - GGML_ASSERT(k0 == s0); // only s = k supported - - ggml_compute_forward_pool_1d_sk_p0(params, op, k0, dst); -} - -// ggml_compute_forward_pool_2d - -void ggml_compute_forward_pool_2d(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src = dst->src[0]; - - assert(src->type == GGML_TYPE_F32 || src->type == GGML_TYPE_F16); - - if (params->ith != 0) { - return; - } - - const int32_t * opts = (const int32_t *) dst->op_params; - ggml_op_pool op = static_cast(opts[0]); - const int k0 = opts[1]; - const int k1 = opts[2]; - const int s0 = opts[3]; - const int s1 = opts[4]; - const int p0 = opts[5]; - const int p1 = opts[6]; - const char * cdata = (const char *) src->data; - const char * const data_end = cdata + ggml_nbytes(src); - - const int64_t px = dst->ne[0]; - const int64_t py = dst->ne[1]; - const int64_t pa = px * py; - - float * dplane = (float *) dst->data; - - const int ka = k0 * k1; - const int offset0 = -p0; - const int offset1 = -p1; - - while (cdata < data_end) { - for (int oy = 0; oy < py; ++oy) { - float * const drow = dplane + oy * px; - for (int ox = 0; ox < px; ++ox) { - float * const out = drow + ox; - switch (op) { - case GGML_OP_POOL_AVG: - *out = 0; - break; - case GGML_OP_POOL_MAX: - *out = -FLT_MAX; - break; - case GGML_OP_POOL_COUNT: - GGML_ABORT("fatal error"); - } - - const int ix = offset0 + ox * s0; - const int iy = offset1 + oy * s1; - - for (int ky = 0; ky < k1; ++ky) { - if (iy + ky < 0 || iy + ky >= src->ne[1]) { - continue; - } - const void * srow = (const void *) (cdata + src->nb[1] * (iy + ky)); - for (int kx = 0; kx < k0; ++kx) { - int j = ix + kx; - if (j < 0 || j >= src->ne[0]) { - continue; - } - const float srow_j = (src->type == GGML_TYPE_F32) ? 
- ((const float *) srow)[j] : - GGML_CPU_FP16_TO_FP32(((const ggml_fp16_t *) srow)[j]); - switch (op) { - case GGML_OP_POOL_AVG: - *out += srow_j; - break; - case GGML_OP_POOL_MAX: - if (srow_j > *out) { - *out = srow_j; - } - break; - case GGML_OP_POOL_COUNT: - GGML_ABORT("fatal error"); - } - } - } - switch (op) { - case GGML_OP_POOL_AVG: - *out /= ka; - break; - case GGML_OP_POOL_MAX: - break; - case GGML_OP_POOL_COUNT: - GGML_ABORT("fatal error"); - } - } - } - - cdata += src->nb[2]; - dplane += pa; - } -} - -// ggml_compute_forward_pool_2d_back - -void ggml_compute_forward_pool_2d_back(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src = dst->src[0]; - const ggml_tensor * dstf = dst->src[1]; // forward tensor of dst - - assert(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); - - if (params->ith != 0) { - return; - } - - const int32_t * opts = (const int32_t *) dst->op_params; - ggml_op_pool op = static_cast(opts[0]); - const int k0 = opts[1]; - const int k1 = opts[2]; - const int s0 = opts[3]; - const int s1 = opts[4]; - const int p0 = opts[5]; - const int p1 = opts[6]; - - char * cdata = (char *) dst->data; - const char * cdataf = (const char *) dstf->data; - const char * const data_end = cdata + ggml_nbytes(dst); - - GGML_ASSERT(params->ith == 0); - memset(cdata, 0, ggml_nbytes(dst)); - - const int64_t px = src->ne[0]; - const int64_t py = src->ne[1]; - const int64_t pa = px * py; - - const float * splane = (const float *) src->data; - - const int ka = k0 * k1; - const int offset0 = -p0; - const int offset1 = -p1; - - while (cdata < data_end) { - for (int oy = 0; oy < py; ++oy) { - const float * const srow = splane + oy * px; - for (int ox = 0; ox < px; ++ox) { - const float grad0 = srow[ox]; - - const int ix = offset0 + ox * s0; - const int iy = offset1 + oy * s1; - - if (op == GGML_OP_POOL_MAX) { - float maxval = -FLT_MAX; - int kxmax = -1; - int kymax = -1; - - for (int ky = 0; ky < k1; ++ky) { - if (iy + ky < 0 || iy + ky >= dst->ne[1]) { - continue; - } - const void * drowf = (const void *) (cdataf + dst->nb[1] * (iy + ky)); - for (int kx = 0; kx < k0; ++kx) { - int j = ix + kx; - if (j < 0 || j >= dst->ne[0]) { - continue; - } - - const float val = dst->type == GGML_TYPE_F32 ? 
- ((const float *) drowf)[j] : - GGML_CPU_FP16_TO_FP32(((const ggml_fp16_t *) drowf)[j]); - if (val <= maxval) { - continue; - } - - maxval = val; - kxmax = kx; - kymax = ky; - } - } - - if (kxmax == -1 || kymax == -1) { - continue; - } - - void * drow = (void *) (cdata + dst->nb[1] * (iy + kymax)); - const int j = ix + kxmax; - if (dst->type == GGML_TYPE_F32) { - ((float *) drow)[j] += grad0; - } else { - ((ggml_fp16_t *) drow)[j] = - GGML_CPU_FP32_TO_FP16(grad0 + GGML_CPU_FP16_TO_FP32(((const ggml_fp16_t *) drow)[j])); - } - } else if (op == GGML_OP_POOL_AVG) { - const float grad = grad0 / ka; - - for (int ky = 0; ky < k1; ++ky) { - if (iy + ky < 0 || iy + ky >= dst->ne[1]) { - continue; - } - void * drow = (void *) (cdata + dst->nb[1] * (iy + ky)); - for (int kx = 0; kx < k0; ++kx) { - int j = ix + kx; - if (j < 0 || j >= dst->ne[0]) { - continue; - } - - if (dst->type == GGML_TYPE_F32) { - ((float *) drow)[j] += grad; - } else { - ((ggml_fp16_t *) drow)[j] += GGML_CPU_FP32_TO_FP16(grad); - } - } - } - } else { - GGML_ASSERT(false); - } - } - } - - cdata += dst->nb[2]; - cdataf += dst->nb[2]; - splane += pa; - } -} - -// ggml_compute_forward_upscale - -static void ggml_compute_forward_upscale_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_ASSERT(src0->type == GGML_TYPE_F32); - - const int ith = params->ith; - const int nth = params->nth; - - GGML_TENSOR_UNARY_OP_LOCALS - - float sf0 = (float) ne0 / src0->ne[0]; - float sf1 = (float) ne1 / src0->ne[1]; - float sf2 = (float) ne2 / src0->ne[2]; - float sf3 = (float) ne3 / src0->ne[3]; - float pixel_offset = 0.5f; - - const int32_t mode_flags = ggml_get_op_params_i32(dst, 0); - const ggml_scale_mode mode = (ggml_scale_mode) (mode_flags & 0xFF); - - if (mode_flags & GGML_SCALE_FLAG_ALIGN_CORNERS) { - pixel_offset = 0.0f; - sf0 = ne0 > 1 && ne00 > 1 ? (float) (ne0 - 1) / (ne00 - 1) : sf0; - sf1 = ne1 > 1 && ne01 > 1 ? 
(float) (ne1 - 1) / (ne01 - 1) : sf1; - } - - if (mode == GGML_SCALE_MODE_NEAREST) { - for (int64_t i3 = 0; i3 < ne3; i3++) { - const int64_t i03 = i3 / sf3; - for (int64_t i2 = ith; i2 < ne2; i2 += nth) { - const int64_t i02 = i2 / sf2; - for (int64_t i1 = 0; i1 < ne1; i1++) { - const int64_t i01 = i1 / sf1; - for (int64_t i0 = 0; i0 < ne0; i0++) { - const int64_t i00 = i0 / sf0; - - const float * x = - (float *) ((char *) src0->data + i00 * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03); - float * y = (float *) ((char *) dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3); - - *y = *x; - } - } - } - } - } else if (mode == GGML_SCALE_MODE_BILINEAR) { - for (int64_t i3 = 0; i3 < ne3; i3++) { - const int64_t i03 = i3 / sf3; - for (int64_t i2 = ith; i2 < ne2; i2 += nth) { - const int64_t i02 = i2 / sf2; - for (int64_t i1 = 0; i1 < ne1; i1++) { - const float y = ((float) i1 + pixel_offset) / sf1 - pixel_offset; - int64_t y0 = (int64_t) floorf(y); - int64_t y1 = y0 + 1; - - y0 = std::max(int64_t(0), std::min(y0, ne01 - 1)); - y1 = std::max(int64_t(0), std::min(y1, ne01 - 1)); - - float dy = y - (float) y0; - dy = std::max(0.0f, std::min(dy, 1.0f)); - - for (int64_t i0 = 0; i0 < ne0; i0++) { - const float x = ((float) i0 + pixel_offset) / sf0 - pixel_offset; - int64_t x0 = (int64_t) floorf(x); - int64_t x1 = x0 + 1; - - x0 = std::max(int64_t(0), std::min(x0, ne00 - 1)); - x1 = std::max(int64_t(0), std::min(x1, ne00 - 1)); - - float dx = x - (float) x0; - dx = std::max(0.0f, std::min(dx, 1.0f)); - - // fetch the four surrounding pixel values and interpolate - const float a = *(const float *) ((const char *) src0->data + x0 * nb00 + y0 * nb01 + - i02 * nb02 + i03 * nb03); - const float b = *(const float *) ((const char *) src0->data + x1 * nb00 + y0 * nb01 + - i02 * nb02 + i03 * nb03); - const float c = *(const float *) ((const char *) src0->data + x0 * nb00 + y1 * nb01 + - i02 * nb02 + i03 * nb03); - const float d = *(const float *) ((const char *) src0->data + x1 * nb00 + y1 * nb01 + - i02 * nb02 + i03 * nb03); - - const float val = a * (1 - dx) * (1 - dy) + b * dx * (1 - dy) + c * (1 - dx) * dy + d * dx * dy; - - float * y_dst = (float *) ((char *) dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3); - *y_dst = val; - } - } - } - } - } else if (mode == GGML_SCALE_MODE_BICUBIC) { - // https://en.wikipedia.org/wiki/Bicubic_interpolation#Bicubic_convolution_algorithm - const float a = -0.75f; // use alpha = -0.75 (same as PyTorch) - auto weight1 = [a](float x) { - return ((a + 2) * x - (a + 3)) * x * x + 1; - }; - auto weight2 = [a](float x) { - return ((a * x - 5 * a) * x + 8 * a) * x - 4 * a; - }; - auto bicubic = [=](float p0, float p1, float p2, float p3, float x) { - const float w0 = weight2(x + 1); - const float w1 = weight1(x + 0); - const float w2 = weight1(1 - x); - const float w3 = weight2(2 - x); - return p0 * w0 + p1 * w1 + p2 * w2 + p3 * w3; - }; - - for (int64_t i3 = 0; i3 < ne3; i3++) { - const int64_t i03 = i3 / sf3; - for (int64_t i2 = ith; i2 < ne2; i2 += nth) { - const int64_t i02 = i2 / sf2; - for (int64_t i1 = 0; i1 < ne1; i1++) { - const float y = ((float) i1 + pixel_offset) / sf1 - pixel_offset; - const int64_t y0 = (int64_t) floorf(y); - const float dy = y - (float) y0; - - for (int64_t i0 = 0; i0 < ne0; i0++) { - const float x = ((float) i0 + pixel_offset) / sf0 - pixel_offset; - const int64_t x0 = (int64_t) floorf(x); - const float dx = x - (float) x0; - - auto p = [=](int64_t x_off, int64_t y_off) -> float { - int64_t i00 = std::max(int64_t(0), std::min(x0 + x_off, 
ne00 - 1)); - int64_t i01 = std::max(int64_t(0), std::min(y0 + y_off, ne01 - 1)); - return *(const float *) ((const char *) src0->data + i00 * nb00 + i01 * nb01 + i02 * nb02 + - i03 * nb03); - }; - - const float val = bicubic(bicubic(p(-1, -1), p(0, -1), p(1, -1), p(2, -1), dx), - bicubic(p(-1, 0), p(0, 0), p(1, 0), p(2, 0), dx), - bicubic(p(-1, 1), p(0, 1), p(1, 1), p(2, 1), dx), - bicubic(p(-1, 2), p(0, 2), p(1, 2), p(2, 2), dx), dy); - - float * y_dst = (float *) ((char *) dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3); - *y_dst = val; - } - } - } - } - } else { - GGML_ABORT("unsupported upscale mode"); - } -} - -void ggml_compute_forward_upscale(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_upscale_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_pad - -static void ggml_compute_forward_pad_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_ASSERT(src0->nb[0] == sizeof(float)); - GGML_ASSERT(dst->nb[0] == sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - GGML_TENSOR_UNARY_OP_LOCALS - - float * dst_ptr = (float *) dst->data; - const int32_t lp0 = ggml_get_op_params_i32(dst, 0); - const int32_t rp0 = ggml_get_op_params_i32(dst, 1); - const int32_t lp1 = ggml_get_op_params_i32(dst, 2); - const int32_t rp1 = ggml_get_op_params_i32(dst, 3); - const int32_t lp2 = ggml_get_op_params_i32(dst, 4); - const int32_t rp2 = ggml_get_op_params_i32(dst, 5); - const int32_t lp3 = ggml_get_op_params_i32(dst, 6); - const int32_t rp3 = ggml_get_op_params_i32(dst, 7); - const int32_t circular = ggml_get_op_params_i32(dst, 8); - - // TODO: optimize - - if (circular == 0) { - for (int64_t i2 = 0; i2 < ne2; ++i2) { - for (int64_t i1 = ith; i1 < ne1; i1 += nth) { - for (int64_t i0 = 0; i0 < ne0; ++i0) { - for (int64_t i3 = 0; i3 < ne3; ++i3) { - const int64_t dst_idx = i3 * (ne0 * ne1 * ne2) + i2 * (ne0 * ne1) + i1 * ne0 + i0; - if ((i0 >= lp0 && i0 < ne0 - rp0) && (i1 >= lp1 && i1 < ne1 - rp1) && - (i2 >= lp2 && i2 < ne2 - rp2) && (i3 >= lp3 && i3 < ne3 - rp3)) { - const int64_t src_idx = - (i3 - lp3) * nb03 + (i2 - lp2) * nb02 + (i1 - lp1) * nb01 + (i0 - lp0) * nb00; - const float * src_ptr = (const float *) ((char *) src0->data + src_idx); - dst_ptr[dst_idx] = *src_ptr; - } else { - dst_ptr[dst_idx] = 0; - } - } - } - } - } - } else { - for (int64_t i2 = 0; i2 < ne2; ++i2) { - for (int64_t i1 = ith; i1 < ne1; i1 += nth) { - for (int64_t i0 = 0; i0 < ne0; ++i0) { - for (int64_t i3 = 0; i3 < ne3; ++i3) { - const int64_t dst_idx = i3 * (ne0 * ne1 * ne2) + i2 * (ne0 * ne1) + i1 * ne0 + i0; - const int64_t src_i0 = ggml_wrap_around(i0 - lp0, ne00); - const int64_t src_i1 = ggml_wrap_around(i1 - lp1, ne01); - const int64_t src_i2 = ggml_wrap_around(i2 - lp2, ne02); - const int64_t src_i3 = ggml_wrap_around(i3 - lp3, ne03); - - const int64_t src_idx = src_i3 * nb03 + src_i2 * nb02 + src_i1 * nb01 + src_i0 * nb00; - - const float * src_ptr = (const float *) ((char *) src0->data + src_idx); - dst_ptr[dst_idx] = *src_ptr; - } - } - } - } - } -} - -void ggml_compute_forward_pad(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_pad_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } 
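// Illustrative sketch (not part of this patch): the circular branch of
// ggml_compute_forward_pad_f32 above maps every destination index back into the source
// with ggml_wrap_around. Its definition is not shown in this hunk, so wrap() below
// assumes a non-negative modulo; pad_circular_1d is a hypothetical 1D reference.
#include <cstdint>
#include <vector>

static int64_t wrap(int64_t i, int64_t n) {
    return ((i % n) + n) % n;                 // assumed semantics: always lands in [0, n)
}

static std::vector<float> pad_circular_1d(const std::vector<float> & src, int lp, int rp) {
    const int64_t n = (int64_t) src.size();
    std::vector<float> dst(src.size() + lp + rp);
    for (int64_t i = 0; i < (int64_t) dst.size(); ++i) {
        dst[i] = src[wrap(i - lp, n)];        // same index mapping as src_i0 = ggml_wrap_around(i0 - lp0, ne00)
    }
    return dst;
}
// e.g. pad_circular_1d({0, 1, 2, 3}, 2, 1) -> {2, 3, 0, 1, 2, 3, 0}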
- } -} - -// ggml_compute_forward_pad_reflect_1d - -void ggml_compute_forward_pad_reflect_1d(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_ASSERT(src0->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); - - const int ith = params->ith; - const int nth = params->nth; - - const int32_t * opts = (const int32_t *) dst->op_params; - const int p0 = opts[0]; - const int p1 = opts[1]; - - GGML_TENSOR_UNARY_OP_LOCALS - - for (int64_t i3 = 0; i3 < ne3; i3++) { - for (int64_t i2 = 0; i2 < ne2; i2++) { - for (int64_t i1 = ith; i1 < ne1; i1 += nth) { - float * left = (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1 + p0 * nb0); - float * right = (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1 + (ne0 - p1 - 1) * nb0); - - ggml_vec_cpy_f32(ne00, left, (float *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01)); - - for (int i0 = 1; i0 <= p0; i0++) { - left[-i0] = left[i0]; - } - for (int i0 = 1; i0 <= p1; i0++) { - right[i0] = right[-i0]; - } - } - } - } -} - -// ggml_compute_forward_roll - -static int64_t ggml_wrap_index(int64_t i, int64_t ne) { - if (i < 0) { - return i + ne; - } else if (i >= ne) { - return i - ne; - } - return i; -} - -static void ggml_compute_forward_roll_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const float * src_data = (const float *) src0->data; - float * dst_data = (float *) dst->data; - - GGML_TENSOR_UNARY_OP_LOCALS - - const int s0 = ggml_get_op_params_i32(dst, 0); - const int s1 = ggml_get_op_params_i32(dst, 1); - const int s2 = ggml_get_op_params_i32(dst, 2); - const int s3 = ggml_get_op_params_i32(dst, 3); - - const int64_t total = ne1 * ne2 * ne3; - const int64_t per_thread = (total + params->nth) / params->nth; - const int64_t start = params->ith * per_thread; - const int64_t end = std::min(start + per_thread, total); - - for (int64_t i = start; i < end; ++i) { - const int64_t i1 = i % ne1; - const int64_t i2 = (i / ne1) % ne2; - const int64_t i3 = i / (ne2 * ne1); - float * dst_row = dst_data + (i3 * nb3 + i2 * nb2 + i1 * nb1) / sizeof(float); - - const int64_t i01 = ggml_wrap_index(i1 - s1, ne01); - const int64_t i02 = ggml_wrap_index(i2 - s2, ne02); - const int64_t i03 = ggml_wrap_index(i3 - s3, ne03); - const float * src_row = src_data + (i03 * nb03 + i02 * nb02 + i01 * nb01) / sizeof(float); - - const int64_t s = ggml_wrap_index(-s0, ne00); - const int64_t n = ne00 - s; - ggml_vec_cpy_f32(n, dst_row, src_row + s); - ggml_vec_cpy_f32(s, dst_row + n, src_row); - } -} - -void ggml_compute_forward_roll(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_roll_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_arange - -static void ggml_compute_forward_arange_f32(const ggml_compute_params * params, ggml_tensor * dst) { - GGML_ASSERT(dst->nb[0] == sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - const float start = ggml_get_op_params_f32(dst, 0); - const float stop = ggml_get_op_params_f32(dst, 1); - const float step = ggml_get_op_params_f32(dst, 2); - - const int64_t steps = (int64_t) ceilf((stop - start) / step); - - GGML_ASSERT(ggml_nelements(dst) == steps); - - for (int64_t i = ith; i < steps; i += nth) { - float value = start + step * i; - ((float *) dst->data)[i] = value; - } 
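// Illustrative sketch (not part of this patch): ggml_compute_forward_roll_f32 above
// rotates each row by s0 using two ggml_vec_cpy_f32 calls. rotate_row is a hypothetical
// single-row reference; it uses a full modulo, while ggml_wrap_index only needs to fold
// an index that is off by at most one period.
#include <algorithm>
#include <cstdint>
#include <vector>

static std::vector<float> rotate_row(const std::vector<float> & src, int64_t s0) {
    const int64_t ne = (int64_t) src.size();
    const int64_t s  = ((-s0 % ne) + ne) % ne;                        // first source element, cf. ggml_wrap_index(-s0, ne00)
    std::vector<float> dst(src.size());
    std::copy(src.begin() + s, src.end(), dst.begin());               // dst[0 .. ne-s)  = src[s .. ne)
    std::copy(src.begin(), src.begin() + s, dst.begin() + (ne - s));  // dst[ne-s .. ne) = src[0 .. s)
    return dst;
}
// e.g. rotate_row({0, 1, 2, 3, 4}, 2) -> {3, 4, 0, 1, 2}  (element 0 moves to index 2)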
-} - -void ggml_compute_forward_arange(const ggml_compute_params * params, ggml_tensor * dst) { - switch (dst->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_arange_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -static void ggml_compute_forward_timestep_embedding_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_ASSERT(src0->nb[0] == sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - GGML_TENSOR_UNARY_OP_LOCALS - - const int dim = ggml_get_op_params_i32(dst, 0); - const int max_period = ggml_get_op_params_i32(dst, 1); - - int half = dim / 2; - - for (int64_t i = 0; i < ne00; i++) { - float * embed_data = (float *) ((char *) dst->data + i * nb1); - for (int64_t j = ith; j < half; j += nth) { - float timestep = ((float *) src0->data)[i]; - float freq = (float) expf(-logf(max_period) * j / half); - float arg = timestep * freq; - embed_data[j] = cosf(arg); - embed_data[j + half] = sinf(arg); - } - if (dim % 2 != 0 && ith == 0) { - embed_data[2 * half] = 0.f; - } - } -} - -void ggml_compute_forward_timestep_embedding(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_timestep_embedding_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_argsort - -template struct cmp_argsort { - const float * data; - - bool operator()(int32_t a, int32_t b) const { - if constexpr (order == GGML_SORT_ORDER_ASC) { - return data[a] < data[b]; - } else { - return data[a] > data[b]; - } - } -}; - -static void ggml_compute_forward_argsort_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_TENSOR_UNARY_OP_LOCALS - - GGML_ASSERT(nb0 == sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - const int64_t nr = ggml_nrows(src0); - - ggml_sort_order order = (ggml_sort_order) ggml_get_op_params_i32(dst, 0); - - for (int64_t i = ith; i < nr; i += nth) { - const float * src_data = (float *) ((char *) src0->data + i * nb01); - - int32_t * dst_data = (int32_t *) ((char *) dst->data + i * nb1); - - for (int64_t j = 0; j < ne0; j++) { - dst_data[j] = j; - } - - switch (order) { - case GGML_SORT_ORDER_ASC: - std::sort(dst_data, dst_data + ne0, cmp_argsort{ src_data }); - break; - - case GGML_SORT_ORDER_DESC: - std::sort(dst_data, dst_data + ne0, cmp_argsort{ src_data }); - break; - - default: - GGML_ABORT("invalid sort order"); - } - } -} - -void ggml_compute_forward_argsort(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_argsort_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_top_k - -struct cmp_top_k { - const float * data; - - bool operator()(int32_t a, int32_t b) const { return data[a] > data[b]; } -}; - -static void ggml_compute_forward_top_k_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_TENSOR_UNARY_OP_LOCALS - - GGML_ASSERT(nb0 == sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - const int64_t nr = ggml_nrows(src0); - - const int top_k = ne0; - - int32_t * tmp = (int32_t *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith; - 
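// Illustrative sketch (not part of this patch): the loop below fills tmp with the indices
// 0..ne00-1 and partially sorts them by value, keeping only the first top_k entries (their
// relative order is deliberately unspecified, hence the swap at the end). top_k_indices is
// a hypothetical standalone equivalent.
#include <algorithm>
#include <cstdint>
#include <numeric>
#include <vector>

static std::vector<int32_t> top_k_indices(const std::vector<float> & data, int k) {
    std::vector<int32_t> idx(data.size());
    std::iota(idx.begin(), idx.end(), 0);                      // 0, 1, 2, ...
    std::partial_sort(idx.begin(), idx.begin() + k, idx.end(),
                      [&](int32_t a, int32_t b) { return data[a] > data[b]; });
    idx.resize(k);                                             // keep the indices of the k largest values
    return idx;
}
// e.g. top_k_indices({0.1f, 0.7f, 0.3f, 0.9f}, 2) -> {3, 1}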
- for (int64_t i = ith; i < nr; i += nth) { - const float * src_data = (float *) ((char *) src0->data + i * nb01); - - for (int64_t j = 0; j < ne00; j++) { - tmp[j] = j; - } - - std::partial_sort(tmp, tmp + top_k, tmp + ne00, cmp_top_k{ src_data }); - - int32_t * dst_data = (int32_t *) ((char *) dst->data + i * nb1); - - std::copy(tmp, tmp + top_k, dst_data); - - // emphasize that the order is not important - if (top_k > 1) { - std::swap(dst_data[0], dst_data[1]); - } - } -} - -void ggml_compute_forward_top_k(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_top_k_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_flash_attn_ext - -static void ggml_compute_forward_flash_attn_ext_f16_one_chunk(const ggml_compute_params * params, - ggml_tensor * dst, - int ir0, - int ir1) { - const ggml_tensor * q = dst->src[0]; - const ggml_tensor * k = dst->src[1]; - const ggml_tensor * v = dst->src[2]; - const ggml_tensor * mask = dst->src[3]; - const ggml_tensor * sinks = dst->src[4]; - - GGML_TENSOR_LOCALS(int64_t, neq, q, ne) - GGML_TENSOR_LOCALS(size_t, nbq, q, nb) - GGML_TENSOR_LOCALS(int64_t, nek, k, ne) - GGML_TENSOR_LOCALS(size_t, nbk, k, nb) - GGML_TENSOR_LOCALS(int64_t, nev, v, ne) - GGML_TENSOR_LOCALS(size_t, nbv, v, nb) - GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) - GGML_TENSOR_LOCALS(size_t, nb, dst, nb) - - const int64_t DK = nek0; - const int64_t DV = nev0; - const int64_t N = neq1; - - GGML_ASSERT(ne0 == DV); - GGML_ASSERT(ne2 == N); - - // input tensor rows must be contiguous - GGML_ASSERT(nbq0 == ggml_type_size(q->type)); - GGML_ASSERT(nbk0 == ggml_type_size(k->type)); - GGML_ASSERT(nbv0 == ggml_type_size(v->type)); - - GGML_ASSERT(neq0 == DK); - GGML_ASSERT(nek0 == DK); - GGML_ASSERT(nev0 == DV); - - GGML_ASSERT(neq1 == N); - - // dst cannot be transposed or permuted - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb0 <= nb1); - GGML_ASSERT(nb1 <= nb2); - GGML_ASSERT(nb2 <= nb3); - - // broadcast factors - const int64_t rk2 = neq2 / nek2; - const int64_t rk3 = neq3 / nek3; - - const int64_t rv2 = neq2 / nev2; - const int64_t rv3 = neq3 / nev3; - - // parallelize by q rows using ggml_vec_dot_f32 - - float scale = 1.0f; - float max_bias = 0.0f; - float logit_softcap = 0.0f; - - memcpy(&scale, (float *) dst->op_params + 0, sizeof(float)); - memcpy(&max_bias, (float *) dst->op_params + 1, sizeof(float)); - memcpy(&logit_softcap, (float *) dst->op_params + 2, sizeof(float)); - - if (logit_softcap != 0) { - scale /= logit_softcap; - } - - const uint32_t n_head = neq2; - const uint32_t n_head_log2 = 1u << (uint32_t) floor(log2(n_head)); - - const float m0 = powf(2.0f, -(max_bias) / n_head_log2); - const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2); - - const ggml_type k_vec_dot_type = ggml_get_type_traits_cpu(k->type)->vec_dot_type; - const ggml_from_float_t q_to_vec_dot = ggml_get_type_traits_cpu(k_vec_dot_type)->from_float; - const ggml_vec_dot_t kq_vec_dot = ggml_get_type_traits_cpu(k->type)->vec_dot; - const ggml_to_float_t v_to_float = ggml_get_type_traits(v->type)->to_float; - - GGML_ASSERT((q_to_vec_dot) && "fattn: unsupported K-type"); - GGML_ASSERT((v->type == GGML_TYPE_F32 || v_to_float) && "fattn: unsupported V-type"); - - int ith = params->ith; - - // loop over n_batch and n_head - for (int ir = ir0; ir < ir1; ++ir) { - // q indices - const int iq3 = ir / (neq2 * neq1); - const int iq2 = 
(ir - iq3 * neq2 * neq1) / neq1; - const int iq1 = (ir - iq3 * neq2 * neq1 - iq2 * neq1); - - const uint32_t h = iq2; // head index - const float slope = - (max_bias > 0.0f) ? h < n_head_log2 ? powf(m0, h + 1) : powf(m1, 2 * (h - n_head_log2) + 1) : 1.0f; - - float S = 0.0f; // sum - float M = -INFINITY; // maximum KQ value - - float * VKQ32 = - (float *) params->wdata + ith * (1 * DK + 2 * DV + CACHE_LINE_SIZE_F32); // FP32 VKQ accumulator - float * V32 = (VKQ32 + 1 * DV); // (temporary) FP32 V buffer - ggml_fp16_t * VKQ16 = (ggml_fp16_t *) (VKQ32 + 1 * DV); // (temporary) FP16 VKQ accumulator - ggml_fp16_t * Q_q = (ggml_fp16_t *) (VKQ32 + 2 * DV); // (temporary) buffer for Q converted to quantized/FP16 - - if (v->type == GGML_TYPE_F16) { - memset(VKQ16, 0, DV * sizeof(ggml_fp16_t)); - } else { - memset(VKQ32, 0, DV * sizeof(float)); - } - - const ggml_fp16_t * mp = - mask ? (ggml_fp16_t *) ((char *) mask->data + iq1 * mask->nb[1] + (iq2 % mask->ne[2]) * mask->nb[2] + - (iq3 % mask->ne[3]) * mask->nb[3]) : - NULL; - - // k indices - const int ik3 = iq3 / rk3; - const int ik2 = iq2 / rk2; - - // v indices - const int iv3 = iq3 / rv3; - const int iv2 = iq2 / rv2; - - const float * pq = (const float *) ((char *) q->data + (iq1 * nbq1 + iq2 * nbq2 + iq3 * nbq3)); - q_to_vec_dot(pq, Q_q, DK); - - // online softmax / attention - // loop over n_kv and n_head_kv - // ref: https://arxiv.org/pdf/2112.05682.pdf - for (int64_t ic = 0; ic < nek1; ++ic) { - const float mv = mp ? slope * GGML_CPU_FP16_TO_FP32(mp[ic]) : 0.0f; - if (mv == -INFINITY) { - continue; - } - - float s; // KQ value - - const char * k_data = (const char *) k->data + (ic * nbk1 + ik2 * nbk2 + ik3 * nbk3); - kq_vec_dot(DK, &s, 0, k_data, 0, Q_q, 0, 1); - - s = s * scale; // scale KQ value - - if (logit_softcap != 0.0f) { - s = logit_softcap * tanhf(s); - } - - s += mv; // apply mask - - const float Mold = M; - - float ms = 1.0f; // upon new higher max val, scale VKQ and KQ sum with this value - float vs = 1.0f; // post-softmax KQ value, expf(s - M) - - const char * v_data = ((const char *) v->data + (ic * nbv1 + iv2 * nbv2 + iv3 * nbv3)); - - if (v->type == GGML_TYPE_F16) { - if (s > M) { - // s is new maximum, ms < 1.0f, vs == expf(s - s) == 1.0f - M = s; - ms = expf(Mold - M); - - // V = V*expf(Mold - M) - ggml_vec_scale_f16(DV, VKQ16, ms); - } else { - // no new maximum, ms == 1.0f, vs != 1.0f - vs = expf(s - M); - } - - // V += v*expf(s - M) - ggml_vec_mad_f16(DV, VKQ16, (const ggml_fp16_t *) v_data, vs); - } else { - if (s > M) { - // s is new maximum, ms < 1.0f, vs == expf(s - s) == 1.0f - M = s; - ms = expf(Mold - M); - - // V = V*expf(Mold - M) - ggml_vec_scale_f32(DV, VKQ32, ms); - } else { - // no new maximum, ms == 1.0f, vs != 1.0f - vs = expf(s - M); - } - - // V += v*expf(s - M) - if (v_to_float) { - v_to_float(v_data, V32, DV); - ggml_vec_mad_f32(DV, VKQ32, V32, vs); - } else { - // V is F32 - ggml_vec_mad_f32(DV, VKQ32, (const float *) v_data, vs); - } - } - - S = S * ms + vs; // scale and increment sum with partial sum - } - - if (v->type == GGML_TYPE_F16) { - for (int64_t d = 0; d < DV; ++d) { - VKQ32[d] = GGML_CPU_FP16_TO_FP32(VKQ16[d]); - } - } - - // sinks - if (sinks) { - const float s = ((float *) ((char *) sinks->data))[h]; - - float ms = 1.0f; - float vs = 1.0f; - - if (s > M) { - ms = expf(M - s); - ggml_vec_scale_f32(DV, VKQ32, ms); - } else { - vs = expf(s - M); - } - - S = S * ms + vs; - } - - // V /= S - const float S_inv = S == 0.0f ? 
0.0f : 1.0f / S; - ggml_vec_scale_f32(DV, VKQ32, S_inv); - - // dst indices - const int i1 = iq1; - const int i2 = iq2; - const int i3 = iq3; - - // original - //memcpy((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3), V, nev0*sizeof(float)); - - // permute(0, 2, 1, 3) - memcpy((char *) dst->data + (i3 * ne2 * ne1 + i2 + i1 * ne1) * nb1, VKQ32, nb1); - } -} - -static void ggml_compute_forward_flash_attn_ext_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * q = dst->src[0]; - const ggml_tensor * k = dst->src[1]; - const ggml_tensor * v = dst->src[2]; - - GGML_TENSOR_LOCALS(int64_t, neq, q, ne) - GGML_TENSOR_LOCALS(size_t, nbq, q, nb) - GGML_TENSOR_LOCALS(int64_t, nek, k, ne) - GGML_TENSOR_LOCALS(size_t, nbk, k, nb) - GGML_TENSOR_LOCALS(int64_t, nev, v, ne) - GGML_TENSOR_LOCALS(size_t, nbv, v, nb) - GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) - GGML_TENSOR_LOCALS(size_t, nb, dst, nb) - - const int64_t DK = nek0; - const int64_t DV = nev0; - const int64_t N = neq1; - - GGML_ASSERT(ne0 == DV); - GGML_ASSERT(ne2 == N); - - // input tensor rows must be contiguous - GGML_ASSERT(nbq0 == ggml_type_size(q->type)); - GGML_ASSERT(nbk0 == ggml_type_size(k->type)); - GGML_ASSERT(nbv0 == ggml_type_size(v->type)); - - GGML_ASSERT(neq0 == DK); - GGML_ASSERT(nek0 == DK); - GGML_ASSERT(nev0 == DV); - - GGML_ASSERT(neq1 == N); - - // dst cannot be transposed or permuted - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb0 <= nb1); - GGML_ASSERT(nb1 <= nb2); - GGML_ASSERT(nb2 <= nb3); - - // parallelize by q rows using ggml_vec_dot_f32 - - // total rows in q - const int64_t nr = neq1 * neq2 * neq3; - - // rows per thread - const int ith = params->ith; - const int nth = params->nth; - - // disable for NUMA - const bool disable_chunking = ggml_is_numa(); - - // 4x chunks per thread - int nth_scaled = nth * 4; - int64_t chunk_size = (nr + nth_scaled - 1) / nth_scaled; - int64_t nchunk = (nr + chunk_size - 1) / chunk_size; - - if (nth == 1 || nchunk < nth || disable_chunking) { - nchunk = nth; - } - - if (ith == 0) { - // Every thread starts at ith, so the first unprocessed chunk is nth. This save a bit of coordination right at the start. - ggml_threadpool_chunk_set(params->threadpool, nth); - } - - ggml_barrier(params->threadpool); - - // The number of elements in each chunk - const int64_t dr = (nr + nchunk - 1) / nchunk; - - // The first chunk comes from our thread_id, the rest will get auto-assigned. 
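// Illustrative sketch (not part of this patch): the loop below starts every thread on
// chunk ith and then pulls further chunks from a shared counter that was pre-set to nth.
// ggml_threadpool_chunk_set/add are not shown in this hunk, so the counter is modelled
// here with a std::atomic whose fetch_add returns the previous value; worker and
// process_chunk are hypothetical.
#include <atomic>
#include <cstdint>

static void worker(std::atomic<int64_t> & next_chunk,          // pre-set to nth before the workers start
                   int ith, int64_t nchunk,
                   void (*process_chunk)(int64_t)) {
    for (int64_t chunk = ith; chunk < nchunk; chunk = next_chunk.fetch_add(1)) {
        process_chunk(chunk);                                   // chunks nth, nth+1, ... are handed out on demand
    }
}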
- int current_chunk = ith; - - while (current_chunk < nchunk) { - const int64_t ir0 = dr * current_chunk; - const int64_t ir1 = MIN(ir0 + dr, nr); - - ggml_compute_forward_flash_attn_ext_f16_one_chunk(params, dst, ir0, ir1); - - current_chunk = ggml_threadpool_chunk_add(params->threadpool, 1); - } -} - -void ggml_compute_forward_flash_attn_ext(const ggml_compute_params * params, ggml_tensor * dst) { - switch (dst->op_params[3]) { - case GGML_PREC_DEFAULT: - case GGML_PREC_F32: - { - // uses F32 accumulators - ggml_compute_forward_flash_attn_ext_f16(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_flash_attn_back - -static void ggml_compute_forward_flash_attn_back_f32(const ggml_compute_params * params, - const bool masked, - ggml_tensor * dst) { - const ggml_tensor * q = dst->src[0]; - const ggml_tensor * k = dst->src[1]; - const ggml_tensor * v = dst->src[2]; - const ggml_tensor * d = dst->src[3]; - - GGML_TENSOR_LOCALS(int64_t, neq, q, ne) - GGML_TENSOR_LOCALS(size_t, nbq, q, nb) - GGML_TENSOR_LOCALS(int64_t, nek, k, ne) - GGML_TENSOR_LOCALS(size_t, nbk, k, nb) - GGML_TENSOR_LOCALS(int64_t, nev, v, ne) - GGML_TENSOR_LOCALS(size_t, nbv, v, nb) - GGML_TENSOR_LOCALS(int64_t, ned, d, ne) - GGML_TENSOR_LOCALS(size_t, nbd, d, nb) - GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) - GGML_TENSOR_LOCALS(size_t, nb, dst, nb) - - const int ith = params->ith; - const int nth = params->nth; - - const int64_t D = neq0; - const int64_t N = neq1; - const int64_t P = nek1 - N; - const int64_t M = P + N; - - const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL); - const int mxDM = MAX(D, Mup); - - // GGML_ASSERT(ne0 == D); - // GGML_ASSERT(ne1 == N); - GGML_ASSERT(P >= 0); - - GGML_ASSERT(nbq0 == sizeof(float)); - GGML_ASSERT(nbk0 == sizeof(float)); - GGML_ASSERT(nbv0 == sizeof(float)); - - GGML_ASSERT(neq0 == D); - GGML_ASSERT(nek0 == D); - GGML_ASSERT(nev1 == D); - GGML_ASSERT(ned0 == D); - - GGML_ASSERT(neq1 == N); - GGML_ASSERT(nek1 == N + P); - GGML_ASSERT(nev1 == D); - GGML_ASSERT(ned1 == N); - - // dst cannot be transposed or permuted - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb0 <= nb1); - GGML_ASSERT(nb1 <= nb2); - GGML_ASSERT(nb2 <= nb3); - - if (ith == 0) { - memset(dst->data, 0, nb0 * ne0 * ne1 * ne2 * ne3); - } - ggml_barrier(params->threadpool); - - const int64_t elem_q = ggml_nelements(q); - const int64_t elem_k = ggml_nelements(k); - - ggml_type result_type = dst->type; - GGML_ASSERT(ggml_blck_size(result_type) == 1); - const size_t tsize = ggml_type_size(result_type); - - const size_t offs_q = 0; - const size_t offs_k = offs_q + GGML_PAD(elem_q * tsize, GGML_MEM_ALIGN); - const size_t offs_v = offs_k + GGML_PAD(elem_k * tsize, GGML_MEM_ALIGN); - - void * grad_q = (char *) dst->data; - void * grad_k = (char *) dst->data + offs_k; - void * grad_v = (char *) dst->data + offs_v; - - const size_t nbgq1 = nb0 * neq0; - const size_t nbgq2 = nb0 * neq0 * neq1; - const size_t nbgq3 = nb0 * neq0 * neq1 * neq2; - - const size_t nbgk1 = nb0 * nek0; - const size_t nbgk2 = nb0 * nek0 * nek1; - const size_t nbgk3 = nb0 * nek0 * nek1 * neq2; - - const size_t nbgv1 = nb0 * nev0; - const size_t nbgv2 = nb0 * nev0 * nev1; - const size_t nbgv3 = nb0 * nev0 * nev1 * neq2; - - // parallelize by k rows using ggml_vec_dot_f32 - - // total rows in k - const int nr = nek2 * nek3; - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - const float scale 
= 1.0f / sqrtf(D); - - //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale); - - // how often k2 (and v2) is repeated in q2 - int nrep = neq2 / nek2; - - for (int ir = ir0; ir < ir1; ++ir) { - // q indices - const int ik3 = ir / (nek2); - const int ik2 = ir - ik3 * nek2; - - const int iq3 = ik3; - const int id3 = ik3; - const int iv3 = ik3; - const int iv2 = ik2; - - for (int irep = 0; irep < nrep; ++irep) { - const int iq2 = ik2 + irep * nek2; - const int id2 = iq2; - - // (ik2 + irep*nek2) % nek2 == ik2 - for (int iq1 = 0; iq1 < neq1; ++iq1) { - const int id1 = iq1; - - // not sure about CACHE_LINE_SIZE_F32.. - // - maybe it must not be multiplied by 2 and excluded from .. in SM 1*(..) offset? - float * S = - (float *) params->wdata + ith * 2 * (mxDM + CACHE_LINE_SIZE_F32) + 0 * (mxDM + CACHE_LINE_SIZE_F32); - float * SM = - (float *) params->wdata + ith * 2 * (mxDM + CACHE_LINE_SIZE_F32) + 1 * (mxDM + CACHE_LINE_SIZE_F32); - - for (int i = M; i < Mup; ++i) { - S[i] = -INFINITY; - } - - const int64_t masked_begin = masked ? (P + iq1 + 1) : M; - for (int64_t ic = 0; ic < masked_begin; ++ic) { - // k indices - const int ik1 = ic; - - // S indices - const int i1 = ik1; - - ggml_vec_dot_f32(neq0, S + i1, 0, - (float *) ((char *) k->data + (ik1 * nbk1 + ik2 * nbk2 + ik3 * nbk3)), 0, - (float *) ((char *) q->data + (iq1 * nbq1 + iq2 * nbq2 + iq3 * nbq3)), 0, 1); - } - - // scale - ggml_vec_scale_f32(masked_begin, S, scale); - - for (int64_t i = masked_begin; i < M; i++) { - S[i] = -INFINITY; - } - - // softmax - // exclude known -INF S[..] values from max and loop - // dont forget to set their SM values to zero - { - float max = -INFINITY; - ggml_vec_max_f32(masked_begin, &max, S); - - ggml_float sum = 0.0; - { -#ifdef GGML_SOFT_MAX_ACCELERATE - max = -max; - vDSP_vsadd(SM, 1, &max, SM, 1, Mup); - vvexpf(SM, SM, &Mup); - ggml_vec_sum_f32(Mup, &sum, SM); -#else - sum = ggml_vec_soft_max_f32(Mup, SM, S, max); -#endif - } - - assert(sum > 0.0); - - sum = 1.0 / sum; - ggml_vec_scale_f32(masked_begin, SM, sum); - } - - // step-by-step explanation - { - // forward-process shape grads from backward process - // parallel_for ik2,ik3: - // for irep: - // iq2 = ik2 + irep*nek2 - // k[:D,:M,:,:] [D,M,:,:] grad[k][:D,:M,ik2,ik3] += grad[kcur] - // q[:D,:N,:,:] [D,N,:,:] grad[q][:D,iq1,iq2,iq3] += grad[qcur] - // v[:M,:D,:,:] [M,D,:,:] grad[v][:M,:D,iv2,iv3] += grad[vcur] - // for iq1: - // kcur = k[:D,:M,ik2,ik3] [D,M,1,1] grad[kcur] = grad[S1].T @ qcur - // qcur = q[:D,iq1,iq2,iq3] [D,1,1,1] grad[qcur] = grad[S1] @ kcur - // vcur = v[:M,:D,iv2,iv3] [M,D,1,1] grad[vcur] = grad[S5].T @ S4 - // S0 = -Inf [D,1,1,1] - // ~S1[i] = dot(kcur[:D,i], qcur) - // S1 = qcur @ kcur.T [M,1,1,1] grad[S1] = grad[S2] * scale - // S2 = S1 * scale [M,1,1,1] grad[S2] = diag_mask_zero(grad[S3], P) - // S3 = diag_mask_inf(S2, P) [M,1,1,1] grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4])) - // S4 = softmax(S3) [M,1,1,1] grad[S4] = grad[S5] @ vcur - // ~S5[i] = dot(vcur[:,i], S4) - // S5 = S4 @ vcur.T [D,1,1,1] grad[S5] = d[:D,id1,id2,id3] - // ~dst[i,iq1,iq2,iq3] = S5[i] ^ - // dst[:D,iq1,iq2,iq3] = S5 | grad[dst[:D,iq1,iq2,iq3]] = d[:D,id1,id2,id3] - // dst backward-/ grad[dst] = d - // - // output gradients with their dependencies: - // - // grad[kcur] = grad[S1].T @ qcur - // grad[S1] = diag_mask_zero(grad[S3], P) * scale - // grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4])) - // grad[S4] = grad[S5] @ vcur - // grad[S4] = d[:D,id1,id2,id3] @ vcur - // grad[qcur] = grad[S1] @ kcur - // grad[vcur] = 
grad[S5].T @ S4 - // grad[vcur] = d[:D,id1,id2,id3].T @ S4 - // - // in post-order: - // - // S1 = qcur @ kcur.T - // S2 = S1 * scale - // S3 = diag_mask_inf(S2, P) - // S4 = softmax(S3) - // grad[S4] = d[:D,id1,id2,id3] @ vcur - // grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4])) - // grad[S1] = diag_mask_zero(grad[S3], P) * scale - // grad[qcur] = grad[S1] @ kcur - // grad[kcur] = grad[S1].T @ qcur - // grad[vcur] = d[:D,id1,id2,id3].T @ S4 - // - // using less variables (SM=S4): - // - // S = diag_mask_inf(qcur @ kcur.T * scale, P) - // SM = softmax(S) - // S = d[:D,iq1,iq2,iq3] @ vcur - // dot_SM_gradSM = dot(SM, S) - // S = SM * (S - dot(SM, S)) - // S = diag_mask_zero(S, P) * scale - // - // grad[q][:D,iq1,iq2,iq3] += S @ kcur - // grad[k][:D,:M,ik2,ik3] += S.T @ qcur - // grad[v][:M,:D,iv2,iv3] += d[:D,id1,id2,id3].T @ SM - } - - // S = gradSM = d[:D,id1,id2,id3] @ vcur[:,:,iv2,iv3] - // S = d[:D,id1,id2,id3] @ vcur[:,:,iv2,iv3] - // for ic: - // S[:M] += vcur[:M,ic,iv2,iv3] * d[ic,id1,id2,id3] - // exclude known future zero S[..] values from operation - ggml_vec_set_f32(masked_begin, S, 0); - for (int64_t ic = 0; ic < D; ++ic) { - ggml_vec_mad_f32( - masked_begin, S, (float *) ((char *) v->data + (ic * nbv1 + iv2 * nbv2 + iv3 * nbv3)), - *(float *) ((char *) d->data + (ic * nbd0 + id1 * nbd1 + id2 * nbd2 + id3 * nbd3))); - } - - // S = SM * (S - dot(SM, S)) - float dot_SM_gradSM = 0; - ggml_vec_dot_f32(masked_begin, &dot_SM_gradSM, 0, SM, 0, S, 0, 1); - ggml_vec_acc1_f32(M, S, -dot_SM_gradSM); - ggml_vec_mul_f32(masked_begin, S, S, SM); - - // S = diag_mask_zero(S, P) * scale - // already done by above ggml_vec_set_f32 - - // exclude known zero S[..] values from operation - ggml_vec_scale_f32(masked_begin, S, scale); - - // S shape [M,1] - // SM shape [M,1] - // kcur shape [D,M] - // qcur shape [D,1] - // vcur shape [M,D] - - // grad[q][:D,iq1,iq2,iq3] += S @ kcur - // grad[q][:D,iq1,iq2,iq3] += shape[M,1] @ shape[D,M] - // for ic: - // grad[q][:D,iq1,iq2,iq3] += S[ic] * kcur[:D,ic,ik2,ik3] - // exclude known zero S[..] values from loop - for (int64_t ic = 0; ic < masked_begin; ++ic) { - ggml_vec_mad_f32(D, (float *) ((char *) grad_q + (iq1 * nbgq1 + iq2 * nbgq2 + iq3 * nbgq3)), - (float *) ((char *) k->data + (ic * nbk1 + ik2 * nbk2 + ik3 * nbk3)), S[ic]); - } - - // grad[k][:D,:M,iq2,iq3] += S.T @ qcur - // for ic: - // grad[k][:D,ic,iq2,iq3] += S.T[0,ic] * qcur[:D,0] - // grad[k][:D,ic,iq2,iq3] += S[ic] * qcur[:D,0] - // exclude known zero S[..] values from loop - for (int64_t ic = 0; ic < masked_begin; ++ic) { - ggml_vec_mad_f32(D, (float *) ((char *) grad_k + (ic * nbgk1 + ik2 * nbgk2 + ik3 * nbgk3)), - (float *) ((char *) q->data + (iq1 * nbq1 + iq2 * nbq2 + iq3 * nbq3)), S[ic]); - } - - // grad[v][:M,:D,iv2,iv3] += d[:D,id1,id2,id3].T @ SM - // for ic: - // grad[v][:M,ic,iv2,iv3] += d[:D,id1,id2,id3].T[0,ic] * SM[:M] - // grad[v][:M,ic,iv2,iv3] += d[ic,id1,id2,id3] * SM[:M] - // exclude known zero SM[..] 
values from mad - for (int64_t ic = 0; ic < D; ++ic) { - ggml_vec_mad_f32( - masked_begin, (float *) ((char *) grad_v + (ic * nbgv1 + iv2 * nbgv2 + iv3 * nbgv3)), SM, - *(float *) ((char *) d->data + (ic * nbd0 + id1 * nbd1 + id2 * nbd2 + id3 * nbd3))); - } - } - } - } -} - -void ggml_compute_forward_flash_attn_back(const ggml_compute_params * params, const bool masked, ggml_tensor * dst) { - const ggml_tensor * q = dst->src[0]; - - switch (q->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_flash_attn_back_f32(params, masked, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_ssm_conv - -static void ggml_compute_forward_ssm_conv_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; // conv_x - const ggml_tensor * src1 = dst->src[1]; // conv1d.weight - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src1->ne[0]; // d_conv - const int ncs = src0->ne[0]; // d_conv - 1 + n_t - const int nr = src0->ne[1]; // d_inner - const int n_t = dst->ne[1]; // tokens per sequence - const int n_s = dst->ne[2]; // number of sequences in the batch - - GGML_ASSERT(dst->ne[0] == nr); - GGML_ASSERT(src0->nb[0] == sizeof(float)); - GGML_ASSERT(src1->nb[0] == sizeof(float)); - GGML_ASSERT(src0->nb[1] == src0->ne[0] * sizeof(float)); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - const int ir = ir1 - ir0; - - for (int i3 = 0; i3 < n_s; ++i3) { - for (int i2 = 0; i2 < n_t; ++i2) { - // {d_conv - 1 + n_t, d_inner, n_seqs} - // sliding window - const float * s = (const float *) ((const char *) src0->data + ir0 * (src0->nb[1]) + i2 * (src0->nb[0]) + - i3 * (src0->nb[2])); // {d_conv, d_inner, n_s} - const float * c = (const float *) ((const char *) src1->data + ir0 * (src1->nb[1])); // {d_conv, d_inner} - float * x = (float *) ((char *) dst->data + ir0 * (dst->nb[0]) + i2 * (dst->nb[1]) + - i3 * (dst->nb[2])); // {d_inner, n_t, n_s} - - // TODO: transpose the output for smaller strides for big batches? 
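// Illustrative sketch (not part of this patch): ggml_compute_forward_ssm_conv_f32 computes,
// for every channel row, a dot product between a length-d_conv window of the padded input
// and that channel's d_conv weights, sliding the window by one element per token.
// ssm_conv_row is a hypothetical single-row reference; conv_x holds d_conv - 1 + n_t values.
#include <vector>

static std::vector<float> ssm_conv_row(const std::vector<float> & conv_x,   // d_conv - 1 + n_t values
                                       const std::vector<float> & weight,   // d_conv values
                                       int n_t) {
    const int d_conv = (int) weight.size();
    std::vector<float> out(n_t);
    for (int t = 0; t < n_t; ++t) {
        float sum = 0.0f;                     // plain float accumulation, as in the rowwise loop below
        for (int i = 0; i < d_conv; ++i) {
            sum += conv_x[t + i] * weight[i]; // window [t, t + d_conv) of the padded row
        }
        out[t] = sum;
    }
    return out;
}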
- // d_inner - for (int i1 = 0; i1 < ir; ++i1) { - // rowwise dot product - // NOTE: not using ggml_vec_dot_f32, because its sum is in double precision - float sumf = 0.0f; - - // d_conv - for (int i0 = 0; i0 < nc; ++i0) { - sumf += s[i0 + i1 * ncs] * c[i0 + i1 * nc]; - } - x[i1] = sumf; - } - } - } -} - -void ggml_compute_forward_ssm_conv(const ggml_compute_params * params, ggml_tensor * dst) { - switch (dst->src[0]->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_ssm_conv_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_ssm_scan - -static void ggml_compute_forward_ssm_scan_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; // s {d_state, dim, n_head, n_seqs+} - const ggml_tensor * src1 = dst->src[1]; // x {dim, n_head, n_seq_tokens, n_seqs} - const ggml_tensor * src2 = dst->src[2]; // dt {n_head, n_seq_tokens, n_seqs} - const ggml_tensor * src3 = dst->src[3]; // A {d_state, n_head} or {1, n_head} - const ggml_tensor * src4 = dst->src[4]; // B {d_state, n_group, n_seq_tokens, n_seqs} - const ggml_tensor * src5 = dst->src[5]; // C {d_state, n_group, n_seq_tokens, n_seqs} - const ggml_tensor * src6 = dst->src[6]; // ids {n_seqs} - - const int ith = params->ith; - const int nth = params->nth; - - const int64_t nc = src0->ne[0]; // d_state - const int64_t nr = src0->ne[1]; // dim - const int64_t nh = src1->ne[1]; // n_head - const int64_t ng = src4->ne[1]; - const int64_t nt = src1->ne[2]; // number of tokens per sequence - const int64_t ns = src1->ne[3]; // number of sequences in the batch - - // can't use ggml_nbytes because src1 is not necessarily contiguous - const int64_t s_off = ggml_nelements(src1) * ggml_element_size(src1); - - GGML_ASSERT(ggml_nelements(src1) + nc * nr * nh * ns == ggml_nelements(dst)); - GGML_ASSERT(src0->nb[0] == sizeof(float)); - GGML_ASSERT(src1->nb[0] == sizeof(float)); - GGML_ASSERT(src2->nb[0] == sizeof(float)); - GGML_ASSERT(src3->nb[0] == sizeof(float)); - GGML_ASSERT(src4->nb[0] == sizeof(float)); - GGML_ASSERT(src5->nb[0] == sizeof(float)); - GGML_ASSERT(src6->nb[0] == sizeof(int32_t)); - GGML_ASSERT(nh % ng == 0); - - // heads per thread - const int dh = (nh + nth - 1) / nth; - - // head range for this thread - const int ih0 = dh * ith; - const int ih1 = MIN(ih0 + dh, nh); - - const int32_t * ids = (const int32_t *) src6->data; - - for (int i3 = 0; i3 < ns; ++i3) { - const float * s0 = - (const float *) ((const char *) src0->data + ids[i3] * (src0->nb[3])); // {d_state, dim, nh, ns} - float * s = (float *) ((char *) dst->data + i3 * (src0->nb[3]) + s_off); // {d_state, dim, nh, ns} - - for (int i2 = 0; i2 < nt; ++i2) { - const float * x = (const float *) ((const char *) src1->data + i2 * (src1->nb[2]) + - i3 * (src1->nb[3])); // {dim, nh, nt, ns} - const float * dt = - (const float *) ((const char *) src2->data + i2 * (src2->nb[1]) + i3 * (src2->nb[2])); // {nh, nt, ns} - const float * A = (const float *) ((const char *) src3->data); // {d_state, nh} or {1, nh} - const float * B = (const float *) ((const char *) src4->data + i2 * (src4->nb[2]) + - i3 * (src4->nb[3])); // {d_state, ng, nt, ns} - const float * C = (const float *) ((const char *) src5->data + i2 * (src5->nb[2]) + - i3 * (src5->nb[3])); // {d_state, ng, nt, ns} - float * y = (float *) ((char *) dst->data + i2 * (nh * nr * sizeof(float)) + - i3 * (nt * nh * nr * sizeof(float))); // {dim, nh, nt, ns} - - if (src3->ne[0] == 1) { - // Mamba-2 has a scalar decay factor per 
head; dA can be outside the state-wise loop - - // n_head - for (int h = ih0; h < ih1; ++h) { - // ref: https://github.com/state-spaces/mamba/blob/62db608da60f6fc790b8ed9f4b3225e95ca15fde/mamba_ssm/ops/triton/softplus.py#L16 - const float dt_soft_plus = ggml_compute_softplus_f32(dt[h]); - const float dA = expf(dt_soft_plus * A[h]); - const int g = h / (nh / ng); // repeat_interleave - - // dim - for (int i1 = 0; i1 < nr; ++i1) { - const int ii = i1 + h * nr; - const float x_dt = x[ii] * dt_soft_plus; - float sumf = 0.0f; -#if defined(GGML_SIMD) -# if defined(__ARM_FEATURE_SVE) - const int ggml_f32_epr = svcntw(); - const int ggml_f32_step = 1 * ggml_f32_epr; - - const int np = (nc & ~(ggml_f32_step - 1)); - - GGML_F32_VEC sum = GGML_F32_VEC_ZERO; - - GGML_F32_VEC adA = GGML_F32_VEC_SET1(dA); - GGML_F32_VEC axdt = GGML_F32_VEC_SET1(x_dt); - - for (int i = 0; i < np; i += ggml_f32_step) { - // TODO: maybe unroll more? - for (int j = 0; j < 1; j++) { - GGML_F32_VEC t0 = GGML_F32_VEC_LOAD(s0 + i + j * ggml_f32_epr + ii * nc); - GGML_F32_VEC t1 = GGML_F32_VEC_LOAD(B + i + j * ggml_f32_epr + g * nc); - GGML_F32_VEC t2 = GGML_F32_VEC_LOAD(C + i + j * ggml_f32_epr + g * nc); - - t0 = GGML_F32_VEC_MUL(t0, adA); - t1 = GGML_F32_VEC_MUL(t1, axdt); - - t0 = GGML_F32_VEC_ADD(t0, t1); - - sum = GGML_F32_VEC_FMA(sum, t0, t2); - - GGML_F32_VEC_STORE(s + i + j * ggml_f32_epr + ii * nc, t0); - } - } - - sumf = GGML_F32xt_REDUCE_ONE(sum); -# elif defined(__riscv_v_intrinsic) - // todo: RVV implementation - const int np = 0; -# else - const int np = (nc & ~(GGML_F32_STEP - 1)); - - GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; - - GGML_F32_VEC adA = GGML_F32_VEC_SET1(dA); - GGML_F32_VEC axdt = GGML_F32_VEC_SET1(x_dt); - - GGML_F32_VEC ax[GGML_F32_ARR]; - GGML_F32_VEC ay[GGML_F32_ARR]; - GGML_F32_VEC az[GGML_F32_ARR]; - - for (int i = 0; i < np; i += GGML_F32_STEP) { - for (int j = 0; j < GGML_F32_ARR; j++) { - ax[j] = GGML_F32_VEC_LOAD(s0 + i + j * GGML_F32_EPR + ii * nc); - ay[j] = GGML_F32_VEC_LOAD(B + i + j * GGML_F32_EPR + g * nc); - az[j] = GGML_F32_VEC_LOAD(C + i + j * GGML_F32_EPR + g * nc); - - ax[j] = GGML_F32_VEC_MUL(ax[j], adA); - ay[j] = GGML_F32_VEC_MUL(ay[j], axdt); - - ax[j] = GGML_F32_VEC_ADD(ax[j], ay[j]); - - sum[j] = GGML_F32_VEC_FMA(sum[j], ax[j], az[j]); - - GGML_F32_VEC_STORE(s + i + j * GGML_F32_EPR + ii * nc, ax[j]); - } - } - - // reduce sum0..sum3 to sum0 - GGML_F32_VEC_REDUCE(sumf, sum); -# endif -#else - const int np = 0; -#endif - // d_state - for (int i0 = np; i0 < nc; ++i0) { - const int i = i0 + ii * nc; - const int ig = i0 + g * nc; - // state = prev_state * dA + dB * x - const float state = (s0[i] * dA) + (B[ig] * x_dt); - // y = rowwise_dotprod(state, C) - sumf += state * C[ig]; - s[i] = state; - } - y[ii] = sumf; - } - } - } else { - // Mamba-1 has an element-wise decay factor for the states - - // n_head - for (int h = ih0; h < ih1; ++h) { - // ref: https://github.com/state-spaces/mamba/blob/62db608da60f6fc790b8ed9f4b3225e95ca15fde/mamba_ssm/ops/triton/softplus.py#L16 - const float dt_soft_plus = ggml_compute_softplus_f32(dt[h]); - const int g = h / (nh / ng); // repeat_interleave - - // dim - for (int i1 = 0; i1 < nr; ++i1) { - const int ii = i1 + h * nr; - const float x_dt = x[ii] * dt_soft_plus; -#if defined(__ARM_FEATURE_SVE) - svfloat32_t vx_dt = GGML_F32_VEC_SET1(x_dt); - svfloat32_t vdt_soft_plus = GGML_F32_VEC_SET1(dt_soft_plus); - svfloat32_t r1_vector = GGML_F32_VEC_ZERO; - - // d_state - // TODO: what happens when (d_state % svcntw()) != 0? 
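/* The SVE loop below and the scalar #else fallback implement the same per-state
 * recurrence (a sketch in the loop's own variable names; h is the head, i0 the
 * state index):
 *
 *     dt_soft_plus = softplus(dt[h])
 *     x_dt         = x[ii] * dt_soft_plus
 *     state[i0]    = prev_state[i0] * exp(dt_soft_plus * A[h][i0]) + B[i0] * x_dt
 *     y[ii]        = sum over i0 of state[i0] * C[i0]
 *
 * i.e. an element-wise decayed state update followed by a row-wise dot product with C. */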
- for (int64_t k = 0; k < nc; k += svcntw()) { - svfloat32_t vA = GGML_F32_VEC_LOAD(&A[h * nc + k]); - svfloat32_t vB = GGML_F32_VEC_LOAD(&B[k + g * nc]); - svfloat32_t vC = GGML_F32_VEC_LOAD(&C[k + g * nc]); - svfloat32_t vs0 = GGML_F32_VEC_LOAD(&s0[ii * nc + k]); - - svfloat32_t t1 = GGML_F32_VEC_MUL(vdt_soft_plus, vA); - t1 = exp_ps_sve(svptrue_b32(), t1); - svfloat32_t t2 = GGML_F32_VEC_MUL(vx_dt, vB); - - vs0 = GGML_F32_VEC_FMA(t2, vs0, t1); - r1_vector = GGML_F32_VEC_ADD(GGML_F32_VEC_MUL(vs0, vC), r1_vector); - - GGML_F32_VEC_STORE(&s[ii * nc + k], vs0); - } - y[ii] = GGML_F32xt_REDUCE_ONE(r1_vector); -#else - float sumf = 0.0f; - // NOTE: can't really use GGML_SIMD here because d_state is usually 16 - // and also because expf is used within the loop. - // d_state - for (int i0 = 0; i0 < nc; ++i0) { - const int i = i0 + ii * nc; - const int ig = i0 + g * nc; - // state = prev_state * dA + dB * x - const float state = (s0[i] * expf(dt_soft_plus * A[i0 + h * nc])) + (B[ig] * x_dt); - // y = rowwise_dotprod(state, C) - sumf += state * C[ig]; - s[i] = state; - } - y[ii] = sumf; -#endif - } - } - } - // use the output as the source when it's not the first token-wise iteration - s0 = s; - } - } -} - -void ggml_compute_forward_ssm_scan(const ggml_compute_params * params, ggml_tensor * dst) { - switch (dst->src[0]->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_ssm_scan_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_win_part - -static void ggml_compute_forward_win_part_f32(const ggml_compute_params * params, ggml_tensor * dst) { - GGML_UNUSED(params); - - const ggml_tensor * src0 = dst->src[0]; - - GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) - GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) - - const int32_t nep0 = ((const int32_t *) (dst->op_params))[0]; - const int32_t nep1 = ((const int32_t *) (dst->op_params))[1]; - const int32_t w = ((const int32_t *) (dst->op_params))[2]; - - assert(ne00 == ne0); - assert(ne3 == nep0 * nep1); - - // TODO: optimize / multi-thread - for (int py = 0; py < nep1; ++py) { - for (int px = 0; px < nep0; ++px) { - const int64_t i3 = py * nep0 + px; - for (int64_t i2 = 0; i2 < ne2; ++i2) { - for (int64_t i1 = 0; i1 < ne1; ++i1) { - for (int64_t i0 = 0; i0 < ne0; ++i0) { - const int64_t i02 = py * w + i2; - const int64_t i01 = px * w + i1; - const int64_t i00 = i0; - - const int64_t i = i3 * ne2 * ne1 * ne0 + i2 * ne1 * ne0 + i1 * ne0 + i0; - const int64_t j = i02 * ne01 * ne00 + i01 * ne00 + i00; - - if (py * w + i2 >= ne02 || px * w + i1 >= ne01) { - ((float *) dst->data)[i] = 0.0f; - } else { - ((float *) dst->data)[i] = ((float *) src0->data)[j]; - } - } - } - } - } - } -} - -void ggml_compute_forward_win_part(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_win_part_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_win_unpart - -static void ggml_compute_forward_win_unpart_f32(const ggml_compute_params * params, ggml_tensor * dst) { - GGML_UNUSED(params); - - const ggml_tensor * src0 = dst->src[0]; - - GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) - GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) - - const int32_t w = ((const int32_t *) (dst->op_params))[0]; - - // padding - const int px = (w - ne1 % w) % w; - //const int py = (w - ne2%w)%w; - - const int npx = (px + ne1) / w; - //const int npy = (py + ne2)/w; - - 
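/* ggml_compute_forward_win_part above and this unpart kernel form an inverse pair
 * (window partition / reverse, as used for windowed attention, e.g. in SAM-style
 * image encoders). In index form, a sketch using the loop variables of the two kernels:
 *
 *     part:   dst[py*nep0 + px][i2][i1][i0] = src[py*w + i2][px*w + i1][i0],
 *             or 0 when py*w + i2 or px*w + i1 falls outside the input (zero padding)
 *     unpart: dst[i2][i1][i0] = src[(i2/w)*npx + (i1/w)][i2 % w][i1 % w][i0]
 *
 * so unpart simply drops the padding that part introduced. */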
assert(ne0 == ne00); - - // TODO: optimize / multi-thread - for (int64_t i2 = 0; i2 < ne2; ++i2) { - for (int64_t i1 = 0; i1 < ne1; ++i1) { - for (int64_t i0 = 0; i0 < ne0; ++i0) { - const int ip2 = i2 / w; - const int ip1 = i1 / w; - - const int64_t i02 = i2 % w; - const int64_t i01 = i1 % w; - const int64_t i00 = i0; - - const int64_t i = (ip2 * npx + ip1) * ne02 * ne01 * ne00 + i02 * ne01 * ne00 + i01 * ne00 + i00; - const int64_t j = i2 * ne1 * ne0 + i1 * ne0 + i0; - - ((float *) dst->data)[j] = ((float *) src0->data)[i]; - } - } - } -} - -void ggml_compute_forward_win_unpart(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_win_unpart_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -//gmml_compute_forward_unary - -void ggml_compute_forward_unary(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_unary_op op = ggml_get_unary_op(dst); - - switch (op) { - case GGML_UNARY_OP_ABS: - { - ggml_compute_forward_abs(params, dst); - } - break; - case GGML_UNARY_OP_SGN: - { - ggml_compute_forward_sgn(params, dst); - } - break; - case GGML_UNARY_OP_NEG: - { - ggml_compute_forward_neg(params, dst); - } - break; - case GGML_UNARY_OP_STEP: - { - ggml_compute_forward_step(params, dst); - } - break; - case GGML_UNARY_OP_TANH: - { - ggml_compute_forward_tanh(params, dst); - } - break; - case GGML_UNARY_OP_ELU: - { - ggml_compute_forward_elu(params, dst); - } - break; - case GGML_UNARY_OP_RELU: - { - ggml_compute_forward_relu(params, dst); - } - break; - case GGML_UNARY_OP_SIGMOID: - { - ggml_compute_forward_sigmoid(params, dst); - } - break; - case GGML_UNARY_OP_GELU: - { - ggml_compute_forward_gelu(params, dst); - } - break; - case GGML_UNARY_OP_GELU_ERF: - { - ggml_compute_forward_gelu_erf(params, dst); - } - break; - case GGML_UNARY_OP_GELU_QUICK: - { - ggml_compute_forward_gelu_quick(params, dst); - } - break; - case GGML_UNARY_OP_SILU: - { - ggml_compute_forward_silu(params, dst); - } - break; - case GGML_UNARY_OP_HARDSWISH: - { - ggml_compute_forward_hardswish(params, dst); - } - break; - case GGML_UNARY_OP_HARDSIGMOID: - { - ggml_compute_forward_hardsigmoid(params, dst); - } - break; - case GGML_UNARY_OP_EXP: - { - ggml_compute_forward_exp(params, dst); - } - break; - case GGML_UNARY_OP_FLOOR: - { - ggml_compute_forward_floor(params, dst); - } - break; - case GGML_UNARY_OP_CEIL: - { - ggml_compute_forward_ceil(params, dst); - } - break; - case GGML_UNARY_OP_ROUND: - { - ggml_compute_forward_round(params, dst); - } - break; - case GGML_UNARY_OP_TRUNC: - { - ggml_compute_forward_trunc(params, dst); - } - break; - case GGML_UNARY_OP_XIELU: - { - ggml_compute_forward_xielu(params, dst); - } - break; - case GGML_UNARY_OP_EXPM1: - { - ggml_compute_forward_expm1(params, dst); - } - break; - case GGML_UNARY_OP_SOFTPLUS: - { - ggml_compute_forward_softplus(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -//ggml_compute_forward_glu - -void ggml_compute_forward_glu(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_glu_op op = ggml_get_glu_op(dst); - - switch (op) { - case GGML_GLU_OP_REGLU: - { - ggml_compute_forward_reglu(params, dst); - } - break; - case GGML_GLU_OP_GEGLU: - { - ggml_compute_forward_geglu(params, dst); - } - break; - case GGML_GLU_OP_SWIGLU: - { - ggml_compute_forward_swiglu(params, dst); - } - break; - case GGML_GLU_OP_SWIGLU_OAI: - { - 
ggml_compute_forward_swiglu_oai(params, dst); - } - break; - case GGML_GLU_OP_GEGLU_ERF: - { - ggml_compute_forward_geglu_erf(params, dst); - } - break; - case GGML_GLU_OP_GEGLU_QUICK: - { - ggml_compute_forward_geglu_quick(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_get_rel_pos - -static void ggml_compute_forward_get_rel_pos_f16(const ggml_compute_params * params, ggml_tensor * dst) { - GGML_UNUSED(params); - - const ggml_tensor * src0 = dst->src[0]; - - // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/image_encoder.py#L292-L322 - - GGML_TENSOR_UNARY_OP_LOCALS - - const int64_t w = ne1; - - ggml_fp16_t * src0_data = (ggml_fp16_t *) src0->data; - ggml_fp16_t * dst_data = (ggml_fp16_t *) dst->data; - - for (int64_t i2 = 0; i2 < ne2; ++i2) { - for (int64_t i1 = 0; i1 < ne1; ++i1) { - const int64_t pos = (w - i1 - 1) + i2; - for (int64_t i0 = 0; i0 < ne0; ++i0) { - dst_data[i2 * ne1 * ne0 + i1 * ne0 + i0] = src0_data[pos * ne00 + i0]; - } - } - } -} - -void ggml_compute_forward_get_rel_pos(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F16: - case GGML_TYPE_BF16: - { - ggml_compute_forward_get_rel_pos_f16(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_add_rel_pos - -static void ggml_compute_forward_add_rel_pos_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - const ggml_tensor * src2 = dst->src[2]; - - const bool inplace = (bool) ((int32_t *) dst->op_params)[0]; - if (!inplace) { - if (params->ith == 0) { - memcpy((char *) dst->data, (char *) src0->data, ggml_nbytes(dst)); - } - ggml_barrier(params->threadpool); - } - // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/image_encoder.py#L357-L359 - - float * src1_data = (float *) src1->data; - float * src2_data = (float *) src2->data; - float * dst_data = (float *) dst->data; - - const int64_t ne10 = src1->ne[0]; - const int64_t ne11 = src1->ne[1]; - const int64_t ne12 = src1->ne[2]; - const int64_t ne13 = src1->ne[3]; - - const int ith = params->ith; - const int nth = params->nth; - - // total patches in dst - const int np = ne13; - - // patches per thread - const int dp = (np + nth - 1) / nth; - - // patch range for this thread - const int ip0 = dp * ith; - const int ip1 = MIN(ip0 + dp, np); - - for (int64_t i13 = ip0; i13 < ip1; ++i13) { - for (int64_t i12 = 0; i12 < ne12; ++i12) { - for (int64_t i11 = 0; i11 < ne11; ++i11) { - const int64_t jp1 = i13 * ne12 * ne11 * ne10 + i12 * ne11 * ne10 + i11 * ne10; - for (int64_t i10 = 0; i10 < ne10; ++i10) { - const int64_t jp0 = jp1 + i10; - const float src1_e = src1_data[jp0]; - const float src2_e = src2_data[jp0]; - - const int64_t jdh = jp0 * ne10; - const int64_t jdw = jdh - (ne10 - 1) * i10; - - for (int64_t j = 0; j < ne10; ++j) { - dst_data[jdh + j] += src2_e; - dst_data[jdw + j * ne10] += src1_e; - } - } - } - } - } -} - -void ggml_compute_forward_add_rel_pos(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_add_rel_pos_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_rwkv_wkv6 - -static void 
ggml_compute_forward_rwkv_wkv6_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const int64_t T = dst->src[1]->ne[2]; - const int64_t C = dst->ne[0]; - const int64_t HEADS = dst->src[1]->ne[1]; - const int64_t n_seqs = dst->src[5]->ne[1]; - const int64_t head_size = C / HEADS; - - float * dst_data = (float *) dst->data; - float * state = ((float *) dst->data) + C * T; - - const int ith = params->ith; - const int nth = params->nth; - - if (ith >= HEADS) { - return; - } - - const int h_start = (HEADS * ith) / nth; - const int h_end = ((HEADS * (ith + 1)) / nth < HEADS) ? (HEADS * (ith + 1)) / nth : HEADS; - - float * k = (float *) dst->src[0]->data; - float * v = (float *) dst->src[1]->data; - float * r = (float *) dst->src[2]->data; - float * time_faaaa = (float *) dst->src[3]->data; - float * time_decay = (float *) dst->src[4]->data; - - size_t t_stride = HEADS * head_size; // Same to C - - size_t h_stride = C / HEADS; - GGML_ASSERT(C % HEADS == 0); // C must be divisible by HEADS - size_t h_stride_2d = head_size * head_size; - - if (ith == 0) { - memset(dst_data, 0, T * C * sizeof(float)); - } - ggml_barrier(params->threadpool); - -#if defined(__AVX__) && !defined(__AVX512F__) -# define GGML_F32X GGML_F32x8 -# define GGML_F32X_SET1 GGML_F32x8_SET1 -# define GGML_F32X_LOAD GGML_F32x8_LOAD -# define GGML_F32X_STORE GGML_F32x8_STORE -# define GGML_F32X_MUL GGML_F32x8_MUL -# define GGML_F32X_FMA GGML_F32x8_FMA -# define WKV_VECTOR_SIZE 8 -#elif defined(__AVX512F__) -# define GGML_F32X GGML_F32x16 -# define GGML_F32X_SET1 GGML_F32x16_SET1 -# define GGML_F32X_LOAD GGML_F32x16_LOAD -# define GGML_F32X_STORE GGML_F32x16_STORE -# define GGML_F32X_MUL GGML_F32x16_MUL -# define GGML_F32X_FMA GGML_F32x16_FMA -# define WKV_VECTOR_SIZE 16 -#elif defined(__ARM_FEATURE_SVE) && defined(__aarch64__) -# define GGML_F32X GGML_F32xt -# define GGML_F32X_SET1 GGML_F32xt_SET1 -# define GGML_F32X_LOAD GGML_F32xt_LOAD -# define GGML_F32X_STORE GGML_F32xt_STORE -# define GGML_F32X_MUL GGML_F32xt_MUL -# define GGML_F32X_FMA GGML_F32xt_FMA -# define WKV_VECTOR_SIZE 8 -#elif defined(__ARM_NEON) && defined(__aarch64__) -# define GGML_F32X GGML_F32x4 -# define GGML_F32X_SET1 GGML_F32x4_SET1 -# define GGML_F32X_LOAD GGML_F32x4_LOAD -# define GGML_F32X_STORE GGML_F32x4_STORE -# define GGML_F32X_MUL GGML_F32x4_MUL -# define GGML_F32X_FMA GGML_F32x4_FMA -# define WKV_VECTOR_SIZE 4 -#endif - -#ifdef WKV_VECTOR_SIZE - int wkv_vector_size; -# if defined(__ARM_FEATURE_SVE) - wkv_vector_size = svcntw(); -# else - wkv_vector_size = WKV_VECTOR_SIZE; -# endif - const int64_t vec_count = head_size / wkv_vector_size; - - for (int64_t t = 0; t < T; t++) { - size_t t_offset = t * t_stride; - size_t state_offset = head_size * C * (t / (T / n_seqs)); - float * state_cur = state + state_offset; - float * state_prev = t % (T / n_seqs) ? 
state_cur : (float *) dst->src[5]->data + state_offset; - - for (int64_t h = h_start; h < h_end; h++) { - size_t h_offset = h * h_stride; - size_t t_h_offset = t_offset + h_offset; - size_t h_2d_offset = h * h_stride_2d; - - for (int64_t i = 0; i < head_size; i++) { - size_t t_h_i_offset = t_h_offset + i; - size_t h_i_offset = h_offset + i; - size_t h_2d_i_offset = h_2d_offset + i * h_stride; - - float k_val = k[t_h_i_offset]; - float r_val = r[t_h_i_offset]; - float time_faaaa_val = time_faaaa[h_i_offset]; - float time_decay_val = time_decay[t_h_i_offset]; - - // Broadcast scalar values to vectors - GGML_F32X k_vec = GGML_F32X_SET1(k_val); - GGML_F32X r_vec = GGML_F32X_SET1(r_val); - GGML_F32X time_faaaa_vec = GGML_F32X_SET1(time_faaaa_val); - GGML_F32X time_decay_vec = GGML_F32X_SET1(time_decay_val); - - for (int64_t j = 0; j < vec_count; j++) { - size_t base_j = j * wkv_vector_size; - size_t t_h_j_offset = t_h_offset + base_j; - size_t h_2d_i_j_offset = h_2d_i_offset + base_j; - - // Load x elements at once - GGML_F32X v_vec = GGML_F32X_LOAD(&v[t_h_j_offset]); - GGML_F32X prev_state_vec = GGML_F32X_LOAD(&state_prev[h_2d_i_j_offset]); - GGML_F32X dst_vec = GGML_F32X_LOAD(&dst_data[t_h_j_offset]); - - // Compute kv = v * k - GGML_F32X kv_vec = GGML_F32X_MUL(v_vec, k_vec); - - // Compute temp = kv * time_faaaa + prev_state - GGML_F32X temp_vec = GGML_F32X_FMA(prev_state_vec, kv_vec, time_faaaa_vec); - - // Update dst: dst += temp * r - dst_vec = GGML_F32X_FMA(dst_vec, temp_vec, r_vec); - GGML_F32X_STORE(&dst_data[t_h_j_offset], dst_vec); - - // Update state: state = prev_state * time_decay + kv - GGML_F32X new_state_vec = GGML_F32X_FMA(kv_vec, prev_state_vec, time_decay_vec); - GGML_F32X_STORE(&state_cur[h_2d_i_j_offset], new_state_vec); - } - - // Handle remaining elements, this will not be used. - for (int64_t j = vec_count * wkv_vector_size; j < head_size; j++) { - size_t t_h_j_offset = t_h_offset + j; - size_t h_2d_i_j_offset = h_2d_i_offset + j; - float v_val = v[t_h_j_offset]; - float kv_val = v_val * k_val; - float prev_state_val = state_prev[h_2d_i_j_offset]; - float temp_val = kv_val * time_faaaa_val + prev_state_val; - dst_data[t_h_j_offset] += temp_val * r_val; - state_cur[h_2d_i_j_offset] = prev_state_val * time_decay_val + kv_val; - } - } - } - } - -#else - // basically fused operations: - // dst = r @ (time_faaaa * (k @ v) + state), - // state = time_decay * state + (k @ v), - // recursive through each token - for (int64_t t = 0; t < T; t++) { - size_t t_offset = t * t_stride; - size_t state_offset = head_size * C * (t / (T / n_seqs)); - float * state_cur = state + state_offset; - float * state_prev = t % (T / n_seqs) ? state_cur : (float *) dst->src[5]->data + state_offset; - - for (int64_t h = h_start; h < h_end; h++) { - size_t h_offset = h * h_stride; - size_t t_h_offset = t_offset + h_offset; - size_t h_2d_offset = h * h_stride_2d; - - for (int64_t i = 0; i < head_size; i++) { - size_t t_h_i_offset = t_h_offset + i; - size_t h_i_offset = h_offset + i; - size_t h_2d_i_offset = h_2d_offset + i * h_stride; - - float k_val = k[t_h_i_offset]; - float r_val = r[t_h_i_offset]; - float time_faaaa_val = time_faaaa[h_i_offset]; - // RWKV v6: different time_decay for each token. 
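/* Note on the vector path above: the SIMD helper GGML_F32X_FMA(acc, b, c) evaluates
 * acc + b*c, so GGML_F32X_FMA(prev_state_vec, kv_vec, time_faaaa_vec) is the vector
 * form of the scalar update temp = kv*time_faaaa + prev_state computed in this
 * fallback, and GGML_F32X_FMA(kv_vec, prev_state_vec, time_decay_vec) matches
 * state = prev_state*time_decay + kv. The "remaining elements" tail loops after the
 * vector loops are effectively never taken, since head_size is a multiple of the
 * vector width in practice. */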
- float time_decay_val = time_decay[t_h_i_offset]; - - for (int64_t j = 0; j < head_size; j++) { - size_t t_h_j_offset = t_h_offset + j; - size_t h_2d_i_j_offset = h_2d_i_offset + j; - - float v_val = v[t_h_j_offset]; - float kv_val = v_val * k_val; - float prev_state_val = state_prev[h_2d_i_j_offset]; - float temp_val = kv_val * time_faaaa_val + prev_state_val; - dst_data[t_h_j_offset] += temp_val * r_val; - state_cur[h_2d_i_j_offset] = prev_state_val * time_decay_val + kv_val; - } - } - } - } -#endif -} - -void ggml_compute_forward_rwkv_wkv6(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_rwkv_wkv6_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_gla - -static void ggml_compute_forward_gla_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const int64_t T = dst->src[1]->ne[2]; - const int64_t C = dst->ne[0]; - const int64_t HEADS = dst->src[1]->ne[1]; - const int64_t n_seqs = dst->src[4]->ne[1]; - const int64_t head_size = C / HEADS; - const float scale = ggml_get_op_params_f32(dst, 0); - - float * dst_data = (float *) dst->data; - float * state = ((float *) dst->data) + C * T; - - const int ith = params->ith; - const int nth = params->nth; - - if (ith >= HEADS) { - return; - } - - const int h_start = (HEADS * ith) / nth; - const int h_end = ((HEADS * (ith + 1)) / nth < HEADS) ? (HEADS * (ith + 1)) / nth : HEADS; - - float * k = (float *) dst->src[0]->data; - float * v = (float *) dst->src[1]->data; - float * q = (float *) dst->src[2]->data; - float * g = (float *) dst->src[3]->data; - - size_t t_stride = HEADS * head_size; // Same to C - - size_t h_stride = C / HEADS; - GGML_ASSERT(C % HEADS == 0); // C must be divisible by HEADS - size_t h_stride_2d = head_size * head_size; - - if (ith == 0) { - memset(dst_data, 0, T * C * sizeof(float)); - } - ggml_barrier(params->threadpool); - -#if defined(__AVX__) && !defined(__AVX512F__) -# define GGML_F32X GGML_F32x8 -# define GGML_F32X_SET1 GGML_F32x8_SET1 -# define GGML_F32X_LOAD GGML_F32x8_LOAD -# define GGML_F32X_STORE GGML_F32x8_STORE -# define GGML_F32X_MUL GGML_F32x8_MUL -# define GGML_F32X_FMA GGML_F32x8_FMA -# define GLA_VECTOR_SIZE 8 -#elif defined(__AVX512F__) -# define GGML_F32X GGML_F32x16 -# define GGML_F32X_SET1 GGML_F32x16_SET1 -# define GGML_F32X_LOAD GGML_F32x16_LOAD -# define GGML_F32X_STORE GGML_F32x16_STORE -# define GGML_F32X_MUL GGML_F32x16_MUL -# define GGML_F32X_FMA GGML_F32x16_FMA -# define GLA_VECTOR_SIZE 16 -#elif defined(__ARM_FEATURE_SVE) && defined(__aarch64__) -# define GGML_F32X GGML_F32xt -# define GGML_F32X_SET1 GGML_F32xt_SET1 -# define GGML_F32X_LOAD GGML_F32xt_LOAD -# define GGML_F32X_STORE GGML_F32xt_STORE -# define GGML_F32X_MUL GGML_F32xt_MUL -# define GGML_F32X_FMA GGML_F32xt_FMA -# define GLA_VECTOR_SIZE 8 -#elif defined(__ARM_NEON) && defined(__aarch64__) -# define GGML_F32X GGML_F32x4 -# define GGML_F32X_SET1 GGML_F32x4_SET1 -# define GGML_F32X_LOAD GGML_F32x4_LOAD -# define GGML_F32X_STORE GGML_F32x4_STORE -# define GGML_F32X_MUL GGML_F32x4_MUL -# define GGML_F32X_FMA GGML_F32x4_FMA -# define GLA_VECTOR_SIZE 4 -#endif - -#ifdef GLA_VECTOR_SIZE - int gla_vector_size; -# if defined(__ARM_FEATURE_SVE) - gla_vector_size = svcntw(); -# else - gla_vector_size = GLA_VECTOR_SIZE; -# endif - const int64_t vec_count = head_size / gla_vector_size; - - for (int64_t t = 0; t < T; t++) { - size_t t_offset = t * 
t_stride; - size_t state_offset = head_size * C * (t / (T / n_seqs)); - float * state_cur = state + state_offset; - float * state_prev = t % (T / n_seqs) ? state_cur : (float *) dst->src[4]->data + state_offset; - - for (int64_t h = h_start; h < h_end; h++) { - size_t h_offset = h * h_stride; - size_t t_h_offset = t_offset + h_offset; - size_t h_2d_offset = h * h_stride_2d; - - for (int64_t i = 0; i < head_size; i++) { - size_t t_h_i_offset = t_h_offset + i; - size_t h_2d_i_offset = h_2d_offset + i * h_stride; - - float k_val = k[t_h_i_offset]; - float q_val = q[t_h_i_offset] * scale; - float g_val = g[t_h_i_offset]; - - // Broadcast scalar values to vectors - GGML_F32X k_vec = GGML_F32X_SET1(k_val); - GGML_F32X q_vec = GGML_F32X_SET1(q_val); - GGML_F32X g_vec = GGML_F32X_SET1(g_val); - - for (int64_t j = 0; j < vec_count; j++) { - size_t base_j = j * gla_vector_size; - size_t t_h_j_offset = t_h_offset + base_j; - size_t h_2d_i_j_offset = h_2d_i_offset + base_j; - - // Load x elements at once - GGML_F32X v_vec = GGML_F32X_LOAD(&v[t_h_j_offset]); - GGML_F32X prev_state_vec = GGML_F32X_LOAD(&state_prev[h_2d_i_j_offset]); - GGML_F32X dst_vec = GGML_F32X_LOAD(&dst_data[t_h_j_offset]); - - // Compute kv = v * k - GGML_F32X kv_vec = GGML_F32X_MUL(v_vec, k_vec); - - // Compute temp = prev_state * g + kv - GGML_F32X temp_vec = GGML_F32X_FMA(kv_vec, prev_state_vec, g_vec); - - // Update dst: dst += temp * q - dst_vec = GGML_F32X_FMA(dst_vec, temp_vec, q_vec); - GGML_F32X_STORE(&dst_data[t_h_j_offset], dst_vec); - - // Update state - GGML_F32X_STORE(&state_cur[h_2d_i_j_offset], temp_vec); - } - - // Handle remaining elements, this will not be used. - for (int64_t j = vec_count * gla_vector_size; j < head_size; j++) { - size_t t_h_j_offset = t_h_offset + j; - size_t h_2d_i_j_offset = h_2d_i_offset + j; - float v_val = v[t_h_j_offset]; - float kv_val = v_val * k_val; - float prev_state_val = state_prev[h_2d_i_j_offset]; - float temp_val = kv_val + prev_state_val * g_val; - dst_data[t_h_j_offset] += temp_val * q_val; - state_cur[h_2d_i_j_offset] = temp_val; - } - } - } - } - -#else - for (int64_t t = 0; t < T; t++) { - size_t t_offset = t * t_stride; - size_t state_offset = head_size * C * (t / (T / n_seqs)); - float * state_cur = state + state_offset; - float * state_prev = t % (T / n_seqs) ? 
state_cur : (float *) dst->src[4]->data + state_offset; - - for (int64_t h = h_start; h < h_end; h++) { - size_t h_offset = h * h_stride; - size_t t_h_offset = t_offset + h_offset; - size_t h_2d_offset = h * h_stride_2d; - - for (int64_t i = 0; i < head_size; i++) { - size_t t_h_i_offset = t_h_offset + i; - size_t h_2d_i_offset = h_2d_offset + i * h_stride; - - float k_val = k[t_h_i_offset]; - float q_val = q[t_h_i_offset] * scale; - float g_val = g[t_h_i_offset]; - - for (int64_t j = 0; j < head_size; j++) { - size_t t_h_j_offset = t_h_offset + j; - size_t h_2d_i_j_offset = h_2d_i_offset + j; - - float v_val = v[t_h_j_offset]; - float kv_val = v_val * k_val; - float prev_state_val = state_prev[h_2d_i_j_offset]; - float temp_val = prev_state_val * g_val + kv_val; - dst_data[t_h_j_offset] += temp_val * q_val; - state_cur[h_2d_i_j_offset] = temp_val; - } - } - } - } -#endif -} - -void ggml_compute_forward_gla(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_gla_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -static void ggml_compute_forward_solve_tri_f32(const struct ggml_compute_params * params, struct ggml_tensor * dst) { - const struct ggml_tensor * src0 = dst->src[0]; // A (lower triangular) - const struct ggml_tensor * src1 = dst->src[1]; // B (RHS) - - GGML_TENSOR_BINARY_OP_LOCALS; - - GGML_ASSERT(src0->type == GGML_TYPE_F32); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); - - GGML_ASSERT(ne00 == ne01); // A must be square - GGML_ASSERT(ne0 == ne10); // solution cols == B cols - GGML_ASSERT(ne1 == ne11); // solution rows == B rows - - GGML_ASSERT(ne02 == ne12 && ne12 == ne2); - GGML_ASSERT(ne03 == ne13 && ne13 == ne3); - - const int ith = params->ith; - const int nth = params->nth; - - const int64_t k = ne10; // number of RHS columns - const int64_t n = ne11; // A is n×n - const int64_t nr = - ne02 * ne03 * k; // we're parallelizing on columns here, so seq x token x column will be the unit - - // chunks per thread - const int64_t dr = (nr + nth - 1) / nth; - - // chunk range for this thread - const int64_t ir0 = dr * ith; - const int64_t ir1 = MIN(ir0 + dr, nr); - - const float * A = (const float *) src0->data; // [n, n, B1, B2] - const float * B = (const float *) src1->data; // [n, k, B1, B2] - float * X = (float *) dst->data; // [n, k, B1, B2] - - for (int64_t ir = ir0; ir < ir1; ++ir) { - const int64_t i03 = ir / (ne02 * k); - const int64_t i02 = (ir - i03 * ne02 * k) / k; - const int64_t i01 = (ir - i03 * ne02 * k - i02 * k); - - const float * A_batch = A + i02 * nb02 / sizeof(float) + i03 * nb03 / sizeof(float); - const float * B_batch = B + i02 * nb12 / sizeof(float) + i03 * nb13 / sizeof(float); - - float * X_batch = X + i02 * nb2 / sizeof(float) + i03 * nb3 / sizeof(float); - - for (int64_t i00 = 0; i00 < n; ++i00) { - float sum = 0.0f; - for (int64_t t = 0; t < i00; ++t) { - sum += A_batch[i00 * n + t] * X_batch[t * k + i01]; - } - - const float diag = A_batch[i00 * n + i00]; - GGML_ASSERT(diag != 0.0f && "Zero diagonal in triangular matrix"); - X_batch[i00 * k + i01] = (B_batch[i00 * k + i01] - sum) / diag; - } - } -} - -void ggml_compute_forward_solve_tri(const struct ggml_compute_params * params, struct ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - if (src0->type == GGML_TYPE_F32 && src1->type == 
GGML_TYPE_F32) { - ggml_compute_forward_solve_tri_f32(params, dst); - } else { - GGML_ABORT("fatal error"); - } -} - -// ggml_compute_forward_rwkv_wkv7 - -static void ggml_compute_forward_rwkv_wkv7_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const int64_t T = dst->src[1]->ne[2]; - const int64_t C = dst->ne[0]; - const int64_t HEADS = dst->src[1]->ne[1]; - const int64_t n_seqs = dst->src[6]->ne[1]; - const int64_t head_size = C / HEADS; - - float * dst_data = (float *) dst->data; - float * state = ((float *) dst->data) + C * T; - - const int ith = params->ith; - const int nth = params->nth; - - if (ith >= HEADS) { - return; - } - - const int h_start = (HEADS * ith) / nth; - const int h_end = ((HEADS * (ith + 1)) / nth < HEADS) ? (HEADS * (ith + 1)) / nth : HEADS; - - float * r = (float *) dst->src[0]->data; - float * w = (float *) dst->src[1]->data; - float * k = (float *) dst->src[2]->data; - float * v = (float *) dst->src[3]->data; - float * a = (float *) dst->src[4]->data; - float * b = (float *) dst->src[5]->data; - - int64_t t_stride = HEADS * head_size; // Same to C - - int64_t h_stride = C / HEADS; - GGML_ASSERT(C % HEADS == 0); // C must be divisible by HEADS - int64_t h_stride_2d = head_size * head_size; - -#if defined(GGML_SIMD) -# if defined(__ARM_FEATURE_SVE) || defined(__riscv_v_intrinsic) - // scalar Route to scalar implementation //TODO: Write SVE code and RVV code - for (int64_t t = 0; t < T; t++) { - int64_t t_offset = t * t_stride; - int64_t state_offset = head_size * C * (t / (T / n_seqs)); - float * state_cur = state + state_offset; - float * state_prev = t % (T / n_seqs) ? state_cur : (float *) dst->src[6]->data + state_offset; - - for (int64_t h = h_start; h < h_end; h++) { - int64_t h_offset = h * h_stride; - int64_t t_h_offset = t_offset + h_offset; - int64_t h_2d_offset = h * h_stride_2d; - - for (int64_t i = 0; i < head_size; i++) { - int64_t t_h_i_offset = t_h_offset + i; - int64_t h_2d_i_offset = h_2d_offset + i * h_stride; - - float v_val = v[t_h_i_offset]; - - float sa = 0, result = 0; - for (int64_t j = 0; j < head_size; j++) { - sa += a[t_h_offset + j] * state_prev[h_2d_i_offset + j]; - } - - for (int64_t j = 0; j < head_size; j++) { - int64_t t_h_j_offset = t_h_offset + j; - int64_t h_2d_i_j_offset = h_2d_i_offset + j; - - float r_val = r[t_h_j_offset]; - float w_val = w[t_h_j_offset]; - float k_val = k[t_h_j_offset]; - float b_val = b[t_h_j_offset]; - float kv_val = v_val * k_val; - float prev_state_val = state_prev[h_2d_i_j_offset]; - state_cur[h_2d_i_j_offset] = prev_state_val * w_val + kv_val + sa * b_val; - result += state_cur[h_2d_i_j_offset] * r_val; - } - dst_data[t_h_i_offset] = result; - } - } - } -# else - for (int64_t t = 0; t < T; t++) { - int64_t t_offset = t * t_stride; - int64_t state_offset = head_size * C * (t / (T / n_seqs)); - float * state_cur = state + state_offset; - float * state_prev = t % (T / n_seqs) ? 
state_cur : (float *) dst->src[6]->data + state_offset; - - for (int64_t h = h_start; h < h_end; h++) { - int64_t h_offset = h * h_stride; - int64_t t_h_offset = t_offset + h_offset; - int64_t h_2d_offset = h * h_stride_2d; - - for (int64_t ii = 0; ii < head_size; ii++) { - int64_t t_h_i_offset = t_h_offset + ii; - int64_t h_2d_i_offset = h_2d_offset + ii * h_stride; - - GGML_F32_VEC v_vec = GGML_F32_VEC_SET1(v[t_h_i_offset]); - - float sa = 0; - { - GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; - GGML_F32_VEC ax[GGML_F32_ARR]; - GGML_F32_VEC ay[GGML_F32_ARR]; - for (int64_t j = 0; j < head_size; j += GGML_F32_STEP) { - for (int64_t kk = 0; kk < GGML_F32_ARR; kk++) { - ax[kk] = GGML_F32_VEC_LOAD(&a[t_h_offset + j + kk * GGML_F32_EPR]); - ay[kk] = GGML_F32_VEC_LOAD(&state_prev[h_2d_i_offset + j + kk * GGML_F32_EPR]); - sum[kk] = GGML_F32_VEC_FMA(sum[kk], ax[kk], ay[kk]); - } - } - GGML_F32_VEC_REDUCE(sa, sum); - } - - GGML_F32_VEC sa_vec = GGML_F32_VEC_SET1(sa); - - int64_t j = 0; - GGML_F32_VEC result_vec[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; - for (; j < head_size; j += GGML_F32_STEP) { - for (int64_t kk = 0; kk < GGML_F32_ARR; kk++) { - int64_t t_h_j_offset = t_h_offset + j + kk * GGML_F32_EPR; - int64_t h_2d_i_j_offset = h_2d_i_offset + j + kk * GGML_F32_EPR; - - GGML_F32_VEC r_vec = GGML_F32_VEC_LOAD(&r[t_h_j_offset]); - GGML_F32_VEC w_vec = GGML_F32_VEC_LOAD(&w[t_h_j_offset]); - GGML_F32_VEC k_vec = GGML_F32_VEC_LOAD(&k[t_h_j_offset]); - GGML_F32_VEC b_vec = GGML_F32_VEC_LOAD(&b[t_h_j_offset]); - - k_vec = GGML_F32_VEC_MUL(v_vec, k_vec); - - GGML_F32_VEC state_vec = GGML_F32_VEC_LOAD(&state_prev[h_2d_i_j_offset]); - // kv + s * decay + sa * b - state_vec = GGML_F32_VEC_FMA(k_vec, state_vec, w_vec); - state_vec = GGML_F32_VEC_FMA(state_vec, sa_vec, b_vec); - GGML_F32_VEC_STORE(&state_cur[h_2d_i_j_offset], state_vec); - - result_vec[kk] = GGML_F32_VEC_FMA(result_vec[kk], state_vec, r_vec); - } - } - GGML_F32_VEC_REDUCE(dst_data[t_h_i_offset], result_vec); - - // There shouldn't be left-overs though. - for (; j < head_size; j++) { - int64_t t_h_j_offset = t_h_offset + j; - int64_t h_2d_i_j_offset = h_2d_i_offset + j; - - float r_val = r[t_h_j_offset]; - float w_val = w[t_h_j_offset]; - float k_val = k[t_h_j_offset]; - float b_val = b[t_h_j_offset]; - float kv_val = v[t_h_i_offset] * k_val; - - float prev_state_val = state_prev[h_2d_i_j_offset]; - state_cur[h_2d_i_j_offset] = prev_state_val * w_val + kv_val + sa * b_val; - dst_data[t_h_i_offset] += state_cur[h_2d_i_j_offset] * r_val; - } - } - } - } -# endif -#else - for (int64_t t = 0; t < T; t++) { - int64_t t_offset = t * t_stride; - int64_t state_offset = head_size * C * (t / (T / n_seqs)); - float * state_cur = state + state_offset; - float * state_prev = t % (T / n_seqs) ? 
state_cur : (float *) dst->src[6]->data + state_offset; - - for (int64_t h = h_start; h < h_end; h++) { - int64_t h_offset = h * h_stride; - int64_t t_h_offset = t_offset + h_offset; - int64_t h_2d_offset = h * h_stride_2d; - - for (int64_t i = 0; i < head_size; i++) { - int64_t t_h_i_offset = t_h_offset + i; - int64_t h_2d_i_offset = h_2d_offset + i * h_stride; - - float v_val = v[t_h_i_offset]; - - float sa = 0, result = 0; - for (int64_t j = 0; j < head_size; j++) { - sa += a[t_h_offset + j] * state_prev[h_2d_i_offset + j]; - } - - for (int64_t j = 0; j < head_size; j++) { - int64_t t_h_j_offset = t_h_offset + j; - int64_t h_2d_i_j_offset = h_2d_i_offset + j; - - float r_val = r[t_h_j_offset]; - float w_val = w[t_h_j_offset]; - float k_val = k[t_h_j_offset]; - float b_val = b[t_h_j_offset]; - float kv_val = v_val * k_val; - float prev_state_val = state_prev[h_2d_i_j_offset]; - state_cur[h_2d_i_j_offset] = prev_state_val * w_val + kv_val + sa * b_val; - result += state_cur[h_2d_i_j_offset] * r_val; - } - dst_data[t_h_i_offset] = result; - } - } - } -#endif -} - -void ggml_compute_forward_rwkv_wkv7(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_rwkv_wkv7_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_map_custom1 - -void ggml_compute_forward_map_custom1(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * a = dst->src[0]; - - struct ggml_map_custom1_op_params p; - memcpy(&p, dst->op_params, sizeof(p)); - - p.fun(dst, a, params->ith, params->nth, p.userdata); -} - -// ggml_compute_forward_map_custom2 - -void ggml_compute_forward_map_custom2(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * a = dst->src[0]; - const ggml_tensor * b = dst->src[1]; - - struct ggml_map_custom2_op_params p; - memcpy(&p, dst->op_params, sizeof(p)); - - p.fun(dst, a, b, params->ith, params->nth, p.userdata); -} - -// ggml_compute_forward_map_custom3 - -void ggml_compute_forward_map_custom3(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * a = dst->src[0]; - const ggml_tensor * b = dst->src[1]; - const ggml_tensor * c = dst->src[2]; - - struct ggml_map_custom3_op_params p; - memcpy(&p, dst->op_params, sizeof(p)); - - p.fun(dst, a, b, c, params->ith, params->nth, p.userdata); -} - -// ggml_compute_forward_custom - -void ggml_compute_forward_custom(const struct ggml_compute_params * params, struct ggml_tensor * dst) { - struct ggml_custom_op_params p; - memcpy(&p, dst->op_params, sizeof(p)); - - p.fun(dst, params->ith, params->nth, p.userdata); -} - -// ggml_compute_forward_cross_entropy_loss - -static void ggml_compute_forward_cross_entropy_loss_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(src0->type == GGML_TYPE_F32); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT(src0->nb[0] == ggml_type_size(src0->type)); - GGML_ASSERT(src1->nb[0] == ggml_type_size(src1->type)); - GGML_ASSERT(ggml_are_same_shape(src0, src1)); - GGML_ASSERT(ggml_is_scalar(dst)); - GGML_ASSERT(dst->type == GGML_TYPE_F32); - - // TODO: handle transposed/permuted matrices - const int64_t nc = src0->ne[0]; - const int64_t nr = ggml_nrows(src0); - - const int ith = params->ith; - const int nth = params->nth; - - float * sums = (float *) params->wdata; - 
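/* What the per-row loop below computes, in math form (a sketch, not code):
 *
 *     loss = -(1/nr) * sum over rows of ( sum_i s1[i] * log_softmax(s0)[i] )
 *
 * Each thread accumulates its share of rows into sums[ith]; after the barrier,
 * thread 0 reduces the per-thread partials and applies the -1/nr scaling. */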
float * st = ((float *) params->wdata) + nth + ith * nc; - float sum_thread = 0.0f; - - GGML_ASSERT(params->wsize >= sizeof(float) * (nth + nth * nc)); - - // rows per thread - const int64_t dr = (nr + nth - 1) / nth; - - // row range for this thread - const int64_t ir0 = dr * ith; - const int64_t ir1 = MIN(ir0 + dr, nr); - - for (int64_t i1 = ir0; i1 < ir1; ++i1) { - const float * s0 = (const float *) ((const char *) src0->data + i1 * src0->nb[1]); - const float * s1 = (const float *) ((const char *) src1->data + i1 * src1->nb[1]); - -#ifndef NDEBUG - for (int64_t i = 0; i < nc; ++i) { - //printf("p[%d] = %f\n", i, p[i]); - assert(!isnan(s0[i])); - assert(!isnan(s1[i])); - } -#endif - - float max = -INFINITY; - ggml_vec_max_f32(nc, &max, s0); - const ggml_float sum_softmax = ggml_vec_log_soft_max_f32(nc, st, s0, max); - assert(sum_softmax >= 0.0); - - ggml_vec_add1_f32(nc, st, st, -sum_softmax); - ggml_vec_mul_f32(nc, st, st, s1); - - float sum_st = 0.0f; - ggml_vec_sum_f32(nc, &sum_st, st); - sum_thread += sum_st; - -#ifndef NDEBUG - for (int64_t i = 0; i < nc; ++i) { - assert(!isnan(st[i])); - assert(!isinf(st[i])); - } -#endif - } - sums[ith] = sum_thread; - ggml_barrier(params->threadpool); - - if (ith == 0) { - float * dp = (float *) dst->data; - ggml_vec_sum_f32(nth, dp, sums); - dp[0] *= -1.0f / (float) nr; - } -} - -void ggml_compute_forward_cross_entropy_loss(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_cross_entropy_loss_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_cross_entropy_loss_back - -static void ggml_compute_forward_cross_entropy_loss_back_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * grad = dst->src[0]; // gradient of forward pass output - const ggml_tensor * src0f = dst->src[1]; // src0 of forward pass - const ggml_tensor * src1f = dst->src[2]; // src1 of forward pass - - GGML_ASSERT(ggml_is_contiguous(dst)); - GGML_ASSERT(ggml_is_contiguous(src0f)); - GGML_ASSERT(ggml_is_contiguous(src1f)); - GGML_ASSERT(ggml_is_contiguous(grad)); - GGML_ASSERT(ggml_are_same_shape(src0f, src1f) && ggml_are_same_shape(src0f, dst)); - - const int64_t ith = params->ith; - const int64_t nth = params->nth; - - // TODO: handle transposed/permuted matrices - const int64_t nc = src0f->ne[0]; - const int64_t nr = ggml_nrows(src0f); - - // rows per thread - const int64_t dr = (nr + nth - 1) / nth; - - // row range for this thread - const int64_t ir0 = dr * ith; - const int64_t ir1 = MIN(ir0 + dr, nr); - - const float d_by_nr = ((const float *) grad->data)[0] / (float) nr; - - for (int64_t i1 = ir0; i1 < ir1; i1++) { - float * ds0 = (float *) ((char *) dst->data + i1 * dst->nb[1]); - const float * s0 = (const float *) ((const char *) src0f->data + i1 * src0f->nb[1]); - const float * s1 = (const float *) ((const char *) src1f->data + i1 * src1f->nb[1]); - -#ifndef NDEBUG - for (int64_t i = 0; i < nc; ++i) { - //printf("p[%d] = %f\n", i, p[i]); - assert(!isnan(s0[i])); - assert(!isnan(s1[i])); - } -#endif - - // soft_max - float max = -INFINITY; - ggml_vec_max_f32(nc, &max, s0); - const ggml_float sum = ggml_vec_soft_max_f32(nc, ds0, s0, max); - assert(sum > 0.0); - ggml_vec_scale_f32(nc, ds0, 1.0 / sum); - - // grad(src0f) = (softmax(src0f) - src1f) * grad(cross_entropy_loss(src0f, src1f)) / nr - ggml_vec_sub_f32(nc, ds0, ds0, s1); - ggml_vec_scale_f32(nc, ds0, 
d_by_nr); - -#ifndef NDEBUG - for (int64_t i = 0; i < nc; ++i) { - assert(!isnan(ds0[i])); - assert(!isinf(ds0[i])); - } -#endif - } -} - -void ggml_compute_forward_cross_entropy_loss_back(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_cross_entropy_loss_back_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -static void ggml_compute_forward_opt_step_adamw_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src0_grad = dst->src[1]; - const ggml_tensor * src0_grad_m = dst->src[2]; - const ggml_tensor * src0_grad_v = dst->src[3]; - const ggml_tensor * adamw_params = dst->src[4]; - - GGML_ASSERT(ggml_are_same_shape(src0, src0_grad)); - GGML_ASSERT(ggml_are_same_shape(src0, src0_grad_m)); - GGML_ASSERT(ggml_are_same_shape(src0, src0_grad_v)); - GGML_ASSERT(ggml_nelements(adamw_params) == 7); - - const int ith = params->ith; - const int nth = params->nth; - - const int nr = ggml_nrows(src0); - - GGML_TENSOR_UNARY_OP_LOCALS - GGML_ASSERT(nb00 == sizeof(float)); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - const float * adamw_params_ptr = ggml_get_data_f32(adamw_params); - - const float alpha = adamw_params_ptr[0]; - const float beta1 = adamw_params_ptr[1]; - const float beta2 = adamw_params_ptr[2]; - const float eps = adamw_params_ptr[3]; - const float wd = adamw_params_ptr[4]; - const float beta1h = adamw_params_ptr[5]; - const float beta2h = adamw_params_ptr[6]; - const float keep = 1.f - alpha * wd; - for (int ir = ir0; ir < ir1; ++ir) { - const int64_t i03 = ir / (ne02 * ne01); - const int64_t i02 = (ir - i03 * ne02 * ne01) / ne01; - const int64_t i01 = (ir - i03 * ne02 * ne01 - i02 * ne01); - - const size_t offset = i03 * nb03 + i02 * nb02 + i01 * nb01; - - float * w = (float *) ((char *) src0->data + offset); // weight - const float * g = (const float *) ((const char *) src0_grad->data + offset); // grad - float * m = (float *) ((char *) src0_grad_m->data + offset); - float * v = (float *) ((char *) src0_grad_v->data + offset); - - for (int i00 = 0; i00 < ne00; ++i00) { - m[i00] = m[i00] * beta1 + g[i00] * (1.0f - beta1); - v[i00] = v[i00] * beta2 + g[i00] * g[i00] * (1.0f - beta2); - - const float mh = m[i00] * beta1h; - const float vh = sqrtf(v[i00] * beta2h) + eps; - - // The weight decay is applied independently of the Adam momenta m and v. - // This is NOT equivalent to l2 regularization that adds w[i00]*w[i00] to the loss. 
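/* (Written out, the decoupled update performed below, with keep = 1 - alpha*wd:
 *      m = beta1*m + (1 - beta1)*g
 *      v = beta2*v + (1 - beta2)*g*g
 *      w = keep*w - alpha * (m*beta1h) / (sqrt(v*beta2h) + eps)
 *  so the weight decay scales w directly rather than being added to the gradient.) */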
- // See: https://arxiv.org/pdf/1711.05101v3.pdf - w[i00] = w[i00] * keep - alpha * mh / vh; - } - } -} - -void ggml_compute_forward_opt_step_adamw(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_opt_step_adamw_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -static void ggml_compute_forward_opt_step_sgd_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src0_grad = dst->src[1]; - const ggml_tensor * sgd_params = dst->src[2]; - - GGML_ASSERT(ggml_are_same_shape(src0, src0_grad)); - GGML_ASSERT(ggml_nelements(sgd_params) == 2); - - const int ith = params->ith; - const int nth = params->nth; - - const int nr = ggml_nrows(src0); - - GGML_TENSOR_UNARY_OP_LOCALS - GGML_ASSERT(nb00 == sizeof(float)); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - // using adamw param subset we care about - alpha, wd - could have a separate struct - const float * sgd_params_ptr = ggml_get_data_f32(sgd_params); - const float alpha = sgd_params_ptr[0]; - const float keep = 1.f - alpha * sgd_params_ptr[1]; - - for (int ir = ir0; ir < ir1; ++ir) { - const int64_t i03 = ir / (ne02 * ne01); - const int64_t i02 = (ir - i03 * ne02 * ne01) / ne01; - const int64_t i01 = (ir - i03 * ne02 * ne01 - i02 * ne01); - - const size_t offset = i03 * nb03 + i02 * nb02 + i01 * nb01; - - float * w = (float *) ((char *) src0->data + offset); // weight - const float * g = (const float *) ((const char *) src0_grad->data + offset); // grad - - for (int i00 = 0; i00 < ne00; ++i00) { - w[i00] = w[i00] * keep - alpha * g[i00]; - } - } -} - -void ggml_compute_forward_opt_step_sgd(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_opt_step_sgd_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error - sgd is F32 only"); - } - } -} -#include "ops.h" - -#include "binary-ops.h" -#include "ggml-cpu.h" -#include "ggml-impl.h" -#include "ggml.h" -#include "unary-ops.h" -#include "vec.h" - -#include -#include -#include -#include - -// ggml_compute_forward_dup - -static void ggml_compute_forward_dup_same_cont(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); - GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0)); - GGML_ASSERT(src0->type == dst->type); - - const size_t nb0 = ggml_type_size(src0->type); - - const int ith = params->ith; // thread index - const int nth = params->nth; // number of threads - - // parallelize by blocks - const int nk = ggml_nelements(src0) / ggml_blck_size(src0->type); - const int dr = (nk + nth - 1) / nth; - const int k0 = dr * ith; - const int k1 = MIN(k0 + dr, nk); - - if (k0 < k1) { - memcpy(((char *) dst->data + k0 * nb0), ((char *) src0->data + k0 * nb0), (k1 - k0) * nb0); - } -} - -template -static void ggml_compute_forward_dup_flt(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); - GGML_ASSERT(!ggml_is_quantized(src0->type) && !ggml_is_quantized(dst->type)); - - GGML_TENSOR_UNARY_OP_LOCALS - - const 
int ith = params->ith; // thread index - const int nth = params->nth; // number of threads - - // parallelize by rows - const int nr = ne01; - // number of rows per thread - const int dr = (nr + nth - 1) / nth; - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - // case: type & row size equal - if (src0->type == dst->type && ne00 == ne0 && nb00 == ggml_type_size(src0->type) && - nb0 == ggml_type_size(dst->type)) { - // copy by rows - const size_t rs = ne00 * nb00; - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = ir0; i01 < ir1; i01++) { - memcpy(((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3), - ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03), rs); - } - } - } - return; - } - - // case: dst tensor is contiguous - if (ggml_is_contiguous(dst)) { - if (nb00 == sizeof(src_t)) { - if constexpr (std::is_same_v) { - // same type - size_t id = 0; - const size_t rs = ne00 * nb00; - char * dst_ptr = (char *) dst->data; - - for (int i03 = 0; i03 < ne03; i03++) { - for (int i02 = 0; i02 < ne02; i02++) { - id += rs * ir0; - for (int i01 = ir0; i01 < ir1; i01++) { - const char * src0_ptr = (char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03; - memcpy(dst_ptr + id, src0_ptr, rs); - id += rs; - } - id += rs * (ne01 - ir1); - } - } - } else { - // casting between non-quantized types - size_t id = 0; - dst_t * dst_ptr = (dst_t *) dst->data; - - for (int i03 = 0; i03 < ne03; i03++) { - for (int i02 = 0; i02 < ne02; i02++) { - id += ne00 * ir0; - for (int i01 = ir0; i01 < ir1; i01++) { - const src_t * src0_ptr = - (src_t *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); - for (int i00 = 0; i00 < ne00; i00++) { - float tmp = type_conversion_table::to_f32(src0_ptr[i00]); - dst_ptr[id] = type_conversion_table::from_f32(tmp); - id++; - } - } - id += ne00 * (ne01 - ir1); - } - } - } - } else { - //printf("%s: this is not optimal - fix me\n", __func__); - - size_t id = 0; - dst_t * dst_ptr = (dst_t *) dst->data; - - for (int i03 = 0; i03 < ne03; i03++) { - for (int i02 = 0; i02 < ne02; i02++) { - id += ne00 * ir0; - for (int i01 = ir0; i01 < ir1; i01++) { - for (int i00 = 0; i00 < ne00; i00++) { - const src_t * src0_ptr = - (src_t *) ((char *) src0->data + i00 * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03); - - float tmp = type_conversion_table::to_f32(*src0_ptr); - dst_ptr[id] = type_conversion_table::from_f32(tmp); - id++; - } - } - id += ne00 * (ne01 - ir1); - } - } - } - return; - } - - // dst counters - int64_t i10 = 0; - int64_t i11 = 0; - int64_t i12 = 0; - int64_t i13 = 0; - - if constexpr (std::is_same_v) { - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - i10 += ne00 * ir0; - while (i10 >= ne0) { - i10 -= ne0; - if (++i11 == ne1) { - i11 = 0; - if (++i12 == ne2) { - i12 = 0; - if (++i13 == ne3) { - i13 = 0; - } - } - } - } - for (int64_t i01 = ir0; i01 < ir1; i01++) { - for (int64_t i00 = 0; i00 < ne00; i00++) { - const char * src0_ptr = - ((char *) src0->data + i00 * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03); - char * dst_ptr = ((char *) dst->data + i10 * nb0 + i11 * nb1 + i12 * nb2 + i13 * nb3); - - memcpy(dst_ptr, src0_ptr, sizeof(dst_t)); - - if (++i10 == ne00) { - i10 = 0; - if (++i11 == ne01) { - i11 = 0; - if (++i12 == ne02) { - i12 = 0; - if (++i13 == ne03) { - i13 = 0; - } - } - } - } - } - } - i10 += ne00 * (ne01 - ir1); - while (i10 >= ne0) { - i10 -= ne0; - if (++i11 == ne1) { - i11 = 0; - if 
(++i12 == ne2) { - i12 = 0; - if (++i13 == ne3) { - i13 = 0; - } - } - } - } - } - } - - } else { - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - i10 += ne00 * ir0; - while (i10 >= ne0) { - i10 -= ne0; - if (++i11 == ne1) { - i11 = 0; - if (++i12 == ne2) { - i12 = 0; - if (++i13 == ne3) { - i13 = 0; - } - } - } - } - for (int64_t i01 = ir0; i01 < ir1; i01++) { - for (int64_t i00 = 0; i00 < ne00; i00++) { - const char * src0_ptr = - ((char *) src0->data + i00 * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03); - char * dst_ptr = ((char *) dst->data + i10 * nb0 + i11 * nb1 + i12 * nb2 + i13 * nb3); - - float tmp = type_conversion_table::to_f32(*(const src_t *) src0_ptr); - *(dst_t *) dst_ptr = type_conversion_table::from_f32(tmp); - - if (++i10 == ne0) { - i10 = 0; - if (++i11 == ne1) { - i11 = 0; - if (++i12 == ne2) { - i12 = 0; - if (++i13 == ne3) { - i13 = 0; - } - } - } - } - } - } - i10 += ne00 * (ne01 - ir1); - while (i10 >= ne0) { - i10 -= ne0; - if (++i11 == ne1) { - i11 = 0; - if (++i12 == ne2) { - i12 = 0; - if (++i13 == ne3) { - i13 = 0; - } - } - } - } - } - } - } -} - -template -static void ggml_compute_forward_dup_to_q(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); - GGML_ASSERT(!ggml_is_quantized(src0->type)); - - GGML_TENSOR_UNARY_OP_LOCALS - - const int ith = params->ith; // thread index - const int nth = params->nth; // number of threads - - // parallelize by rows - const int nr = ne01; - // number of rows per thread - const int dr = (nr + nth - 1) / nth; - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - if (ggml_is_contiguous(dst) && nb00 == sizeof(src_t) && ggml_get_type_traits_cpu(dst->type)->from_float) { - // casting non-quantized types --> intermediate f32 --> quantized - const ggml_from_float_t quantize_row_q = ggml_get_type_traits_cpu(dst->type)->from_float; - float * src0_f32 = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith; - - size_t id = 0; - size_t rs = nb0 * (ne00 / ggml_blck_size(dst->type)); - char * dst_ptr = (char *) dst->data; - - for (int i03 = 0; i03 < ne03; i03++) { - for (int i02 = 0; i02 < ne02; i02++) { - id += rs * ir0; - for (int i01 = ir0; i01 < ir1; i01++) { - const src_t * src0_ptr = (src_t *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); - - for (int i00 = 0; i00 < ne00; i00++) { - src0_f32[i00] = type_conversion_table::to_f32(src0_ptr[i00]); - } - - quantize_row_q(src0_f32, dst_ptr + id, ne00); - id += rs; - } - id += rs * (ne01 - ir1); - } - } - } else { - // printf("%s %s\n", ggml_type_name(src0->type), ggml_type_name(dst->type)); - GGML_ABORT("not implemented"); - } -} - -// A simplified version of ggml_compute_forward_dup that doesn't do float upcasting, and just plain old memcpy. 
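/* (Routing, for reference: ggml_compute_forward_dup at the end of this group sends
 *  src->type == dst->type to this byte-copy path, other non-quantized conversions to
 *  ggml_compute_forward_dup_flt, float -> quantized to ggml_compute_forward_dup_to_q,
 *  and quantized -> F32 to ggml_compute_forward_dup_from_q.) */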
-static void ggml_compute_forward_dup_bytes(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); - GGML_ASSERT(src0->type == dst->type); - - GGML_TENSOR_UNARY_OP_LOCALS; - - if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst)) { - ggml_compute_forward_dup_same_cont(params, dst); - return; - } - - const size_t type_size = ggml_type_size(src0->type); - - const int ith = params->ith; // thread index - const int nth = params->nth; // number of threads - - // parallelize by rows - const int nr = ne01; - // number of rows per thread - const int dr = (nr + nth - 1) / nth; - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - if (src0->type == dst->type && ggml_are_same_shape(src0, dst) && nb00 == type_size && nb0 == type_size) { - // copy by rows - const size_t rs = ggml_row_size(src0->type, ne00); - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = ir0; i01 < ir1; i01++) { - memcpy(((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3), - ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03), rs); - } - } - } - return; - } - - if (ggml_is_contiguous(dst)) { - size_t id = 0; - char * dst_ptr = (char *) dst->data; - const size_t rs = ne00 * type_size; - - if (nb00 == type_size) { - // src0 is contigous on first dimension, copy by rows - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - id += rs * ir0; - for (int64_t i01 = ir0; i01 < ir1; i01++) { - const char * src0_ptr = (char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03; - memcpy(dst_ptr + id, src0_ptr, rs); - id += rs; - } - id += rs * (ne01 - ir1); - } - } - } else { - //printf("%s: this is not optimal - fix me\n", __func__); - - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - id += rs * ir0; - for (int64_t i01 = ir0; i01 < ir1; i01++) { - for (int64_t i00 = 0; i00 < ne00; i00++) { - const char * src0_ptr = - (char *) src0->data + i00 * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03; - memcpy(dst_ptr + id, src0_ptr, type_size); - - id += type_size; - } - } - id += rs * (ne01 - ir1); - } - } - } - - return; - } - - // dst counters - int64_t k10 = 0; - int64_t i11 = 0; - int64_t i12 = 0; - int64_t i13 = 0; - - // number of blocks in a row - const int64_t nk00 = ne00 / ggml_blck_size(src0->type); - const int64_t nk0 = ne0 / ggml_blck_size(dst->type); - - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - k10 += nk00 * ir0; - while (k10 >= nk0) { - k10 -= nk0; - if (++i11 == ne1) { - i11 = 0; - if (++i12 == ne2) { - i12 = 0; - if (++i13 == ne3) { - i13 = 0; - } - } - } - } - for (int64_t i01 = ir0; i01 < ir1; i01++) { - for (int64_t k00 = 0; k00 < nk00; k00++) { - const char * src0_ptr = ((char *) src0->data + k00 * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03); - char * dst_ptr = ((char *) dst->data + k10 * nb0 + i11 * nb1 + i12 * nb2 + i13 * nb3); - - memcpy(dst_ptr, src0_ptr, type_size); - - if (++k10 == nk0) { - k10 = 0; - if (++i11 == ne1) { - i11 = 0; - if (++i12 == ne2) { - i12 = 0; - if (++i13 == ne3) { - i13 = 0; - } - } - } - } - } - } - k10 += nk00 * (ne01 - ir1); - while (k10 >= nk0) { - k10 -= nk0; - if (++i11 == ne1) { - i11 = 0; - if (++i12 == ne2) { - i12 = 0; - if (++i13 == ne3) { - i13 = 0; - } - } - } - } - } - } -} - -static void ggml_compute_forward_dup_from_q(const 
ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_TENSOR_BINARY_OP_LOCALS - - const ggml_type type = src0->type; - const ggml_to_float_t dequantize_row_q = ggml_get_type_traits(type)->to_float; - - size_t qk = ggml_blck_size(type); - const int64_t nr = ggml_nelements(src1) / qk; - - // destination must be contiguous in the first dimension - GGML_ASSERT(nb10 == ggml_type_size(dst->type)); - // must either have first dimension large enough to hold a row, or fully contiguous - GGML_ASSERT((ne10 % qk) == 0 || ggml_is_contiguous(dst)); - - const int ith = params->ith; - const int nth = params->nth; - - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int64_t ir = ir0; ir < ir1; ++ir) { - uint32_t i = ir * qk; - - const int64_t i03 = i / (ne00 * ne01 * ne02); - const int64_t i02 = (i - i03 * ne00 * ne01 * ne02) / (ne00 * ne01); - const int64_t i01 = (i - i03 * ne00 * ne01 * ne02 - i02 * ne01 * ne00) / ne00; - const int64_t i00 = i - i03 * ne00 * ne01 * ne02 - i02 * ne01 * ne00 - i01 * ne00; - const int64_t x_offset = (i00 / qk) * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03; - - const int64_t i13 = i / (ne10 * ne11 * ne12); - const int64_t i12 = (i - i13 * ne10 * ne11 * ne12) / (ne10 * ne11); - const int64_t i11 = (i - i13 * ne10 * ne11 * ne12 - i12 * ne10 * ne11) / ne10; - const int64_t i10 = i - i13 * ne10 * ne11 * ne12 - i12 * ne10 * ne11 - i11 * ne10; - const int64_t dst_offset = i10 * nb10 + i11 * nb11 + i12 * nb12 + i13 * nb13; - - dequantize_row_q((const void *) ((char *) src0->data + x_offset), (float *) ((char *) dst->data + dst_offset), - qk); - } -} - -void ggml_compute_forward_dup(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - if (src0->type == dst->type) { - ggml_compute_forward_dup_bytes(params, dst); - return; - } - - switch (src0->type) { - case GGML_TYPE_F16: - { - /**/ if (dst->type == GGML_TYPE_F16) { - ggml_compute_forward_dup_flt(params, dst); - } else if (dst->type == GGML_TYPE_BF16) { - ggml_compute_forward_dup_flt(params, dst); - } else if (dst->type == GGML_TYPE_F32) { - ggml_compute_forward_dup_flt(params, dst); - } else { - ggml_compute_forward_dup_to_q(params, dst); - } - } - break; - case GGML_TYPE_BF16: - { - /**/ if (dst->type == GGML_TYPE_F16) { - ggml_compute_forward_dup_flt(params, dst); - } else if (dst->type == GGML_TYPE_BF16) { - ggml_compute_forward_dup_flt(params, dst); - } else if (dst->type == GGML_TYPE_F32) { - ggml_compute_forward_dup_flt(params, dst); - } else { - ggml_compute_forward_dup_to_q(params, dst); - } - } - break; - case GGML_TYPE_F32: - { - /**/ if (dst->type == GGML_TYPE_F16) { - ggml_compute_forward_dup_flt(params, dst); - } else if (dst->type == GGML_TYPE_BF16) { - ggml_compute_forward_dup_flt(params, dst); - } else if (dst->type == GGML_TYPE_F32) { - ggml_compute_forward_dup_flt(params, dst); - } else if (dst->type == GGML_TYPE_I32) { - ggml_compute_forward_dup_flt(params, dst); - } else { - ggml_compute_forward_dup_to_q(params, dst); - } - } - break; - case GGML_TYPE_I32: - { - if (dst->type == GGML_TYPE_F32) { - ggml_compute_forward_dup_flt(params, dst); - } else { - GGML_ABORT("not implemented"); - } - } - break; - default: - { - if (ggml_is_quantized(src0->type) && dst->type == GGML_TYPE_F32) { - ggml_compute_forward_dup_from_q(params, dst); - break; - } - GGML_ABORT("fatal error"); - } - } -} 
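Every kernel in this file parallelizes the same way: the nr rows are split into chunks of dr = ceil(nr / nth) rows and thread ith owns the half-open range [ir0, ir1). A small self-contained sketch of that arithmetic (the dr/ir0/ir1 names mirror the kernels above; the MIN clamp is what keeps the last thread from running past nr):

#include <stdio.h>

#ifndef MIN
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#endif

// compute the row range [*ir0, *ir1) owned by thread ith out of nth threads
static void thread_row_range(int nr, int ith, int nth, int * ir0, int * ir1) {
    const int dr = (nr + nth - 1) / nth; // rows per thread, rounded up
    *ir0 = dr * ith;
    *ir1 = MIN(*ir0 + dr, nr);           // last thread may get fewer rows
}

int main(void) {
    const int nr = 10, nth = 4;
    for (int ith = 0; ith < nth; ith++) {
        int ir0, ir1;
        thread_row_range(nr, ith, nth, &ir0, &ir1);
        printf("thread %d: rows [%d, %d)\n", ith, ir0, ir1);
    }
    return 0;
}

With nr = 10 and nth = 4 this yields [0, 3), [3, 6), [6, 9), [9, 10); a thread whose ir0 already reaches nr simply gets an empty range and does no work.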
- -// ggml_compute_forward_add - -static void ggml_compute_forward_add_q_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst)); - - const int nr = ggml_nrows(src0); - - GGML_TENSOR_BINARY_OP_LOCALS - - const int ith = params->ith; - const int nth = params->nth; - - const ggml_type type = src0->type; - const ggml_type dtype = dst->type; - const ggml_to_float_t dequantize_row_q = ggml_get_type_traits(type)->to_float; - const ggml_from_float_t quantize_row_q = ggml_get_type_traits_cpu(dtype)->from_float; - - // we don't support permuted src0 or src1 - GGML_ASSERT(nb00 == ggml_type_size(type)); - GGML_ASSERT(nb10 == sizeof(float)); - - // dst cannot be transposed or permuted - GGML_ASSERT(nb0 <= nb1); - GGML_ASSERT(nb1 <= nb2); - GGML_ASSERT(nb2 <= nb3); - - GGML_ASSERT(ggml_is_quantized(src0->type)); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - float * wdata = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith; - - for (int ir = ir0; ir < ir1; ++ir) { - // src0 indices - const int i03 = ir / (ne02 * ne01); - const int i02 = (ir - i03 * ne02 * ne01) / ne01; - const int i01 = (ir - i03 * ne02 * ne01 - i02 * ne01); - - // src1 and dst are same shape as src0 => same indices - const int i13 = i03; - const int i12 = i02; - const int i11 = i01; - - const int i3 = i03; - const int i2 = i02; - const int i1 = i01; - - void * src0_row = (void *) ((char *) src0->data + (i01 * nb01 + i02 * nb02 + i03 * nb03)); - float * src1_row = (float *) ((char *) src1->data + (i11 * nb11 + i12 * nb12 + i13 * nb13)); - void * dst_row = (void *) ((char *) dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb3)); - - assert(ne00 % 32 == 0); - - // unquantize row from src0 to temp buffer - dequantize_row_q(src0_row, wdata, ne00); - // add src1 - ggml_vec_acc_f32(ne00, wdata, src1_row); - // quantize row to dst - if (quantize_row_q != NULL) { - quantize_row_q(wdata, dst_row, ne00); - } else { - memcpy(dst_row, wdata, ne0 * nb0); - } - } -} - -void ggml_compute_forward_add(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - case GGML_TYPE_F16: - case GGML_TYPE_BF16: - { - ggml_compute_forward_add_non_quantized(params, dst); - } - break; - case GGML_TYPE_Q4_0: - case GGML_TYPE_Q4_1: - case GGML_TYPE_Q5_0: - case GGML_TYPE_Q5_1: - case GGML_TYPE_Q8_0: - case GGML_TYPE_MXFP4: - case GGML_TYPE_Q2_K: - case GGML_TYPE_Q3_K: - case GGML_TYPE_Q4_K: - case GGML_TYPE_Q5_K: - case GGML_TYPE_Q6_K: - case GGML_TYPE_TQ1_0: - case GGML_TYPE_TQ2_0: - case GGML_TYPE_IQ2_XXS: - case GGML_TYPE_IQ2_XS: - case GGML_TYPE_IQ3_XXS: - case GGML_TYPE_IQ1_S: - case GGML_TYPE_IQ1_M: - case GGML_TYPE_IQ4_NL: - case GGML_TYPE_IQ4_XS: - case GGML_TYPE_IQ3_S: - case GGML_TYPE_IQ2_S: - { - ggml_compute_forward_add_q_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_add_id - -static void ggml_compute_forward_add_id_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - const ggml_tensor * src2 = dst->src[2]; - - GGML_ASSERT(dst->type == GGML_TYPE_F32); - GGML_ASSERT(src0->type == 
GGML_TYPE_F32); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT(src2->type == GGML_TYPE_I32); - - GGML_ASSERT(src0->nb[0] == sizeof(float)); - GGML_ASSERT(src1->nb[0] == sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nr = ggml_nrows(src0); - - GGML_TENSOR_TERNARY_OP_LOCALS - - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb10 == sizeof(float)); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int ir = ir0; ir < ir1; ++ir) { - // src0 indices - const int i3 = ir / (ne2 * ne1); - const int i2 = (ir - i3 * ne2 * ne1) / ne1; - const int i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); - - // src1 indices - const int i11 = *(int32_t *) ((char *) src2->data + i1 * nb20 + i2 * nb21); - - GGML_ASSERT(i11 >= 0 && i11 < ne11); - - ggml_vec_add_f32(ne0, (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1), - (float *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01), - (float *) ((char *) src1->data + i11 * nb11)); - } -} - -void ggml_compute_forward_add_id(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_add_id_f32(params, dst); - } - break; - default: - { - GGML_ABORT("unsupported type for ggml_compute_forward_add_id: %s", ggml_type_name(src0->type)); - } - } -} - -// ggml_compute_forward_add1 - -static void ggml_compute_forward_add1_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - GGML_ASSERT(ggml_is_scalar(src1)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nr = ggml_nrows(src0); - - GGML_TENSOR_UNARY_OP_LOCALS - - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb00 == sizeof(float)); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int ir = ir0; ir < ir1; ++ir) { - // src0 and dst are same shape => same indices - const int i3 = ir / (ne2 * ne1); - const int i2 = (ir - i3 * ne2 * ne1) / ne1; - const int i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); - -#ifdef GGML_USE_ACCELERATE - GGML_UNUSED(ggml_vec_add1_f32); - - vDSP_vadd((float *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01), 1, - (float *) ((char *) src1->data), 0, (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1), - 1, ne0); -#else - ggml_vec_add1_f32(ne0, (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1), - (float *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01), *(float *) src1->data); -#endif - } -} - -static void ggml_compute_forward_add1_f16_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - GGML_ASSERT(ggml_is_scalar(src1)); - - // scalar to add - const float v = *(float *) src1->data; - - const int ith = params->ith; - const int nth = params->nth; - - const int nr = ggml_nrows(src0); - - GGML_TENSOR_UNARY_OP_LOCALS - - GGML_ASSERT(src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F16); - - GGML_ASSERT(nb0 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); - - // 
rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int ir = ir0; ir < ir1; ++ir) { - // src0 and dst are same shape => same indices - const int i3 = ir / (ne2 * ne1); - const int i2 = (ir - i3 * ne2 * ne1) / ne1; - const int i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); - - ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1); - ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01); - for (int i = 0; i < ne0; i++) { - dst_ptr[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(src0_ptr[i]) + v); - } - } -} - -static void ggml_compute_forward_add1_f16_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - GGML_ASSERT(ggml_is_scalar(src1)); - - // scalar to add - const float v = GGML_CPU_FP16_TO_FP32(*(ggml_fp16_t *) src1->data); - - const int ith = params->ith; - const int nth = params->nth; - - const int nr = ggml_nrows(src0); - - GGML_TENSOR_UNARY_OP_LOCALS - - GGML_ASSERT(src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_F16); - GGML_ASSERT(dst->type == GGML_TYPE_F16); - - GGML_ASSERT(nb0 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int ir = ir0; ir < ir1; ++ir) { - // src0 and dst are same shape => same indices - const int i3 = ir / (ne2 * ne1); - const int i2 = (ir - i3 * ne2 * ne1) / ne1; - const int i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); - - ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1); - ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01); - for (int i = 0; i < ne0; i++) { - dst_ptr[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(src0_ptr[i]) + v); - } - } -} - -static void ggml_compute_forward_add1_q_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - GGML_ASSERT(ggml_is_scalar(src1)); - - // scalar to add - const float v = *(float *) src1->data; - - const int ith = params->ith; - const int nth = params->nth; - - const int nr = ggml_nrows(src0); - - GGML_TENSOR_UNARY_OP_LOCALS - - const ggml_type type = src0->type; - const ggml_to_float_t dequantize_row_q = ggml_get_type_traits(type)->to_float; - const ggml_from_float_t quantize_row_q = ggml_get_type_traits_cpu(type)->from_float; - - // we don't support permuted src0 - GGML_ASSERT(nb00 == ggml_type_size(type)); - - // dst cannot be transposed or permuted - GGML_ASSERT(nb0 <= nb1); - GGML_ASSERT(nb1 <= nb2); - GGML_ASSERT(nb2 <= nb3); - - GGML_ASSERT(ggml_is_quantized(src0->type)); - GGML_ASSERT(dst->type == src0->type); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - float * wdata = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32) * ith; - - for (int ir = ir0; ir < ir1; ++ir) { - // src0 and dst are same shape => same indices - const int i3 = ir / (ne2 * ne1); - const int i2 = (ir - i3 * ne2 * ne1) / ne1; 
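        // ir is a flattened row index over (ne1, ne2, ne3); the i3/i2 computed just above
        // and the i1 just below recover the per-dimension indices. For example, with
        // ne1 = 4, ne2 = 3 and ir = 10: i3 = 10/12 = 0, i2 = (10 - 0)/4 = 2,
        // i1 = 10 - 0 - 2*4 = 2, and 0*12 + 2*4 + 2 == 10 round-trips back to ir.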
- const int i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); - - void * src0_row = (void *) ((char *) src0->data + (i1 * nb01 + i2 * nb02 + i3 * nb03)); - void * dst_row = (void *) ((char *) dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb0)); - - assert(ne0 % 32 == 0); - - // unquantize row from src0 to temp buffer - dequantize_row_q(src0_row, wdata, ne0); - // add src1 - ggml_vec_acc1_f32(ne0, wdata, v); - // quantize row to dst - quantize_row_q(wdata, dst_row, ne0); - } -} - -static void ggml_compute_forward_add1_bf16_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - GGML_ASSERT(ggml_is_scalar(src1)); - - // scalar to add - const float v = *(float *) src1->data; - - const int ith = params->ith; - const int nth = params->nth; - - const int nr = ggml_nrows(src0); - - GGML_TENSOR_UNARY_OP_LOCALS - - GGML_ASSERT(src0->type == GGML_TYPE_BF16); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_BF16); - - GGML_ASSERT(nb0 == sizeof(ggml_bf16_t)); - GGML_ASSERT(nb00 == sizeof(ggml_bf16_t)); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int ir = ir0; ir < ir1; ++ir) { - // src0 and dst are same shape => same indices - const int i3 = ir / (ne2 * ne1); - const int i2 = (ir - i3 * ne2 * ne1) / ne1; - const int i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); - - ggml_bf16_t * dst_ptr = (ggml_bf16_t *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1); - ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01); - for (int i = 0; i < ne0; i++) { - dst_ptr[i] = GGML_FP32_TO_BF16(GGML_BF16_TO_FP32(src0_ptr[i]) + v); - } - } -} - -static void ggml_compute_forward_add1_bf16_bf16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - GGML_ASSERT(ggml_is_scalar(src1)); - - // scalar to add - const float v = GGML_BF16_TO_FP32(*(ggml_bf16_t *) src1->data); - - const int ith = params->ith; - const int nth = params->nth; - - const int nr = ggml_nrows(src0); - - GGML_TENSOR_UNARY_OP_LOCALS - - GGML_ASSERT(src0->type == GGML_TYPE_BF16); - GGML_ASSERT(src1->type == GGML_TYPE_BF16); - GGML_ASSERT(dst->type == GGML_TYPE_BF16); - - GGML_ASSERT(nb0 == sizeof(ggml_bf16_t)); - GGML_ASSERT(nb00 == sizeof(ggml_bf16_t)); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int ir = ir0; ir < ir1; ++ir) { - // src0 and dst are same shape => same indices - const int i3 = ir / (ne2 * ne1); - const int i2 = (ir - i3 * ne2 * ne1) / ne1; - const int i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); - - ggml_bf16_t * dst_ptr = (ggml_bf16_t *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1); - ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01); - for (int i = 0; i < ne0; i++) { - dst_ptr[i] = GGML_FP32_TO_BF16(GGML_BF16_TO_FP32(src0_ptr[i]) + v); - } - } -} - -void ggml_compute_forward_add1(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_add1_f32(params, dst); - 
} - break; - case GGML_TYPE_F16: - { - if (src1->type == GGML_TYPE_F16) { - ggml_compute_forward_add1_f16_f16(params, dst); - } else if (src1->type == GGML_TYPE_F32) { - ggml_compute_forward_add1_f16_f32(params, dst); - } else { - GGML_ABORT("fatal error"); - } - } - break; - case GGML_TYPE_BF16: - { - if (src1->type == GGML_TYPE_BF16) { - ggml_compute_forward_add1_bf16_bf16(params, dst); - } else if (src1->type == GGML_TYPE_F32) { - ggml_compute_forward_add1_bf16_f32(params, dst); - } else { - GGML_ABORT("fatal error"); - } - } - break; - case GGML_TYPE_Q4_0: - case GGML_TYPE_Q4_1: - case GGML_TYPE_Q5_0: - case GGML_TYPE_Q5_1: - case GGML_TYPE_Q8_0: - case GGML_TYPE_Q8_1: - case GGML_TYPE_MXFP4: - case GGML_TYPE_Q2_K: - case GGML_TYPE_Q3_K: - case GGML_TYPE_Q4_K: - case GGML_TYPE_Q5_K: - case GGML_TYPE_Q6_K: - case GGML_TYPE_TQ1_0: - case GGML_TYPE_TQ2_0: - case GGML_TYPE_IQ2_XXS: - case GGML_TYPE_IQ2_XS: - case GGML_TYPE_IQ3_XXS: - case GGML_TYPE_IQ1_S: - case GGML_TYPE_IQ1_M: - case GGML_TYPE_IQ4_NL: - case GGML_TYPE_IQ4_XS: - case GGML_TYPE_IQ3_S: - case GGML_TYPE_IQ2_S: - { - ggml_compute_forward_add1_q_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_acc - -static void ggml_compute_forward_acc_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0)); - - // view src0 and dst with these strides and data offset inbytes during acc - // nb0 is implicitly element_size because src0 and dst are contiguous - size_t nb1 = ((int32_t *) dst->op_params)[0]; - size_t nb2 = ((int32_t *) dst->op_params)[1]; - size_t nb3 = ((int32_t *) dst->op_params)[2]; - size_t offset = ((int32_t *) dst->op_params)[3]; - bool inplace = (bool) ((int32_t *) dst->op_params)[4]; - - if (!inplace) { - if (params->ith == 0) { - // memcpy needs to be synchronized across threads to avoid race conditions. - // => do it in INIT phase - memcpy(((char *) dst->data), ((char *) src0->data), ggml_nbytes(dst)); - } - ggml_barrier(params->threadpool); - } - - const int ith = params->ith; - const int nth = params->nth; - - const int nr = ggml_nrows(src1); - const int nc = src1->ne[0]; - - GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) - GGML_TENSOR_LOCALS(size_t, nb1, src1, nb) - - // src0 and dst as viewed during acc - const size_t nb0 = ggml_element_size(src0); - - const size_t nb00 = nb0; - const size_t nb01 = nb1; - const size_t nb02 = nb2; - const size_t nb03 = nb3; - - GGML_ASSERT(offset + (ne10 == 0 ? 0 : ne10 - 1) * nb0 + (ne11 == 0 ? 0 : ne11 - 1) * nb1 + - (ne12 == 0 ? 0 : ne12 - 1) * nb2 + (ne13 == 0 ? 0 : ne13 - 1) * nb3 < - ggml_nbytes(dst)); - GGML_ASSERT(offset + (ne10 == 0 ? 0 : ne10 - 1) * nb00 + (ne11 == 0 ? 0 : ne11 - 1) * nb01 + - (ne12 == 0 ? 0 : ne12 - 1) * nb02 + (ne13 == 0 ? 
0 : ne13 - 1) * nb03 < - ggml_nbytes(src0)); - - GGML_ASSERT(nb10 == sizeof(float)); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int ir = ir0; ir < ir1; ++ir) { - // src0 and dst are viewed with shape of src1 and offset - // => same indices - const int i3 = ir / (ne12 * ne11); - const int i2 = (ir - i3 * ne12 * ne11) / ne11; - const int i1 = (ir - i3 * ne12 * ne11 - i2 * ne11); - -#ifdef GGML_USE_ACCELERATE - vDSP_vadd((float *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01 + offset), 1, - (float *) ((char *) src1->data + i3 * nb13 + i2 * nb12 + i1 * nb11), 1, - (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1 + offset), 1, nc); -#else - ggml_vec_add_f32(nc, (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1 + offset), - (float *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01 + offset), - (float *) ((char *) src1->data + i3 * nb13 + i2 * nb12 + i1 * nb11)); -#endif - } -} - -void ggml_compute_forward_acc(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_acc_f32(params, dst); - } - break; - case GGML_TYPE_F16: - case GGML_TYPE_BF16: - case GGML_TYPE_Q4_0: - case GGML_TYPE_Q4_1: - case GGML_TYPE_Q5_0: - case GGML_TYPE_Q5_1: - case GGML_TYPE_Q8_0: - case GGML_TYPE_Q8_1: - case GGML_TYPE_MXFP4: - case GGML_TYPE_Q2_K: - case GGML_TYPE_Q3_K: - case GGML_TYPE_Q4_K: - case GGML_TYPE_Q5_K: - case GGML_TYPE_Q6_K: - case GGML_TYPE_TQ1_0: - case GGML_TYPE_TQ2_0: - case GGML_TYPE_IQ2_XXS: - case GGML_TYPE_IQ2_XS: - case GGML_TYPE_IQ3_XXS: - case GGML_TYPE_IQ1_S: - case GGML_TYPE_IQ1_M: - case GGML_TYPE_IQ4_NL: - case GGML_TYPE_IQ4_XS: - case GGML_TYPE_IQ3_S: - case GGML_TYPE_IQ2_S: - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_sum - -static void ggml_compute_forward_sum_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - if (params->ith != 0) { - return; - } - - assert(ggml_is_scalar(dst)); - assert(src0->nb[0] == sizeof(float)); - - GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) - GGML_TENSOR_LOCALS(size_t, nb0, src0, nb) - - ggml_float sum = 0; - ggml_float row_sum = 0; - - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = 0; i01 < ne01; i01++) { - ggml_vec_sum_f32_ggf(ne00, &row_sum, - (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03)); - sum += row_sum; - } - } - } - ((float *) dst->data)[0] = sum; -} - -static void ggml_compute_forward_sum_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - if (params->ith != 0) { - return; - } - - assert(ggml_is_scalar(dst)); - - assert(src0->nb[0] == sizeof(ggml_fp16_t)); - - GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) - GGML_TENSOR_LOCALS(size_t, nb0, src0, nb) - - float sum = 0; - float row_sum = 0; - - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = 0; i01 < ne01; i01++) { - ggml_vec_sum_f16_ggf(ne00, &row_sum, - (ggml_fp16_t *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03)); - sum += row_sum; - } - } - } - ((ggml_fp16_t *) dst->data)[0] = GGML_CPU_FP32_TO_FP16(sum); -} - -static void ggml_compute_forward_sum_bf16(const ggml_compute_params * params, ggml_tensor * dst) { - 
const ggml_tensor * src0 = dst->src[0]; - - if (params->ith != 0) { - return; - } - - assert(ggml_is_scalar(dst)); - - assert(src0->nb[0] == sizeof(ggml_bf16_t)); - - GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) - GGML_TENSOR_LOCALS(size_t, nb0, src0, nb) - - float sum = 0; - float row_sum = 0; - - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = 0; i01 < ne01; i01++) { - ggml_vec_sum_bf16_ggf(ne00, &row_sum, - (ggml_bf16_t *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03)); - sum += row_sum; - } - } - } - ((ggml_bf16_t *) dst->data)[0] = GGML_FP32_TO_BF16(sum); -} - -void ggml_compute_forward_sum(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_sum_f32(params, dst); - } - break; - case GGML_TYPE_F16: - { - ggml_compute_forward_sum_f16(params, dst); - } - break; - case GGML_TYPE_BF16: - { - ggml_compute_forward_sum_bf16(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_cumsum - -static void ggml_compute_forward_cumsum_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_ASSERT(src0->nb[0] == sizeof(float)); - GGML_ASSERT(dst->nb[0] == sizeof(float)); - - GGML_TENSOR_UNARY_OP_LOCALS - - GGML_ASSERT(ne0 == ne00); - GGML_ASSERT(ne1 == ne01); - GGML_ASSERT(ne2 == ne02); - GGML_ASSERT(ne3 == ne03); - - const auto [ir0, ir1] = get_thread_range(params, src0); - - for (int64_t ir = ir0; ir < ir1; ++ir) { - const int64_t i03 = ir / (ne02 * ne01); - const int64_t i02 = (ir - i03 * ne02 * ne01) / ne01; - const int64_t i01 = (ir - i03 * ne02 * ne01 - i02 * ne01); - - float * src_row = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); - float * dst_row = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); - - ggml_vec_cumsum_f32(ne00, dst_row, src_row); - } -} - -void ggml_compute_forward_cumsum(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_cumsum_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_sum_rows - -static void ggml_compute_forward_sum_rows_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - if (params->ith != 0) { - return; - } - - GGML_ASSERT(src0->nb[0] == sizeof(float)); - GGML_ASSERT(dst->nb[0] == sizeof(float)); - - GGML_TENSOR_UNARY_OP_LOCALS - - GGML_ASSERT(ne0 == 1); - GGML_ASSERT(ne1 == ne01); - GGML_ASSERT(ne2 == ne02); - GGML_ASSERT(ne3 == ne03); - - for (int64_t i3 = 0; i3 < ne03; i3++) { - for (int64_t i2 = 0; i2 < ne02; i2++) { - for (int64_t i1 = 0; i1 < ne01; i1++) { - float * src_row = (float *) ((char *) src0->data + i1 * nb01 + i2 * nb02 + i3 * nb03); - float * dst_row = (float *) ((char *) dst->data + i1 * nb1 + i2 * nb2 + i3 * nb3); - float row_sum = 0; - ggml_vec_sum_f32(ne00, &row_sum, src_row); - dst_row[0] = row_sum; - } - } - } -} - -void ggml_compute_forward_sum_rows(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_sum_rows_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_mean - -static void 
ggml_compute_forward_mean_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - if (params->ith != 0) { - return; - } - - assert(src0->nb[0] == sizeof(float)); - - GGML_TENSOR_UNARY_OP_LOCALS - - assert(ne0 == 1); - assert(ne1 == ne01); - assert(ne2 == ne02); - assert(ne3 == ne03); - - GGML_UNUSED(ne0); - GGML_UNUSED(ne1); - GGML_UNUSED(ne2); - GGML_UNUSED(ne3); - - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = 0; i01 < ne01; i01++) { - ggml_vec_sum_f32(ne00, (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3), - (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03)); - - *(float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3) /= (float) ne00; - } - } - } -} - -void ggml_compute_forward_mean(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_mean_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_argmax - -static void ggml_compute_forward_argmax_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - if (params->ith != 0) { - return; - } - - assert(src0->nb[0] == sizeof(float)); - assert(dst->nb[0] == sizeof(float)); - - const int64_t ne00 = src0->ne[0]; - const int64_t ne01 = src0->ne[1]; - - const size_t nb01 = src0->nb[1]; - const size_t nb0 = dst->nb[0]; - - for (int64_t i1 = 0; i1 < ne01; i1++) { - float * src = (float *) ((char *) src0->data + i1 * nb01); - int32_t * dst_ = (int32_t *) ((char *) dst->data + i1 * nb0); - int v = 0; - ggml_vec_argmax_f32(ne00, &v, src); - dst_[0] = v; - } -} - -void ggml_compute_forward_argmax(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_argmax_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_count_equal - -static void ggml_compute_forward_count_equal_i32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_TENSOR_BINARY_OP_LOCALS; - - GGML_ASSERT(src0->type == GGML_TYPE_I32); - GGML_ASSERT(src1->type == GGML_TYPE_I32); - GGML_ASSERT(ggml_are_same_shape(src0, src1)); - GGML_ASSERT(ggml_is_scalar(dst)); - GGML_ASSERT(dst->type == GGML_TYPE_I64); - - const int64_t nr = ggml_nrows(src0); - - const int ith = params->ith; - const int nth = params->nth; - - int64_t * sums = (int64_t *) params->wdata; - int64_t sum_thread = 0; - - // rows per thread - const int64_t dr = (nr + nth - 1) / nth; - - // row range for this thread - const int64_t ir0 = dr * ith; - const int64_t ir1 = MIN(ir0 + dr, nr); - - for (int64_t ir = ir0; ir < ir1; ++ir) { - const int64_t i03 = ir / (ne02 * ne01); - const int64_t i02 = (ir - i03 * ne03) / ne01; - const int64_t i01 = ir - i03 * ne03 - i02 * ne02; - - const char * data0 = (const char *) src0->data + i03 * nb03 + i02 * nb02 + i01 * nb01; - const char * data1 = (const char *) src1->data + i03 * nb13 + i02 * nb12 + i01 * nb11; - - for (int64_t i00 = 0; i00 < ne00; ++i00) { - const int32_t val0 = *((const int32_t *) (data0 + i00 * nb00)); - const int32_t val1 = *((const int32_t *) (data1 + i00 * nb10)); - - sum_thread += val0 == val1; - } - } - if (ith != 0) 
{ - sums[ith] = sum_thread; - } - ggml_barrier(params->threadpool); - - if (ith != 0) { - return; - } - - for (int ith_other = 1; ith_other < nth; ++ith_other) { - sum_thread += sums[ith_other]; - } - *((int64_t *) dst->data) = sum_thread; -} - -void ggml_compute_forward_count_equal(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_I32: - { - ggml_compute_forward_count_equal_i32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_repeat - -static void ggml_compute_forward_repeat_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - if (params->ith != 0) { - return; - } - - GGML_ASSERT(ggml_can_repeat(src0, dst)); - - GGML_TENSOR_UNARY_OP_LOCALS - - // guaranteed to be an integer due to the check in ggml_can_repeat - const int nr0 = (int) (ne0 / ne00); - const int nr1 = (int) (ne1 / ne01); - const int nr2 = (int) (ne2 / ne02); - const int nr3 = (int) (ne3 / ne03); - - // TODO: support for transposed / permuted tensors - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb00 == sizeof(float)); - - // TODO: maybe this is not optimal? - for (int i3 = 0; i3 < nr3; i3++) { - for (int k3 = 0; k3 < ne03; k3++) { - for (int i2 = 0; i2 < nr2; i2++) { - for (int k2 = 0; k2 < ne02; k2++) { - for (int i1 = 0; i1 < nr1; i1++) { - for (int k1 = 0; k1 < ne01; k1++) { - for (int i0 = 0; i0 < nr0; i0++) { - ggml_vec_cpy_f32( - ne00, - (float *) ((char *) dst->data + (i3 * ne03 + k3) * nb3 + (i2 * ne02 + k2) * nb2 + - (i1 * ne01 + k1) * nb1 + (i0 * ne00) * nb0), - (float *) ((char *) src0->data + (k3) *nb03 + (k2) *nb02 + (k1) *nb01)); - } - } - } - } - } - } - } -} - -static void ggml_compute_forward_repeat_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - if (params->ith != 0) { - return; - } - - GGML_ASSERT(ggml_can_repeat(src0, dst)); - - GGML_TENSOR_UNARY_OP_LOCALS - - // guaranteed to be an integer due to the check in ggml_can_repeat - const int nr0 = (int) (ne0 / ne00); - const int nr1 = (int) (ne1 / ne01); - const int nr2 = (int) (ne2 / ne02); - const int nr3 = (int) (ne3 / ne03); - - // TODO: support for transposed / permuted tensors - GGML_ASSERT(nb0 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); - - // TODO: maybe this is not optimal? 
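    // the seven nested loops below tile src0 across dst: dst holds nr0*nr1*nr2*nr3
    // copies of src0, with (i0, i1, i2, i3) selecting the copy and (k1, k2, k3) the
    // source row being copied. For example, repeating a [2, 3] tensor to [4, 6]
    // gives nr0 = 2, nr1 = 2, i.e. four tiles.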
- for (int i3 = 0; i3 < nr3; i3++) { - for (int k3 = 0; k3 < ne03; k3++) { - for (int i2 = 0; i2 < nr2; i2++) { - for (int k2 = 0; k2 < ne02; k2++) { - for (int i1 = 0; i1 < nr1; i1++) { - for (int k1 = 0; k1 < ne01; k1++) { - for (int i0 = 0; i0 < nr0; i0++) { - ggml_fp16_t * y = (ggml_fp16_t *) ((char *) dst->data + (i3 * ne03 + k3) * nb3 + - (i2 * ne02 + k2) * nb2 + (i1 * ne01 + k1) * nb1 + - (i0 * ne00) * nb0); - ggml_fp16_t * x = - (ggml_fp16_t *) ((char *) src0->data + (k3) *nb03 + (k2) *nb02 + (k1) *nb01); - // ggml_vec_cpy_f16(ne00, y, x) - for (int i = 0; i < ne00; ++i) { - y[i] = x[i]; - } - } - } - } - } - } - } - } -} - -void ggml_compute_forward_repeat(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F16: - case GGML_TYPE_BF16: - case GGML_TYPE_I16: - { - ggml_compute_forward_repeat_f16(params, dst); - } - break; - case GGML_TYPE_F32: - case GGML_TYPE_I32: - { - ggml_compute_forward_repeat_f32(params, dst); - } - break; - // TODO: templateify the implemenation and support for I64 - // ref https://github.com/ggml-org/llama.cpp/pull/14274#discussion_r2169492225 - //case GGML_TYPE_I64: - // { - // ggml_compute_forward_repeat_i64(params, dst); - // } break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_repeat_back - -static void ggml_compute_forward_repeat_back_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - if (params->ith != 0) { - return; - } - - GGML_ASSERT(ggml_can_repeat(dst, src0)); - - GGML_TENSOR_UNARY_OP_LOCALS - - // guaranteed to be an integer due to the check in ggml_can_repeat - const int nr0 = (int) (ne00 / ne0); - const int nr1 = (int) (ne01 / ne1); - const int nr2 = (int) (ne02 / ne2); - const int nr3 = (int) (ne03 / ne3); - - // TODO: support for transposed / permuted tensors - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb00 == sizeof(float)); - - if (ggml_is_contiguous(dst)) { - ggml_vec_set_f32(ne0 * ne1 * ne2 * ne3, (float *) dst->data, 0); - } else { - for (int k3 = 0; k3 < ne3; k3++) { - for (int k2 = 0; k2 < ne2; k2++) { - for (int k1 = 0; k1 < ne1; k1++) { - ggml_vec_set_f32(ne0, (float *) ((char *) dst->data + k1 * nb1 + k2 * nb2 + k3 * nb3), 0); - } - } - } - } - - // TODO: maybe this is not optimal? 
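    // repeat_back is the adjoint of repeat: src0 consists of nr0*nr1*nr2*nr3 tiles
    // with dst's shape, and the loops below accumulate (ggml_vec_acc_f32) each tile
    // back into dst, which was zeroed just above.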
- for (int i3 = 0; i3 < nr3; i3++) { - for (int k3 = 0; k3 < ne3; k3++) { - for (int i2 = 0; i2 < nr2; i2++) { - for (int k2 = 0; k2 < ne2; k2++) { - for (int i1 = 0; i1 < nr1; i1++) { - for (int k1 = 0; k1 < ne1; k1++) { - for (int i0 = 0; i0 < nr0; i0++) { - ggml_vec_acc_f32( - ne0, (float *) ((char *) dst->data + (k3) *nb3 + (k2) *nb2 + (k1) *nb1), - (float *) ((char *) src0->data + (i3 * ne3 + k3) * nb03 + (i2 * ne2 + k2) * nb02 + - (i1 * ne1 + k1) * nb01 + (i0 * ne0) * nb00)); - } - } - } - } - } - } - } -} - -void ggml_compute_forward_repeat_back(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_repeat_back_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_concat - -static void ggml_compute_forward_concat_any(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - const size_t len = ggml_type_size(src0->type); - - const int ith = params->ith; - const int nth = params->nth; - - GGML_TENSOR_BINARY_OP_LOCALS - - const int32_t dim = ggml_get_op_params_i32(dst, 0); - - GGML_ASSERT(dim >= 0 && dim < 4); - - int64_t o[4] = { 0, 0, 0, 0 }; - o[dim] = src0->ne[dim]; - - const char * x; - - // TODO: smarter multi-theading - for (int i3 = 0; i3 < ne3; i3++) { - for (int i2 = ith; i2 < ne2; i2 += nth) { - for (int i1 = 0; i1 < ne1; i1++) { - for (int i0 = 0; i0 < ne0; i0++) { - if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 < ne03) { - x = (const char *) src0->data + (i0) *nb00 + (i1) *nb01 + (i2) *nb02 + (i3) *nb03; - } else { - x = (const char *) src1->data + (i0 - o[0]) * nb10 + (i1 - o[1]) * nb11 + (i2 - o[2]) * nb12 + - (i3 - o[3]) * nb13; - } - - char * y = (char *) dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3; - - memcpy(y, x, len); - } - } - } - } -} - -static void ggml_compute_forward_concat_i8(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(ggml_type_size(src0->type) == sizeof(int8_t)); - - const int ith = params->ith; - const int nth = params->nth; - - GGML_TENSOR_BINARY_OP_LOCALS - - const int32_t dim = ggml_get_op_params_i32(dst, 0); - - GGML_ASSERT(dim >= 0 && dim < 4); - - int64_t o[4] = { 0, 0, 0, 0 }; - o[dim] = src0->ne[dim]; - - const int8_t * x; - - // TODO: smarter multi-theading - for (int i3 = 0; i3 < ne3; i3++) { - for (int i2 = ith; i2 < ne2; i2 += nth) { - for (int i1 = 0; i1 < ne1; i1++) { - for (int i0 = 0; i0 < ne0; i0++) { - if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 < ne03) { - x = (const int8_t *) ((const char *) src0->data + (i0) *nb00 + (i1) *nb01 + (i2) *nb02 + - (i3) *nb03); - } else { - x = (const int8_t *) ((const char *) src1->data + (i0 - o[0]) * nb10 + (i1 - o[1]) * nb11 + - (i2 - o[2]) * nb12 + (i3 - o[3]) * nb13); - } - - int8_t * y = (int8_t *) ((char *) dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3); - - *y = *x; - } - } - } - } -} - -static void ggml_compute_forward_concat_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(ggml_type_size(src0->type) == sizeof(ggml_fp16_t)); - - const int ith = params->ith; - const int nth = params->nth; - - GGML_TENSOR_BINARY_OP_LOCALS - - const int32_t dim = ggml_get_op_params_i32(dst, 0); - - GGML_ASSERT(dim >= 
0 && dim < 4); - - int64_t o[4] = { 0, 0, 0, 0 }; - o[dim] = src0->ne[dim]; - - const ggml_fp16_t * x; - - // TODO: smarter multi-theading - for (int i3 = 0; i3 < ne3; i3++) { - for (int i2 = ith; i2 < ne2; i2 += nth) { - for (int i1 = 0; i1 < ne1; i1++) { - for (int i0 = 0; i0 < ne0; i0++) { - if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 < ne03) { - x = (const ggml_fp16_t *) ((const char *) src0->data + (i0) *nb00 + (i1) *nb01 + (i2) *nb02 + - (i3) *nb03); - } else { - x = (const ggml_fp16_t *) ((const char *) src1->data + (i0 - o[0]) * nb10 + (i1 - o[1]) * nb11 + - (i2 - o[2]) * nb12 + (i3 - o[3]) * nb13); - } - - ggml_fp16_t * y = (ggml_fp16_t *) ((char *) dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3); - - *y = *x; - } - } - } - } -} - -static void ggml_compute_forward_concat_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(ggml_type_size(src0->type) == sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - GGML_TENSOR_BINARY_OP_LOCALS - - const int32_t dim = ggml_get_op_params_i32(dst, 0); - - GGML_ASSERT(dim >= 0 && dim < 4); - - int64_t o[4] = { 0, 0, 0, 0 }; - o[dim] = src0->ne[dim]; - - const float * x; - - // TODO: smarter multi-theading - for (int i3 = 0; i3 < ne3; i3++) { - for (int i2 = ith; i2 < ne2; i2 += nth) { - for (int i1 = 0; i1 < ne1; i1++) { - for (int i0 = 0; i0 < ne0; i0++) { - if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 < ne03) { - x = (const float *) ((const char *) src0->data + (i0) *nb00 + (i1) *nb01 + (i2) *nb02 + - (i3) *nb03); - } else { - x = (const float *) ((const char *) src1->data + (i0 - o[0]) * nb10 + (i1 - o[1]) * nb11 + - (i2 - o[2]) * nb12 + (i3 - o[3]) * nb13); - } - - float * y = (float *) ((char *) dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3); - - *y = *x; - } - } - } - } -} - -void ggml_compute_forward_concat(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F16: - case GGML_TYPE_BF16: - case GGML_TYPE_I16: - { - ggml_compute_forward_concat_f16(params, dst); - } - break; - case GGML_TYPE_I8: - { - ggml_compute_forward_concat_i8(params, dst); - } - break; - case GGML_TYPE_F32: - case GGML_TYPE_I32: - { - ggml_compute_forward_concat_f32(params, dst); - } - break; - default: - { - ggml_compute_forward_concat_any(params, dst); - } - } -} - -// ggml_compute_forward_gelu - -static void ggml_compute_forward_gelu_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - assert(ggml_is_contiguous_1(src0)); - assert(ggml_is_contiguous_1(dst)); - assert(ggml_are_same_shape(src0, dst)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_vec_gelu_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), - (float *) ((char *) src0->data + i1 * (src0->nb[1]))); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - GGML_UNUSED(x); - assert(!isnan(x)); - assert(!isinf(x)); - } -#endif - } -} - -static void ggml_compute_forward_gelu_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const 
ggml_tensor * src0 = dst->src[0]; - - assert(ggml_is_contiguous_1(src0)); - assert(ggml_is_contiguous_1(dst)); - assert(ggml_are_same_shape(src0, dst)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_vec_gelu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), - (ggml_fp16_t *) ((char *) src0->data + i1 * (src0->nb[1]))); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - const float v = GGML_CPU_FP16_TO_FP32(x); - GGML_UNUSED(v); - assert(!isnan(v)); - assert(!isinf(v)); - } -#endif - } -} - -static void ggml_compute_forward_gelu(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_gelu_f32(params, dst); - } - break; - case GGML_TYPE_F16: - { - ggml_compute_forward_gelu_f16(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_fill - -static void ggml_compute_forward_fill_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const float c = ggml_get_op_params_f32(dst, 0); - - GGML_TENSOR_LOCALS(int64_t, ne, dst, ne); - GGML_TENSOR_LOCALS(size_t, nb, dst, nb); - - const auto [ir0, ir1] = get_thread_range(params, dst); - - for (int64_t ir = ir0; ir < ir1; ++ir) { - const int64_t i03 = ir / (ne2 * ne1); - const int64_t i02 = (ir - i03 * ne2 * ne1) / ne1; - const int64_t i01 = (ir - i03 * ne2 * ne1 - i02 * ne1); - - float * dst_ptr = (float *) ((char *) dst->data + i03 * nb3 + i02 * nb2 + i01 * nb1); - - ggml_vec_set_f32(ne0, dst_ptr, c); - } -} - -void ggml_compute_forward_fill(const ggml_compute_params * params, ggml_tensor * dst) { - ggml_compute_forward_fill_f32(params, dst); -} - -// ggml_compute_tri - -static void ggml_compute_forward_tri_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - const ggml_tri_type ttype = (ggml_tri_type) ggml_get_op_params_i32(dst, 0); - - GGML_ASSERT(ggml_is_contiguous(src0)); - - GGML_TENSOR_UNARY_OP_LOCALS - - const auto [ir0, ir1] = get_thread_range(params, src0); - - bool (*bipred)(int, int); - - switch (ttype) { - case GGML_TRI_TYPE_LOWER: - bipred = [](int i, int r) { - return i < r; - }; - break; - case GGML_TRI_TYPE_LOWER_DIAG: - bipred = [](int i, int r) { - return i <= r; - }; - break; - case GGML_TRI_TYPE_UPPER: - bipred = [](int i, int r) { - return i > r; - }; - break; - case GGML_TRI_TYPE_UPPER_DIAG: - bipred = [](int i, int r) { - return i >= r; - }; - break; - default: - GGML_ABORT("invalid tri type"); - } - - for (int64_t ir = ir0; ir < ir1; ++ir) { - const int64_t i03 = ir / (ne02 * ne01); - const int64_t i02 = (ir - i03 * ne02 * ne01) / ne01; - const int64_t i01 = (ir - i03 * ne02 * ne01 - i02 * ne01); - - const float * src_ptr = (const float *) ((const char *) src0->data + i03 * nb03 + i02 * nb02 + i01 * nb01); - float * dst_ptr = (float *) ((char *) dst->data + i03 * nb3 + i02 * nb2 + i01 * nb1); - - for (int i0 = 0; i0 < ne0; ++i0) { - dst_ptr[i0] = bipred(i0, i01) ? 
src_ptr[i0] : 0.0f; - } - } -} - -void ggml_compute_forward_tri(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_tri_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_gelu_erf - -static void ggml_compute_forward_gelu_erf_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - assert(ggml_is_contiguous_1(src0)); - assert(ggml_is_contiguous_1(dst)); - assert(ggml_are_same_shape(src0, dst)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_vec_gelu_erf_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), - (float *) ((char *) src0->data + i1 * (src0->nb[1]))); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - GGML_UNUSED(x); - assert(!isnan(x)); - assert(!isinf(x)); - } -#endif - } -} - -static void ggml_compute_forward_gelu_erf_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - assert(ggml_is_contiguous_1(src0)); - assert(ggml_is_contiguous_1(dst)); - assert(ggml_are_same_shape(src0, dst)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_vec_gelu_erf_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), - (ggml_fp16_t *) ((char *) src0->data + i1 * (src0->nb[1]))); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - const float v = GGML_CPU_FP16_TO_FP32(x); - GGML_UNUSED(v); - assert(!isnan(v)); - assert(!isinf(v)); - } -#endif - } -} - -static void ggml_compute_forward_gelu_erf(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_gelu_erf_f32(params, dst); - } - break; - case GGML_TYPE_F16: - { - ggml_compute_forward_gelu_erf_f16(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_gelu_quick - -static void ggml_compute_forward_gelu_quick_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - assert(ggml_is_contiguous_1(src0)); - assert(ggml_is_contiguous_1(dst)); - assert(ggml_are_same_shape(src0, dst)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_vec_gelu_quick_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), - (float *) ((char *) src0->data + i1 * (src0->nb[1]))); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const 
float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - GGML_UNUSED(x); - assert(!isnan(x)); - assert(!isinf(x)); - } -#endif - } -} - -static void ggml_compute_forward_gelu_quick_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - assert(ggml_is_contiguous_1(src0)); - assert(ggml_is_contiguous_1(dst)); - assert(ggml_are_same_shape(src0, dst)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_vec_gelu_quick_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), - (ggml_fp16_t *) ((char *) src0->data + i1 * (src0->nb[1]))); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - const float v = GGML_CPU_FP16_TO_FP32(x); - GGML_UNUSED(v); - assert(!isnan(v)); - assert(!isinf(v)); - } -#endif - } -} - -static void ggml_compute_forward_gelu_quick(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_gelu_quick_f32(params, dst); - } - break; - case GGML_TYPE_F16: - { - ggml_compute_forward_gelu_quick_f16(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_silu - -static void ggml_compute_forward_silu_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - assert(ggml_is_contiguous_1(src0)); - assert(ggml_is_contiguous_1(dst)); - assert(ggml_are_same_shape(src0, dst)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_vec_silu_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), - (float *) ((char *) src0->data + i1 * (src0->nb[1]))); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - GGML_UNUSED(x); - assert(!isnan(x)); - assert(!isinf(x)); - } -#endif - } -} - -static void ggml_compute_forward_silu_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - assert(ggml_is_contiguous_1(src0)); - assert(ggml_is_contiguous_1(dst)); - assert(ggml_are_same_shape(src0, dst)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_vec_silu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), - (ggml_fp16_t *) ((char *) src0->data + i1 * (src0->nb[1]))); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - const float v = GGML_CPU_FP16_TO_FP32(x); - GGML_UNUSED(v); - assert(!isnan(v)); - assert(!isinf(v)); - } -#endif - } -} - -static void 
ggml_compute_forward_silu(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_silu_f32(params, dst); - } - break; - case GGML_TYPE_F16: - { - ggml_compute_forward_silu_f16(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_leaky_relu - -static void ggml_compute_forward_leaky_relu_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - if (params->ith != 0) { - return; - } - - assert(ggml_is_contiguous_1(src0)); - assert(ggml_is_contiguous_1(dst)); - assert(ggml_are_same_shape(src0, dst)); - - const int n = ggml_nrows(src0); - const int nc = src0->ne[0]; - - float negative_slope; - memcpy(&negative_slope, dst->op_params, sizeof(float)); - - assert(dst->nb[0] == sizeof(float)); - assert(src0->nb[0] == sizeof(float)); - - for (int i = 0; i < n; i++) { - ggml_vec_leaky_relu_f32(nc, (float *) ((char *) dst->data + i * (dst->nb[1])), - (float *) ((char *) src0->data + i * (src0->nb[1])), negative_slope); - } -} - -static void ggml_compute_forward_leaky_relu_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - if (params->ith != 0) { - return; - } - - assert(ggml_is_contiguous_1(src0)); - assert(ggml_is_contiguous_1(dst)); - assert(ggml_are_same_shape(src0, dst)); - - const int n = ggml_nrows(src0); - const int nc = src0->ne[0]; - - float negative_slope; - memcpy(&negative_slope, dst->op_params, sizeof(float)); - - assert(dst->nb[0] == sizeof(ggml_fp16_t)); - assert(src0->nb[0] == sizeof(ggml_fp16_t)); - - for (int i = 0; i < n; i++) { - ggml_vec_leaky_relu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i * (dst->nb[1])), - (ggml_fp16_t *) ((char *) src0->data + i * (src0->nb[1])), negative_slope); - } -} - -void ggml_compute_forward_leaky_relu(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_leaky_relu_f32(params, dst); - } - break; - case GGML_TYPE_F16: - { - ggml_compute_forward_leaky_relu_f16(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_silu_back - -static void ggml_compute_forward_silu_back_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * grad = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - assert(ggml_is_contiguous_1(grad)); - assert(ggml_is_contiguous_1(src1)); - assert(ggml_is_contiguous_1(dst)); - assert(ggml_are_same_shape(src1, dst)); - assert(ggml_are_same_shape(src1, grad)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src1->ne[0]; - const int nr = ggml_nrows(src1); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_vec_silu_backward_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), - (float *) ((char *) src1->data + i1 * (src1->nb[1])), - (float *) ((char *) grad->data + i1 * (grad->nb[1]))); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - GGML_UNUSED(x); - assert(!isnan(x)); - assert(!isinf(x)); - } -#endif - } -} - -static void ggml_compute_forward_silu_back_f16(const ggml_compute_params * 
params, ggml_tensor * dst) { - const ggml_tensor * grad = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - assert(ggml_is_contiguous_1(grad)); - assert(ggml_is_contiguous_1(src1)); - assert(ggml_is_contiguous_1(dst)); - assert(ggml_are_same_shape(src1, dst)); - assert(ggml_are_same_shape(src1, grad)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src1->ne[0]; - const int nr = ggml_nrows(src1); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_vec_silu_backward_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), - (ggml_fp16_t *) ((char *) src1->data + i1 * (src1->nb[1])), - (ggml_fp16_t *) ((char *) grad->data + i1 * (grad->nb[1]))); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const float x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - const float v = GGML_CPU_FP16_TO_FP32(x); - GGML_UNUSED(v); - assert(!isnan(v)); - assert(!isinf(v)); - } -#endif - } -} - -void ggml_compute_forward_silu_back(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_silu_back_f32(params, dst); - } - break; - case GGML_TYPE_F16: - { - ggml_compute_forward_silu_back_f16(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_reglu - -static void ggml_compute_forward_reglu_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - char * src0_d = (char *) src0->data; - char * src1_d = (char *) (src1 ? src1->data : src0->data); - const size_t src0_o = src0->nb[1]; - const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; - - GGML_ASSERT(ggml_is_contiguous_1(src0)); - GGML_ASSERT(ggml_is_contiguous_1(dst)); - - if (src1) { - GGML_ASSERT(ggml_is_contiguous_1(src1)); - GGML_ASSERT(src0->type == src1->type); - } - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; - const int nr = ggml_nrows(src0); - - GGML_ASSERT(dst->ne[0] == nc); - GGML_ASSERT(ggml_nrows(dst) == nr); - - const int32_t swapped = ggml_get_op_params_i32(dst, 1); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - float * src0_p = (float *) (src0_d + i1 * src0_o); - float * src1_p = (float *) (src1_d + i1 * src1_o); - - if (!src1) { - src0_p += swapped ? nc : 0; - src1_p += swapped ? 0 : nc; - } - - ggml_vec_reglu_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - GGML_UNUSED(x); - assert(!isnan(x)); - assert(!isinf(x)); - } -#endif - } -} - -static void ggml_compute_forward_reglu_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - char * src0_d = (char *) src0->data; - char * src1_d = (char *) (src1 ? src1->data : src0->data); - const size_t src0_o = src0->nb[1]; - const size_t src1_o = src1 ? 
src1->nb[1] : src0->nb[1]; - - GGML_ASSERT(ggml_is_contiguous_1(src0)); - GGML_ASSERT(ggml_is_contiguous_1(dst)); - - if (src1) { - GGML_ASSERT(ggml_is_contiguous_1(src1)); - GGML_ASSERT(src0->type == src1->type); - } - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; - const int nr = ggml_nrows(src0); - - GGML_ASSERT(dst->ne[0] == nc); - GGML_ASSERT(ggml_nrows(dst) == nr); - - const int32_t swapped = ggml_get_op_params_i32(dst, 1); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_fp16_t * src0_p = (ggml_fp16_t *) (src0_d + i1 * src0_o); - ggml_fp16_t * src1_p = (ggml_fp16_t *) (src1_d + i1 * src1_o); - - if (!src1) { - src0_p += swapped ? nc : 0; - src1_p += swapped ? 0 : nc; - } - - ggml_vec_reglu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - const float v = GGML_FP16_TO_FP32(x); - GGML_UNUSED(v); - assert(!isnan(v)); - assert(!isinf(v)); - } -#endif - } -} - -static void ggml_compute_forward_reglu(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_reglu_f32(params, dst); - } - break; - case GGML_TYPE_F16: - { - ggml_compute_forward_reglu_f16(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_geglu - -static void ggml_compute_forward_geglu_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - char * src0_d = (char *) src0->data; - char * src1_d = (char *) (src1 ? src1->data : src0->data); - const size_t src0_o = src0->nb[1]; - const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; - - GGML_ASSERT(ggml_is_contiguous_1(src0)); - GGML_ASSERT(ggml_is_contiguous_1(dst)); - - if (src1) { - GGML_ASSERT(ggml_is_contiguous_1(src1)); - GGML_ASSERT(src0->type == src1->type); - } - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; - const int nr = ggml_nrows(src0); - - GGML_ASSERT(dst->ne[0] == nc); - GGML_ASSERT(ggml_nrows(dst) == nr); - - const int32_t swapped = ggml_get_op_params_i32(dst, 1); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - float * src0_p = (float *) (src0_d + i1 * src0_o); - float * src1_p = (float *) (src1_d + i1 * src1_o); - - if (!src1) { - src0_p += swapped ? nc : 0; - src1_p += swapped ? 0 : nc; - } - - ggml_vec_geglu_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - GGML_UNUSED(x); - assert(!isnan(x)); - assert(!isinf(x)); - } -#endif - } -} - -static void ggml_compute_forward_geglu_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - char * src0_d = (char *) src0->data; - char * src1_d = (char *) (src1 ? 
src1->data : src0->data); - const size_t src0_o = src0->nb[1]; - const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; - - GGML_ASSERT(ggml_is_contiguous_1(src0)); - GGML_ASSERT(ggml_is_contiguous_1(dst)); - - if (src1) { - GGML_ASSERT(ggml_is_contiguous_1(src1)); - GGML_ASSERT(src0->type == src1->type); - } - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; - const int nr = ggml_nrows(src0); - - GGML_ASSERT(dst->ne[0] == nc); - GGML_ASSERT(ggml_nrows(dst) == nr); - - const int32_t swapped = ggml_get_op_params_i32(dst, 1); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_fp16_t * src0_p = (ggml_fp16_t *) (src0_d + i1 * src0_o); - ggml_fp16_t * src1_p = (ggml_fp16_t *) (src1_d + i1 * src1_o); - - if (!src1) { - src0_p += swapped ? nc : 0; - src1_p += swapped ? 0 : nc; - } - - ggml_vec_geglu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - const float v = GGML_FP16_TO_FP32(x); - GGML_UNUSED(v); - assert(!isnan(v)); - assert(!isinf(v)); - } -#endif - } -} - -static void ggml_compute_forward_geglu(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_geglu_f32(params, dst); - } - break; - case GGML_TYPE_F16: - { - ggml_compute_forward_geglu_f16(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_swiglu - -static void ggml_compute_forward_swiglu_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - char * src0_d = (char *) src0->data; - char * src1_d = (char *) (src1 ? src1->data : src0->data); - const size_t src0_o = src0->nb[1]; - const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; - - GGML_ASSERT(ggml_is_contiguous_1(src0)); - GGML_ASSERT(ggml_is_contiguous_1(dst)); - - if (src1) { - GGML_ASSERT(ggml_is_contiguous_1(src1)); - GGML_ASSERT(src0->type == src1->type); - } - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; - const int nr = ggml_nrows(src0); - - GGML_ASSERT(dst->ne[0] == nc); - GGML_ASSERT(ggml_nrows(dst) == nr); - - const int32_t swapped = ggml_get_op_params_i32(dst, 1); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - float * src0_p = (float *) (src0_d + i1 * src0_o); - float * src1_p = (float *) (src1_d + i1 * src1_o); - - if (!src1) { - src0_p += swapped ? nc : 0; - src1_p += swapped ? 
0 : nc; - } - - ggml_vec_swiglu_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - GGML_UNUSED(x); - assert(!isnan(x)); - assert(!isinf(x)); - } -#endif - } -} - -static void ggml_compute_forward_swiglu_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - char * src0_d = (char *) src0->data; - char * src1_d = (char *) (src1 ? src1->data : src0->data); - const size_t src0_o = src0->nb[1]; - const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; - - GGML_ASSERT(ggml_is_contiguous_1(src0)); - GGML_ASSERT(ggml_is_contiguous_1(dst)); - - if (src1) { - GGML_ASSERT(ggml_is_contiguous_1(src1)); - GGML_ASSERT(src0->type == src1->type); - } - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; - const int nr = ggml_nrows(src0); - - GGML_ASSERT(dst->ne[0] == nc); - GGML_ASSERT(ggml_nrows(dst) == nr); - - const int32_t swapped = ggml_get_op_params_i32(dst, 1); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_fp16_t * src0_p = (ggml_fp16_t *) (src0_d + i1 * src0_o); - ggml_fp16_t * src1_p = (ggml_fp16_t *) (src1_d + i1 * src1_o); - - if (!src1) { - src0_p += swapped ? nc : 0; - src1_p += swapped ? 0 : nc; - } - - ggml_vec_swiglu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - const float v = GGML_FP16_TO_FP32(x); - GGML_UNUSED(v); - assert(!isnan(v)); - assert(!isinf(v)); - } -#endif - } -} - -static void ggml_compute_forward_swiglu(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_swiglu_f32(params, dst); - } - break; - case GGML_TYPE_F16: - { - ggml_compute_forward_swiglu_f16(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_swiglu_oai - -static void ggml_compute_forward_swiglu_oai_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - char * src0_d = (char *) src0->data; - char * src1_d = (char *) (src1 ? src1->data : src0->data); - const size_t src0_o = src0->nb[1]; - const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; - - GGML_ASSERT(ggml_is_contiguous_1(src0)); - GGML_ASSERT(ggml_is_contiguous_1(dst)); - - if (src1) { - GGML_ASSERT(ggml_is_contiguous_1(src1)); - GGML_ASSERT(src0->type == src1->type); - } - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src1 ? 
src0->ne[0] : src0->ne[0] / 2; - const int nr = ggml_nrows(src0); - - GGML_ASSERT(dst->ne[0] == nc); - GGML_ASSERT(ggml_nrows(dst) == nr); - - const int32_t swapped = ggml_get_op_params_i32(dst, 1); - const float alpha = ggml_get_op_params_f32(dst, 2); - const float limit = ggml_get_op_params_f32(dst, 3); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - float * src0_p = (float *) (src0_d + i1 * src0_o); - float * src1_p = (float *) (src1_d + i1 * src1_o); - float * dst_p = (float *) ((char *) dst->data + i1 * (dst->nb[1])); - - if (!src1) { - src0_p += swapped ? nc : 0; - src1_p += swapped ? 0 : nc; - } - - for (int k = 0; k < nc; k++) { - const float x = std::min(src0_p[k], limit); - const float y = std::clamp(src1_p[k], -limit, limit); - const float out_glu = x / (1.f + expf(alpha * (-x))); - dst_p[k] = out_glu * (y + 1.f); - } - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const float x = dst_p[k]; - GGML_UNUSED(x); - assert(!isnan(x)); - assert(!isinf(x)); - } -#endif - } -} - -static void ggml_compute_forward_swiglu_oai(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_swiglu_oai_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_geglu_erf - -static void ggml_compute_forward_geglu_erf_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - char * src0_d = (char *) src0->data; - char * src1_d = (char *) (src1 ? src1->data : src0->data); - const size_t src0_o = src0->nb[1]; - const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; - - GGML_ASSERT(ggml_is_contiguous_1(src0)); - GGML_ASSERT(ggml_is_contiguous_1(dst)); - - if (src1) { - GGML_ASSERT(ggml_is_contiguous_1(src1)); - GGML_ASSERT(src0->type == src1->type); - } - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; - const int nr = ggml_nrows(src0); - - GGML_ASSERT(dst->ne[0] == nc); - GGML_ASSERT(ggml_nrows(dst) == nr); - - const int32_t swapped = ggml_get_op_params_i32(dst, 1); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - float * src0_p = (float *) (src0_d + i1 * src0_o); - float * src1_p = (float *) (src1_d + i1 * src1_o); - - if (!src1) { - src0_p += swapped ? nc : 0; - src1_p += swapped ? 0 : nc; - } - - ggml_vec_geglu_erf_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - GGML_UNUSED(x); - assert(!isnan(x)); - assert(!isinf(x)); - } -#endif - } -} - -static void ggml_compute_forward_geglu_erf_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - char * src0_d = (char *) src0->data; - char * src1_d = (char *) (src1 ? src1->data : src0->data); - const size_t src0_o = src0->nb[1]; - const size_t src1_o = src1 ? 
src1->nb[1] : src0->nb[1]; - - GGML_ASSERT(ggml_is_contiguous_1(src0)); - GGML_ASSERT(ggml_is_contiguous_1(dst)); - - if (src1) { - GGML_ASSERT(ggml_is_contiguous_1(src1)); - GGML_ASSERT(src0->type == src1->type); - } - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; - const int nr = ggml_nrows(src0); - - GGML_ASSERT(dst->ne[0] == nc); - GGML_ASSERT(ggml_nrows(dst) == nr); - - const int32_t swapped = ggml_get_op_params_i32(dst, 1); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_fp16_t * src0_p = (ggml_fp16_t *) (src0_d + i1 * src0_o); - ggml_fp16_t * src1_p = (ggml_fp16_t *) (src1_d + i1 * src1_o); - - if (!src1) { - src0_p += swapped ? nc : 0; - src1_p += swapped ? 0 : nc; - } - - ggml_vec_geglu_erf_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - const float v = GGML_FP16_TO_FP32(x); - GGML_UNUSED(v); - assert(!isnan(v)); - assert(!isinf(v)); - } -#endif - } -} - -static void ggml_compute_forward_geglu_erf(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_geglu_erf_f32(params, dst); - } - break; - case GGML_TYPE_F16: - { - ggml_compute_forward_geglu_erf_f16(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_geglu_quick - -static void ggml_compute_forward_geglu_quick_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - char * src0_d = (char *) src0->data; - char * src1_d = (char *) (src1 ? src1->data : src0->data); - const size_t src0_o = src0->nb[1]; - const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; - - GGML_ASSERT(ggml_is_contiguous_1(src0)); - GGML_ASSERT(ggml_is_contiguous_1(dst)); - - if (src1) { - GGML_ASSERT(ggml_is_contiguous_1(src1)); - GGML_ASSERT(src0->type == src1->type); - } - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; - const int nr = ggml_nrows(src0); - - GGML_ASSERT(dst->ne[0] == nc); - GGML_ASSERT(ggml_nrows(dst) == nr); - - const int32_t swapped = ggml_get_op_params_i32(dst, 1); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - float * src0_p = (float *) (src0_d + i1 * src0_o); - float * src1_p = (float *) (src1_d + i1 * src1_o); - - if (!src1) { - src0_p += swapped ? nc : 0; - src1_p += swapped ? 
0 : nc; - } - - ggml_vec_geglu_quick_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - GGML_UNUSED(x); - assert(!isnan(x)); - assert(!isinf(x)); - } -#endif - } -} - -static void ggml_compute_forward_geglu_quick_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - char * src0_d = (char *) src0->data; - char * src1_d = (char *) (src1 ? src1->data : src0->data); - const size_t src0_o = src0->nb[1]; - const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; - - GGML_ASSERT(ggml_is_contiguous_1(src0)); - GGML_ASSERT(ggml_is_contiguous_1(dst)); - - if (src1) { - GGML_ASSERT(ggml_is_contiguous_1(src1)); - GGML_ASSERT(src0->type == src1->type); - } - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; - const int nr = ggml_nrows(src0); - - GGML_ASSERT(dst->ne[0] == nc); - GGML_ASSERT(ggml_nrows(dst) == nr); - - const int32_t swapped = ggml_get_op_params_i32(dst, 1); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_fp16_t * src0_p = (ggml_fp16_t *) (src0_d + i1 * src0_o); - ggml_fp16_t * src1_p = (ggml_fp16_t *) (src1_d + i1 * src1_o); - - if (!src1) { - src0_p += swapped ? nc : 0; - src1_p += swapped ? 0 : nc; - } - - ggml_vec_geglu_quick_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - const float v = GGML_FP16_TO_FP32(x); - GGML_UNUSED(v); - assert(!isnan(v)); - assert(!isinf(v)); - } -#endif - } -} - -static void ggml_compute_forward_geglu_quick(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_geglu_quick_f32(params, dst); - } - break; - case GGML_TYPE_F16: - { - ggml_compute_forward_geglu_quick_f16(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_norm - -static void ggml_compute_forward_norm_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - - GGML_ASSERT(src0->nb[0] == sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - GGML_TENSOR_UNARY_OP_LOCALS - - float eps; - memcpy(&eps, dst->op_params, sizeof(float)); - - GGML_ASSERT(eps >= 0.0f); - - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = ith; i01 < ne01; i01 += nth) { - const float * x = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); - - float sum = 0.0; - ggml_vec_sum_f32(ne00, &sum, x); - float mean = sum / ne00; - - float * y = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); - float variance = 0; - -#ifdef GGML_USE_ACCELERATE - mean = -mean; - vDSP_vsadd(x, 1, &mean, y, 1, ne00); - vDSP_measqv(y, 1, &variance, ne00); -#else - variance = ggml_vec_cvar_f32(ne00, y, x, mean); -#endif //GGML_USE_ACCELERATE - - const float scale = 1.0f / sqrtf(variance + eps); - ggml_vec_scale_f32(ne00, y, 
scale); - } - } - } -} - -void ggml_compute_forward_norm(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_norm_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_group_rms_norm - -static void ggml_compute_forward_rms_norm_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - - GGML_ASSERT(src0->nb[0] == sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - GGML_TENSOR_UNARY_OP_LOCALS - - float eps; - memcpy(&eps, dst->op_params, sizeof(float)); - - GGML_ASSERT(eps >= 0.0f); - - // TODO: optimize - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = ith; i01 < ne01; i01 += nth) { - const float * x = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); - - ggml_float sum = 0.0; - for (int64_t i00 = 0; i00 < ne00; i00++) { - sum += (ggml_float) (x[i00] * x[i00]); - } - - const float mean = sum / ne00; - - float * y = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); - - memcpy(y, x, ne00 * sizeof(float)); - // for (int i00 = 0; i00 < ne00; i00++) { - // y[i00] = x[i00]; - // } - - const float scale = 1.0f / sqrtf(mean + eps); - - // if you hit this, likely you got an inf somewhere earlier - assert(scale > 0.0f); - - ggml_vec_scale_f32(ne00, y, scale); - } - } - } -} - -void ggml_compute_forward_rms_norm(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_rms_norm_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -static void ggml_compute_forward_rms_norm_back_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; // gradients from forward pass output - const ggml_tensor * src1 = dst->src[1]; // src1 from forward pass - - GGML_ASSERT(ggml_are_same_shape(src0, dst) && ggml_are_same_shape(src0, src1)); - - GGML_ASSERT(src0->nb[0] == sizeof(float)); - GGML_ASSERT(src1->nb[0] == sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - GGML_TENSOR_BINARY_OP_LOCALS - - float eps; - memcpy(&eps, dst->op_params, sizeof(float)); - - // TODO: optimize - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = ith; i01 < ne01; i01 += nth) { - // src1 is same shape as src0 => same indices - const int64_t i11 = i01; - const int64_t i12 = i02; - const int64_t i13 = i03; - - const float * dz = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); - const float * x = (float *) ((char *) src1->data + i11 * nb11 + i12 * nb12 + i13 * nb13); - - ggml_float sum_xx = 0.0; - ggml_float sum_xdz = 0.0; - - for (int64_t i00 = 0; i00 < ne00; i00++) { - sum_xx += (ggml_float) (x[i00] * x[i00]); - sum_xdz += (ggml_float) (x[i00] * dz[i00]); - } - - //const float mean = (float)(sum_xx)/ne00; - const float mean_eps = (float) (sum_xx) / ne00 + eps; - const float sum_eps = (float) (sum_xx) + eps * ne00; - //const float mean_xdz = (float)(sum_xdz)/ne00; - // we could cache rms from forward pass to improve performance. - // to do this implement ggml_rms and compose ggml_rms_norm using ggml_rms. 
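// Illustrative reference (a minimal sketch, not part of the original sources; the
// function name is hypothetical and <math.h> is assumed): the gradient that the
// derivation and vectorized code below arrive at, for one row x of length n with
// upstream gradient dz, is
//   dx[i] = (dz[i] - x[i] * sum(x*dz) / (sum(x*x) + n*eps)) / sqrt(mean(x*x) + eps)
static void rms_norm_backward_ref(int n, float * dx, const float * x, const float * dz, float eps) {
    double sum_xx = 0.0, sum_xdz = 0.0;
    for (int i = 0; i < n; i++) {
        sum_xx  += (double) x[i] * x[i];   // forward-pass statistic: sum of squares
        sum_xdz += (double) x[i] * dz[i];  // correlation of input and upstream gradient
    }
    const float mean_eps = (float) (sum_xx / n) + eps;  // mean(x*x) + eps
    const float sum_eps  = (float) sum_xx + eps * n;    // n * mean_eps
    const float rrms     = 1.0f / sqrtf(mean_eps);      // 1 / rms
    for (int i = 0; i < n; i++) {
        dx[i] = (dz[i] - x[i] * (float) sum_xdz / sum_eps) * rrms;
    }
}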
- //const float rms = sqrtf(mean_eps); - const float rrms = 1.0f / sqrtf(mean_eps); - //const float scale = -rrms/(ne00 * mean_eps); // -1/(n*rms**3) - - { - // z = rms_norm(x) - // - // rms_norm(src1) = - // scale( - // src1, - // div( - // 1, - // sqrt( - // add( - // scale( - // sum( - // sqr( - // src1)), - // (1.0/N)), - // eps)))); - - // postorder: - // ## op args grad - // 00 param src1 grad[#00] - // 01 const 1 - // 02 sqr (#00) grad[#02] - // 03 sum (#02) grad[#03] - // 04 const 1/N - // 05 scale (#03, #04) grad[#05] - // 06 const eps - // 07 add (#05, #06) grad[#07] - // 08 sqrt (#07) grad[#08] - // 09 div (#01,#08) grad[#09] - // 10 scale (#00,#09) grad[#10] - // - // backward pass, given grad[#10] - // #10: scale - // grad[#00] += scale(grad[#10],#09) - // grad[#09] += sum(mul(grad[#10],#00)) - // #09: div - // grad[#08] += neg(mul(grad[#09], div(#09,#08))) - // #08: sqrt - // grad[#07] += mul(grad[#08], div(0.5, #08)) - // #07: add - // grad[#05] += grad[#07] - // #05: scale - // grad[#03] += scale(grad[#05],#04) - // #03: sum - // grad[#02] += repeat(grad[#03], #02) - // #02: - // grad[#00] += scale(mul(#00, grad[#02]), 2.0) - // - // substitute and simplify: - // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, grad[#02]), 2.0) - // grad[#02] = repeat(grad[#03], #02) - // grad[#02] = repeat(scale(grad[#05],#04), #02) - // grad[#02] = repeat(scale(grad[#07],#04), #02) - // grad[#02] = repeat(scale(mul(grad[#08], div(0.5, #08)),#04), #02) - // grad[#02] = repeat(scale(mul(neg(mul(grad[#09], div(#09,#08))), div(0.5, #08)),#04), #02) - // grad[#02] = repeat(scale(mul(neg(mul(sum(mul(grad[#10],#00)), div(#09,#08))), div(0.5, #08)),#04), #02) - // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(#09,#08) * div(0.5, #08) * (1/N)), #02) - // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(div(#01,#08),#08) * div(0.5, #08) * (1/N)), #02) - // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(1,#08*#08) * div(0.5, #08) * (1/N)), #02) - // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N)), #02) - // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, grad[#02]), 2.0) - // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, repeat(-(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N)), #02)), 2.0) - // grad[#00] = scale(grad(#10), #09) + scale(scale(#00, -(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N))), 2.0) - // grad[#00] = scale(grad(#10), #09) + scale(#00, -(sum(mul(grad[#10],#00)) * div(1,#07) * div(1,#08) * (1/N))) - // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,#07*#08) * (-1/N)) - // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,#07*#08) * (-1/N)) - // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,mean_eps*rms) * (-1/N)) - // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*mean_eps)) - // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*(sum_xx/N+eps))) - // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*sum_xx+rms*N*eps)) - // grad[#00] = scale(dz, rrms) + scale(x, sum(mul(dz,x)) * div(-1,rms*N*mean_eps)) - // grad[#00] = scale(dz, rrms) + scale(x, sum_xdz * div(-1,rms*N*mean_eps)) - // a = b*c + d*e - // a = b*c*f/f + d*e*f/f - // a = (b*c*f + d*e*f)*(1/f) - // a = (b*c*(1/c) + d*e*(1/c))*(1/(1/c)) - // a = (b + d*e/c)*c - // b = dz, c = rrms, d = x, e = sum_xdz * div(-1,rms*N*mean_eps) - // a = (dz + 
x*sum_xdz * div(-1,rms*N*mean_eps)/rrms)*rrms - // a = (dz + x*sum_xdz * div(-1,rms*N*mean_eps)*rms)*rrms - // a = (dz + x*sum_xdz * div(-rms,rms*N*mean_eps))*rrms - // a = (dz + x*sum_xdz * div(-1,N*mean_eps))*rrms - // a = (dz + x*div(-sum_xdz,N*mean_eps))*rrms - // a = (dz + x*div(-mean_xdz,mean_eps))*rrms - // grad[#00] = scale(dz + scale(x, div(-mean_xdz,mean_eps)),rrms) - // grad[#00] = scale(dz + scale(x, -mean_xdz/mean_eps),rrms) - // dx = scale(dz + scale(x, -mean_xdz/mean_eps),rrms) - } - // dx = scale(dz + scale(x, -mean_xdz/mean_eps),rrms) - // post-order: - // dx := x - // dx := scale(dx,-mean_xdz/mean_eps) - // dx := add(dx, dz) - // dx := scale(dx, rrms) - float * dx = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); - - // dx[i00] = (x*(-sum_xdz/sum_eps) + dz) / sqrtf(mean_eps) - ggml_vec_cpy_f32(ne00, dx, x); - // ggml_vec_scale_f32(ne00, dx, -mean_xdz/mean_eps); - ggml_vec_scale_f32(ne00, dx, (float) (-sum_xdz) / sum_eps); - ggml_vec_acc_f32(ne00, dx, dz); - ggml_vec_scale_f32(ne00, dx, rrms); - } - } - } -} - -void ggml_compute_forward_rms_norm_back(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_rms_norm_back_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_group_norm - -static void ggml_compute_forward_group_norm_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - - GGML_ASSERT(src0->nb[0] == sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - GGML_TENSOR_UNARY_OP_LOCALS - - // TODO: optimize - - float eps; - memcpy(&eps, dst->op_params + 1, sizeof(float)); - - int n_channels = src0->ne[2]; - int n_groups = dst->op_params[0]; - int n_channels_per_group = (n_channels + n_groups - 1) / n_groups; - for (int i = ith; i < n_groups; i += nth) { - int start = i * n_channels_per_group; - int end = start + n_channels_per_group; - if (end > n_channels) { - end = n_channels; - } - int step = end - start; - - for (int64_t i03 = 0; i03 < ne03; i03++) { - ggml_float sum = 0.0; - for (int64_t i02 = start; i02 < end; i02++) { - for (int64_t i01 = 0; i01 < ne01; i01++) { - const float * x = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); - - ggml_float sumr = 0.0; - for (int64_t i00 = 0; i00 < ne00; i00++) { - sumr += (ggml_float) x[i00]; - } - sum += sumr; - } - } - const float mean = sum / (ne00 * ne01 * step); - - ggml_float sum2 = 0.0; - for (int64_t i02 = start; i02 < end; i02++) { - for (int64_t i01 = 0; i01 < ne01; i01++) { - const float * x = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); - - float * y = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); - - ggml_float sumr = 0.0; - for (int64_t i00 = 0; i00 < ne00; i00++) { - float v = x[i00] - mean; - y[i00] = v; - sumr += (ggml_float) (v * v); - } - sum2 += sumr; - } - } - const float variance = sum2 / (ne00 * ne01 * step); - const float scale = 1.0f / sqrtf(variance + eps); - - for (int64_t i02 = start; i02 < end; i02++) { - for (int64_t i01 = 0; i01 < ne01; i01++) { - float * y = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); - ggml_vec_scale_f32(ne00, y, scale); - } - } - } - } -} - -void ggml_compute_forward_group_norm(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * 
src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_group_norm_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_l2_norm - -static void ggml_compute_forward_l2_norm_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - - GGML_ASSERT(src0->nb[0] == sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - GGML_TENSOR_UNARY_OP_LOCALS - - float eps; - memcpy(&eps, dst->op_params, sizeof(float)); - - GGML_ASSERT(eps >= 0.0f); - - // TODO: optimize - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = ith; i01 < ne01; i01 += nth) { - const float * x = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); - - ggml_float sum = 0.0; - for (int64_t i00 = 0; i00 < ne00; i00++) { - sum += (ggml_float) (x[i00] * x[i00]); - } - - float * y = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); - - memcpy(y, x, ne00 * sizeof(float)); - - const float scale = 1.0f / fmaxf(sqrtf(sum), eps); - - ggml_vec_scale_f32(ne00, y, scale); - } - } - } -} - -void ggml_compute_forward_l2_norm(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_l2_norm_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_out_prod - -static void ggml_compute_forward_out_prod_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_TENSOR_BINARY_OP_LOCALS - - GGML_ASSERT(dst->type == GGML_TYPE_F32); - GGML_ASSERT(src0->type == GGML_TYPE_F32); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - - const int ith = params->ith; - const int nth = params->nth; - - GGML_ASSERT(ne0 == ne00); - GGML_ASSERT(ne1 == ne10); - GGML_ASSERT(ne2 == ne12); - GGML_ASSERT(ne3 == ne13); - - GGML_ASSERT(ne2 % ne02 == 0); - GGML_ASSERT(ne3 % ne03 == 0); - - // we don't support permuted src0 or src1 - GGML_ASSERT(nb00 == sizeof(float)); - - // dst cannot be transposed or permuted - GGML_ASSERT(nb0 == sizeof(float)); - // GGML_ASSERT(nb0 <= nb1); - // GGML_ASSERT(nb1 <= nb2); - // GGML_ASSERT(nb2 <= nb3); - - // nb01 >= nb00 - src0 is not transposed - // compute by src0 rows - - if (ith == 0) { - ggml_vec_set_f32(ne0 * ne1 * ne2 * ne3, (float *) dst->data, 0); - } - ggml_barrier(params->threadpool); - - // dst[:,:,:,:] = 0 - // for i2,i3: - // for i1: - // for i01: - // for i0: - // dst[i0,i1,i2,i3] += src0[i0,i01,i2,i3] * src1[i1,i01,i2,i3] - - // parallelize by last three dimensions - - // total rows in dst - const int64_t nr = ne1 * ne2 * ne3; - - // rows per thread - const int64_t dr = (nr + nth - 1) / nth; - - // row range for this thread - const int64_t ir0 = dr * ith; - const int64_t ir1 = MIN(ir0 + dr, nr); - - // block-tiling attempt - const int64_t blck_0 = MAX(GGML_VEC_MAD_UNROLL, 32); - const int64_t blck_1 = 16; - - // dps == dst per src0, used for group query attention - const int64_t dps2 = ne2 / ne02; - const int64_t dps3 = ne3 / ne03; - - for (int64_t bir = ir0; bir < ir1; bir += blck_1) { - const int64_t bir1 = MIN(bir + blck_1, ir1); - for (int64_t bi01 = 0; bi01 < ne01; bi01 += blck_0) { - const int64_t bne01 = MIN(bi01 + blck_0, ne01); - for (int64_t ir = 
bir; ir < bir1; ++ir) { - // dst indices - const int64_t i3 = ir / (ne2 * ne1); - const int64_t i2 = (ir - i3 * ne2 * ne1) / ne1; - const int64_t i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); - - const int64_t i02 = i2 / dps2; - const int64_t i03 = i3 / dps3; - - //const int64_t i10 = i1; - const int64_t i12 = i2; - const int64_t i13 = i3; - -#if GGML_VEC_MAD_UNROLL > 2 - const int64_t bne01_unroll = bne01 - (bne01 % GGML_VEC_MAD_UNROLL); - for (int64_t i01 = bi01; i01 < bne01_unroll; i01 += GGML_VEC_MAD_UNROLL) { - const int64_t i11 = i01; - - float * s0 = (float *) ((char *) src0->data + (i01 * nb01 + i02 * nb02 + i03 * nb03)); - float * s1 = (float *) ((char *) src1->data + (i1 * nb10 + i11 * nb11 + i12 * nb12 + i13 * nb13)); - float * d = (float *) ((char *) dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb3)); - - ggml_vec_mad_f32_unroll(ne0, nb01, nb11, d, s0, s1); - } - for (int64_t i01 = bne01_unroll; i01 < bne01; ++i01) { - const int64_t i11 = i01; - - float * s0 = (float *) ((char *) src0->data + (i01 * nb01 + i02 * nb02 + i03 * nb03)); - float * s1 = (float *) ((char *) src1->data + (i1 * nb10 + i11 * nb11 + i12 * nb12 + i13 * nb13)); - float * d = (float *) ((char *) dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb3)); - - ggml_vec_mad_f32(ne0, d, s0, *s1); - } -#else - for (int64_t i01 = bi01; i01 < bne01; ++i01) { - const int64_t i11 = i01; - - float * s0 = (float *) ((char *) src0->data + (i01 * nb01 + i02 * nb02 + i03 * nb03)); - float * s1 = (float *) ((char *) src1->data + (i1 * nb10 + i11 * nb11 + i12 * nb12 + i13 * nb13)); - float * d = (float *) ((char *) dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb3)); - - ggml_vec_mad_f32(ne0, d, s0, *s1); - } -#endif - } - } - } -} - -static void ggml_compute_forward_out_prod_q_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_TENSOR_BINARY_OP_LOCALS; - - const int ith = params->ith; - const int nth = params->nth; - - const ggml_type type = src0->type; - const ggml_to_float_t dequantize_row_q = ggml_get_type_traits(type)->to_float; - - GGML_ASSERT(ne02 == ne12); - GGML_ASSERT(ne03 == ne13); - GGML_ASSERT(ne2 == ne12); - GGML_ASSERT(ne3 == ne13); - - // we don't support permuted src0 dim0 - GGML_ASSERT(nb00 == ggml_type_size(type)); - - // dst dim0 cannot be transposed or permuted - GGML_ASSERT(nb0 == sizeof(float)); - // GGML_ASSERT(nb0 <= nb1); - // GGML_ASSERT(nb1 <= nb2); - // GGML_ASSERT(nb2 <= nb3); - - GGML_ASSERT(ne0 == ne00); - GGML_ASSERT(ne1 == ne10); - GGML_ASSERT(ne2 == ne02); - GGML_ASSERT(ne3 == ne03); - - // nb01 >= nb00 - src0 is not transposed - // compute by src0 rows - - if (ith == 0) { - ggml_vec_set_f32(ne0 * ne1 * ne2 * ne3, (float *) dst->data, 0); - } - ggml_barrier(params->threadpool); - - // parallelize by last three dimensions - - // total rows in dst - const int64_t nr = ne1 * ne2 * ne3; - - // rows per thread - const int64_t dr = (nr + nth - 1) / nth; - - // row range for this thread - const int64_t ir0 = dr * ith; - const int64_t ir1 = MIN(ir0 + dr, nr); - - // dst[:,:,:,:] = 0 - // for i2,i3: - // for i1: - // for i01: - // for i0: - // dst[i0,i1,i2,i3] += src0[i0,i01,i2,i3] * src1[i1,i01,i2,i3] - - float * wdata = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32) * ith; - - for (int64_t ir = ir0; ir < ir1; ++ir) { - // dst indices - const int64_t i3 = ir / (ne2 * ne1); - const int64_t i2 = (ir - i3 * ne2 * ne1) / ne1; - const int64_t i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); - - const int64_t i02 = i2; - const int64_t 
i03 = i3; - - //const int64_t i10 = i1; - const int64_t i12 = i2; - const int64_t i13 = i3; - - for (int64_t i01 = 0; i01 < ne01; ++i01) { - const int64_t i11 = i01; - - float * s0 = (float *) ((char *) src0->data + (i01 * nb01 + i02 * nb02 + i03 * nb03)); - float * s1 = (float *) ((char *) src1->data + (i1 * nb10 + i11 * nb11 + i12 * nb12 + i13 * nb13)); - float * d = (float *) ((char *) dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb3)); - - dequantize_row_q(s0, wdata, ne0); - ggml_vec_mad_f32(ne0, d, wdata, *s1); - } - } -} - -void ggml_compute_forward_out_prod(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_Q4_0: - case GGML_TYPE_Q4_1: - case GGML_TYPE_Q5_0: - case GGML_TYPE_Q5_1: - case GGML_TYPE_Q8_0: - case GGML_TYPE_MXFP4: - case GGML_TYPE_Q2_K: - case GGML_TYPE_Q3_K: - case GGML_TYPE_Q4_K: - case GGML_TYPE_Q5_K: - case GGML_TYPE_Q6_K: - case GGML_TYPE_TQ1_0: - case GGML_TYPE_TQ2_0: - case GGML_TYPE_IQ2_XXS: - case GGML_TYPE_IQ2_XS: - case GGML_TYPE_IQ3_XXS: - case GGML_TYPE_IQ1_S: - case GGML_TYPE_IQ1_M: - case GGML_TYPE_IQ4_NL: - case GGML_TYPE_IQ4_XS: - case GGML_TYPE_IQ3_S: - case GGML_TYPE_IQ2_S: - { - ggml_compute_forward_out_prod_q_f32(params, dst); - } - break; - case GGML_TYPE_F16: - { - GGML_ABORT("fatal error"); // todo - // ggml_compute_forward_out_prod_f16_f32(params, dst); - } - case GGML_TYPE_F32: - { - ggml_compute_forward_out_prod_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_scale - -static void ggml_compute_forward_scale_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_ASSERT(ggml_is_contiguous(src0)); - GGML_ASSERT(ggml_is_contiguous(dst)); - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - - float s; // scale factor - float b; // bias - - memcpy(&s, (float *) dst->op_params + 0, sizeof(float)); - memcpy(&b, (float *) dst->op_params + 1, sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - const size_t nb01 = src0->nb[1]; - - const size_t nb1 = dst->nb[1]; - - if (b == 0.0f) { - for (int i1 = ir0; i1 < ir1; i1++) { - if (dst->data != src0->data) { - // src0 is same shape as dst => same indices - // TODO: add x parameter to ggml_vec_scale_f32 and remove this memcpy - memcpy((char *) dst->data + i1 * nb1, (char *) src0->data + i1 * nb01, nc * sizeof(float)); - } - ggml_vec_scale_f32(nc, (float *) ((char *) dst->data + i1 * nb1), s); - } - } else { - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_vec_mad1_f32(nc, (float *) ((char *) dst->data + i1 * nb1), (float *) ((char *) src0->data + i1 * nb1), - s, b); - } - } -} - -void ggml_compute_forward_scale(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_scale_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_set - -static void ggml_compute_forward_set_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - 
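// Descriptive note (illustrative only, mirroring the reads a few lines below):
// GGML_OP_SET carries its view description in dst->op_params as five int32 slots:
//   op_params[0..2]  nb1, nb2, nb3 -- byte strides used to view dst during the set
//   op_params[3]     offset        -- byte offset of the view into dst->data
//   op_params[4]     inplace       -- non-zero means src0 is not copied into dst first
// A hedged sketch of unpacking them:
//   const int32_t * p = (const int32_t *) dst->op_params;
//   size_t nb1 = p[0], nb2 = p[1], nb3 = p[2], offset = p[3];
//   bool inplace = p[4] != 0;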
GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0)); - - // view src0 and dst with these strides and data offset inbytes during set - // nb0 is implicitly element_size because src0 and dst are contiguous - size_t nb1 = ((int32_t *) dst->op_params)[0]; - size_t nb2 = ((int32_t *) dst->op_params)[1]; - size_t nb3 = ((int32_t *) dst->op_params)[2]; - size_t offset = ((int32_t *) dst->op_params)[3]; - bool inplace = (bool) ((int32_t *) dst->op_params)[4]; - - if (!inplace) { - if (params->ith == 0) { - // memcpy needs to be synchronized across threads to avoid race conditions. - // => do it in INIT phase - memcpy(((char *) dst->data), ((char *) src0->data), ggml_nbytes(dst)); - } - ggml_barrier(params->threadpool); - } - - const int ith = params->ith; - const int nth = params->nth; - - const int nr = ggml_nrows(src1); - const int nc = src1->ne[0]; - - GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) - GGML_TENSOR_LOCALS(size_t, nb1, src1, nb) - - // src0 and dst as viewed during set - const size_t nb0 = ggml_element_size(src0); - - const int im0 = (ne10 == 0 ? 0 : ne10 - 1); - const int im1 = (ne11 == 0 ? 0 : ne11 - 1); - const int im2 = (ne12 == 0 ? 0 : ne12 - 1); - const int im3 = (ne13 == 0 ? 0 : ne13 - 1); - - GGML_ASSERT(offset + im0 * nb0 + im1 * nb1 + im2 * nb2 + im3 * nb3 <= ggml_nbytes(dst)); - - GGML_ASSERT(nb10 == sizeof(float)); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int ir = ir0; ir < ir1; ++ir) { - // src0 and dst are viewed with shape of src1 and offset - // => same indices - const int i3 = ir / (ne12 * ne11); - const int i2 = (ir - i3 * ne12 * ne11) / ne11; - const int i1 = (ir - i3 * ne12 * ne11 - i2 * ne11); - - ggml_vec_cpy_f32(nc, (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1 + offset), - (float *) ((char *) src1->data + i3 * nb13 + i2 * nb12 + i1 * nb11)); - } -} - -static void ggml_compute_forward_set_i32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0)); - - // view src0 and dst with these strides and data offset inbytes during set - // nb0 is implicitly element_size because src0 and dst are contiguous - size_t nb1 = ((int32_t *) dst->op_params)[0]; - size_t nb2 = ((int32_t *) dst->op_params)[1]; - size_t nb3 = ((int32_t *) dst->op_params)[2]; - size_t offset = ((int32_t *) dst->op_params)[3]; - bool inplace = (bool) ((int32_t *) dst->op_params)[4]; - - if (!inplace) { - if (params->ith == 0) { - // memcpy needs to be synchronized across threads to avoid race conditions. - // => do it in INIT phase - memcpy(((char *) dst->data), ((char *) src0->data), ggml_nbytes(dst)); - } - ggml_barrier(params->threadpool); - } - - const int ith = params->ith; - const int nth = params->nth; - - const int nr = ggml_nrows(src1); - const int nc = src1->ne[0]; - - GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) - GGML_TENSOR_LOCALS(size_t, nb1, src1, nb) - - // src0 and dst as viewed during set - const size_t nb0 = ggml_element_size(src0); - - const int im0 = (ne10 == 0 ? 0 : ne10 - 1); - const int im1 = (ne11 == 0 ? 0 : ne11 - 1); - const int im2 = (ne12 == 0 ? 0 : ne12 - 1); - const int im3 = (ne13 == 0 ? 
0 : ne13 - 1); - - GGML_ASSERT(offset + im0 * nb0 + im1 * nb1 + im2 * nb2 + im3 * nb3 <= ggml_nbytes(dst)); - - GGML_ASSERT(nb10 == sizeof(int32_t)); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int ir = ir0; ir < ir1; ++ir) { - // src0 and dst are viewed with shape of src1 and offset - // => same indices - const int i3 = ir / (ne12 * ne11); - const int i2 = (ir - i3 * ne12 * ne11) / ne11; - const int i1 = (ir - i3 * ne12 * ne11 - i2 * ne11); - - ggml_vec_cpy_i32(nc, (int32_t *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1 + offset), - (int32_t *) ((char *) src1->data + i3 * nb13 + i2 * nb12 + i1 * nb11)); - } -} - -void ggml_compute_forward_set(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_set_f32(params, dst); - } - break; - case GGML_TYPE_I32: - { - ggml_compute_forward_set_i32(params, dst); - } - break; - case GGML_TYPE_F16: - case GGML_TYPE_BF16: - case GGML_TYPE_Q4_0: - case GGML_TYPE_Q4_1: - case GGML_TYPE_Q5_0: - case GGML_TYPE_Q5_1: - case GGML_TYPE_Q8_0: - case GGML_TYPE_Q8_1: - case GGML_TYPE_MXFP4: - case GGML_TYPE_Q2_K: - case GGML_TYPE_Q3_K: - case GGML_TYPE_Q4_K: - case GGML_TYPE_Q5_K: - case GGML_TYPE_Q6_K: - case GGML_TYPE_TQ1_0: - case GGML_TYPE_TQ2_0: - case GGML_TYPE_IQ2_XXS: - case GGML_TYPE_IQ2_XS: - case GGML_TYPE_IQ3_XXS: - case GGML_TYPE_IQ1_S: - case GGML_TYPE_IQ1_M: - case GGML_TYPE_IQ4_NL: - case GGML_TYPE_IQ4_XS: - case GGML_TYPE_IQ3_S: - case GGML_TYPE_IQ2_S: - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_cpy - -void ggml_compute_forward_cpy(const ggml_compute_params * params, ggml_tensor * dst) { - ggml_compute_forward_dup(params, dst); -} - -// ggml_compute_forward_cont - -void ggml_compute_forward_cont(const ggml_compute_params * params, ggml_tensor * dst) { - ggml_compute_forward_dup(params, dst); -} - -// ggml_compute_forward_get_rows - -static void ggml_compute_forward_get_rows_q(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_TENSOR_BINARY_OP_LOCALS - - const int64_t nc = ne00; - const int64_t nr = ggml_nelements(src1); - - const ggml_type type = src0->type; - const ggml_to_float_t dequantize_row_q = ggml_get_type_traits(type)->to_float; - - assert(ne0 == nc); - assert(ne02 == ne11); - assert(nb00 == ggml_type_size(type)); - assert(ggml_nrows(dst) == nr); - - const int ith = params->ith; - const int nth = params->nth; - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int64_t i = ir0; i < ir1; ++i) { - const int64_t i12 = i / (ne11 * ne10); - const int64_t i11 = (i - i12 * ne11 * ne10) / ne10; - const int64_t i10 = (i - i12 * ne11 * ne10 - i11 * ne10); - const int64_t i01 = *(int32_t *) ((char *) src1->data + i10 * nb10 + i11 * nb11 + i12 * nb12); - - GGML_ASSERT(i01 >= 0 && i01 < ne01); - - dequantize_row_q((const void *) ((char *) src0->data + i01 * nb01 + i11 * nb02 + i12 * nb03), - (float *) ((char *) dst->data + i10 * nb1 + i11 * nb2 + i12 * nb3), nc); - } -} - -static void ggml_compute_forward_get_rows_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = 
dst->src[1]; - - GGML_TENSOR_BINARY_OP_LOCALS - - const int64_t nc = ne00; - const int64_t nr = ggml_nelements(src1); - - assert(ne0 == nc); - assert(ne02 == ne11); - assert(nb00 == sizeof(ggml_fp16_t)); - assert(ggml_nrows(dst) == nr); - - const int ith = params->ith; - const int nth = params->nth; - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int64_t i = ir0; i < ir1; ++i) { - const int64_t i12 = i / (ne11 * ne10); - const int64_t i11 = (i - i12 * ne11 * ne10) / ne10; - const int64_t i10 = (i - i12 * ne11 * ne10 - i11 * ne10); - const int64_t i01 = *(int32_t *) ((char *) src1->data + i10 * nb10 + i11 * nb11 + i12 * nb12); - - GGML_ASSERT(i01 >= 0 && i01 < ne01); - - ggml_cpu_fp16_to_fp32((const ggml_fp16_t *) ((char *) src0->data + i01 * nb01 + i11 * nb02 + i12 * nb03), - (float *) ((char *) dst->data + i10 * nb1 + i11 * nb2 + i12 * nb3), nc); - } -} - -static void ggml_compute_forward_get_rows_bf16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_TENSOR_BINARY_OP_LOCALS - - const int64_t nc = ne00; - const int64_t nr = ggml_nelements(src1); - - assert(ne0 == nc); - assert(ne02 == ne11); - assert(nb00 == sizeof(ggml_bf16_t)); - assert(ggml_nrows(dst) == nr); - - const int ith = params->ith; - const int nth = params->nth; - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int64_t i = ir0; i < ir1; ++i) { - const int64_t i12 = i / (ne11 * ne10); - const int64_t i11 = (i - i12 * ne11 * ne10) / ne10; - const int64_t i10 = (i - i12 * ne11 * ne10 - i11 * ne10); - const int64_t i01 = *(int32_t *) ((char *) src1->data + i10 * nb10 + i11 * nb11 + i12 * nb12); - - GGML_ASSERT(i01 >= 0 && i01 < ne01); - - ggml_cpu_bf16_to_fp32((const ggml_bf16_t *) ((char *) src0->data + i01 * nb01 + i11 * nb02 + i12 * nb03), - (float *) ((char *) dst->data + i10 * nb1 + i11 * nb2 + i12 * nb3), nc); - } -} - -static void ggml_compute_forward_get_rows_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_TENSOR_BINARY_OP_LOCALS - - const int64_t nc = ne00; - const int64_t nr = ggml_nelements(src1); - - assert(ne0 == nc); - assert(ne02 == ne11); - assert(nb00 == sizeof(float)); - assert(ggml_nrows(dst) == nr); - - const int ith = params->ith; - const int nth = params->nth; - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int64_t i = ir0; i < ir1; ++i) { - const int64_t i12 = i / (ne11 * ne10); - const int64_t i11 = (i - i12 * ne11 * ne10) / ne10; - const int64_t i10 = (i - i12 * ne11 * ne10 - i11 * ne10); - const int64_t i01 = *(int32_t *) ((char *) src1->data + i10 * nb10 + i11 * nb11 + i12 * nb12); - - GGML_ASSERT(i01 >= 0 && i01 < ne01); - - ggml_vec_cpy_f32(nc, (float *) ((char *) dst->data + i10 * nb1 + i11 * nb2 + i12 * nb3), - (float *) ((char *) src0->data + i01 * nb01 + i11 * nb02 + i12 * nb03)); - } -} - -void ggml_compute_forward_get_rows(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_Q4_0: - case GGML_TYPE_Q4_1: - case GGML_TYPE_Q5_0: - case GGML_TYPE_Q5_1: - 
case GGML_TYPE_Q8_0: - case GGML_TYPE_Q8_1: - case GGML_TYPE_MXFP4: - case GGML_TYPE_Q2_K: - case GGML_TYPE_Q3_K: - case GGML_TYPE_Q4_K: - case GGML_TYPE_Q5_K: - case GGML_TYPE_Q6_K: - case GGML_TYPE_TQ1_0: - case GGML_TYPE_TQ2_0: - case GGML_TYPE_IQ2_XXS: - case GGML_TYPE_IQ2_XS: - case GGML_TYPE_IQ3_XXS: - case GGML_TYPE_IQ1_S: - case GGML_TYPE_IQ1_M: - case GGML_TYPE_IQ4_NL: - case GGML_TYPE_IQ4_XS: - case GGML_TYPE_IQ3_S: - case GGML_TYPE_IQ2_S: - { - ggml_compute_forward_get_rows_q(params, dst); - } - break; - case GGML_TYPE_F16: - { - ggml_compute_forward_get_rows_f16(params, dst); - } - break; - case GGML_TYPE_BF16: - { - ggml_compute_forward_get_rows_bf16(params, dst); - } - break; - case GGML_TYPE_F32: - case GGML_TYPE_I32: - { - ggml_compute_forward_get_rows_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } - - //static bool first = true; - //printf("ne0 = %d, ne1 = %d, ne2 = %d\n", dst->ne[0], dst->ne[1], dst->ne[2]); - //if (first) { - // first = false; - //} else { - // for (int k = 0; k < dst->ne[1]; ++k) { - // for (int j = 0; j < dst->ne[0]/16; ++j) { - // for (int i = 0; i < 16; ++i) { - // printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]); - // } - // printf("\n"); - // } - // printf("\n"); - // } - // printf("\n"); - // exit(0); - //} -} - -template -static void ggml_compute_forward_set_rows_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_TENSOR_BINARY_OP_LOCALS - - const int64_t nc = ne00; - const int64_t nr = ne01; - - assert(ne0 == nc); - assert(ne2 == ne02); - assert(ne3 == ne03); - assert(src0->type == GGML_TYPE_F32); - assert(ne02 % ne11 == 0); - assert(ne03 % ne12 == 0); - - const int ith = params->ith; - const int nth = params->nth; - - // rows per thread - const int64_t dr = (nr + nth - 1) / nth; - - // row range for this thread - const int64_t ir0 = dr * ith; - const int64_t ir1 = std::min(ir0 + dr, nr); - - const ggml_from_float_t from_float = ggml_get_type_traits_cpu(dst->type)->from_float; - - for (int64_t i03 = 0; i03 < ne03; ++i03) { - for (int64_t i02 = 0; i02 < ne02; ++i02) { - for (int64_t i = ir0; i < ir1; ++i) { - const int64_t i12 = i03 % ne12; - const int64_t i11 = i02 % ne11; - const int64_t i10 = i; - - const int64_t i1 = *(idx_t *) ((char *) src1->data + i10 * nb10 + i11 * nb11 + i12 * nb12); - - GGML_ASSERT(i1 >= 0 && i1 < ne1); - - from_float((const float *) ((char *) src0->data + i * nb01 + i02 * nb02 + i03 * nb03), - ((char *) dst->data + i1 * nb1 + i02 * nb2 + i03 * nb3), nc); - } - } - } -} - -void ggml_compute_forward_set_rows(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - if (src1->type == GGML_TYPE_I64) { - ggml_compute_forward_set_rows_f32(params, dst); - } else if (src1->type == GGML_TYPE_I32) { - ggml_compute_forward_set_rows_f32(params, dst); - } else { - GGML_ABORT("src1->type = %d (%s) not supported", src1->type, ggml_type_name(src1->type)); - } - } - break; - default: - { - GGML_ABORT("src0->type = %d (%s) not supported", src0->type, ggml_type_name(src0->type)); - } - } -} - -// ggml_compute_forward_get_rows_back - -static void ggml_compute_forward_get_rows_back_f32_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - if (params->ith != 0) 
{ - return; - } - - GGML_ASSERT(ggml_is_contiguous(dst)); - - // ggml_compute_forward_dup_same_cont(params, opt0, dst); - - memset(dst->data, 0, ggml_nbytes(dst)); - - const int nc = src0->ne[0]; - const int nr = ggml_nelements(src1); - - GGML_ASSERT(dst->ne[0] == nc); - GGML_ASSERT(src0->nb[0] == sizeof(ggml_fp16_t)); - - for (int i = 0; i < nr; ++i) { - const int r = ((int32_t *) src1->data)[i]; - - for (int j = 0; j < nc; ++j) { - ggml_fp16_t v = ((ggml_fp16_t *) ((char *) src0->data + i * src0->nb[1]))[j]; - ((float *) ((char *) dst->data + r * dst->nb[1]))[j] += GGML_CPU_FP16_TO_FP32(v); - } - } -} - -static void ggml_compute_forward_get_rows_back_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - if (params->ith != 0) { - return; - } - - GGML_ASSERT(ggml_is_contiguous(dst)); - - // ggml_compute_forward_dup_same_cont(params, opt0, dst); - - memset(dst->data, 0, ggml_nbytes(dst)); - - const int nc = src0->ne[0]; - const int nr = ggml_nelements(src1); - - GGML_ASSERT(dst->ne[0] == nc); - GGML_ASSERT(src0->nb[0] == sizeof(float)); - - for (int i = 0; i < nr; ++i) { - const int r = ((int32_t *) src1->data)[i]; - - ggml_vec_add_f32(nc, (float *) ((char *) dst->data + r * dst->nb[1]), - (float *) ((char *) dst->data + r * dst->nb[1]), - (float *) ((char *) src0->data + i * src0->nb[1])); - } -} - -void ggml_compute_forward_get_rows_back(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F16: - { - ggml_compute_forward_get_rows_back_f32_f16(params, dst); - } - break; - case GGML_TYPE_F32: - { - ggml_compute_forward_get_rows_back_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } - - //static bool first = true; - //printf("ne0 = %d, ne1 = %d, ne2 = %d\n", dst->ne[0], dst->ne[1], dst->ne[2]); - //if (first) { - // first = false; - //} else { - // for (int k = 0; k < dst->ne[1]; ++k) { - // for (int j = 0; j < dst->ne[0]/16; ++j) { - // for (int i = 0; i < 16; ++i) { - // printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]); - // } - // printf("\n"); - // } - // printf("\n"); - // } - // printf("\n"); - // exit(0); - //} -} - -// ggml_compute_forward_diag - -static void ggml_compute_forward_diag_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - if (params->ith != 0) { - return; - } - - // TODO: handle transposed/permuted matrices - - GGML_TENSOR_UNARY_OP_LOCALS - - GGML_ASSERT(ne00 == ne0); - GGML_ASSERT(ne00 == ne1); - GGML_ASSERT(ne01 == 1); - GGML_ASSERT(ne02 == ne2); - GGML_ASSERT(ne03 == ne3); - - GGML_ASSERT(nb00 == sizeof(float)); - GGML_ASSERT(nb0 == sizeof(float)); - - for (int i3 = 0; i3 < ne3; i3++) { - for (int i2 = 0; i2 < ne2; i2++) { - for (int i1 = 0; i1 < ne1; i1++) { - float * d = (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1); - float * s = (float *) ((char *) src0->data + i3 * nb03 + i2 * nb02); - for (int i0 = 0; i0 < i1; i0++) { - d[i0] = 0; - } - d[i1] = s[i1]; - for (int i0 = i1 + 1; i0 < ne0; i0++) { - d[i0] = 0; - } - } - } - } -} - -void ggml_compute_forward_diag(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_diag_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_diag_mask_inf - 
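The helper below fills everything strictly above a diagonal shifted by n_past with a constant (-INFINITY for diag_mask_inf, 0 for diag_mask_zero), the usual causal-mask pattern. As a reference for the indexing rule only, here is a minimal standalone sketch; causal_mask_2d is a hypothetical name and the flat row-major layout is an assumption for illustration, not the layout used by the op itself:

#include <cmath>
#include <cstdio>
#include <vector>

// Hypothetical illustration of the rule used by ggml_compute_forward_diag_mask_f32:
// for every row j, entries with column i > n_past + j receive `value`.
static void causal_mask_2d(std::vector<float> & m, int n_rows, int n_cols, int n_past, float value) {
    for (int row = 0; row < n_rows; ++row) {
        for (int col = n_past + row + 1; col < n_cols; ++col) {
            m[row * n_cols + col] = value;
        }
    }
}

int main() {
    const int n_rows = 3, n_cols = 4, n_past = 1;
    std::vector<float> m(n_rows * n_cols, 1.0f);
    causal_mask_2d(m, n_rows, n_cols, n_past, -INFINITY);
    for (int r = 0; r < n_rows; ++r) {
        for (int c = 0; c < n_cols; ++c) {
            printf("%6.1f ", m[r * n_cols + c]);
        }
        printf("\n");
    }
    return 0;
}

The real kernel additionally copies src0 into dst first when the op is not run in place (thread 0 does the memcpy, synchronized with a barrier) and splits the rows across threads, with row j starting at ith and advancing by nth.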
-static void ggml_compute_forward_diag_mask_f32(const ggml_compute_params * params, - ggml_tensor * dst, - const float value) { - const ggml_tensor * src0 = dst->src[0]; - - const int ith = params->ith; - const int nth = params->nth; - - const int n_past = ((int32_t *) dst->op_params)[0]; - const bool inplace = src0->data == dst->data; - - GGML_ASSERT(n_past >= 0); - - if (!inplace) { - if (ith == 0) { - // memcpy needs to be synchronized across threads to avoid race conditions. - // => do it in INIT phase - GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); - GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0)); - memcpy(((char *) dst->data), ((char *) src0->data), ggml_nbytes(dst)); - } - ggml_barrier(params->threadpool); - } - - // TODO: handle transposed/permuted matrices - - const int n = ggml_nrows(src0); - const int nc = src0->ne[0]; - const int nr = src0->ne[1]; - const int nz = n / nr; - - GGML_ASSERT(dst->nb[0] == sizeof(float)); - GGML_ASSERT(src0->nb[0] == sizeof(float)); - - for (int k = 0; k < nz; k++) { - for (int j = ith; j < nr; j += nth) { - for (int i = n_past; i < nc; i++) { - if (i > n_past + j) { - *(float *) ((char *) dst->data + k * dst->nb[2] + j * dst->nb[1] + i * dst->nb[0]) = value; - } - } - } - } -} - -void ggml_compute_forward_diag_mask_inf(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_diag_mask_f32(params, dst, -INFINITY); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -void ggml_compute_forward_diag_mask_zero(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_diag_mask_f32(params, dst, 0); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_soft_max - -static void ggml_compute_forward_soft_max_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - const ggml_tensor * src2 = dst->src[2]; - - assert(ggml_is_contiguous(dst)); - assert(ggml_are_same_shape(src0, dst)); - - float scale = 1.0f; - float max_bias = 0.0f; - - memcpy(&scale, (float *) dst->op_params + 0, sizeof(float)); - memcpy(&max_bias, (float *) dst->op_params + 1, sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - GGML_TENSOR_UNARY_OP_LOCALS - - const int64_t nb11 = src1 ? src1->nb[1] : 1; - const int64_t nb12 = src1 ? src1->nb[2] : 1; - const int64_t nb13 = src1 ? src1->nb[3] : 1; - - const int64_t ne12 = src1 ? src1->ne[2] : 1; - const int64_t ne13 = src1 ? src1->ne[3] : 1; - - // TODO: is this supposed to be ceil instead of floor? - // https://huggingface.co/mosaicml/mpt-7b/blob/main/attention.py#L370 - const uint32_t n_head = ne02; - const uint32_t n_head_log2 = 1u << (uint32_t) floor(log2(n_head)); - - const float m0 = powf(2.0f, -(max_bias) / n_head_log2); - const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2); - - float * wp = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith; - - const bool use_f16 = (src1 && src1->type == GGML_TYPE_F16); - - // sinks - const float * sk = src2 ? 
(float *) ((char *) src2->data) : nullptr; - - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = ith; i01 < ne01; i01 += nth) { - const int64_t i11 = i01; - const int64_t i12 = i02 % ne12; - const int64_t i13 = i03 % ne13; - - // ALiBi - const uint32_t h = i02; // head - const float slope = - (max_bias > 0.0f) ? h < n_head_log2 ? powf(m0, h + 1) : powf(m1, 2 * (h - n_head_log2) + 1) : 1.0f; - - float * sp = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); - float * dp = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); - - // broadcast the mask across rows - ggml_fp16_t * mp_f16 = - src1 ? (ggml_fp16_t *) ((char *) src1->data + i11 * nb11 + i12 * nb12 + i13 * nb13) : NULL; - float * mp_f32 = src1 ? (float *) ((char *) src1->data + i11 * nb11 + i12 * nb12 + i13 * nb13) : NULL; - - ggml_vec_cpy_f32(ne00, wp, sp); - ggml_vec_scale_f32(ne00, wp, scale); - if (mp_f32) { - if (use_f16) { - for (int i = 0; i < ne00; ++i) { - wp[i] += slope * GGML_CPU_FP16_TO_FP32(mp_f16[i]); - } - } else { - for (int i = 0; i < ne00; ++i) { - wp[i] += slope * mp_f32[i]; - } - } - } - -#ifndef NDEBUG - for (int i = 0; i < ne00; ++i) { - //printf("p[%d] = %f\n", i, p[i]); - assert(!isnan(wp[i])); - } -#endif - - float max = -INFINITY; - ggml_vec_max_f32(ne00, &max, wp); - - // if we have sinks, make a correction as if they were included in the softmax - if (sk) { - max = MAX(max, sk[i02]); - } - - ggml_float sum = ggml_vec_soft_max_f32(ne00, dp, wp, max); - assert(sum > 0.0); - - if (sk) { - sum += (ggml_float) expf(sk[i02] - max); - } - - sum = 1.0 / sum; - ggml_vec_scale_f32(ne00, dp, sum); - -#ifndef NDEBUG - for (int i = 0; i < ne00; ++i) { - assert(!isnan(dp[i])); - assert(!isinf(dp[i])); - } -#endif - } - } - } -} - -void ggml_compute_forward_soft_max(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_soft_max_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_soft_max_ext_back - -static void ggml_compute_forward_soft_max_ext_back_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(ggml_is_contiguous(src0)); - GGML_ASSERT(ggml_is_contiguous(src1)); - GGML_ASSERT(ggml_is_contiguous(dst)); - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - GGML_ASSERT(ggml_are_same_shape(src1, dst)); - - float scale = 1.0f; - float max_bias = 0.0f; - - memcpy(&scale, (const float *) dst->op_params + 0, sizeof(float)); - memcpy(&max_bias, (const float *) dst->op_params + 1, sizeof(float)); - - GGML_ASSERT(max_bias == 0.0f); - - // TODO: handle transposed/permuted matrices - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - float * dy = (float *) ((char *) src0->data + i1 * src0->nb[1]); - float * y = (float *) ((char *) src1->data + i1 * src1->nb[1]); - float * dx = (float *) ((char *) dst->data + i1 * dst->nb[1]); - -#ifndef NDEBUG - for (int i = 0; i < nc; ++i) { - //printf("p[%d] = %f\n", i, p[i]); - assert(!isnan(dy[i])); - assert(!isnan(y[i])); - } -#endif - // 
Jii = yi - yi*yi - // Jij = -yi*yj - // J = diag(y)-y.T*y - // dx = J * dy - // dxk = sum_i(Jki * dyi) - // dxk = sum_i(-yk*yi * dyi) - (-yk*yk)*dyk + (yk - yk*yk)*dyk - // dxk = sum_i(-yk*yi * dyi) + yk*yk*dyk + yk*dyk - yk*yk*dyk - // dxk = sum_i(-yk*yi * dyi) + yk*dyk - // dxk = -yk * sum_i(yi * dyi) + yk*dyk - // dxk = -yk * dot(y, dy) + yk*dyk - // dxk = yk * (- dot(y, dy) + dyk) - // dxk = yk * (dyk - dot(y, dy)) - // - // post-order: - // dot_y_dy := dot(y, dy) - // dx := dy - // dx := dx - dot_y_dy - // dx := dx * y - - // linear runtime, no additional memory - float dot_y_dy = 0; - ggml_vec_dot_f32(nc, &dot_y_dy, 0, y, 0, dy, 0, 1); - ggml_vec_cpy_f32(nc, dx, dy); - ggml_vec_acc1_f32(nc, dx, -dot_y_dy); - ggml_vec_mul_f32(nc, dx, dx, y); - ggml_vec_scale_f32(nc, dx, scale); - -#ifndef NDEBUG - for (int i = 0; i < nc; ++i) { - assert(!isnan(dx[i])); - assert(!isinf(dx[i])); - } -#endif - } -} - -void ggml_compute_forward_soft_max_ext_back(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_soft_max_ext_back_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_clamp - -static void ggml_compute_forward_clamp_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - float min; - float max; - memcpy(&min, (float *) dst->op_params + 0, sizeof(float)); - memcpy(&max, (float *) dst->op_params + 1, sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - const int n = ggml_nrows(src0); - const int nc = src0->ne[0]; - - const size_t nb00 = src0->nb[0]; - const size_t nb01 = src0->nb[1]; - - const size_t nb0 = dst->nb[0]; - const size_t nb1 = dst->nb[1]; - - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb00 == sizeof(float)); - - for (int j = ith; j < n; j += nth) { - float * dst_ptr = (float *) ((char *) dst->data + j * nb1); - float * src0_ptr = (float *) ((char *) src0->data + j * nb01); - - for (int i = 0; i < nc; i++) { - dst_ptr[i] = MAX(MIN(src0_ptr[i], max), min); - } - } -} - -static void ggml_compute_forward_clamp_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - float min; - float max; - memcpy(&min, (float *) dst->op_params + 0, sizeof(float)); - memcpy(&max, (float *) dst->op_params + 1, sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - const int n = ggml_nrows(src0); - const int nc = src0->ne[0]; - - const size_t nb00 = src0->nb[0]; - const size_t nb01 = src0->nb[1]; - - const size_t nb0 = dst->nb[0]; - const size_t nb1 = dst->nb[1]; - - GGML_ASSERT(nb0 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); - - for (int j = ith; j < n; j += nth) { - ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + j * nb1); - ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + j * nb01); - - for (int i = 0; i < nc; i++) { - float v = GGML_CPU_FP16_TO_FP32(src0_ptr[i]); - dst_ptr[i] = GGML_CPU_FP32_TO_FP16(MAX(MIN(v, max), min)); - } - } -} - -void ggml_compute_forward_clamp(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_clamp_f32(params, dst); - } - break; - case GGML_TYPE_F16: - { - ggml_compute_forward_clamp_f16(params, dst); - } - break; - case GGML_TYPE_BF16: - case GGML_TYPE_Q4_0: 
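// NOTE: every type from here down to GGML_TYPE_COUNT shares the GGML_ABORT below;
// the CPU clamp op only implements F32 and F16 element loops, so the quantized and
// integer types listed here all abort rather than being clamped or passed through.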
- case GGML_TYPE_Q4_1: - case GGML_TYPE_Q5_0: - case GGML_TYPE_Q5_1: - case GGML_TYPE_Q8_0: - case GGML_TYPE_Q8_1: - case GGML_TYPE_MXFP4: - case GGML_TYPE_Q2_K: - case GGML_TYPE_Q3_K: - case GGML_TYPE_Q4_K: - case GGML_TYPE_Q5_K: - case GGML_TYPE_Q6_K: - case GGML_TYPE_TQ1_0: - case GGML_TYPE_TQ2_0: - case GGML_TYPE_IQ2_XXS: - case GGML_TYPE_IQ2_XS: - case GGML_TYPE_IQ3_XXS: - case GGML_TYPE_IQ1_S: - case GGML_TYPE_IQ1_M: - case GGML_TYPE_IQ4_NL: - case GGML_TYPE_IQ4_XS: - case GGML_TYPE_IQ3_S: - case GGML_TYPE_IQ2_S: - case GGML_TYPE_Q8_K: - case GGML_TYPE_I8: - case GGML_TYPE_I16: - case GGML_TYPE_I32: - case GGML_TYPE_I64: - case GGML_TYPE_F64: - case GGML_TYPE_COUNT: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_rope - -static float rope_yarn_ramp(const float low, const float high, const int i0) { - const float y = (i0 / 2 - low) / MAX(0.001f, high - low); - return 1 - MIN(1, MAX(0, y)); -} - -// YaRN algorithm based on LlamaYaRNScaledRotaryEmbedding.py from https://github.com/jquesnelle/yarn -// MIT licensed. Copyright (c) 2023 Jeffrey Quesnelle and Bowen Peng. -static void rope_yarn(float theta_extrap, - float freq_scale, - float corr_dims[2], - int64_t i0, - float ext_factor, - float mscale, - float * cos_theta, - float * sin_theta) { - // Get n-d rotational scaling corrected for extrapolation - float theta_interp = freq_scale * theta_extrap; - float theta = theta_interp; - if (ext_factor != 0.0f) { - float ramp_mix = rope_yarn_ramp(corr_dims[0], corr_dims[1], i0) * ext_factor; - theta = theta_interp * (1 - ramp_mix) + theta_extrap * ramp_mix; - - // Get n-d magnitude scaling corrected for interpolation - mscale *= 1.0f + 0.1f * logf(1.0f / freq_scale); - } - *cos_theta = cosf(theta) * mscale; - *sin_theta = sinf(theta) * mscale; -} - -static void ggml_rope_cache_init(float theta_base, - float freq_scale, - const float * freq_factors, - float corr_dims[2], - int64_t ne0, - float ext_factor, - float mscale, - float * cache, - float sin_sign, - float theta_scale) { - // ref: https://github.com/jquesnelle/yarn/blob/master/scaled_rope/LlamaYaRNScaledRotaryEmbedding.py - float theta = theta_base; - for (int64_t i0 = 0; i0 < ne0; i0 += 2) { - const float ff = freq_factors ? freq_factors[i0 / 2] : 1.0f; - rope_yarn(theta / ff, freq_scale, corr_dims, i0, ext_factor, mscale, &cache[i0 + 0], &cache[i0 + 1]); - cache[i0 + 1] *= sin_sign; - - theta *= theta_scale; - } -} - -static void ggml_mrope_cache_init(float theta_base_t, - float theta_base_h, - float theta_base_w, - float theta_base_e, - int sections[4], - bool is_imrope, - bool indep_sects, - float freq_scale, - const float * freq_factors, - float corr_dims[2], - int64_t ne0, - float ext_factor, - float mscale, - float * cache, - float sin_sign, - float theta_scale) { - // ref: https://github.com/jquesnelle/yarn/blob/master/scaled_rope/LlamaYaRNScaledRotaryEmbedding.py - float theta_t = theta_base_t; - float theta_h = theta_base_h; - float theta_w = theta_base_w; - float theta_e = theta_base_e; // extra position id for vision encoder - int sect_dims = sections[0] + sections[1] + sections[2] + sections[3]; - int sec_w = sections[1] + sections[0]; - int sec_e = sections[2] + sec_w; - GGML_ASSERT(sect_dims <= ne0); - - for (int64_t i0 = 0; i0 < ne0; i0 += 2) { - const float ff = freq_factors ? freq_factors[i0 / 2] : 1.0f; - - int sector = (i0 / 2) % sect_dims; - if (indep_sects) { - // compute theta independently for each dim sections - // (i.e. 
reset corresponding theta when `i0` go from one section to another) - if (sector == 0) { - theta_t = theta_base_t; - } else if (sector == sections[0]) { - theta_h = theta_base_h; - ; - } else if (sector == sec_w) { - theta_w = theta_base_w; - } else if (sector == sec_e) { - theta_e = theta_base_e; - } - } - - float theta = theta_t; - if (is_imrope) { // qwen3vl apply interleaved mrope - if (sector % 3 == 1 && sector < 3 * sections[1]) { - theta = theta_h; - } else if (sector % 3 == 2 && sector < 3 * sections[2]) { - theta = theta_w; - } else if (sector % 3 == 0 && sector < 3 * sections[0]) { - theta = theta_t; - } else { - theta = theta_e; - } - } else { - if (sector >= sections[0] && sector < sec_w) { - theta = theta_h; - } else if (sector >= sec_w && sector < sec_w + sections[2]) { - theta = theta_w; - } else if (sector >= sec_w + sections[2]) { - theta = theta_e; - } - } - - rope_yarn(theta / ff, freq_scale, corr_dims, i0, ext_factor, mscale, &cache[i0 + 0], &cache[i0 + 1]); - cache[i0 + 1] *= sin_sign; - - theta_t *= theta_scale; - theta_w *= theta_scale; - theta_h *= theta_scale; - theta_e *= theta_scale; - } -} - -template -static void rotate_pairs(const int64_t n, - const int64_t n_offset, - const float * cache, - const T * src_data, - T * dst_data, - const int scale = 2) { - for (int64_t i0 = 0; i0 < n; i0 += 2) { - const int64_t ic = - i0 / scale; // hack for GGML_ROPE_TYPE_NORMAL, where we need ic = i0; for all other cases, ic = i0/2 - - const float cos_theta = cache[i0 + 0]; - const float sin_theta = cache[i0 + 1]; - - const T * const src = src_data + ic; - T * dst = dst_data + ic; - - const float x0 = type_conversion_table::to_f32(src[0]); - const float x1 = type_conversion_table::to_f32(src[n_offset]); - - dst[0] = type_conversion_table::from_f32(x0 * cos_theta - x1 * sin_theta); - dst[n_offset] = type_conversion_table::from_f32(x0 * sin_theta + x1 * cos_theta); - } -} - -template //float or ggml_fp16_t -static void ggml_compute_forward_rope_flt(const ggml_compute_params * params, ggml_tensor * dst, const bool forward) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - const ggml_tensor * src2 = dst->src[2]; - - GGML_ASSERT(src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_I32); - - float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow; - int sections[4]; - - //const int n_past = ((int32_t *) dst->op_params)[0]; - const int n_dims = ((int32_t *) dst->op_params)[1]; - const int mode = ((int32_t *) dst->op_params)[2]; - //const int n_ctx = ((int32_t *) dst->op_params)[3]; - const int n_ctx_orig = ((int32_t *) dst->op_params)[4]; - - memcpy(&freq_base, (int32_t *) dst->op_params + 5, sizeof(float)); - memcpy(&freq_scale, (int32_t *) dst->op_params + 6, sizeof(float)); - memcpy(&ext_factor, (int32_t *) dst->op_params + 7, sizeof(float)); - memcpy(&attn_factor, (int32_t *) dst->op_params + 8, sizeof(float)); - memcpy(&beta_fast, (int32_t *) dst->op_params + 9, sizeof(float)); - memcpy(&beta_slow, (int32_t *) dst->op_params + 10, sizeof(float)); - memcpy(§ions, (int32_t *) dst->op_params + 11, sizeof(int) * 4); - - GGML_TENSOR_UNARY_OP_LOCALS - - //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3); - //printf("n_past = %d, ne2 = %d\n", n_past, ne2); - - GGML_ASSERT(nb0 == nb00); - GGML_ASSERT(nb0 == sizeof(T)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nr = ggml_nrows(dst); - - GGML_ASSERT(n_dims <= ne0); - GGML_ASSERT(n_dims 
% 2 == 0); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - // row index used to determine which thread to use - int ir = 0; - - const float theta_scale = powf(freq_base, -2.0f / n_dims); - - float corr_dims[2]; - ggml_rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow, corr_dims); - - const bool is_imrope = mode == GGML_ROPE_TYPE_IMROPE; // qwen3vl apply interleaved mrope - const bool mrope_used = - mode & GGML_ROPE_TYPE_MROPE; // ggml_rope_multi, note: also true for vision (24 & 8 == true) and for imrope - const bool is_vision = mode == GGML_ROPE_TYPE_VISION; - - if (mrope_used) { - GGML_ASSERT(sections[0] > 0 || sections[1] > 0 || sections[2] > 0); - } - - if (is_vision) { - GGML_ASSERT(n_dims == ne0 / 2); - } - - const float * freq_factors = NULL; - if (src2 != NULL) { - GGML_ASSERT(src2->type == GGML_TYPE_F32); - GGML_ASSERT(src2->ne[0] >= n_dims / 2); - freq_factors = (const float *) src2->data; - } - - // backward process uses inverse rotation by cos and sin. - // cos and sin build a rotation matrix, where the inverse is the transpose. - // this essentially just switches the sign of sin. - const float sin_sign = forward ? 1.0f : -1.0f; - - const int32_t * pos = (const int32_t *) src1->data; - - for (int64_t i3 = 0; i3 < ne3; i3++) { // batch - for (int64_t i2 = 0; i2 < ne2; i2++) { // seq-len - - float * cache = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32) * ith; - if (!mrope_used) { - const int64_t p = pos[i2]; - ggml_rope_cache_init(p, freq_scale, freq_factors, corr_dims, ne0, ext_factor, attn_factor, cache, - sin_sign, theta_scale); - } else { - const int64_t p_t = pos[i2]; - const int64_t p_h = pos[i2 + ne2]; - const int64_t p_w = pos[i2 + ne2 * 2]; - const int64_t p_e = pos[i2 + ne2 * 3]; - ggml_mrope_cache_init(p_t, p_h, p_w, p_e, sections, is_imrope, is_vision, freq_scale, freq_factors, - corr_dims, ne0, ext_factor, attn_factor, cache, sin_sign, theta_scale); - } - - for (int64_t i1 = 0; i1 < ne1; i1++) { // attn-heads - if (ir++ < ir0) { - continue; - } - if (ir > ir1) { - break; - } - - T * src = (T *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01); - T * dst_data = (T *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1); - - switch (mode) { - case GGML_ROPE_TYPE_NORMAL: - rotate_pairs(n_dims, 1, cache, src, dst_data, 1); - break; - case GGML_ROPE_TYPE_NEOX: - case GGML_ROPE_TYPE_MROPE: - case GGML_ROPE_TYPE_IMROPE: - rotate_pairs(n_dims, n_dims / 2, cache, src, dst_data); - break; - case GGML_ROPE_TYPE_VISION: - rotate_pairs(ne0, n_dims, cache, src, dst_data); - break; - default: - GGML_ABORT("rope type not supported"); - } - - if (!is_vision) { - // fill the remain channels with data from src tensor - for (int64_t i0 = n_dims; i0 < ne0; i0 += 2) { - const T * const src = - (T *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01 + i0 * nb00); - T * dst_data = (T *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1 + i0 * nb0); - - dst_data[0] = src[0]; - dst_data[1] = src[1]; - } - } - } //attn-heads - } - } -} - -void ggml_compute_forward_rope(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F16: - { - ggml_compute_forward_rope_flt(params, dst, true); - } - break; - case GGML_TYPE_F32: - { - ggml_compute_forward_rope_flt(params, dst, true); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// 
ggml_compute_forward_rope_back - -void ggml_compute_forward_rope_back(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F16: - { - ggml_compute_forward_rope_flt(params, dst, false); - } - break; - case GGML_TYPE_F32: - { - ggml_compute_forward_rope_flt(params, dst, false); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_conv_transpose_1d - -static void ggml_compute_forward_conv_transpose_1d_f16_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); - - GGML_TENSOR_BINARY_OP_LOCALS - - const int ith = params->ith; - const int nth = params->nth; - - const int nk = ne00 * ne01 * ne02; - - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb10 == sizeof(float)); - - if (ith == 0) { - memset(params->wdata, 0, params->wsize); - - // permute kernel data (src0) from (K x Cout x Cin) to (Cin x K x Cout) - { - ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; - - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = 0; i01 < ne01; i01++) { - const ggml_fp16_t * const src = (ggml_fp16_t *) ((char *) src0->data + i02 * nb02 + i01 * nb01); - ggml_fp16_t * dst_data = wdata + i01 * ne00 * ne02; - for (int64_t i00 = 0; i00 < ne00; i00++) { - dst_data[i00 * ne02 + i02] = src[i00]; - } - } - } - } - - // permute source data (src1) from (L x Cin) to (Cin x L) - { - ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + nk; - ggml_fp16_t * dst_data = wdata; - - for (int64_t i11 = 0; i11 < ne11; i11++) { - const float * const src = (float *) ((char *) src1->data + i11 * nb11); - for (int64_t i10 = 0; i10 < ne10; i10++) { - dst_data[i10 * ne11 + i11] = GGML_CPU_FP32_TO_FP16(src[i10]); - } - } - } - - // need to zero dst since we are accumulating into it - memset(dst->data, 0, ggml_nbytes(dst)); - } - ggml_barrier(params->threadpool); - - const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; - - // total rows in dst - const int nr = ne1; - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; - ggml_fp16_t * const wdata_src = wdata + nk; - - for (int i1 = ir0; i1 < ir1; i1++) { - float * dst_data = (float *) ((char *) dst->data + i1 * nb1); - ggml_fp16_t * wdata_kernel = wdata + i1 * ne02 * ne00; - for (int i10 = 0; i10 < ne10; i10++) { - const int i1n = i10 * ne11; - for (int i00 = 0; i00 < ne00; i00++) { - float v = 0; - ggml_vec_dot_f16(ne02, &v, 0, (ggml_fp16_t *) wdata_src + i1n, 0, - (ggml_fp16_t *) wdata_kernel + i00 * ne02, 0, 1); - dst_data[i10 * s0 + i00] += v; - } - } - } -} - -static void ggml_compute_forward_conv_transpose_1d_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(src0->type == GGML_TYPE_F32); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); - - GGML_TENSOR_BINARY_OP_LOCALS - - const int ith = params->ith; - const int nth = params->nth; - - const int nk = ne00 * ne01 * ne02; - - GGML_ASSERT(nb00 == sizeof(float)); - GGML_ASSERT(nb10 == sizeof(float)); - - if (ith == 0) { - 
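// Init phase: only thread 0 executes this block; the other threads synchronize at the
// ggml_barrier() below. The kernel and the source row are repacked so that the
// input-channel dimension becomes contiguous, which lets the ggml_vec_dot_f32 call in
// the main loop reduce over channels with a single contiguous dot product, and dst is
// zeroed here because the main loop accumulates (+=) partial results into it.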
memset(params->wdata, 0, params->wsize); - - // prepare kernel data (src0) from (K x Cout x Cin) to (Cin x K x Cout) - { - float * const wdata = (float *) params->wdata + 0; - - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = 0; i01 < ne01; i01++) { - const float * const src = (float *) ((char *) src0->data + i02 * nb02 + i01 * nb01); - float * dst_data = wdata + i01 * ne00 * ne02; - for (int64_t i00 = 0; i00 < ne00; i00++) { - dst_data[i00 * ne02 + i02] = src[i00]; - } - } - } - } - - // prepare source data (src1) - { - float * const wdata = (float *) params->wdata + nk; - float * dst_data = wdata; - - for (int64_t i11 = 0; i11 < ne11; i11++) { - const float * const src = (float *) ((char *) src1->data + i11 * nb11); - for (int64_t i10 = 0; i10 < ne10; i10++) { - dst_data[i10 * ne11 + i11] = src[i10]; - } - } - } - - // need to zero dst since we are accumulating into it - memset(dst->data, 0, ggml_nbytes(dst)); - } - ggml_barrier(params->threadpool); - - const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; - - // total rows in dst - const int nr = ne1; - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - float * const wdata = (float *) params->wdata + 0; - float * const wdata_src = wdata + nk; - - for (int i1 = ir0; i1 < ir1; i1++) { - float * dst_data = (float *) ((char *) dst->data + i1 * nb1); - float * wdata_kernel = wdata + i1 * ne02 * ne00; - for (int i10 = 0; i10 < ne10; i10++) { - const int i1n = i10 * ne11; - for (int i00 = 0; i00 < ne00; i00++) { - float v = 0; - ggml_vec_dot_f32(ne02, &v, 0, wdata_src + i1n, 0, wdata_kernel + i00 * ne02, 0, 1); - dst_data[i10 * s0 + i00] += v; - } - } - } -} - -void ggml_compute_forward_conv_transpose_1d(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F16: - { - ggml_compute_forward_conv_transpose_1d_f16_f32(params, dst); - } - break; - case GGML_TYPE_F32: - { - ggml_compute_forward_conv_transpose_1d_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_im2col_f32 -// src0: kernel [OC, IC, KH, KW] -// src1: image [N, IC, IH, IW] -// dst: result [N, OH, OW, IC*KH*KW] -static void ggml_compute_forward_im2col_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); - - GGML_TENSOR_BINARY_OP_LOCALS; - - const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; - const int32_t s1 = ((const int32_t *) (dst->op_params))[1]; - const int32_t p0 = ((const int32_t *) (dst->op_params))[2]; - const int32_t p1 = ((const int32_t *) (dst->op_params))[3]; - const int32_t d0 = ((const int32_t *) (dst->op_params))[4]; - const int32_t d1 = ((const int32_t *) (dst->op_params))[5]; - const bool is_2D = ((const int32_t *) (dst->op_params))[6] == 1; - - const int ith = params->ith; - const int nth = params->nth; - - const int64_t N = is_2D ? ne13 : ne12; - const int64_t IC = is_2D ? ne12 : ne11; - const int64_t IH = is_2D ? ne11 : 1; - const int64_t IW = ne10; - - const int64_t KH = is_2D ? ne01 : 1; - const int64_t KW = ne00; - - const int64_t OH = is_2D ? ne2 : 1; - const int64_t OW = ne1; - - int ofs0 = is_2D ? nb13 : nb12; - int ofs1 = is_2D ? 
nb12 : nb11; - - GGML_ASSERT(nb10 == sizeof(float)); - - // im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW] - { - float * const wdata = (float *) dst->data; - - for (int64_t in = 0; in < N; in++) { - for (int64_t ioh = 0; ioh < OH; ioh++) { // 1 - for (int64_t iow = 0; iow < OW; iow++) { - for (int64_t iic = ith; iic < IC; iic += nth) { - // micro kernel - float * dst_data = wdata + (in * OH * OW + ioh * OW + iow) * (IC * KH * KW); // [IC, KH, KW] - const float * const src_data = - (float *) ((char *) src1->data + in * ofs0 + iic * ofs1); // [IH, IW] - - for (int64_t ikh = 0; ikh < KH; ikh++) { // 1 - for (int64_t ikw = 0; ikw < KW; ikw++) { - const int64_t iiw = iow * s0 + ikw * d0 - p0; - const int64_t iih = ioh * s1 + ikh * d1 - p1; - - if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) { - dst_data[iic * (KH * KW) + ikh * KW + ikw] = 0; - } else { - dst_data[iic * (KH * KW) + ikh * KW + ikw] = (src_data[iih * IW + iiw]); - } - } - } - } - } - } - } - } -} - -// ggml_compute_forward_im2col_f16 -// src0: kernel [OC, IC, KH, KW] -// src1: image [N, IC, IH, IW] -// dst: result [N, OH, OW, IC*KH*KW] -static void ggml_compute_forward_im2col_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F16); - - GGML_TENSOR_BINARY_OP_LOCALS; - - const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; - const int32_t s1 = ((const int32_t *) (dst->op_params))[1]; - const int32_t p0 = ((const int32_t *) (dst->op_params))[2]; - const int32_t p1 = ((const int32_t *) (dst->op_params))[3]; - const int32_t d0 = ((const int32_t *) (dst->op_params))[4]; - const int32_t d1 = ((const int32_t *) (dst->op_params))[5]; - const bool is_2D = ((const int32_t *) (dst->op_params))[6] == 1; - - const int ith = params->ith; - const int nth = params->nth; - - const int64_t N = is_2D ? ne13 : ne12; - const int64_t IC = is_2D ? ne12 : ne11; - const int64_t IH = is_2D ? ne11 : 1; - const int64_t IW = ne10; - - const int64_t KH = is_2D ? ne01 : 1; - const int64_t KW = ne00; - - const int64_t OH = is_2D ? ne2 : 1; - const int64_t OW = ne1; - - int ofs0 = is_2D ? nb13 : nb12; - int ofs1 = is_2D ? 
nb12 : nb11; - - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb10 == sizeof(float)); - - // im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW] - { - ggml_fp16_t * const wdata = (ggml_fp16_t *) dst->data; - - for (int64_t in = 0; in < N; in++) { - for (int64_t ioh = 0; ioh < OH; ioh++) { // 1 - for (int64_t iow = 0; iow < OW; iow++) { - for (int64_t iic = ith; iic < IC; iic += nth) { - // micro kernel - ggml_fp16_t * dst_data = - wdata + (in * OH * OW + ioh * OW + iow) * (IC * KH * KW); // [IC, KH, KW] - const float * const src_data = - (float *) ((char *) src1->data + in * ofs0 + iic * ofs1); // [IH, IW] - - for (int64_t ikh = 0; ikh < KH; ikh++) { // 1 - for (int64_t ikw = 0; ikw < KW; ikw++) { - const int64_t iiw = iow * s0 + ikw * d0 - p0; - const int64_t iih = ioh * s1 + ikh * d1 - p1; - - if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) { - dst_data[iic * (KH * KW) + ikh * KW + ikw] = 0; - } else { - dst_data[iic * (KH * KW) + ikh * KW + ikw] = - GGML_CPU_FP32_TO_FP16(src_data[iih * IW + iiw]); - } - } - } - } - } - } - } - } -} - -void ggml_compute_forward_im2col(const ggml_compute_params * params, ggml_tensor * dst) { - switch (dst->type) { - case GGML_TYPE_F16: - { - ggml_compute_forward_im2col_f16(params, dst); - } - break; - case GGML_TYPE_F32: - { - ggml_compute_forward_im2col_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_im2col_back_f32 - -void ggml_compute_forward_im2col_back_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; // gradients of forward pass output - const ggml_tensor * src1 = dst->src[1]; // convolution kernel - - GGML_ASSERT(src0->type == GGML_TYPE_F32); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); - - GGML_TENSOR_BINARY_OP_LOCALS; - - const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; - const int32_t s1 = ((const int32_t *) (dst->op_params))[1]; - const int32_t p0 = ((const int32_t *) (dst->op_params))[2]; - const int32_t p1 = ((const int32_t *) (dst->op_params))[3]; - const int32_t d0 = ((const int32_t *) (dst->op_params))[4]; - const int32_t d1 = ((const int32_t *) (dst->op_params))[5]; - const bool is_2D = ((const int32_t *) (dst->op_params))[6] == 1; - - const int ith = params->ith; - const int nth = params->nth; - - const int64_t N = is_2D ? ne3 : ne2; - const int64_t IC = is_2D ? ne2 : ne1; - const int64_t IH = is_2D ? ne1 : 1; - const int64_t IW = ne0; - - const int64_t KH = is_2D ? ne11 : 1; - const int64_t KW = ne10; - - const int64_t OH = is_2D ? ne02 : 1; - const int64_t OW = ne01; - - int ofs0 = is_2D ? nb3 : nb2; - int ofs1 = is_2D ? nb2 : nb1; - - GGML_ASSERT(nb0 == sizeof(float)); - - // im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW] - { - float * const wdata = (float *) dst->data; - - for (int64_t in = 0; in < N; in++) { - for (int64_t iic = ith; iic < IC; iic += nth) { - for (int64_t iih = 0; iih < IH; iih++) { - for (int64_t iiw = 0; iiw < IW; iiw++) { - // micro kernel - float grad = 0.0f; - for (int64_t ikh = 0; ikh < KH; ikh++) { - for (int64_t ikw = 0; ikw < KW; ikw++) { - // For s0 > 1 some values were skipped over in the forward pass. - // These values have tmpw % s0 != 0 and need to be skipped in the backwards pass as well. - const int64_t tmpw = (iiw + p0 - ikw * d0); - if (tmpw % s0 != 0) { - continue; - } - const int64_t iow = tmpw / s0; - - // Equivalent logic as above except for s1. 
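// Height is recovered the same way the width was above: the forward pass used
// iih = ioh*s1 + ikh*d1 - p1, so here ioh = (iih + p1 - ikh*d1) / s1, and taps where
// the division is not exact are skipped because that input row was never read for
// this kernel offset. Worked example: with s1 = 2, p1 = 1, d1 = 1 and ikh = 0,
// input row iih = 3 maps back to output row ioh = (3 + 1 - 0) / 2 = 2, while
// iih = 4 gives tmph = 5, which is odd and therefore contributes no gradient.
// In the 1D case there is a single output row, so ioh is fixed to 0.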
- int64_t ioh; - if (is_2D) { - const int64_t tmph = iih + p1 - ikh * d1; - - if (tmph % s1 != 0) { - continue; - } - - ioh = tmph / s1; - } else { - ioh = 0; - } - - if (iow < 0 || iow >= OW || ioh < 0 || ioh >= OH) { - continue; - } - - const float * const grad_in = - (const float *) src0->data + - (in * OH * OW + ioh * OW + iow) * (IC * KH * KW); // [IC, KH, KW] - grad += grad_in[iic * (KH * KW) + ikh * KW + ikw]; - } - } - float * dst_data = (float *) ((char *) wdata + (in * ofs0 + iic * ofs1)); // [IH, IW] - dst_data[iih * IW + iiw] = grad; - } - } - } - } - } -} - -// ggml_compute_forward_im2col_3d_f16 -// src0: kernel [OC*IC, KD, KH, KW] -// src1: image [N*IC, ID, IH, IW] -// dst: result [N*OD, OH, OW, IC * KD * KH * KW] -static void ggml_compute_forward_im2col_3d_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F16); - - GGML_TENSOR_BINARY_OP_LOCALS; - - const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; - const int32_t s1 = ((const int32_t *) (dst->op_params))[1]; - const int32_t s2 = ((const int32_t *) (dst->op_params))[2]; - const int32_t p0 = ((const int32_t *) (dst->op_params))[3]; - const int32_t p1 = ((const int32_t *) (dst->op_params))[4]; - const int32_t p2 = ((const int32_t *) (dst->op_params))[5]; - const int32_t d0 = ((const int32_t *) (dst->op_params))[6]; - const int32_t d1 = ((const int32_t *) (dst->op_params))[7]; - const int32_t d2 = ((const int32_t *) (dst->op_params))[8]; - const int32_t IC = ((const int32_t *) (dst->op_params))[9]; - - const int ith = params->ith; - const int nth = params->nth; - - const int64_t N = ne13 / IC; - const int64_t ID = ne12; - const int64_t IH = ne11; - const int64_t IW = ne10; - - const int64_t OC = ne03 / IC; - GGML_UNUSED(OC); - const int64_t KD = ne02; - const int64_t KH = ne01; - const int64_t KW = ne00; - - const int64_t OD = ne3 / N; - const int64_t OH = ne2; - const int64_t OW = ne1; - const int64_t OH_OW = OH * OW; - const int64_t KD_KH_KW = KD * KH * KW; - const int64_t KH_KW = KH * KW; - const int64_t IC_KD_KH_KW = IC * KD * KH * KW; - - GGML_ASSERT(nb10 == sizeof(float)); - - // im2col: [N*IC, ID, IH, IW] => [N*OD, OH, OW, IC * KD * KH * KW] - { - ggml_fp16_t * const wdata = (ggml_fp16_t *) dst->data; - - for (int64_t in = 0; in < N; in++) { - for (int64_t iod = 0; iod < OD; iod++) { - for (int64_t ioh = 0; ioh < OH; ioh++) { - for (int64_t iow = 0; iow < OW; iow++) { - for (int64_t iic = ith; iic < IC; iic += nth) { - // micro kernel - ggml_fp16_t * dst_data = wdata + (in * OD * OH_OW + iod * OH_OW + ioh * OW + iow) * - IC_KD_KH_KW; // [IC, KD, KH, KW] - const float * const src_data = - (const float *) ((const char *) src1->data + (in * IC + iic) * nb13); // [ID, IH, IW] - - for (int64_t ikd = 0; ikd < KD; ikd++) { - for (int64_t ikh = 0; ikh < KH; ikh++) { - for (int64_t ikw = 0; ikw < KW; ikw++) { - const int64_t iiw = iow * s0 + ikw * d0 - p0; - const int64_t iih = ioh * s1 + ikh * d1 - p1; - const int64_t iid = iod * s2 + ikd * d2 - p2; - - if (iid < 0 || iid >= ID || iih < 0 || iih >= IH || iiw < 0 || iiw >= IW || - iid < 0 || iid >= ID) { - dst_data[iic * KD_KH_KW + ikd * KH_KW + ikh * KW + ikw] = 0; - } else { - const float * const s = - (const float *) ((const char *) src_data + iid * nb12 + iih * nb11 + - iiw * nb10); // [ID, IH, IW] - dst_data[iic * KD_KH_KW + ikd * KH_KW + ikh * KW 
+ ikw] = - GGML_CPU_FP32_TO_FP16(*s); - } - } - } - } - } - } - } - } - } - } -} - -// ggml_compute_forward_im2col_3d_f32 -// src0: kernel [OC*IC, KD, KH, KW] -// src1: image [N*IC, ID, IH, IW] -// dst: result [N*OD, OH, OW, IC * KD * KH * KW] -static void ggml_compute_forward_im2col_3d_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); - - GGML_TENSOR_BINARY_OP_LOCALS; - - const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; - const int32_t s1 = ((const int32_t *) (dst->op_params))[1]; - const int32_t s2 = ((const int32_t *) (dst->op_params))[2]; - const int32_t p0 = ((const int32_t *) (dst->op_params))[3]; - const int32_t p1 = ((const int32_t *) (dst->op_params))[4]; - const int32_t p2 = ((const int32_t *) (dst->op_params))[5]; - const int32_t d0 = ((const int32_t *) (dst->op_params))[6]; - const int32_t d1 = ((const int32_t *) (dst->op_params))[7]; - const int32_t d2 = ((const int32_t *) (dst->op_params))[8]; - const int32_t IC = ((const int32_t *) (dst->op_params))[9]; - - const int ith = params->ith; - const int nth = params->nth; - - const int64_t N = ne13 / IC; - const int64_t ID = ne12; - const int64_t IH = ne11; - const int64_t IW = ne10; - - const int64_t OC = ne03 / IC; - GGML_UNUSED(OC); - const int64_t KD = ne02; - const int64_t KH = ne01; - const int64_t KW = ne00; - - const int64_t OD = ne3 / N; - const int64_t OH = ne2; - const int64_t OW = ne1; - - const int64_t OH_OW = OH * OW; - const int64_t KD_KH_KW = KD * KH * KW; - const int64_t KH_KW = KH * KW; - const int64_t IC_KD_KH_KW = IC * KD * KH * KW; - - GGML_ASSERT(nb10 == sizeof(float)); - - // im2col: [N*IC, ID, IH, IW] => [N*OD, OH, OW, IC * KD * KH * KW] - { - float * const wdata = (float *) dst->data; - - for (int64_t in = 0; in < N; in++) { - for (int64_t iod = 0; iod < OD; iod++) { - for (int64_t ioh = 0; ioh < OH; ioh++) { - for (int64_t iow = 0; iow < OW; iow++) { - for (int64_t iic = ith; iic < IC; iic += nth) { - // micro kernel - float * dst_data = wdata + (in * OD * OH_OW + iod * OH_OW + ioh * OW + iow) * - IC_KD_KH_KW; // [IC, KD, KH, KW] - const float * const src_data = - (const float *) ((const char *) src1->data + (in * IC + iic) * nb13); // [ID, IH, IW] - - for (int64_t ikd = 0; ikd < KD; ikd++) { - for (int64_t ikh = 0; ikh < KH; ikh++) { - for (int64_t ikw = 0; ikw < KW; ikw++) { - const int64_t iiw = iow * s0 + ikw * d0 - p0; - const int64_t iih = ioh * s1 + ikh * d1 - p1; - const int64_t iid = iod * s2 + ikd * d2 - p2; - - if (iid < 0 || iid >= ID || iih < 0 || iih >= IH || iiw < 0 || iiw >= IW || - iid < 0 || iid >= ID) { - dst_data[iic * KD_KH_KW + ikd * KH_KW + ikh * KW + ikw] = 0; - } else { - const float * const s = - (const float *) ((const char *) src_data + iid * nb12 + iih * nb11 + - iiw * nb10); // [ID, IH, IW] - dst_data[iic * KD_KH_KW + ikd * KH_KW + ikh * KW + ikw] = *s; - } - } - } - } - } - } - } - } - } - } -} - -void ggml_compute_forward_im2col_3d(const ggml_compute_params * params, ggml_tensor * dst) { - switch (dst->type) { - case GGML_TYPE_F16: - { - ggml_compute_forward_im2col_3d_f16(params, dst); - } - break; - case GGML_TYPE_F32: - { - ggml_compute_forward_im2col_3d_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -static void ggml_call_mul_mat(ggml_type type, - const ggml_compute_params * params, - int64_t m, - int64_t n, - int64_t k, - void * a, - void 
* b, - float * c) { - const ggml_type_traits * traits = ggml_get_type_traits(type); - struct ggml_tensor src1 = {}; - src1.type = type; - src1.ne[0] = k; - src1.ne[1] = m; - src1.ne[2] = 1; - src1.ne[3] = 1; - src1.nb[0] = traits->type_size; - src1.nb[1] = k * traits->type_size; - src1.nb[2] = src1.nb[1]; - src1.nb[3] = src1.nb[2]; - src1.data = a; - - struct ggml_tensor src0 = {}; - src0.type = type; - src0.ne[0] = k; - src0.ne[1] = n; - src0.ne[2] = 1; - src0.ne[3] = 1; - src0.nb[0] = traits->type_size; - src0.nb[1] = k * traits->type_size; - src0.nb[2] = src0.nb[1]; - src0.nb[3] = src0.nb[2]; - src0.data = b; - - struct ggml_tensor dst = {}; - dst.ne[0] = n; - dst.ne[1] = m; - dst.ne[2] = 1; - dst.ne[3] = 1; - dst.nb[0] = sizeof(float); - dst.nb[1] = n * sizeof(float); - dst.nb[2] = dst.nb[1]; - dst.nb[3] = dst.nb[2]; - dst.data = c; - dst.src[0] = &src0; - dst.src[1] = &src1; - - ggml_compute_forward_mul_mat(params, &dst); -} - -static inline int64_t ggml_wrap_around(int64_t coord, int64_t size) { - return (coord + size) % size; // adding size avoids negative number weirdness -} - -// ggml_compute_forward_conv_2d - -static void ggml_compute_forward_conv_2d_impl(const ggml_compute_params * params, - const ggml_tensor * kernel, // [KW, KH, IC, OC] - const ggml_tensor * src, // [W, H, C, N] - ggml_tensor * dst, // [OW, OH, OC, N] - ggml_type kernel_type) { - GGML_ASSERT(ggml_is_contiguous(kernel)); - GGML_ASSERT(kernel_type == GGML_TYPE_F16 || kernel_type == GGML_TYPE_F32); - GGML_ASSERT(kernel->type == kernel_type); - - const ggml_type_traits * traits = ggml_get_type_traits(kernel_type); - - const int32_t stride_x = dst->op_params[0]; - const int32_t stride_y = dst->op_params[1]; - const int32_t pad_x = dst->op_params[2]; - const int32_t pad_y = dst->op_params[3]; - const int32_t dilation_x = dst->op_params[4]; - const int32_t dilation_y = dst->op_params[5]; - - const int64_t c_in = src->ne[2]; - const int64_t c_out = kernel->ne[3]; - GGML_ASSERT(c_in == kernel->ne[2]); - - const int64_t src_w = src->ne[0]; - const int64_t src_h = src->ne[1]; - const int64_t knl_w = kernel->ne[0]; - const int64_t knl_h = kernel->ne[1]; - const int64_t dst_w = dst->ne[0]; - const int64_t dst_h = dst->ne[1]; - - const float * src_data = (float *) src->data; - void * knl_data = kernel->data; - float * dst_data = (float *) dst->data; - - const int64_t knl_n = knl_w * knl_h * c_in; - const int64_t patch_total = dst->ne[3] * dst_w * dst_h; - - const int64_t space_per_patch = knl_n * traits->type_size + c_out * sizeof(float); - const int64_t batch_size = params->wsize / space_per_patch; - const int64_t patches_per_batch = batch_size > 8 ? 
(batch_size / 8) * 8 : batch_size; - const int64_t batch_n = (patch_total + patches_per_batch - 1) / patches_per_batch; - - GGML_ASSERT(patches_per_batch > 0 && batch_size >= 1); - - void * tmp = params->wdata; - - for (int64_t batch_i = 0; batch_i < batch_n; ++batch_i) { - const int64_t patch_start_batch = batch_i * patches_per_batch; - const int64_t patch_end_batch = std::min(patch_start_batch + patches_per_batch, patch_total); - const int64_t patch_n = patch_end_batch - patch_start_batch; - - const int64_t patch_per_thread = (patch_n + params->nth - 1) / params->nth; - const int64_t patch_start = patch_start_batch + params->ith * patch_per_thread; - const int64_t patch_end = std::min(patch_start + patch_per_thread, patch_end_batch); - - //im2col for a patch - for (int64_t p = patch_start; p < patch_end; ++p) { - const int64_t batch_n = p / (dst_w * dst_h); - const int64_t src_x = (p / dst_w) % dst_h; - const int64_t src_y = p % dst_w; - - const float * src_base = (const float *) ((const char *) src_data + batch_n * src->nb[3]); - char * dst_row = (char *) tmp + (p % patches_per_batch) * knl_n * traits->type_size; - - for (int64_t ic = 0; ic < c_in; ++ic) { - for (int64_t ky = 0; ky < knl_h; ++ky) { - for (int64_t kx = 0; kx < knl_w; ++kx) { - const int64_t sy = src_x * stride_y + ky * dilation_y - pad_y; - const int64_t sx = src_y * stride_x + kx * dilation_x - pad_x; - - int64_t dst_idx = ic * (knl_h * knl_w) + ky * knl_w + kx; - - float src_val; - if (sy < 0 || sy >= src_h || sx < 0 || sx >= src_w) { - src_val = 0.0f; - } else { - const float * src_ptr = (const float *) ((const char *) src_base + sx * src->nb[0] + - sy * src->nb[1] + ic * src->nb[2]); - src_val = *src_ptr; - } - - char * element_ptr = dst_row + dst_idx * traits->type_size; - if (kernel_type == GGML_TYPE_F32) { - *(float *) element_ptr = src_val; - } else if (kernel_type == GGML_TYPE_F16) { - *(ggml_fp16_t *) element_ptr = GGML_CPU_FP32_TO_FP16(src_val); - } - } - } - } - } // patches handled by this thread - - ggml_barrier(params->threadpool); - - float * gemm_output = (float *) ((char *) tmp + patches_per_batch * knl_n * traits->type_size); - - GGML_ASSERT(gemm_output + patch_n * c_out <= (float *) tmp + params->wsize); - - // GEMM: patches[patch_n, knl_n] × kernel[knl_n, c_out] = output[patch_n, c_out] - ggml_call_mul_mat(kernel_type, params, patch_n, c_out, knl_n, tmp, knl_data, gemm_output); - - ggml_barrier(params->threadpool); - - //permute back [OC, N, OH, OW] to [N, OC, OH, OW] - const int64_t permute_per_thread = (patch_n + params->nth - 1) / params->nth; - const int64_t permute_start = params->ith * permute_per_thread; - const int64_t permute_end = std::min(permute_start + permute_per_thread, patch_n); - - for (int64_t i = permute_start; i < permute_end; ++i) { - const int64_t p = patch_start_batch + i; - const int64_t batch_n = p / (dst_w * dst_h); - const int64_t dst_y = (p / dst_w) % dst_h; - const int64_t dst_x = p % dst_w; - - for (int64_t oc = 0; oc < c_out; ++oc) { - const float value = gemm_output[i * c_out + oc]; - float * dst_ptr = (float *) ((char *) dst_data + dst_x * dst->nb[0] + dst_y * dst->nb[1] + - oc * dst->nb[2] + batch_n * dst->nb[3]); - *dst_ptr = value; - } - } - } -} - -void ggml_compute_forward_conv_2d(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - ggml_compute_forward_conv_2d_impl(params, src0, src1, dst, src0->type); -} - -// ggml_compute_forward_conv_3d - -static void 
ggml_compute_forward_conv_3d_impl(const ggml_compute_params * params, - const ggml_tensor * kernel, - const ggml_tensor * src, - ggml_tensor * dst, - ggml_type kernel_type) { - GGML_ASSERT(ggml_is_contiguous(kernel)); - GGML_ASSERT(kernel_type == GGML_TYPE_F16 || kernel_type == GGML_TYPE_F32); - GGML_ASSERT(kernel->type == kernel_type); - - const ggml_type_traits * traits = ggml_get_type_traits(kernel_type); - - const int32_t s0 = dst->op_params[0]; - const int32_t s1 = dst->op_params[1]; - const int32_t s2 = dst->op_params[2]; - const int32_t p0 = dst->op_params[3]; - const int32_t p1 = dst->op_params[4]; - const int32_t p2 = dst->op_params[5]; - const int32_t d0 = dst->op_params[6]; - const int32_t d1 = dst->op_params[7]; - const int32_t d2 = dst->op_params[8]; - const int32_t c = dst->op_params[9]; - const int32_t n = dst->op_params[10]; - const int32_t oc = dst->op_params[11]; - - const int64_t src_w = src->ne[0]; - const int64_t src_h = src->ne[1]; - const int64_t src_d = src->ne[2]; - const int64_t knl_w = kernel->ne[0]; - const int64_t knl_h = kernel->ne[1]; - const int64_t knl_d = kernel->ne[2]; - const int64_t dst_w = dst->ne[0]; - const int64_t dst_h = dst->ne[1]; - const int64_t dst_d = dst->ne[2]; - - const float * src_data = (float *) src->data; - void * knl_data = kernel->data; - float * dst_data = (float *) dst->data; - - const int64_t knl_n_per_channel = knl_w * knl_h * knl_d; - const int64_t knl_n_total = knl_n_per_channel * c; - const int64_t patch_total = n * dst_w * dst_h * dst_d; - - const int64_t space_per_patch = knl_n_total * traits->type_size + oc * sizeof(float); - const int64_t batch_size = params->wsize / space_per_patch; - const int64_t patches_per_batch = batch_size > 8 ? (batch_size / 8) * 8 : batch_size; - const int64_t batch_n = (patch_total + patches_per_batch - 1) / patches_per_batch; - - GGML_ASSERT(patches_per_batch > 0 && batch_size >= 1); - - void * tmp = params->wdata; - - for (int64_t batch_i = 0; batch_i < batch_n; ++batch_i) { - const int64_t patch_start_batch = batch_i * patches_per_batch; - const int64_t patch_end_batch = std::min(patch_start_batch + patches_per_batch, patch_total); - const int64_t patch_n_in_batch = patch_end_batch - patch_start_batch; - - const int64_t patch_per_thread = (patch_n_in_batch + params->nth - 1) / params->nth; - const int64_t patch_start = patch_start_batch + params->ith * patch_per_thread; - const int64_t patch_end = std::min(patch_start + patch_per_thread, patch_end_batch); - - for (int64_t p = patch_start; p < patch_end; ++p) { - const int64_t p_in_batch = p % (dst_w * dst_h * dst_d); - const int64_t p_in_depth = p_in_batch % (dst_w * dst_h); - const int64_t batch_idx = p / (dst_w * dst_h * dst_d); - const int64_t dst_z = p_in_batch / (dst_w * dst_h); - const int64_t dst_y = p_in_depth / dst_w; - const int64_t dst_x = p_in_depth % dst_w; - - char * dst_row = (char *) tmp + (p % patches_per_batch) * knl_n_total * traits->type_size; - - for (int64_t ic = 0; ic < c; ++ic) { - for (int64_t kz = 0; kz < knl_d; ++kz) { - for (int64_t ky = 0; ky < knl_h; ++ky) { - for (int64_t kx = 0; kx < knl_w; ++kx) { - const int64_t sz = dst_z * s2 + kz * d2 - p2; - const int64_t sy = dst_y * s1 + ky * d1 - p1; - const int64_t sx = dst_x * s0 + kx * d0 - p0; - - int64_t dst_idx = ic * knl_n_per_channel + kz * (knl_h * knl_w) + ky * knl_w + kx; - - float src_val; - if (sz < 0 || sz >= src_d || sy < 0 || sy >= src_h || sx < 0 || sx >= src_w) { - src_val = 0.0f; - } else { - const int64_t cn_idx = batch_idx * c + ic; - const float * 
src_ptr = - (const float *) ((const char *) src_data + sx * src->nb[0] + sy * src->nb[1] + - sz * src->nb[2] + cn_idx * src->nb[3]); - src_val = *src_ptr; - } - - char * element_ptr = dst_row + dst_idx * traits->type_size; - if (kernel_type == GGML_TYPE_F32) { - *(float *) element_ptr = src_val; - } else if (kernel_type == GGML_TYPE_F16) { - *(ggml_fp16_t *) element_ptr = GGML_CPU_FP32_TO_FP16(src_val); - } - } - } - } - } - } - - ggml_barrier(params->threadpool); - - float * gemm_output = (float *) ((char *) tmp + patches_per_batch * knl_n_total * traits->type_size); - ggml_call_mul_mat(kernel_type, params, patch_n_in_batch, oc, knl_n_total, tmp, knl_data, gemm_output); - - ggml_barrier(params->threadpool); - - const int64_t permute_per_thread = (patch_n_in_batch + params->nth - 1) / params->nth; - const int64_t permute_start = params->ith * permute_per_thread; - const int64_t permute_end = std::min(permute_start + permute_per_thread, patch_n_in_batch); - - for (int64_t i = permute_start; i < permute_end; ++i) { - const int64_t p = patch_start_batch + i; - const int64_t p_in_batch = p % (dst_w * dst_h * dst_d); - const int64_t p_in_depth = p_in_batch % (dst_w * dst_h); - const int64_t batch_idx = p / (dst_w * dst_h * dst_d); - const int64_t dst_z = p_in_batch / (dst_w * dst_h); - const int64_t dst_y = p_in_depth / dst_w; - const int64_t dst_x = p_in_depth % dst_w; - - for (int64_t ioc = 0; ioc < oc; ++ioc) { - const float value = gemm_output[i * oc + ioc]; - const int64_t ocn_idx = batch_idx * oc + ioc; - float * dst_ptr = (float *) ((char *) dst_data + dst_x * dst->nb[0] + dst_y * dst->nb[1] + - dst_z * dst->nb[2] + ocn_idx * dst->nb[3]); - *dst_ptr = value; - } - } - } -} - -void ggml_compute_forward_conv_3d(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - ggml_compute_forward_conv_3d_impl(params, src0, src1, dst, src0->type); -} - -// ggml_compute_forward_conv_transpose_2d - -void ggml_compute_forward_conv_transpose_2d(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); - - GGML_TENSOR_BINARY_OP_LOCALS - - const int ith = params->ith; - const int nth = params->nth; - - const int nk = ne00 * ne01 * ne02 * ne03; - - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb10 == sizeof(float)); - - if (ith == 0) { - memset(params->wdata, 0, params->wsize); - - // permute kernel data (src0) from (Kw x Kh x Cout x Cin) to (Cin x Kw x Kh x Cout) - { - ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; - - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - const ggml_fp16_t * const src = (ggml_fp16_t *) ((char *) src0->data + i03 * nb03 + i02 * nb02); - ggml_fp16_t * dst_data = wdata + i02 * ne01 * ne00 * ne03; - for (int64_t i01 = 0; i01 < ne01; i01++) { - for (int64_t i00 = 0; i00 < ne00; i00++) { - dst_data[i01 * ne00 * ne03 + i00 * ne03 + i03] = src[i01 * ne00 + i00]; - } - } - } - } - } - - // permute source data (src1) from (Sw x Sh x Cin) to (Cin x Sw x Sh) - { - ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + nk; - for (int i12 = 0; i12 < ne12; i12++) { - for (int i11 = 0; i11 < ne11; i11++) { - const float * const src = (float *) ((char *) src1->data + i12 * nb12 + i11 * nb11); - ggml_fp16_t * dst_data = wdata 
+ i11 * ne10 * ne12; - for (int i10 = 0; i10 < ne10; i10++) { - dst_data[i10 * ne12 + i12] = GGML_CPU_FP32_TO_FP16(src[i10]); - } - } - } - } - - memset(dst->data, 0, ggml_nbytes(dst)); - } - ggml_barrier(params->threadpool); - - const int32_t stride = ggml_get_op_params_i32(dst, 0); - - // total patches in dst - const int np = ne2; - - // patches per thread - const int dp = (np + nth - 1) / nth; - - // patch range for this thread - const int ip0 = dp * ith; - const int ip1 = MIN(ip0 + dp, np); - - ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; - ggml_fp16_t * const wdata_src = wdata + nk; - - for (int i2 = ip0; i2 < ip1; i2++) { // Cout - float * dst_data = (float *) ((char *) dst->data + i2 * nb2); - ggml_fp16_t * wdata_kernel = wdata + i2 * ne01 * ne00 * ne03; - for (int i11 = 0; i11 < ne11; i11++) { - for (int i10 = 0; i10 < ne10; i10++) { - const int i1n = i11 * ne10 * ne12 + i10 * ne12; - for (int i01 = 0; i01 < ne01; i01++) { - for (int i00 = 0; i00 < ne00; i00++) { - float v = 0; - ggml_vec_dot_f16(ne03, &v, 0, wdata_src + i1n, 0, wdata_kernel + i01 * ne00 * ne03 + i00 * ne03, - 0, 1); - dst_data[(i11 * stride + i01) * ne0 + i10 * stride + i00] += v; - } - } - } - } - } -} - -// ggml_compute_forward_conv_2d_dw - -struct ggml_conv_2d_dw_params { - int64_t channels; - int64_t batch; - int64_t src_w; - int64_t src_h; - int64_t dst_w; - int64_t dst_h; - int64_t knl_w; - int64_t knl_h; - int stride_x; - int stride_y; - int pad_x; - int pad_y; - int dilation_x; - int dilation_y; -}; - -static void ggml_compute_forward_conv_2d_dw_cwhn(const ggml_compute_params * params, - const ggml_tensor * src, - const ggml_tensor * kernel, - ggml_tensor * dst, - const ggml_conv_2d_dw_params & p) { - const int64_t c = p.channels; - const float * knl_data = (const float *) kernel->data; - - const int64_t rows_total = p.dst_h * p.batch; - const int64_t rows_per_thread = (rows_total + params->nth - 1) / params->nth; - const int64_t row_start = params->ith * rows_per_thread; - const int64_t row_end = MIN(row_start + rows_per_thread, rows_total); - -#ifdef GGML_SIMD -# if defined(__ARM_FEATURE_SVE) - const int64_t pkg_size = svcntw(); -# else - const int64_t pkg_size = GGML_F32_EPR; -# endif - const int64_t pkg_count = c / pkg_size; - const int64_t c_pkg_end = pkg_count * pkg_size; -#else - const int64_t c_pkg_end = 0; -#endif - - for (int64_t row = row_start; row < row_end; ++row) { - const int64_t dst_y = row % p.dst_h; - const float * src_data = (const float *) src->data + (row / p.dst_h) * p.src_w * p.src_h * c; - for (int64_t dst_x = 0; dst_x < p.dst_w; ++dst_x) { - float * dst_data = (float *) dst->data + (row * p.dst_w + dst_x) * c; - const int64_t src_y_base = dst_y * p.stride_y - p.pad_y; - const int64_t src_x_base = dst_x * p.stride_x - p.pad_x; - -#ifdef GGML_SIMD - // Vectorized loop - for (int64_t c_i = 0; c_i < c_pkg_end; c_i += pkg_size) { - GGML_F32_VEC sum = GGML_F32_VEC_ZERO; - for (int64_t knl_y = 0; knl_y < p.knl_h; ++knl_y) { - const int64_t src_y = src_y_base + knl_y * p.dilation_y; - if (src_y < 0 || src_y >= p.src_h) { - continue; - } - for (int64_t knl_x = 0; knl_x < p.knl_w; ++knl_x) { - const int64_t src_x = src_x_base + knl_x * p.dilation_x; - if (src_x < 0 || src_x >= p.src_w) { - continue; - } - GGML_F32_VEC k = GGML_F32_VEC_LOAD(knl_data + (knl_y * p.knl_w + knl_x) * c + c_i); - GGML_F32_VEC s = GGML_F32_VEC_LOAD(src_data + (src_y * p.src_w + src_x) * c + c_i); - sum = GGML_F32_VEC_FMA(sum, k, s); - } - } - GGML_F32_VEC_STORE(dst_data + c_i, sum); - } -#endif - 
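The bounds checks in the loops above implement zero padding: out-of-range taps are simply skipped. A circular (wrap-around on a torus) variant keeps the same loop structure but folds the coordinate back into range, which is what the circular pad path and the roll op further down do with ggml_wrap_around / ggml_wrap_index. A minimal sketch of that mapping, using an illustrative helper name rather than the one in ggml:

    #include <cstdint>

    // Wrap a coordinate into [0, n). Assumes -n <= i < 2*n, which holds as long
    // as the padding is smaller than the corresponding input extent.
    static inline int64_t wrap_coord(int64_t i, int64_t n) {
        if (i <  0) { return i + n; }
        if (i >= n) { return i - n; }
        return i;
    }

    // Example: with src_h == 8, a tap at src_y == -2 reads row 6 and a tap at
    // src_y == 9 reads row 1, instead of contributing zero.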
// Scalar loop - for (int64_t c_i = c_pkg_end; c_i < c; ++c_i) { - float sum = 0.0f; - for (int64_t knl_y = 0; knl_y < p.knl_h; ++knl_y) { - const int64_t src_y = src_y_base + knl_y * p.dilation_y; - if (src_y < 0 || src_y >= p.src_h) { - continue; - } - for (int64_t knl_x = 0; knl_x < p.knl_w; ++knl_x) { - const int64_t src_x = src_x_base + knl_x * p.dilation_x; - if (src_x < 0 || src_x >= p.src_w) { - continue; - } - sum += knl_data[(knl_y * p.knl_w + knl_x) * c + c_i] * - src_data[(src_y * p.src_w + src_x) * c + c_i]; - } - } - dst_data[c_i] = sum; - } - } - } -} - -static void ggml_compute_forward_conv_2d_dw_whcn(const ggml_compute_params * params, - const ggml_tensor * src, - const ggml_tensor * kernel, - ggml_tensor * dst, - const ggml_conv_2d_dw_params & p) { - const int64_t n = p.channels * p.batch; - const int64_t per_thread = (n + params->nth - 1) / params->nth; - const int64_t start = params->ith * per_thread; - const int64_t end = MIN(start + per_thread, n); - - for (int64_t i = start; i < end; ++i) { - const float * knl_data = (const float *) kernel->data + (i % p.channels) * p.knl_w * p.knl_h; - const float * src_data = (const float *) src->data + i * p.src_w * p.src_h; - float * dst_data = (float *) dst->data + i * p.dst_w * p.dst_h; - - for (int64_t dst_y = 0; dst_y < p.dst_h; ++dst_y) { - for (int64_t dst_x = 0; dst_x < p.dst_w; ++dst_x) { - float sum = 0.0f; - for (int64_t knl_y = 0; knl_y < p.knl_h; ++knl_y) { - const int64_t src_y = dst_y * p.stride_y + knl_y * p.dilation_y - p.pad_y; - if (src_y < 0 || src_y >= p.src_h) { - continue; - } - for (int64_t knl_x = 0; knl_x < p.knl_w; ++knl_x) { - const int64_t src_x = dst_x * p.stride_x + knl_x * p.dilation_x - p.pad_x; - if (src_x < 0 || src_x >= p.src_w) { - continue; - } - sum += knl_data[knl_y * p.knl_w + knl_x] * src_data[src_y * p.src_w + src_x]; - } - } - dst_data[dst_y * p.dst_w + dst_x] = sum; - } - } - } -} - -void ggml_compute_forward_conv_2d_dw(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * kernel = dst->src[0]; - const ggml_tensor * src = dst->src[1]; - ggml_conv_2d_dw_params p; - p.channels = src->ne[2]; - p.batch = src->ne[3]; - p.src_w = src->ne[0]; - p.src_h = src->ne[1]; - p.dst_w = dst->ne[0]; - p.dst_h = dst->ne[1]; - p.knl_w = kernel->ne[0]; - p.knl_h = kernel->ne[1]; - p.stride_x = dst->op_params[0]; - p.stride_y = dst->op_params[1]; - p.pad_x = dst->op_params[2]; - p.pad_y = dst->op_params[3]; - p.dilation_x = dst->op_params[4]; - p.dilation_y = dst->op_params[5]; - - GGML_ASSERT(kernel->ne[3] == p.channels); - GGML_ASSERT(dst->ne[3] == p.batch); - - if (ggml_is_contiguous(src)) { - ggml_compute_forward_conv_2d_dw_whcn(params, src, kernel, dst, p); - } else if (ggml_is_contiguous_channels(src)) { - // kernel should also have channels most contiguous in memory - GGML_ASSERT(kernel->nb[0] >= kernel->nb[2] && kernel->nb[1] >= kernel->nb[0]); - ggml_compute_forward_conv_2d_dw_cwhn(params, src, kernel, dst, p); - } else { - GGML_ABORT("non-contiguous memory layout not supported"); - } -} - -// ggml_compute_forward_pool_1d_sk_p0 - -static void ggml_compute_forward_pool_1d_sk_p0(const ggml_compute_params * params, - const ggml_op_pool op, - const int k, - ggml_tensor * dst) { - const ggml_tensor * src = dst->src[0]; - - assert(src->type == GGML_TYPE_F32 || src->type == GGML_TYPE_F16); - - if (params->ith != 0) { - return; - } - - const char * cdata = (const char *) src->data; - const char * const data_end = cdata + ggml_nbytes(src); - float * drow = (float *) dst->data; - - 
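The _sk_p0 specialization below only has to cover the case the public op asserts: kernel == stride and no padding, so every input element lands in exactly one window. A self-contained scalar sketch of that case for F32 average pooling (illustrative names, not the ggml code path):

    #include <cstdint>
    #include <vector>

    // out[i] = mean(in[i*k .. i*k + k - 1]); a trailing partial window is dropped.
    static std::vector<float> avg_pool_1d_sk_p0(const std::vector<float> & in, int64_t k) {
        const int64_t n_out = (int64_t) in.size() / k;
        std::vector<float> out(n_out, 0.0f);
        for (int64_t i = 0; i < n_out; ++i) {
            for (int64_t j = 0; j < k; ++j) {
                out[i] += in[i * k + j];
            }
            out[i] /= (float) k;
        }
        return out;
    }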
const int64_t rs = dst->ne[0]; - - while (cdata < data_end) { - const void * srow = (const void *) cdata; - int j = 0; - for (int64_t i = 0; i < rs; ++i) { - switch (op) { - case GGML_OP_POOL_AVG: - drow[i] = 0; - break; - case GGML_OP_POOL_MAX: - drow[i] = -FLT_MAX; - break; - case GGML_OP_POOL_COUNT: - GGML_ABORT("fatal error"); - } - for (int ki = 0; ki < k; ++ki) { - const float srow_j = (src->type == GGML_TYPE_F32) ? - ((const float *) srow)[j] : - GGML_CPU_FP16_TO_FP32(((const ggml_fp16_t *) srow)[j]); - switch (op) { - case GGML_OP_POOL_AVG: - drow[i] += srow_j; - break; - case GGML_OP_POOL_MAX: - if (srow_j > drow[i]) { - drow[i] = srow_j; - } - break; - case GGML_OP_POOL_COUNT: - GGML_ABORT("fatal error"); - } - ++j; - } - switch (op) { - case GGML_OP_POOL_AVG: - drow[i] /= k; - break; - case GGML_OP_POOL_MAX: - break; - case GGML_OP_POOL_COUNT: - GGML_ABORT("fatal error"); - } - } - - cdata += src->nb[1]; - drow += rs; - } -} - -// ggml_compute_forward_pool_1d - -void ggml_compute_forward_pool_1d(const ggml_compute_params * params, ggml_tensor * dst) { - const int32_t * opts = (const int32_t *) dst->op_params; - ggml_op_pool op = static_cast(opts[0]); - const int k0 = opts[1]; - const int s0 = opts[2]; - const int p0 = opts[3]; - GGML_ASSERT(p0 == 0); // padding not supported - GGML_ASSERT(k0 == s0); // only s = k supported - - ggml_compute_forward_pool_1d_sk_p0(params, op, k0, dst); -} - -// ggml_compute_forward_pool_2d - -void ggml_compute_forward_pool_2d(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src = dst->src[0]; - - assert(src->type == GGML_TYPE_F32 || src->type == GGML_TYPE_F16); - - if (params->ith != 0) { - return; - } - - const int32_t * opts = (const int32_t *) dst->op_params; - ggml_op_pool op = static_cast(opts[0]); - const int k0 = opts[1]; - const int k1 = opts[2]; - const int s0 = opts[3]; - const int s1 = opts[4]; - const int p0 = opts[5]; - const int p1 = opts[6]; - const char * cdata = (const char *) src->data; - const char * const data_end = cdata + ggml_nbytes(src); - - const int64_t px = dst->ne[0]; - const int64_t py = dst->ne[1]; - const int64_t pa = px * py; - - float * dplane = (float *) dst->data; - - const int ka = k0 * k1; - const int offset0 = -p0; - const int offset1 = -p1; - - while (cdata < data_end) { - for (int oy = 0; oy < py; ++oy) { - float * const drow = dplane + oy * px; - for (int ox = 0; ox < px; ++ox) { - float * const out = drow + ox; - switch (op) { - case GGML_OP_POOL_AVG: - *out = 0; - break; - case GGML_OP_POOL_MAX: - *out = -FLT_MAX; - break; - case GGML_OP_POOL_COUNT: - GGML_ABORT("fatal error"); - } - - const int ix = offset0 + ox * s0; - const int iy = offset1 + oy * s1; - - for (int ky = 0; ky < k1; ++ky) { - if (iy + ky < 0 || iy + ky >= src->ne[1]) { - continue; - } - const void * srow = (const void *) (cdata + src->nb[1] * (iy + ky)); - for (int kx = 0; kx < k0; ++kx) { - int j = ix + kx; - if (j < 0 || j >= src->ne[0]) { - continue; - } - const float srow_j = (src->type == GGML_TYPE_F32) ? 
- ((const float *) srow)[j] : - GGML_CPU_FP16_TO_FP32(((const ggml_fp16_t *) srow)[j]); - switch (op) { - case GGML_OP_POOL_AVG: - *out += srow_j; - break; - case GGML_OP_POOL_MAX: - if (srow_j > *out) { - *out = srow_j; - } - break; - case GGML_OP_POOL_COUNT: - GGML_ABORT("fatal error"); - } - } - } - switch (op) { - case GGML_OP_POOL_AVG: - *out /= ka; - break; - case GGML_OP_POOL_MAX: - break; - case GGML_OP_POOL_COUNT: - GGML_ABORT("fatal error"); - } - } - } - - cdata += src->nb[2]; - dplane += pa; - } -} - -// ggml_compute_forward_pool_2d_back - -void ggml_compute_forward_pool_2d_back(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src = dst->src[0]; - const ggml_tensor * dstf = dst->src[1]; // forward tensor of dst - - assert(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); - - if (params->ith != 0) { - return; - } - - const int32_t * opts = (const int32_t *) dst->op_params; - ggml_op_pool op = static_cast(opts[0]); - const int k0 = opts[1]; - const int k1 = opts[2]; - const int s0 = opts[3]; - const int s1 = opts[4]; - const int p0 = opts[5]; - const int p1 = opts[6]; - - char * cdata = (char *) dst->data; - const char * cdataf = (const char *) dstf->data; - const char * const data_end = cdata + ggml_nbytes(dst); - - GGML_ASSERT(params->ith == 0); - memset(cdata, 0, ggml_nbytes(dst)); - - const int64_t px = src->ne[0]; - const int64_t py = src->ne[1]; - const int64_t pa = px * py; - - const float * splane = (const float *) src->data; - - const int ka = k0 * k1; - const int offset0 = -p0; - const int offset1 = -p1; - - while (cdata < data_end) { - for (int oy = 0; oy < py; ++oy) { - const float * const srow = splane + oy * px; - for (int ox = 0; ox < px; ++ox) { - const float grad0 = srow[ox]; - - const int ix = offset0 + ox * s0; - const int iy = offset1 + oy * s1; - - if (op == GGML_OP_POOL_MAX) { - float maxval = -FLT_MAX; - int kxmax = -1; - int kymax = -1; - - for (int ky = 0; ky < k1; ++ky) { - if (iy + ky < 0 || iy + ky >= dst->ne[1]) { - continue; - } - const void * drowf = (const void *) (cdataf + dst->nb[1] * (iy + ky)); - for (int kx = 0; kx < k0; ++kx) { - int j = ix + kx; - if (j < 0 || j >= dst->ne[0]) { - continue; - } - - const float val = dst->type == GGML_TYPE_F32 ? 
- ((const float *) drowf)[j] : - GGML_CPU_FP16_TO_FP32(((const ggml_fp16_t *) drowf)[j]); - if (val <= maxval) { - continue; - } - - maxval = val; - kxmax = kx; - kymax = ky; - } - } - - if (kxmax == -1 || kymax == -1) { - continue; - } - - void * drow = (void *) (cdata + dst->nb[1] * (iy + kymax)); - const int j = ix + kxmax; - if (dst->type == GGML_TYPE_F32) { - ((float *) drow)[j] += grad0; - } else { - ((ggml_fp16_t *) drow)[j] = - GGML_CPU_FP32_TO_FP16(grad0 + GGML_CPU_FP16_TO_FP32(((const ggml_fp16_t *) drow)[j])); - } - } else if (op == GGML_OP_POOL_AVG) { - const float grad = grad0 / ka; - - for (int ky = 0; ky < k1; ++ky) { - if (iy + ky < 0 || iy + ky >= dst->ne[1]) { - continue; - } - void * drow = (void *) (cdata + dst->nb[1] * (iy + ky)); - for (int kx = 0; kx < k0; ++kx) { - int j = ix + kx; - if (j < 0 || j >= dst->ne[0]) { - continue; - } - - if (dst->type == GGML_TYPE_F32) { - ((float *) drow)[j] += grad; - } else { - ((ggml_fp16_t *) drow)[j] += GGML_CPU_FP32_TO_FP16(grad); - } - } - } - } else { - GGML_ASSERT(false); - } - } - } - - cdata += dst->nb[2]; - cdataf += dst->nb[2]; - splane += pa; - } -} - -// ggml_compute_forward_upscale - -static void ggml_compute_forward_upscale_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_ASSERT(src0->type == GGML_TYPE_F32); - - const int ith = params->ith; - const int nth = params->nth; - - GGML_TENSOR_UNARY_OP_LOCALS - - float sf0 = (float) ne0 / src0->ne[0]; - float sf1 = (float) ne1 / src0->ne[1]; - float sf2 = (float) ne2 / src0->ne[2]; - float sf3 = (float) ne3 / src0->ne[3]; - float pixel_offset = 0.5f; - - const int32_t mode_flags = ggml_get_op_params_i32(dst, 0); - const ggml_scale_mode mode = (ggml_scale_mode) (mode_flags & 0xFF); - - if (mode_flags & GGML_SCALE_FLAG_ALIGN_CORNERS) { - pixel_offset = 0.0f; - sf0 = ne0 > 1 && ne00 > 1 ? (float) (ne0 - 1) / (ne00 - 1) : sf0; - sf1 = ne1 > 1 && ne01 > 1 ? 
(float) (ne1 - 1) / (ne01 - 1) : sf1; - } - - if (mode == GGML_SCALE_MODE_NEAREST) { - for (int64_t i3 = 0; i3 < ne3; i3++) { - const int64_t i03 = i3 / sf3; - for (int64_t i2 = ith; i2 < ne2; i2 += nth) { - const int64_t i02 = i2 / sf2; - for (int64_t i1 = 0; i1 < ne1; i1++) { - const int64_t i01 = i1 / sf1; - for (int64_t i0 = 0; i0 < ne0; i0++) { - const int64_t i00 = i0 / sf0; - - const float * x = - (float *) ((char *) src0->data + i00 * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03); - float * y = (float *) ((char *) dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3); - - *y = *x; - } - } - } - } - } else if (mode == GGML_SCALE_MODE_BILINEAR) { - for (int64_t i3 = 0; i3 < ne3; i3++) { - const int64_t i03 = i3 / sf3; - for (int64_t i2 = ith; i2 < ne2; i2 += nth) { - const int64_t i02 = i2 / sf2; - for (int64_t i1 = 0; i1 < ne1; i1++) { - const float y = ((float) i1 + pixel_offset) / sf1 - pixel_offset; - int64_t y0 = (int64_t) floorf(y); - int64_t y1 = y0 + 1; - - y0 = std::max(int64_t(0), std::min(y0, ne01 - 1)); - y1 = std::max(int64_t(0), std::min(y1, ne01 - 1)); - - float dy = y - (float) y0; - dy = std::max(0.0f, std::min(dy, 1.0f)); - - for (int64_t i0 = 0; i0 < ne0; i0++) { - const float x = ((float) i0 + pixel_offset) / sf0 - pixel_offset; - int64_t x0 = (int64_t) floorf(x); - int64_t x1 = x0 + 1; - - x0 = std::max(int64_t(0), std::min(x0, ne00 - 1)); - x1 = std::max(int64_t(0), std::min(x1, ne00 - 1)); - - float dx = x - (float) x0; - dx = std::max(0.0f, std::min(dx, 1.0f)); - - // fetch the four surrounding pixel values and interpolate - const float a = *(const float *) ((const char *) src0->data + x0 * nb00 + y0 * nb01 + - i02 * nb02 + i03 * nb03); - const float b = *(const float *) ((const char *) src0->data + x1 * nb00 + y0 * nb01 + - i02 * nb02 + i03 * nb03); - const float c = *(const float *) ((const char *) src0->data + x0 * nb00 + y1 * nb01 + - i02 * nb02 + i03 * nb03); - const float d = *(const float *) ((const char *) src0->data + x1 * nb00 + y1 * nb01 + - i02 * nb02 + i03 * nb03); - - const float val = a * (1 - dx) * (1 - dy) + b * dx * (1 - dy) + c * (1 - dx) * dy + d * dx * dy; - - float * y_dst = (float *) ((char *) dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3); - *y_dst = val; - } - } - } - } - } else if (mode == GGML_SCALE_MODE_BICUBIC) { - // https://en.wikipedia.org/wiki/Bicubic_interpolation#Bicubic_convolution_algorithm - const float a = -0.75f; // use alpha = -0.75 (same as PyTorch) - auto weight1 = [a](float x) { - return ((a + 2) * x - (a + 3)) * x * x + 1; - }; - auto weight2 = [a](float x) { - return ((a * x - 5 * a) * x + 8 * a) * x - 4 * a; - }; - auto bicubic = [=](float p0, float p1, float p2, float p3, float x) { - const float w0 = weight2(x + 1); - const float w1 = weight1(x + 0); - const float w2 = weight1(1 - x); - const float w3 = weight2(2 - x); - return p0 * w0 + p1 * w1 + p2 * w2 + p3 * w3; - }; - - for (int64_t i3 = 0; i3 < ne3; i3++) { - const int64_t i03 = i3 / sf3; - for (int64_t i2 = ith; i2 < ne2; i2 += nth) { - const int64_t i02 = i2 / sf2; - for (int64_t i1 = 0; i1 < ne1; i1++) { - const float y = ((float) i1 + pixel_offset) / sf1 - pixel_offset; - const int64_t y0 = (int64_t) floorf(y); - const float dy = y - (float) y0; - - for (int64_t i0 = 0; i0 < ne0; i0++) { - const float x = ((float) i0 + pixel_offset) / sf0 - pixel_offset; - const int64_t x0 = (int64_t) floorf(x); - const float dx = x - (float) x0; - - auto p = [=](int64_t x_off, int64_t y_off) -> float { - int64_t i00 = std::max(int64_t(0), std::min(x0 + x_off, 
ne00 - 1)); - int64_t i01 = std::max(int64_t(0), std::min(y0 + y_off, ne01 - 1)); - return *(const float *) ((const char *) src0->data + i00 * nb00 + i01 * nb01 + i02 * nb02 + - i03 * nb03); - }; - - const float val = bicubic(bicubic(p(-1, -1), p(0, -1), p(1, -1), p(2, -1), dx), - bicubic(p(-1, 0), p(0, 0), p(1, 0), p(2, 0), dx), - bicubic(p(-1, 1), p(0, 1), p(1, 1), p(2, 1), dx), - bicubic(p(-1, 2), p(0, 2), p(1, 2), p(2, 2), dx), dy); - - float * y_dst = (float *) ((char *) dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3); - *y_dst = val; - } - } - } - } - } else { - GGML_ABORT("unsupported upscale mode"); - } -} - -void ggml_compute_forward_upscale(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_upscale_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_pad - -static void ggml_compute_forward_pad_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_ASSERT(src0->nb[0] == sizeof(float)); - GGML_ASSERT(dst->nb[0] == sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - GGML_TENSOR_UNARY_OP_LOCALS - - float * dst_ptr = (float *) dst->data; - const int32_t lp0 = ggml_get_op_params_i32(dst, 0); - const int32_t rp0 = ggml_get_op_params_i32(dst, 1); - const int32_t lp1 = ggml_get_op_params_i32(dst, 2); - const int32_t rp1 = ggml_get_op_params_i32(dst, 3); - const int32_t lp2 = ggml_get_op_params_i32(dst, 4); - const int32_t rp2 = ggml_get_op_params_i32(dst, 5); - const int32_t lp3 = ggml_get_op_params_i32(dst, 6); - const int32_t rp3 = ggml_get_op_params_i32(dst, 7); - const bool circular = (bool) ggml_get_op_params_i32(dst, 8); - - // TODO: optimize - - for (int64_t i2 = 0; i2 < ne2; ++i2) { - for (int64_t i1 = ith; i1 < ne1; i1 += nth) { - for (int64_t i0 = 0; i0 < ne0; ++i0) { - for (int64_t i3 = 0; i3 < ne3; ++i3) { - // circular means wrap around on a torus, so x and y loop around - if (circular) { - const int64_t dst_idx = i3 * (ne0 * ne1 * ne2) + i2 * (ne0 * ne1) + i1 * ne0 + i0; - const int64_t src_i0 = ggml_wrap_around(i0 - lp0, ne00); - const int64_t src_i1 = ggml_wrap_around(i1 - lp1, ne01); - const int64_t src_i2 = ggml_wrap_around(i2 - lp2, ne02); - const int64_t src_i3 = ggml_wrap_around(i3 - lp3, ne03); - - const int64_t src_idx = src_i3 * nb03 + src_i2 * nb02 + src_i1 * nb01 + src_i0 * nb00; - - const float * src_ptr = (const float *) ((char *) src0->data + src_idx); - dst_ptr[dst_idx] = *src_ptr; - } else { - const int64_t dst_idx = i3 * (ne0 * ne1 * ne2) + i2 * (ne0 * ne1) + i1 * ne0 + i0; - if ((i0 >= lp0 && i0 < ne0 - rp0) && (i1 >= lp1 && i1 < ne1 - rp1) && - (i2 >= lp2 && i2 < ne2 - rp2) && (i3 >= lp3 && i3 < ne3 - rp3)) { - const int64_t src_idx = - (i3 - lp3) * nb03 + (i2 - lp2) * nb02 + (i1 - lp1) * nb01 + (i0 - lp0) * nb00; - const float * src_ptr = (const float *) ((char *) src0->data + src_idx); - dst_ptr[dst_idx] = *src_ptr; - } else { - dst_ptr[dst_idx] = 0; - } - } - } - } - } - } -} - -void ggml_compute_forward_pad(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_pad_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_pad_reflect_1d - -void ggml_compute_forward_pad_reflect_1d(const ggml_compute_params * 
params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_ASSERT(src0->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); - - const int ith = params->ith; - const int nth = params->nth; - - const int32_t * opts = (const int32_t *) dst->op_params; - const int p0 = opts[0]; - const int p1 = opts[1]; - - GGML_TENSOR_UNARY_OP_LOCALS - - for (int64_t i3 = 0; i3 < ne3; i3++) { - for (int64_t i2 = 0; i2 < ne2; i2++) { - for (int64_t i1 = ith; i1 < ne1; i1 += nth) { - float * left = (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1 + p0 * nb0); - float * right = (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1 + (ne0 - p1 - 1) * nb0); - - ggml_vec_cpy_f32(ne00, left, (float *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01)); - - for (int i0 = 1; i0 <= p0; i0++) { - left[-i0] = left[i0]; - } - for (int i0 = 1; i0 <= p1; i0++) { - right[i0] = right[-i0]; - } - } - } - } -} - -// ggml_compute_forward_roll - -static int64_t ggml_wrap_index(int64_t i, int64_t ne) { - if (i < 0) { - return i + ne; - } else if (i >= ne) { - return i - ne; - } - return i; -} - -static void ggml_compute_forward_roll_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const float * src_data = (const float *) src0->data; - float * dst_data = (float *) dst->data; - - GGML_TENSOR_UNARY_OP_LOCALS - - const int s0 = ggml_get_op_params_i32(dst, 0); - const int s1 = ggml_get_op_params_i32(dst, 1); - const int s2 = ggml_get_op_params_i32(dst, 2); - const int s3 = ggml_get_op_params_i32(dst, 3); - - const int64_t total = ne1 * ne2 * ne3; - const int64_t per_thread = (total + params->nth) / params->nth; - const int64_t start = params->ith * per_thread; - const int64_t end = std::min(start + per_thread, total); - - for (int64_t i = start; i < end; ++i) { - const int64_t i1 = i % ne1; - const int64_t i2 = (i / ne1) % ne2; - const int64_t i3 = i / (ne2 * ne1); - float * dst_row = dst_data + (i3 * nb3 + i2 * nb2 + i1 * nb1) / sizeof(float); - - const int64_t i01 = ggml_wrap_index(i1 - s1, ne01); - const int64_t i02 = ggml_wrap_index(i2 - s2, ne02); - const int64_t i03 = ggml_wrap_index(i3 - s3, ne03); - const float * src_row = src_data + (i03 * nb03 + i02 * nb02 + i01 * nb01) / sizeof(float); - - const int64_t s = ggml_wrap_index(-s0, ne00); - const int64_t n = ne00 - s; - ggml_vec_cpy_f32(n, dst_row, src_row + s); - ggml_vec_cpy_f32(s, dst_row + n, src_row); - } -} - -void ggml_compute_forward_roll(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_roll_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_arange - -static void ggml_compute_forward_arange_f32(const ggml_compute_params * params, ggml_tensor * dst) { - GGML_ASSERT(dst->nb[0] == sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - const float start = ggml_get_op_params_f32(dst, 0); - const float stop = ggml_get_op_params_f32(dst, 1); - const float step = ggml_get_op_params_f32(dst, 2); - - const int64_t steps = (int64_t) ceilf((stop - start) / step); - - GGML_ASSERT(ggml_nelements(dst) == steps); - - for (int64_t i = ith; i < steps; i += nth) { - float value = start + step * i; - ((float *) dst->data)[i] = value; - } -} - -void ggml_compute_forward_arange(const ggml_compute_params * params, ggml_tensor * dst) { - switch (dst->type) { - 
case GGML_TYPE_F32: - { - ggml_compute_forward_arange_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -static void ggml_compute_forward_timestep_embedding_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_ASSERT(src0->nb[0] == sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - GGML_TENSOR_UNARY_OP_LOCALS - - const int dim = ggml_get_op_params_i32(dst, 0); - const int max_period = ggml_get_op_params_i32(dst, 1); - - int half = dim / 2; - - for (int64_t i = 0; i < ne00; i++) { - float * embed_data = (float *) ((char *) dst->data + i * nb1); - for (int64_t j = ith; j < half; j += nth) { - float timestep = ((float *) src0->data)[i]; - float freq = (float) expf(-logf(max_period) * j / half); - float arg = timestep * freq; - embed_data[j] = cosf(arg); - embed_data[j + half] = sinf(arg); - } - if (dim % 2 != 0 && ith == 0) { - embed_data[2 * half] = 0.f; - } - } -} - -void ggml_compute_forward_timestep_embedding(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_timestep_embedding_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_argsort - -template struct cmp_argsort { - const float * data; - - bool operator()(int32_t a, int32_t b) const { - if constexpr (order == GGML_SORT_ORDER_ASC) { - return data[a] < data[b]; - } else { - return data[a] > data[b]; - } - } -}; - -static void ggml_compute_forward_argsort_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_TENSOR_UNARY_OP_LOCALS - - GGML_ASSERT(nb0 == sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - const int64_t nr = ggml_nrows(src0); - - ggml_sort_order order = (ggml_sort_order) ggml_get_op_params_i32(dst, 0); - - for (int64_t i = ith; i < nr; i += nth) { - const float * src_data = (float *) ((char *) src0->data + i * nb01); - - int32_t * dst_data = (int32_t *) ((char *) dst->data + i * nb1); - - for (int64_t j = 0; j < ne0; j++) { - dst_data[j] = j; - } - - switch (order) { - case GGML_SORT_ORDER_ASC: - std::sort(dst_data, dst_data + ne0, cmp_argsort{ src_data }); - break; - - case GGML_SORT_ORDER_DESC: - std::sort(dst_data, dst_data + ne0, cmp_argsort{ src_data }); - break; - - default: - GGML_ABORT("invalid sort order"); - } - } -} - -void ggml_compute_forward_argsort(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_argsort_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_top_k - -struct cmp_top_k { - const float * data; - - bool operator()(int32_t a, int32_t b) const { return data[a] > data[b]; } -}; - -static void ggml_compute_forward_top_k_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_TENSOR_UNARY_OP_LOCALS - - GGML_ASSERT(nb0 == sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - const int64_t nr = ggml_nrows(src0); - - const int top_k = ne0; - - int32_t * tmp = (int32_t *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith; - - for (int64_t i = ith; i < nr; i += nth) { - const float * src_data = (float *) ((char *) src0->data + i * nb01); - - 
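Both the argsort kernel above and the top_k loop that follows sort an index array instead of the values themselves; top_k additionally uses std::partial_sort, which only orders the first top_k positions. A small standalone illustration of the same idea (not the ggml code path):

    #include <algorithm>
    #include <cstdint>
    #include <numeric>
    #include <vector>

    // Indices of the k largest values in data; the order within the k is left unspecified.
    static std::vector<int32_t> top_k_indices(const std::vector<float> & data, int32_t k) {
        std::vector<int32_t> idx(data.size());
        std::iota(idx.begin(), idx.end(), 0);
        std::partial_sort(idx.begin(), idx.begin() + k, idx.end(),
                          [&](int32_t a, int32_t b) { return data[a] > data[b]; });
        idx.resize(k);
        return idx;
    }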
for (int64_t j = 0; j < ne00; j++) { - tmp[j] = j; - } - - std::partial_sort(tmp, tmp + top_k, tmp + ne00, cmp_top_k{ src_data }); - - int32_t * dst_data = (int32_t *) ((char *) dst->data + i * nb1); - - std::copy(tmp, tmp + top_k, dst_data); - - // emphasize that the order is not important - if (top_k > 1) { - std::swap(dst_data[0], dst_data[1]); - } - } -} - -void ggml_compute_forward_top_k(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_top_k_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_flash_attn_ext - -static void ggml_compute_forward_flash_attn_ext_f16_one_chunk(const ggml_compute_params * params, - ggml_tensor * dst, - int ir0, - int ir1) { - const ggml_tensor * q = dst->src[0]; - const ggml_tensor * k = dst->src[1]; - const ggml_tensor * v = dst->src[2]; - const ggml_tensor * mask = dst->src[3]; - const ggml_tensor * sinks = dst->src[4]; - - GGML_TENSOR_LOCALS(int64_t, neq, q, ne) - GGML_TENSOR_LOCALS(size_t, nbq, q, nb) - GGML_TENSOR_LOCALS(int64_t, nek, k, ne) - GGML_TENSOR_LOCALS(size_t, nbk, k, nb) - GGML_TENSOR_LOCALS(int64_t, nev, v, ne) - GGML_TENSOR_LOCALS(size_t, nbv, v, nb) - GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) - GGML_TENSOR_LOCALS(size_t, nb, dst, nb) - - const int64_t DK = nek0; - const int64_t DV = nev0; - const int64_t N = neq1; - - GGML_ASSERT(ne0 == DV); - GGML_ASSERT(ne2 == N); - - // input tensor rows must be contiguous - GGML_ASSERT(nbq0 == ggml_type_size(q->type)); - GGML_ASSERT(nbk0 == ggml_type_size(k->type)); - GGML_ASSERT(nbv0 == ggml_type_size(v->type)); - - GGML_ASSERT(neq0 == DK); - GGML_ASSERT(nek0 == DK); - GGML_ASSERT(nev0 == DV); - - GGML_ASSERT(neq1 == N); - - // dst cannot be transposed or permuted - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb0 <= nb1); - GGML_ASSERT(nb1 <= nb2); - GGML_ASSERT(nb2 <= nb3); - - // broadcast factors - const int64_t rk2 = neq2 / nek2; - const int64_t rk3 = neq3 / nek3; - - const int64_t rv2 = neq2 / nev2; - const int64_t rv3 = neq3 / nev3; - - // parallelize by q rows using ggml_vec_dot_f32 - - float scale = 1.0f; - float max_bias = 0.0f; - float logit_softcap = 0.0f; - - memcpy(&scale, (float *) dst->op_params + 0, sizeof(float)); - memcpy(&max_bias, (float *) dst->op_params + 1, sizeof(float)); - memcpy(&logit_softcap, (float *) dst->op_params + 2, sizeof(float)); - - if (logit_softcap != 0) { - scale /= logit_softcap; - } - - const uint32_t n_head = neq2; - const uint32_t n_head_log2 = 1u << (uint32_t) floor(log2(n_head)); - - const float m0 = powf(2.0f, -(max_bias) / n_head_log2); - const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2); - - const ggml_type k_vec_dot_type = ggml_get_type_traits_cpu(k->type)->vec_dot_type; - const ggml_from_float_t q_to_vec_dot = ggml_get_type_traits_cpu(k_vec_dot_type)->from_float; - const ggml_vec_dot_t kq_vec_dot = ggml_get_type_traits_cpu(k->type)->vec_dot; - const ggml_to_float_t v_to_float = ggml_get_type_traits(v->type)->to_float; - - GGML_ASSERT((q_to_vec_dot) && "fattn: unsupported K-type"); - GGML_ASSERT((v->type == GGML_TYPE_F32 || v_to_float) && "fattn: unsupported V-type"); - - int ith = params->ith; - - // loop over n_batch and n_head - for (int ir = ir0; ir < ir1; ++ir) { - // q indices - const int iq3 = ir / (neq2 * neq1); - const int iq2 = (ir - iq3 * neq2 * neq1) / neq1; - const int iq1 = (ir - iq3 * neq2 * neq1 - iq2 * neq1); - - const uint32_t h = iq2; 
// head index - const float slope = - (max_bias > 0.0f) ? h < n_head_log2 ? powf(m0, h + 1) : powf(m1, 2 * (h - n_head_log2) + 1) : 1.0f; - - float S = 0.0f; // sum - float M = -INFINITY; // maximum KQ value - - float * VKQ32 = - (float *) params->wdata + ith * (1 * DK + 2 * DV + CACHE_LINE_SIZE_F32); // FP32 VKQ accumulator - float * V32 = (VKQ32 + 1 * DV); // (temporary) FP32 V buffer - ggml_fp16_t * VKQ16 = (ggml_fp16_t *) (VKQ32 + 1 * DV); // (temporary) FP16 VKQ accumulator - ggml_fp16_t * Q_q = (ggml_fp16_t *) (VKQ32 + 2 * DV); // (temporary) buffer for Q converted to quantized/FP16 - - if (v->type == GGML_TYPE_F16) { - memset(VKQ16, 0, DV * sizeof(ggml_fp16_t)); - } else { - memset(VKQ32, 0, DV * sizeof(float)); - } - - const ggml_fp16_t * mp = - mask ? (ggml_fp16_t *) ((char *) mask->data + iq1 * mask->nb[1] + (iq2 % mask->ne[2]) * mask->nb[2] + - (iq3 % mask->ne[3]) * mask->nb[3]) : - NULL; - - // k indices - const int ik3 = iq3 / rk3; - const int ik2 = iq2 / rk2; - - // v indices - const int iv3 = iq3 / rv3; - const int iv2 = iq2 / rv2; - - const float * pq = (const float *) ((char *) q->data + (iq1 * nbq1 + iq2 * nbq2 + iq3 * nbq3)); - q_to_vec_dot(pq, Q_q, DK); - - // online softmax / attention - // loop over n_kv and n_head_kv - // ref: https://arxiv.org/pdf/2112.05682.pdf - for (int64_t ic = 0; ic < nek1; ++ic) { - const float mv = mp ? slope * GGML_CPU_FP16_TO_FP32(mp[ic]) : 0.0f; - if (mv == -INFINITY) { - continue; - } - - float s; // KQ value - - const char * k_data = (const char *) k->data + (ic * nbk1 + ik2 * nbk2 + ik3 * nbk3); - kq_vec_dot(DK, &s, 0, k_data, 0, Q_q, 0, 1); - - s = s * scale; // scale KQ value - - if (logit_softcap != 0.0f) { - s = logit_softcap * tanhf(s); - } - - s += mv; // apply mask - - const float Mold = M; - - float ms = 1.0f; // upon new higher max val, scale VKQ and KQ sum with this value - float vs = 1.0f; // post-softmax KQ value, expf(s - M) - - const char * v_data = ((const char *) v->data + (ic * nbv1 + iv2 * nbv2 + iv3 * nbv3)); - - if (v->type == GGML_TYPE_F16) { - if (s > M) { - // s is new maximum, ms < 1.0f, vs == expf(s - s) == 1.0f - M = s; - ms = expf(Mold - M); - - // V = V*expf(Mold - M) - ggml_vec_scale_f16(DV, VKQ16, ms); - } else { - // no new maximum, ms == 1.0f, vs != 1.0f - vs = expf(s - M); - } - - // V += v*expf(s - M) - ggml_vec_mad_f16(DV, VKQ16, (const ggml_fp16_t *) v_data, vs); - } else { - if (s > M) { - // s is new maximum, ms < 1.0f, vs == expf(s - s) == 1.0f - M = s; - ms = expf(Mold - M); - - // V = V*expf(Mold - M) - ggml_vec_scale_f32(DV, VKQ32, ms); - } else { - // no new maximum, ms == 1.0f, vs != 1.0f - vs = expf(s - M); - } - - // V += v*expf(s - M) - if (v_to_float) { - v_to_float(v_data, V32, DV); - ggml_vec_mad_f32(DV, VKQ32, V32, vs); - } else { - // V is F32 - ggml_vec_mad_f32(DV, VKQ32, (const float *) v_data, vs); - } - } - - S = S * ms + vs; // scale and increment sum with partial sum - } - - if (v->type == GGML_TYPE_F16) { - for (int64_t d = 0; d < DV; ++d) { - VKQ32[d] = GGML_CPU_FP16_TO_FP32(VKQ16[d]); - } - } - - // sinks - if (sinks) { - const float s = ((float *) ((char *) sinks->data))[h]; - - float ms = 1.0f; - float vs = 1.0f; - - if (s > M) { - ms = expf(M - s); - ggml_vec_scale_f32(DV, VKQ32, ms); - } else { - vs = expf(s - M); - } - - S = S * ms + vs; - } - - // V /= S - const float S_inv = S == 0.0f ? 
0.0f : 1.0f / S; - ggml_vec_scale_f32(DV, VKQ32, S_inv); - - // dst indices - const int i1 = iq1; - const int i2 = iq2; - const int i3 = iq3; - - // original - //memcpy((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3), V, nev0*sizeof(float)); - - // permute(0, 2, 1, 3) - memcpy((char *) dst->data + (i3 * ne2 * ne1 + i2 + i1 * ne1) * nb1, VKQ32, nb1); - } -} - -static void ggml_compute_forward_flash_attn_ext_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * q = dst->src[0]; - const ggml_tensor * k = dst->src[1]; - const ggml_tensor * v = dst->src[2]; - - GGML_TENSOR_LOCALS(int64_t, neq, q, ne) - GGML_TENSOR_LOCALS(size_t, nbq, q, nb) - GGML_TENSOR_LOCALS(int64_t, nek, k, ne) - GGML_TENSOR_LOCALS(size_t, nbk, k, nb) - GGML_TENSOR_LOCALS(int64_t, nev, v, ne) - GGML_TENSOR_LOCALS(size_t, nbv, v, nb) - GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) - GGML_TENSOR_LOCALS(size_t, nb, dst, nb) - - const int64_t DK = nek0; - const int64_t DV = nev0; - const int64_t N = neq1; - - GGML_ASSERT(ne0 == DV); - GGML_ASSERT(ne2 == N); - - // input tensor rows must be contiguous - GGML_ASSERT(nbq0 == ggml_type_size(q->type)); - GGML_ASSERT(nbk0 == ggml_type_size(k->type)); - GGML_ASSERT(nbv0 == ggml_type_size(v->type)); - - GGML_ASSERT(neq0 == DK); - GGML_ASSERT(nek0 == DK); - GGML_ASSERT(nev0 == DV); - - GGML_ASSERT(neq1 == N); - - // dst cannot be transposed or permuted - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb0 <= nb1); - GGML_ASSERT(nb1 <= nb2); - GGML_ASSERT(nb2 <= nb3); - - // parallelize by q rows using ggml_vec_dot_f32 - - // total rows in q - const int64_t nr = neq1 * neq2 * neq3; - - // rows per thread - const int ith = params->ith; - const int nth = params->nth; - - // disable for NUMA - const bool disable_chunking = ggml_is_numa(); - - // 4x chunks per thread - int nth_scaled = nth * 4; - int64_t chunk_size = (nr + nth_scaled - 1) / nth_scaled; - int64_t nchunk = (nr + chunk_size - 1) / chunk_size; - - if (nth == 1 || nchunk < nth || disable_chunking) { - nchunk = nth; - } - - if (ith == 0) { - // Every thread starts at ith, so the first unprocessed chunk is nth. This save a bit of coordination right at the start. - ggml_threadpool_chunk_set(params->threadpool, nth); - } - - ggml_barrier(params->threadpool); - - // The number of elements in each chunk - const int64_t dr = (nr + nchunk - 1) / nchunk; - - // The first chunk comes from our thread_id, the rest will get auto-assigned. 
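Work is distributed through a shared chunk counter: each thread starts on the chunk matching its thread id and then atomically claims the next unprocessed chunk until none remain. A minimal sketch of that pattern with std::atomic, independent of the ggml threadpool API (ggml_threadpool_chunk_set / ggml_threadpool_chunk_add); fetch-and-add semantics are assumed here:

    #include <atomic>
    #include <cstdint>

    // Chunks 0..nth-1 are taken implicitly by the thread ids, so the shared
    // counter starts at nth before the workers are launched.
    static std::atomic<int64_t> next_chunk;

    static void worker(int ith, int64_t nchunk) {
        int64_t current = ith;                  // first chunk comes from the thread id
        while (current < nchunk) {
            // process_chunk(current);          // placeholder for the per-chunk work
            current = next_chunk.fetch_add(1);  // claim the next unprocessed chunk
        }
    }
    // before starting the workers: next_chunk.store(nth);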
- int current_chunk = ith; - - while (current_chunk < nchunk) { - const int64_t ir0 = dr * current_chunk; - const int64_t ir1 = MIN(ir0 + dr, nr); - - ggml_compute_forward_flash_attn_ext_f16_one_chunk(params, dst, ir0, ir1); - - current_chunk = ggml_threadpool_chunk_add(params->threadpool, 1); - } -} - -void ggml_compute_forward_flash_attn_ext(const ggml_compute_params * params, ggml_tensor * dst) { - switch (dst->op_params[3]) { - case GGML_PREC_DEFAULT: - case GGML_PREC_F32: - { - // uses F32 accumulators - ggml_compute_forward_flash_attn_ext_f16(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_flash_attn_back - -static void ggml_compute_forward_flash_attn_back_f32(const ggml_compute_params * params, - const bool masked, - ggml_tensor * dst) { - const ggml_tensor * q = dst->src[0]; - const ggml_tensor * k = dst->src[1]; - const ggml_tensor * v = dst->src[2]; - const ggml_tensor * d = dst->src[3]; - - GGML_TENSOR_LOCALS(int64_t, neq, q, ne) - GGML_TENSOR_LOCALS(size_t, nbq, q, nb) - GGML_TENSOR_LOCALS(int64_t, nek, k, ne) - GGML_TENSOR_LOCALS(size_t, nbk, k, nb) - GGML_TENSOR_LOCALS(int64_t, nev, v, ne) - GGML_TENSOR_LOCALS(size_t, nbv, v, nb) - GGML_TENSOR_LOCALS(int64_t, ned, d, ne) - GGML_TENSOR_LOCALS(size_t, nbd, d, nb) - GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) - GGML_TENSOR_LOCALS(size_t, nb, dst, nb) - - const int ith = params->ith; - const int nth = params->nth; - - const int64_t D = neq0; - const int64_t N = neq1; - const int64_t P = nek1 - N; - const int64_t M = P + N; - - const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL); - const int mxDM = MAX(D, Mup); - - // GGML_ASSERT(ne0 == D); - // GGML_ASSERT(ne1 == N); - GGML_ASSERT(P >= 0); - - GGML_ASSERT(nbq0 == sizeof(float)); - GGML_ASSERT(nbk0 == sizeof(float)); - GGML_ASSERT(nbv0 == sizeof(float)); - - GGML_ASSERT(neq0 == D); - GGML_ASSERT(nek0 == D); - GGML_ASSERT(nev1 == D); - GGML_ASSERT(ned0 == D); - - GGML_ASSERT(neq1 == N); - GGML_ASSERT(nek1 == N + P); - GGML_ASSERT(nev1 == D); - GGML_ASSERT(ned1 == N); - - // dst cannot be transposed or permuted - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb0 <= nb1); - GGML_ASSERT(nb1 <= nb2); - GGML_ASSERT(nb2 <= nb3); - - if (ith == 0) { - memset(dst->data, 0, nb0 * ne0 * ne1 * ne2 * ne3); - } - ggml_barrier(params->threadpool); - - const int64_t elem_q = ggml_nelements(q); - const int64_t elem_k = ggml_nelements(k); - - ggml_type result_type = dst->type; - GGML_ASSERT(ggml_blck_size(result_type) == 1); - const size_t tsize = ggml_type_size(result_type); - - const size_t offs_q = 0; - const size_t offs_k = offs_q + GGML_PAD(elem_q * tsize, GGML_MEM_ALIGN); - const size_t offs_v = offs_k + GGML_PAD(elem_k * tsize, GGML_MEM_ALIGN); - - void * grad_q = (char *) dst->data; - void * grad_k = (char *) dst->data + offs_k; - void * grad_v = (char *) dst->data + offs_v; - - const size_t nbgq1 = nb0 * neq0; - const size_t nbgq2 = nb0 * neq0 * neq1; - const size_t nbgq3 = nb0 * neq0 * neq1 * neq2; - - const size_t nbgk1 = nb0 * nek0; - const size_t nbgk2 = nb0 * nek0 * nek1; - const size_t nbgk3 = nb0 * nek0 * nek1 * neq2; - - const size_t nbgv1 = nb0 * nev0; - const size_t nbgv2 = nb0 * nev0 * nev1; - const size_t nbgv3 = nb0 * nev0 * nev1 * neq2; - - // parallelize by k rows using ggml_vec_dot_f32 - - // total rows in k - const int nr = nek2 * nek3; - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - const float scale 
= 1.0f / sqrtf(D); - - //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale); - - // how often k2 (and v2) is repeated in q2 - int nrep = neq2 / nek2; - - for (int ir = ir0; ir < ir1; ++ir) { - // q indices - const int ik3 = ir / (nek2); - const int ik2 = ir - ik3 * nek2; - - const int iq3 = ik3; - const int id3 = ik3; - const int iv3 = ik3; - const int iv2 = ik2; - - for (int irep = 0; irep < nrep; ++irep) { - const int iq2 = ik2 + irep * nek2; - const int id2 = iq2; - - // (ik2 + irep*nek2) % nek2 == ik2 - for (int iq1 = 0; iq1 < neq1; ++iq1) { - const int id1 = iq1; - - // not sure about CACHE_LINE_SIZE_F32.. - // - maybe it must not be multiplied by 2 and excluded from .. in SM 1*(..) offset? - float * S = - (float *) params->wdata + ith * 2 * (mxDM + CACHE_LINE_SIZE_F32) + 0 * (mxDM + CACHE_LINE_SIZE_F32); - float * SM = - (float *) params->wdata + ith * 2 * (mxDM + CACHE_LINE_SIZE_F32) + 1 * (mxDM + CACHE_LINE_SIZE_F32); - - for (int i = M; i < Mup; ++i) { - S[i] = -INFINITY; - } - - const int64_t masked_begin = masked ? (P + iq1 + 1) : M; - for (int64_t ic = 0; ic < masked_begin; ++ic) { - // k indices - const int ik1 = ic; - - // S indices - const int i1 = ik1; - - ggml_vec_dot_f32(neq0, S + i1, 0, - (float *) ((char *) k->data + (ik1 * nbk1 + ik2 * nbk2 + ik3 * nbk3)), 0, - (float *) ((char *) q->data + (iq1 * nbq1 + iq2 * nbq2 + iq3 * nbq3)), 0, 1); - } - - // scale - ggml_vec_scale_f32(masked_begin, S, scale); - - for (int64_t i = masked_begin; i < M; i++) { - S[i] = -INFINITY; - } - - // softmax - // exclude known -INF S[..] values from max and loop - // dont forget to set their SM values to zero - { - float max = -INFINITY; - ggml_vec_max_f32(masked_begin, &max, S); - - ggml_float sum = 0.0; - { -#ifdef GGML_SOFT_MAX_ACCELERATE - max = -max; - vDSP_vsadd(SM, 1, &max, SM, 1, Mup); - vvexpf(SM, SM, &Mup); - ggml_vec_sum_f32(Mup, &sum, SM); -#else - sum = ggml_vec_soft_max_f32(Mup, SM, S, max); -#endif - } - - assert(sum > 0.0); - - sum = 1.0 / sum; - ggml_vec_scale_f32(masked_begin, SM, sum); - } - - // step-by-step explanation - { - // forward-process shape grads from backward process - // parallel_for ik2,ik3: - // for irep: - // iq2 = ik2 + irep*nek2 - // k[:D,:M,:,:] [D,M,:,:] grad[k][:D,:M,ik2,ik3] += grad[kcur] - // q[:D,:N,:,:] [D,N,:,:] grad[q][:D,iq1,iq2,iq3] += grad[qcur] - // v[:M,:D,:,:] [M,D,:,:] grad[v][:M,:D,iv2,iv3] += grad[vcur] - // for iq1: - // kcur = k[:D,:M,ik2,ik3] [D,M,1,1] grad[kcur] = grad[S1].T @ qcur - // qcur = q[:D,iq1,iq2,iq3] [D,1,1,1] grad[qcur] = grad[S1] @ kcur - // vcur = v[:M,:D,iv2,iv3] [M,D,1,1] grad[vcur] = grad[S5].T @ S4 - // S0 = -Inf [D,1,1,1] - // ~S1[i] = dot(kcur[:D,i], qcur) - // S1 = qcur @ kcur.T [M,1,1,1] grad[S1] = grad[S2] * scale - // S2 = S1 * scale [M,1,1,1] grad[S2] = diag_mask_zero(grad[S3], P) - // S3 = diag_mask_inf(S2, P) [M,1,1,1] grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4])) - // S4 = softmax(S3) [M,1,1,1] grad[S4] = grad[S5] @ vcur - // ~S5[i] = dot(vcur[:,i], S4) - // S5 = S4 @ vcur.T [D,1,1,1] grad[S5] = d[:D,id1,id2,id3] - // ~dst[i,iq1,iq2,iq3] = S5[i] ^ - // dst[:D,iq1,iq2,iq3] = S5 | grad[dst[:D,iq1,iq2,iq3]] = d[:D,id1,id2,id3] - // dst backward-/ grad[dst] = d - // - // output gradients with their dependencies: - // - // grad[kcur] = grad[S1].T @ qcur - // grad[S1] = diag_mask_zero(grad[S3], P) * scale - // grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4])) - // grad[S4] = grad[S5] @ vcur - // grad[S4] = d[:D,id1,id2,id3] @ vcur - // grad[qcur] = grad[S1] @ kcur - // grad[vcur] = 
grad[S5].T @ S4 - // grad[vcur] = d[:D,id1,id2,id3].T @ S4 - // - // in post-order: - // - // S1 = qcur @ kcur.T - // S2 = S1 * scale - // S3 = diag_mask_inf(S2, P) - // S4 = softmax(S3) - // grad[S4] = d[:D,id1,id2,id3] @ vcur - // grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4])) - // grad[S1] = diag_mask_zero(grad[S3], P) * scale - // grad[qcur] = grad[S1] @ kcur - // grad[kcur] = grad[S1].T @ qcur - // grad[vcur] = d[:D,id1,id2,id3].T @ S4 - // - // using less variables (SM=S4): - // - // S = diag_mask_inf(qcur @ kcur.T * scale, P) - // SM = softmax(S) - // S = d[:D,iq1,iq2,iq3] @ vcur - // dot_SM_gradSM = dot(SM, S) - // S = SM * (S - dot(SM, S)) - // S = diag_mask_zero(S, P) * scale - // - // grad[q][:D,iq1,iq2,iq3] += S @ kcur - // grad[k][:D,:M,ik2,ik3] += S.T @ qcur - // grad[v][:M,:D,iv2,iv3] += d[:D,id1,id2,id3].T @ SM - } - - // S = gradSM = d[:D,id1,id2,id3] @ vcur[:,:,iv2,iv3] - // S = d[:D,id1,id2,id3] @ vcur[:,:,iv2,iv3] - // for ic: - // S[:M] += vcur[:M,ic,iv2,iv3] * d[ic,id1,id2,id3] - // exclude known future zero S[..] values from operation - ggml_vec_set_f32(masked_begin, S, 0); - for (int64_t ic = 0; ic < D; ++ic) { - ggml_vec_mad_f32( - masked_begin, S, (float *) ((char *) v->data + (ic * nbv1 + iv2 * nbv2 + iv3 * nbv3)), - *(float *) ((char *) d->data + (ic * nbd0 + id1 * nbd1 + id2 * nbd2 + id3 * nbd3))); - } - - // S = SM * (S - dot(SM, S)) - float dot_SM_gradSM = 0; - ggml_vec_dot_f32(masked_begin, &dot_SM_gradSM, 0, SM, 0, S, 0, 1); - ggml_vec_acc1_f32(M, S, -dot_SM_gradSM); - ggml_vec_mul_f32(masked_begin, S, S, SM); - - // S = diag_mask_zero(S, P) * scale - // already done by above ggml_vec_set_f32 - - // exclude known zero S[..] values from operation - ggml_vec_scale_f32(masked_begin, S, scale); - - // S shape [M,1] - // SM shape [M,1] - // kcur shape [D,M] - // qcur shape [D,1] - // vcur shape [M,D] - - // grad[q][:D,iq1,iq2,iq3] += S @ kcur - // grad[q][:D,iq1,iq2,iq3] += shape[M,1] @ shape[D,M] - // for ic: - // grad[q][:D,iq1,iq2,iq3] += S[ic] * kcur[:D,ic,ik2,ik3] - // exclude known zero S[..] values from loop - for (int64_t ic = 0; ic < masked_begin; ++ic) { - ggml_vec_mad_f32(D, (float *) ((char *) grad_q + (iq1 * nbgq1 + iq2 * nbgq2 + iq3 * nbgq3)), - (float *) ((char *) k->data + (ic * nbk1 + ik2 * nbk2 + ik3 * nbk3)), S[ic]); - } - - // grad[k][:D,:M,iq2,iq3] += S.T @ qcur - // for ic: - // grad[k][:D,ic,iq2,iq3] += S.T[0,ic] * qcur[:D,0] - // grad[k][:D,ic,iq2,iq3] += S[ic] * qcur[:D,0] - // exclude known zero S[..] values from loop - for (int64_t ic = 0; ic < masked_begin; ++ic) { - ggml_vec_mad_f32(D, (float *) ((char *) grad_k + (ic * nbgk1 + ik2 * nbgk2 + ik3 * nbgk3)), - (float *) ((char *) q->data + (iq1 * nbq1 + iq2 * nbq2 + iq3 * nbq3)), S[ic]); - } - - // grad[v][:M,:D,iv2,iv3] += d[:D,id1,id2,id3].T @ SM - // for ic: - // grad[v][:M,ic,iv2,iv3] += d[:D,id1,id2,id3].T[0,ic] * SM[:M] - // grad[v][:M,ic,iv2,iv3] += d[ic,id1,id2,id3] * SM[:M] - // exclude known zero SM[..] 
values from mad - for (int64_t ic = 0; ic < D; ++ic) { - ggml_vec_mad_f32( - masked_begin, (float *) ((char *) grad_v + (ic * nbgv1 + iv2 * nbgv2 + iv3 * nbgv3)), SM, - *(float *) ((char *) d->data + (ic * nbd0 + id1 * nbd1 + id2 * nbd2 + id3 * nbd3))); - } - } - } - } -} - -void ggml_compute_forward_flash_attn_back(const ggml_compute_params * params, const bool masked, ggml_tensor * dst) { - const ggml_tensor * q = dst->src[0]; - - switch (q->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_flash_attn_back_f32(params, masked, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_ssm_conv - -static void ggml_compute_forward_ssm_conv_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; // conv_x - const ggml_tensor * src1 = dst->src[1]; // conv1d.weight - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src1->ne[0]; // d_conv - const int ncs = src0->ne[0]; // d_conv - 1 + n_t - const int nr = src0->ne[1]; // d_inner - const int n_t = dst->ne[1]; // tokens per sequence - const int n_s = dst->ne[2]; // number of sequences in the batch - - GGML_ASSERT(dst->ne[0] == nr); - GGML_ASSERT(src0->nb[0] == sizeof(float)); - GGML_ASSERT(src1->nb[0] == sizeof(float)); - GGML_ASSERT(src0->nb[1] == src0->ne[0] * sizeof(float)); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - const int ir = ir1 - ir0; - - for (int i3 = 0; i3 < n_s; ++i3) { - for (int i2 = 0; i2 < n_t; ++i2) { - // {d_conv - 1 + n_t, d_inner, n_seqs} - // sliding window - const float * s = (const float *) ((const char *) src0->data + ir0 * (src0->nb[1]) + i2 * (src0->nb[0]) + - i3 * (src0->nb[2])); // {d_conv, d_inner, n_s} - const float * c = (const float *) ((const char *) src1->data + ir0 * (src1->nb[1])); // {d_conv, d_inner} - float * x = (float *) ((char *) dst->data + ir0 * (dst->nb[0]) + i2 * (dst->nb[1]) + - i3 * (dst->nb[2])); // {d_inner, n_t, n_s} - - // TODO: transpose the output for smaller strides for big batches? 
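Each output element computed below is a short dot product between the per-channel conv kernel and a window of the input that slides by one position per token; the input row carries d_conv - 1 history samples in front, hence ncs = d_conv - 1 + n_t. A standalone scalar sketch of that per-channel 1D convolution (illustrative names):

    #include <cstdint>
    #include <vector>

    // y[t] = sum_i x[t + i] * w[i]; x has length n_t + d_conv - 1 because the
    // caller prepends d_conv - 1 history samples.
    static std::vector<float> conv1d_channel(const std::vector<float> & x,
                                             const std::vector<float> & w) {
        const int64_t d_conv = (int64_t) w.size();
        const int64_t n_t    = (int64_t) x.size() - d_conv + 1;
        std::vector<float> y(n_t, 0.0f);
        for (int64_t t = 0; t < n_t; ++t) {
            float sum = 0.0f;
            for (int64_t i = 0; i < d_conv; ++i) {
                sum += x[t + i] * w[i];
            }
            y[t] = sum;
        }
        return y;
    }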
- // d_inner - for (int i1 = 0; i1 < ir; ++i1) { - // rowwise dot product - // NOTE: not using ggml_vec_dot_f32, because its sum is in double precision - float sumf = 0.0f; - - // d_conv - for (int i0 = 0; i0 < nc; ++i0) { - sumf += s[i0 + i1 * ncs] * c[i0 + i1 * nc]; - } - x[i1] = sumf; - } - } - } -} - -void ggml_compute_forward_ssm_conv(const ggml_compute_params * params, ggml_tensor * dst) { - switch (dst->src[0]->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_ssm_conv_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_ssm_scan - -static void ggml_compute_forward_ssm_scan_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; // s {d_state, dim, n_head, n_seqs+} - const ggml_tensor * src1 = dst->src[1]; // x {dim, n_head, n_seq_tokens, n_seqs} - const ggml_tensor * src2 = dst->src[2]; // dt {n_head, n_seq_tokens, n_seqs} - const ggml_tensor * src3 = dst->src[3]; // A {d_state, n_head} or {1, n_head} - const ggml_tensor * src4 = dst->src[4]; // B {d_state, n_group, n_seq_tokens, n_seqs} - const ggml_tensor * src5 = dst->src[5]; // C {d_state, n_group, n_seq_tokens, n_seqs} - const ggml_tensor * src6 = dst->src[6]; // ids {n_seqs} - - const int ith = params->ith; - const int nth = params->nth; - - const int64_t nc = src0->ne[0]; // d_state - const int64_t nr = src0->ne[1]; // dim - const int64_t nh = src1->ne[1]; // n_head - const int64_t ng = src4->ne[1]; - const int64_t nt = src1->ne[2]; // number of tokens per sequence - const int64_t ns = src1->ne[3]; // number of sequences in the batch - - // can't use ggml_nbytes because src1 is not necessarily contiguous - const int64_t s_off = ggml_nelements(src1) * ggml_element_size(src1); - - GGML_ASSERT(ggml_nelements(src1) + nc * nr * nh * ns == ggml_nelements(dst)); - GGML_ASSERT(src0->nb[0] == sizeof(float)); - GGML_ASSERT(src1->nb[0] == sizeof(float)); - GGML_ASSERT(src2->nb[0] == sizeof(float)); - GGML_ASSERT(src3->nb[0] == sizeof(float)); - GGML_ASSERT(src4->nb[0] == sizeof(float)); - GGML_ASSERT(src5->nb[0] == sizeof(float)); - GGML_ASSERT(src6->nb[0] == sizeof(int32_t)); - GGML_ASSERT(nh % ng == 0); - - // heads per thread - const int dh = (nh + nth - 1) / nth; - - // head range for this thread - const int ih0 = dh * ith; - const int ih1 = MIN(ih0 + dh, nh); - - const int32_t * ids = (const int32_t *) src6->data; - - for (int i3 = 0; i3 < ns; ++i3) { - const float * s0 = - (const float *) ((const char *) src0->data + ids[i3] * (src0->nb[3])); // {d_state, dim, nh, ns} - float * s = (float *) ((char *) dst->data + i3 * (src0->nb[3]) + s_off); // {d_state, dim, nh, ns} - - for (int i2 = 0; i2 < nt; ++i2) { - const float * x = (const float *) ((const char *) src1->data + i2 * (src1->nb[2]) + - i3 * (src1->nb[3])); // {dim, nh, nt, ns} - const float * dt = - (const float *) ((const char *) src2->data + i2 * (src2->nb[1]) + i3 * (src2->nb[2])); // {nh, nt, ns} - const float * A = (const float *) ((const char *) src3->data); // {d_state, nh} or {1, nh} - const float * B = (const float *) ((const char *) src4->data + i2 * (src4->nb[2]) + - i3 * (src4->nb[3])); // {d_state, ng, nt, ns} - const float * C = (const float *) ((const char *) src5->data + i2 * (src5->nb[2]) + - i3 * (src5->nb[3])); // {d_state, ng, nt, ns} - float * y = (float *) ((char *) dst->data + i2 * (nh * nr * sizeof(float)) + - i3 * (nt * nh * nr * sizeof(float))); // {dim, nh, nt, ns} - - if (src3->ne[0] == 1) { - // Mamba-2 has a scalar decay factor per 
head; dA can be outside the state-wise loop - - // n_head - for (int h = ih0; h < ih1; ++h) { - // ref: https://github.com/state-spaces/mamba/blob/62db608da60f6fc790b8ed9f4b3225e95ca15fde/mamba_ssm/ops/triton/softplus.py#L16 - const float dt_soft_plus = ggml_compute_softplus_f32(dt[h]); - const float dA = expf(dt_soft_plus * A[h]); - const int g = h / (nh / ng); // repeat_interleave - - // dim - for (int i1 = 0; i1 < nr; ++i1) { - const int ii = i1 + h * nr; - const float x_dt = x[ii] * dt_soft_plus; - float sumf = 0.0f; -#if defined(GGML_SIMD) -# if defined(__ARM_FEATURE_SVE) - const int ggml_f32_epr = svcntw(); - const int ggml_f32_step = 1 * ggml_f32_epr; - - const int np = (nc & ~(ggml_f32_step - 1)); - - GGML_F32_VEC sum = GGML_F32_VEC_ZERO; - - GGML_F32_VEC adA = GGML_F32_VEC_SET1(dA); - GGML_F32_VEC axdt = GGML_F32_VEC_SET1(x_dt); - - for (int i = 0; i < np; i += ggml_f32_step) { - // TODO: maybe unroll more? - for (int j = 0; j < 1; j++) { - GGML_F32_VEC t0 = GGML_F32_VEC_LOAD(s0 + i + j * ggml_f32_epr + ii * nc); - GGML_F32_VEC t1 = GGML_F32_VEC_LOAD(B + i + j * ggml_f32_epr + g * nc); - GGML_F32_VEC t2 = GGML_F32_VEC_LOAD(C + i + j * ggml_f32_epr + g * nc); - - t0 = GGML_F32_VEC_MUL(t0, adA); - t1 = GGML_F32_VEC_MUL(t1, axdt); - - t0 = GGML_F32_VEC_ADD(t0, t1); - - sum = GGML_F32_VEC_FMA(sum, t0, t2); - - GGML_F32_VEC_STORE(s + i + j * ggml_f32_epr + ii * nc, t0); - } - } - - sumf = GGML_F32xt_REDUCE_ONE(sum); -# elif defined(__riscv_v_intrinsic) - // todo: RVV implementation - const int np = 0; -# else - const int np = (nc & ~(GGML_F32_STEP - 1)); - - GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; - - GGML_F32_VEC adA = GGML_F32_VEC_SET1(dA); - GGML_F32_VEC axdt = GGML_F32_VEC_SET1(x_dt); - - GGML_F32_VEC ax[GGML_F32_ARR]; - GGML_F32_VEC ay[GGML_F32_ARR]; - GGML_F32_VEC az[GGML_F32_ARR]; - - for (int i = 0; i < np; i += GGML_F32_STEP) { - for (int j = 0; j < GGML_F32_ARR; j++) { - ax[j] = GGML_F32_VEC_LOAD(s0 + i + j * GGML_F32_EPR + ii * nc); - ay[j] = GGML_F32_VEC_LOAD(B + i + j * GGML_F32_EPR + g * nc); - az[j] = GGML_F32_VEC_LOAD(C + i + j * GGML_F32_EPR + g * nc); - - ax[j] = GGML_F32_VEC_MUL(ax[j], adA); - ay[j] = GGML_F32_VEC_MUL(ay[j], axdt); - - ax[j] = GGML_F32_VEC_ADD(ax[j], ay[j]); - - sum[j] = GGML_F32_VEC_FMA(sum[j], ax[j], az[j]); - - GGML_F32_VEC_STORE(s + i + j * GGML_F32_EPR + ii * nc, ax[j]); - } - } - - // reduce sum0..sum3 to sum0 - GGML_F32_VEC_REDUCE(sumf, sum); -# endif -#else - const int np = 0; -#endif - // d_state - for (int i0 = np; i0 < nc; ++i0) { - const int i = i0 + ii * nc; - const int ig = i0 + g * nc; - // state = prev_state * dA + dB * x - const float state = (s0[i] * dA) + (B[ig] * x_dt); - // y = rowwise_dotprod(state, C) - sumf += state * C[ig]; - s[i] = state; - } - y[ii] = sumf; - } - } - } else { - // Mamba-1 has an element-wise decay factor for the states - - // n_head - for (int h = ih0; h < ih1; ++h) { - // ref: https://github.com/state-spaces/mamba/blob/62db608da60f6fc790b8ed9f4b3225e95ca15fde/mamba_ssm/ops/triton/softplus.py#L16 - const float dt_soft_plus = ggml_compute_softplus_f32(dt[h]); - const int g = h / (nh / ng); // repeat_interleave - - // dim - for (int i1 = 0; i1 < nr; ++i1) { - const int ii = i1 + h * nr; - const float x_dt = x[ii] * dt_soft_plus; -#if defined(__ARM_FEATURE_SVE) - svfloat32_t vx_dt = GGML_F32_VEC_SET1(x_dt); - svfloat32_t vdt_soft_plus = GGML_F32_VEC_SET1(dt_soft_plus); - svfloat32_t r1_vector = GGML_F32_VEC_ZERO; - - // d_state - // TODO: what happens when (d_state % svcntw()) != 0? 
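The time step dt passes through softplus before it is used (see the referenced Triton softplus). A numerically careful scalar version looks roughly like the sketch below; the saturation cutoff is an assumption and the exact form of ggml_compute_softplus_f32 may differ:

    #include <cmath>

    // softplus(x) = log(1 + exp(x)); for large x it saturates to x, so skip the
    // exp to avoid overflow. At x = 20 the correction term is already below
    // float precision, so returning x is exact for this sketch's purposes.
    static inline float softplus_ref(float x) {
        return x <= 20.0f ? log1pf(expf(x)) : x;
    }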
- for (int64_t k = 0; k < nc; k += svcntw()) { - svfloat32_t vA = GGML_F32_VEC_LOAD(&A[h * nc + k]); - svfloat32_t vB = GGML_F32_VEC_LOAD(&B[k + g * nc]); - svfloat32_t vC = GGML_F32_VEC_LOAD(&C[k + g * nc]); - svfloat32_t vs0 = GGML_F32_VEC_LOAD(&s0[ii * nc + k]); - - svfloat32_t t1 = GGML_F32_VEC_MUL(vdt_soft_plus, vA); - t1 = exp_ps_sve(svptrue_b32(), t1); - svfloat32_t t2 = GGML_F32_VEC_MUL(vx_dt, vB); - - vs0 = GGML_F32_VEC_FMA(t2, vs0, t1); - r1_vector = GGML_F32_VEC_ADD(GGML_F32_VEC_MUL(vs0, vC), r1_vector); - - GGML_F32_VEC_STORE(&s[ii * nc + k], vs0); - } - y[ii] = GGML_F32xt_REDUCE_ONE(r1_vector); -#else - float sumf = 0.0f; - // NOTE: can't really use GGML_SIMD here because d_state is usually 16 - // and also because expf is used within the loop. - // d_state - for (int i0 = 0; i0 < nc; ++i0) { - const int i = i0 + ii * nc; - const int ig = i0 + g * nc; - // state = prev_state * dA + dB * x - const float state = (s0[i] * expf(dt_soft_plus * A[i0 + h * nc])) + (B[ig] * x_dt); - // y = rowwise_dotprod(state, C) - sumf += state * C[ig]; - s[i] = state; - } - y[ii] = sumf; -#endif - } - } - } - // use the output as the source when it's not the first token-wise iteration - s0 = s; - } - } -} - -void ggml_compute_forward_ssm_scan(const ggml_compute_params * params, ggml_tensor * dst) { - switch (dst->src[0]->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_ssm_scan_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_win_part - -static void ggml_compute_forward_win_part_f32(const ggml_compute_params * params, ggml_tensor * dst) { - GGML_UNUSED(params); - - const ggml_tensor * src0 = dst->src[0]; - - GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) - GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) - - const int32_t nep0 = ((const int32_t *) (dst->op_params))[0]; - const int32_t nep1 = ((const int32_t *) (dst->op_params))[1]; - const int32_t w = ((const int32_t *) (dst->op_params))[2]; - - assert(ne00 == ne0); - assert(ne3 == nep0 * nep1); - - // TODO: optimize / multi-thread - for (int py = 0; py < nep1; ++py) { - for (int px = 0; px < nep0; ++px) { - const int64_t i3 = py * nep0 + px; - for (int64_t i2 = 0; i2 < ne2; ++i2) { - for (int64_t i1 = 0; i1 < ne1; ++i1) { - for (int64_t i0 = 0; i0 < ne0; ++i0) { - const int64_t i02 = py * w + i2; - const int64_t i01 = px * w + i1; - const int64_t i00 = i0; - - const int64_t i = i3 * ne2 * ne1 * ne0 + i2 * ne1 * ne0 + i1 * ne0 + i0; - const int64_t j = i02 * ne01 * ne00 + i01 * ne00 + i00; - - if (py * w + i2 >= ne02 || px * w + i1 >= ne01) { - ((float *) dst->data)[i] = 0.0f; - } else { - ((float *) dst->data)[i] = ((float *) src0->data)[j]; - } - } - } - } - } - } -} - -void ggml_compute_forward_win_part(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_win_part_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_win_unpart - -static void ggml_compute_forward_win_unpart_f32(const ggml_compute_params * params, ggml_tensor * dst) { - GGML_UNUSED(params); - - const ggml_tensor * src0 = dst->src[0]; - - GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) - GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) - - const int32_t w = ((const int32_t *) (dst->op_params))[0]; - - // padding - const int px = (w - ne1 % w) % w; - //const int py = (w - ne2%w)%w; - - const int npx = (px + ne1) / w; - //const int npy = (py + ne2)/w; - - 
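// win_part / win_unpart split dims 1 and 2 of the input into non-overlapping w x w windows
// (dim 0 is copied through). The extent is padded so a whole number of windows fits:
//     px = (w - ne1 % w) % w,   npx = (px + ne1) / w
// e.g. ne1 = 13, w = 7 -> px = 1, npx = 2;  ne1 = 14, w = 7 -> px = 0, npx = 2.
// win_part writes zeros into the padded region; win_unpart only iterates over the unpadded
// output extent, so the padding is dropped again. Output element (i0, i1, i2) below is read
// from window (i1 / w, i2 / w) at local offset (i1 % w, i2 % w).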
assert(ne0 == ne00); - - // TODO: optimize / multi-thread - for (int64_t i2 = 0; i2 < ne2; ++i2) { - for (int64_t i1 = 0; i1 < ne1; ++i1) { - for (int64_t i0 = 0; i0 < ne0; ++i0) { - const int ip2 = i2 / w; - const int ip1 = i1 / w; - - const int64_t i02 = i2 % w; - const int64_t i01 = i1 % w; - const int64_t i00 = i0; - - const int64_t i = (ip2 * npx + ip1) * ne02 * ne01 * ne00 + i02 * ne01 * ne00 + i01 * ne00 + i00; - const int64_t j = i2 * ne1 * ne0 + i1 * ne0 + i0; - - ((float *) dst->data)[j] = ((float *) src0->data)[i]; - } - } - } -} - -void ggml_compute_forward_win_unpart(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_win_unpart_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -//gmml_compute_forward_unary - -void ggml_compute_forward_unary(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_unary_op op = ggml_get_unary_op(dst); - - switch (op) { - case GGML_UNARY_OP_ABS: - { - ggml_compute_forward_abs(params, dst); - } - break; - case GGML_UNARY_OP_SGN: - { - ggml_compute_forward_sgn(params, dst); - } - break; - case GGML_UNARY_OP_NEG: - { - ggml_compute_forward_neg(params, dst); - } - break; - case GGML_UNARY_OP_STEP: - { - ggml_compute_forward_step(params, dst); - } - break; - case GGML_UNARY_OP_TANH: - { - ggml_compute_forward_tanh(params, dst); - } - break; - case GGML_UNARY_OP_ELU: - { - ggml_compute_forward_elu(params, dst); - } - break; - case GGML_UNARY_OP_RELU: - { - ggml_compute_forward_relu(params, dst); - } - break; - case GGML_UNARY_OP_SIGMOID: - { - ggml_compute_forward_sigmoid(params, dst); - } - break; - case GGML_UNARY_OP_GELU: - { - ggml_compute_forward_gelu(params, dst); - } - break; - case GGML_UNARY_OP_GELU_ERF: - { - ggml_compute_forward_gelu_erf(params, dst); - } - break; - case GGML_UNARY_OP_GELU_QUICK: - { - ggml_compute_forward_gelu_quick(params, dst); - } - break; - case GGML_UNARY_OP_SILU: - { - ggml_compute_forward_silu(params, dst); - } - break; - case GGML_UNARY_OP_HARDSWISH: - { - ggml_compute_forward_hardswish(params, dst); - } - break; - case GGML_UNARY_OP_HARDSIGMOID: - { - ggml_compute_forward_hardsigmoid(params, dst); - } - break; - case GGML_UNARY_OP_EXP: - { - ggml_compute_forward_exp(params, dst); - } - break; - case GGML_UNARY_OP_FLOOR: - { - ggml_compute_forward_floor(params, dst); - } - break; - case GGML_UNARY_OP_CEIL: - { - ggml_compute_forward_ceil(params, dst); - } - break; - case GGML_UNARY_OP_ROUND: - { - ggml_compute_forward_round(params, dst); - } - break; - case GGML_UNARY_OP_TRUNC: - { - ggml_compute_forward_trunc(params, dst); - } - break; - case GGML_UNARY_OP_XIELU: - { - ggml_compute_forward_xielu(params, dst); - } - break; - case GGML_UNARY_OP_EXPM1: - { - ggml_compute_forward_expm1(params, dst); - } - break; - case GGML_UNARY_OP_SOFTPLUS: - { - ggml_compute_forward_softplus(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -//ggml_compute_forward_glu - -void ggml_compute_forward_glu(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_glu_op op = ggml_get_glu_op(dst); - - switch (op) { - case GGML_GLU_OP_REGLU: - { - ggml_compute_forward_reglu(params, dst); - } - break; - case GGML_GLU_OP_GEGLU: - { - ggml_compute_forward_geglu(params, dst); - } - break; - case GGML_GLU_OP_SWIGLU: - { - ggml_compute_forward_swiglu(params, dst); - } - break; - case GGML_GLU_OP_SWIGLU_OAI: - { - 
ggml_compute_forward_swiglu_oai(params, dst); - } - break; - case GGML_GLU_OP_GEGLU_ERF: - { - ggml_compute_forward_geglu_erf(params, dst); - } - break; - case GGML_GLU_OP_GEGLU_QUICK: - { - ggml_compute_forward_geglu_quick(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_get_rel_pos - -static void ggml_compute_forward_get_rel_pos_f16(const ggml_compute_params * params, ggml_tensor * dst) { - GGML_UNUSED(params); - - const ggml_tensor * src0 = dst->src[0]; - - // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/image_encoder.py#L292-L322 - - GGML_TENSOR_UNARY_OP_LOCALS - - const int64_t w = ne1; - - ggml_fp16_t * src0_data = (ggml_fp16_t *) src0->data; - ggml_fp16_t * dst_data = (ggml_fp16_t *) dst->data; - - for (int64_t i2 = 0; i2 < ne2; ++i2) { - for (int64_t i1 = 0; i1 < ne1; ++i1) { - const int64_t pos = (w - i1 - 1) + i2; - for (int64_t i0 = 0; i0 < ne0; ++i0) { - dst_data[i2 * ne1 * ne0 + i1 * ne0 + i0] = src0_data[pos * ne00 + i0]; - } - } - } -} - -void ggml_compute_forward_get_rel_pos(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F16: - case GGML_TYPE_BF16: - { - ggml_compute_forward_get_rel_pos_f16(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_add_rel_pos - -static void ggml_compute_forward_add_rel_pos_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - const ggml_tensor * src2 = dst->src[2]; - - const bool inplace = (bool) ((int32_t *) dst->op_params)[0]; - if (!inplace) { - if (params->ith == 0) { - memcpy((char *) dst->data, (char *) src0->data, ggml_nbytes(dst)); - } - ggml_barrier(params->threadpool); - } - // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/image_encoder.py#L357-L359 - - float * src1_data = (float *) src1->data; - float * src2_data = (float *) src2->data; - float * dst_data = (float *) dst->data; - - const int64_t ne10 = src1->ne[0]; - const int64_t ne11 = src1->ne[1]; - const int64_t ne12 = src1->ne[2]; - const int64_t ne13 = src1->ne[3]; - - const int ith = params->ith; - const int nth = params->nth; - - // total patches in dst - const int np = ne13; - - // patches per thread - const int dp = (np + nth - 1) / nth; - - // patch range for this thread - const int ip0 = dp * ith; - const int ip1 = MIN(ip0 + dp, np); - - for (int64_t i13 = ip0; i13 < ip1; ++i13) { - for (int64_t i12 = 0; i12 < ne12; ++i12) { - for (int64_t i11 = 0; i11 < ne11; ++i11) { - const int64_t jp1 = i13 * ne12 * ne11 * ne10 + i12 * ne11 * ne10 + i11 * ne10; - for (int64_t i10 = 0; i10 < ne10; ++i10) { - const int64_t jp0 = jp1 + i10; - const float src1_e = src1_data[jp0]; - const float src2_e = src2_data[jp0]; - - const int64_t jdh = jp0 * ne10; - const int64_t jdw = jdh - (ne10 - 1) * i10; - - for (int64_t j = 0; j < ne10; ++j) { - dst_data[jdh + j] += src2_e; - dst_data[jdw + j * ne10] += src1_e; - } - } - } - } - } -} - -void ggml_compute_forward_add_rel_pos(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_add_rel_pos_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_rwkv_wkv6 - -static void 
ggml_compute_forward_rwkv_wkv6_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const int64_t T = dst->src[1]->ne[2]; - const int64_t C = dst->ne[0]; - const int64_t HEADS = dst->src[1]->ne[1]; - const int64_t n_seqs = dst->src[5]->ne[1]; - const int64_t head_size = C / HEADS; - - float * dst_data = (float *) dst->data; - float * state = ((float *) dst->data) + C * T; - - const int ith = params->ith; - const int nth = params->nth; - - if (ith >= HEADS) { - return; - } - - const int h_start = (HEADS * ith) / nth; - const int h_end = ((HEADS * (ith + 1)) / nth < HEADS) ? (HEADS * (ith + 1)) / nth : HEADS; - - float * k = (float *) dst->src[0]->data; - float * v = (float *) dst->src[1]->data; - float * r = (float *) dst->src[2]->data; - float * time_faaaa = (float *) dst->src[3]->data; - float * time_decay = (float *) dst->src[4]->data; - - size_t t_stride = HEADS * head_size; // Same to C - - size_t h_stride = C / HEADS; - GGML_ASSERT(C % HEADS == 0); // C must be divisible by HEADS - size_t h_stride_2d = head_size * head_size; - - if (ith == 0) { - memset(dst_data, 0, T * C * sizeof(float)); - } - ggml_barrier(params->threadpool); - -#if defined(__AVX__) && !defined(__AVX512F__) -# define GGML_F32X GGML_F32x8 -# define GGML_F32X_SET1 GGML_F32x8_SET1 -# define GGML_F32X_LOAD GGML_F32x8_LOAD -# define GGML_F32X_STORE GGML_F32x8_STORE -# define GGML_F32X_MUL GGML_F32x8_MUL -# define GGML_F32X_FMA GGML_F32x8_FMA -# define WKV_VECTOR_SIZE 8 -#elif defined(__AVX512F__) -# define GGML_F32X GGML_F32x16 -# define GGML_F32X_SET1 GGML_F32x16_SET1 -# define GGML_F32X_LOAD GGML_F32x16_LOAD -# define GGML_F32X_STORE GGML_F32x16_STORE -# define GGML_F32X_MUL GGML_F32x16_MUL -# define GGML_F32X_FMA GGML_F32x16_FMA -# define WKV_VECTOR_SIZE 16 -#elif defined(__ARM_FEATURE_SVE) && defined(__aarch64__) -# define GGML_F32X GGML_F32xt -# define GGML_F32X_SET1 GGML_F32xt_SET1 -# define GGML_F32X_LOAD GGML_F32xt_LOAD -# define GGML_F32X_STORE GGML_F32xt_STORE -# define GGML_F32X_MUL GGML_F32xt_MUL -# define GGML_F32X_FMA GGML_F32xt_FMA -# define WKV_VECTOR_SIZE 8 -#elif defined(__ARM_NEON) && defined(__aarch64__) -# define GGML_F32X GGML_F32x4 -# define GGML_F32X_SET1 GGML_F32x4_SET1 -# define GGML_F32X_LOAD GGML_F32x4_LOAD -# define GGML_F32X_STORE GGML_F32x4_STORE -# define GGML_F32X_MUL GGML_F32x4_MUL -# define GGML_F32X_FMA GGML_F32x4_FMA -# define WKV_VECTOR_SIZE 4 -#endif - -#ifdef WKV_VECTOR_SIZE - int wkv_vector_size; -# if defined(__ARM_FEATURE_SVE) - wkv_vector_size = svcntw(); -# else - wkv_vector_size = WKV_VECTOR_SIZE; -# endif - const int64_t vec_count = head_size / wkv_vector_size; - - for (int64_t t = 0; t < T; t++) { - size_t t_offset = t * t_stride; - size_t state_offset = head_size * C * (t / (T / n_seqs)); - float * state_cur = state + state_offset; - float * state_prev = t % (T / n_seqs) ? 
state_cur : (float *) dst->src[5]->data + state_offset; - - for (int64_t h = h_start; h < h_end; h++) { - size_t h_offset = h * h_stride; - size_t t_h_offset = t_offset + h_offset; - size_t h_2d_offset = h * h_stride_2d; - - for (int64_t i = 0; i < head_size; i++) { - size_t t_h_i_offset = t_h_offset + i; - size_t h_i_offset = h_offset + i; - size_t h_2d_i_offset = h_2d_offset + i * h_stride; - - float k_val = k[t_h_i_offset]; - float r_val = r[t_h_i_offset]; - float time_faaaa_val = time_faaaa[h_i_offset]; - float time_decay_val = time_decay[t_h_i_offset]; - - // Broadcast scalar values to vectors - GGML_F32X k_vec = GGML_F32X_SET1(k_val); - GGML_F32X r_vec = GGML_F32X_SET1(r_val); - GGML_F32X time_faaaa_vec = GGML_F32X_SET1(time_faaaa_val); - GGML_F32X time_decay_vec = GGML_F32X_SET1(time_decay_val); - - for (int64_t j = 0; j < vec_count; j++) { - size_t base_j = j * wkv_vector_size; - size_t t_h_j_offset = t_h_offset + base_j; - size_t h_2d_i_j_offset = h_2d_i_offset + base_j; - - // Load x elements at once - GGML_F32X v_vec = GGML_F32X_LOAD(&v[t_h_j_offset]); - GGML_F32X prev_state_vec = GGML_F32X_LOAD(&state_prev[h_2d_i_j_offset]); - GGML_F32X dst_vec = GGML_F32X_LOAD(&dst_data[t_h_j_offset]); - - // Compute kv = v * k - GGML_F32X kv_vec = GGML_F32X_MUL(v_vec, k_vec); - - // Compute temp = kv * time_faaaa + prev_state - GGML_F32X temp_vec = GGML_F32X_FMA(prev_state_vec, kv_vec, time_faaaa_vec); - - // Update dst: dst += temp * r - dst_vec = GGML_F32X_FMA(dst_vec, temp_vec, r_vec); - GGML_F32X_STORE(&dst_data[t_h_j_offset], dst_vec); - - // Update state: state = prev_state * time_decay + kv - GGML_F32X new_state_vec = GGML_F32X_FMA(kv_vec, prev_state_vec, time_decay_vec); - GGML_F32X_STORE(&state_cur[h_2d_i_j_offset], new_state_vec); - } - - // Handle remaining elements, this will not be used. - for (int64_t j = vec_count * wkv_vector_size; j < head_size; j++) { - size_t t_h_j_offset = t_h_offset + j; - size_t h_2d_i_j_offset = h_2d_i_offset + j; - float v_val = v[t_h_j_offset]; - float kv_val = v_val * k_val; - float prev_state_val = state_prev[h_2d_i_j_offset]; - float temp_val = kv_val * time_faaaa_val + prev_state_val; - dst_data[t_h_j_offset] += temp_val * r_val; - state_cur[h_2d_i_j_offset] = prev_state_val * time_decay_val + kv_val; - } - } - } - } - -#else - // basically fused operations: - // dst = r @ (time_faaaa * (k @ v) + state), - // state = time_decay * state + (k @ v), - // recursive through each token - for (int64_t t = 0; t < T; t++) { - size_t t_offset = t * t_stride; - size_t state_offset = head_size * C * (t / (T / n_seqs)); - float * state_cur = state + state_offset; - float * state_prev = t % (T / n_seqs) ? state_cur : (float *) dst->src[5]->data + state_offset; - - for (int64_t h = h_start; h < h_end; h++) { - size_t h_offset = h * h_stride; - size_t t_h_offset = t_offset + h_offset; - size_t h_2d_offset = h * h_stride_2d; - - for (int64_t i = 0; i < head_size; i++) { - size_t t_h_i_offset = t_h_offset + i; - size_t h_i_offset = h_offset + i; - size_t h_2d_i_offset = h_2d_offset + i * h_stride; - - float k_val = k[t_h_i_offset]; - float r_val = r[t_h_i_offset]; - float time_faaaa_val = time_faaaa[h_i_offset]; - // RWKV v6: different time_decay for each token. 
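// Per (head h, token t), with u = time_faaaa and w = time_decay, the scalar update for
// every pair (i, j) in [0, head_size) is:
//     kv          = k[t,h,i] * v[t,h,j]
//     dst[t,h,j] += r[t,h,i] * (u[h,i] * kv + S[h,i,j])
//     S[h,i,j]    = S[h,i,j] * w[t,h,i] + kv
// i.e. dst = r @ (u * outer(k, v) + S) and S' = w * S + outer(k, v), as in the comment above.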
- float time_decay_val = time_decay[t_h_i_offset]; - - for (int64_t j = 0; j < head_size; j++) { - size_t t_h_j_offset = t_h_offset + j; - size_t h_2d_i_j_offset = h_2d_i_offset + j; - - float v_val = v[t_h_j_offset]; - float kv_val = v_val * k_val; - float prev_state_val = state_prev[h_2d_i_j_offset]; - float temp_val = kv_val * time_faaaa_val + prev_state_val; - dst_data[t_h_j_offset] += temp_val * r_val; - state_cur[h_2d_i_j_offset] = prev_state_val * time_decay_val + kv_val; - } - } - } - } -#endif -} - -void ggml_compute_forward_rwkv_wkv6(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_rwkv_wkv6_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_gla - -static void ggml_compute_forward_gla_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const int64_t T = dst->src[1]->ne[2]; - const int64_t C = dst->ne[0]; - const int64_t HEADS = dst->src[1]->ne[1]; - const int64_t n_seqs = dst->src[4]->ne[1]; - const int64_t head_size = C / HEADS; - const float scale = ggml_get_op_params_f32(dst, 0); - - float * dst_data = (float *) dst->data; - float * state = ((float *) dst->data) + C * T; - - const int ith = params->ith; - const int nth = params->nth; - - if (ith >= HEADS) { - return; - } - - const int h_start = (HEADS * ith) / nth; - const int h_end = ((HEADS * (ith + 1)) / nth < HEADS) ? (HEADS * (ith + 1)) / nth : HEADS; - - float * k = (float *) dst->src[0]->data; - float * v = (float *) dst->src[1]->data; - float * q = (float *) dst->src[2]->data; - float * g = (float *) dst->src[3]->data; - - size_t t_stride = HEADS * head_size; // Same to C - - size_t h_stride = C / HEADS; - GGML_ASSERT(C % HEADS == 0); // C must be divisible by HEADS - size_t h_stride_2d = head_size * head_size; - - if (ith == 0) { - memset(dst_data, 0, T * C * sizeof(float)); - } - ggml_barrier(params->threadpool); - -#if defined(__AVX__) && !defined(__AVX512F__) -# define GGML_F32X GGML_F32x8 -# define GGML_F32X_SET1 GGML_F32x8_SET1 -# define GGML_F32X_LOAD GGML_F32x8_LOAD -# define GGML_F32X_STORE GGML_F32x8_STORE -# define GGML_F32X_MUL GGML_F32x8_MUL -# define GGML_F32X_FMA GGML_F32x8_FMA -# define GLA_VECTOR_SIZE 8 -#elif defined(__AVX512F__) -# define GGML_F32X GGML_F32x16 -# define GGML_F32X_SET1 GGML_F32x16_SET1 -# define GGML_F32X_LOAD GGML_F32x16_LOAD -# define GGML_F32X_STORE GGML_F32x16_STORE -# define GGML_F32X_MUL GGML_F32x16_MUL -# define GGML_F32X_FMA GGML_F32x16_FMA -# define GLA_VECTOR_SIZE 16 -#elif defined(__ARM_FEATURE_SVE) && defined(__aarch64__) -# define GGML_F32X GGML_F32xt -# define GGML_F32X_SET1 GGML_F32xt_SET1 -# define GGML_F32X_LOAD GGML_F32xt_LOAD -# define GGML_F32X_STORE GGML_F32xt_STORE -# define GGML_F32X_MUL GGML_F32xt_MUL -# define GGML_F32X_FMA GGML_F32xt_FMA -# define GLA_VECTOR_SIZE 8 -#elif defined(__ARM_NEON) && defined(__aarch64__) -# define GGML_F32X GGML_F32x4 -# define GGML_F32X_SET1 GGML_F32x4_SET1 -# define GGML_F32X_LOAD GGML_F32x4_LOAD -# define GGML_F32X_STORE GGML_F32x4_STORE -# define GGML_F32X_MUL GGML_F32x4_MUL -# define GGML_F32X_FMA GGML_F32x4_FMA -# define GLA_VECTOR_SIZE 4 -#endif - -#ifdef GLA_VECTOR_SIZE - int gla_vector_size; -# if defined(__ARM_FEATURE_SVE) - gla_vector_size = svcntw(); -# else - gla_vector_size = GLA_VECTOR_SIZE; -# endif - const int64_t vec_count = head_size / gla_vector_size; - - for (int64_t t = 0; t < T; t++) { - size_t t_offset = t * 
t_stride; - size_t state_offset = head_size * C * (t / (T / n_seqs)); - float * state_cur = state + state_offset; - float * state_prev = t % (T / n_seqs) ? state_cur : (float *) dst->src[4]->data + state_offset; - - for (int64_t h = h_start; h < h_end; h++) { - size_t h_offset = h * h_stride; - size_t t_h_offset = t_offset + h_offset; - size_t h_2d_offset = h * h_stride_2d; - - for (int64_t i = 0; i < head_size; i++) { - size_t t_h_i_offset = t_h_offset + i; - size_t h_2d_i_offset = h_2d_offset + i * h_stride; - - float k_val = k[t_h_i_offset]; - float q_val = q[t_h_i_offset] * scale; - float g_val = g[t_h_i_offset]; - - // Broadcast scalar values to vectors - GGML_F32X k_vec = GGML_F32X_SET1(k_val); - GGML_F32X q_vec = GGML_F32X_SET1(q_val); - GGML_F32X g_vec = GGML_F32X_SET1(g_val); - - for (int64_t j = 0; j < vec_count; j++) { - size_t base_j = j * gla_vector_size; - size_t t_h_j_offset = t_h_offset + base_j; - size_t h_2d_i_j_offset = h_2d_i_offset + base_j; - - // Load x elements at once - GGML_F32X v_vec = GGML_F32X_LOAD(&v[t_h_j_offset]); - GGML_F32X prev_state_vec = GGML_F32X_LOAD(&state_prev[h_2d_i_j_offset]); - GGML_F32X dst_vec = GGML_F32X_LOAD(&dst_data[t_h_j_offset]); - - // Compute kv = v * k - GGML_F32X kv_vec = GGML_F32X_MUL(v_vec, k_vec); - - // Compute temp = prev_state * g + kv - GGML_F32X temp_vec = GGML_F32X_FMA(kv_vec, prev_state_vec, g_vec); - - // Update dst: dst += temp * q - dst_vec = GGML_F32X_FMA(dst_vec, temp_vec, q_vec); - GGML_F32X_STORE(&dst_data[t_h_j_offset], dst_vec); - - // Update state - GGML_F32X_STORE(&state_cur[h_2d_i_j_offset], temp_vec); - } - - // Handle remaining elements, this will not be used. - for (int64_t j = vec_count * gla_vector_size; j < head_size; j++) { - size_t t_h_j_offset = t_h_offset + j; - size_t h_2d_i_j_offset = h_2d_i_offset + j; - float v_val = v[t_h_j_offset]; - float kv_val = v_val * k_val; - float prev_state_val = state_prev[h_2d_i_j_offset]; - float temp_val = kv_val + prev_state_val * g_val; - dst_data[t_h_j_offset] += temp_val * q_val; - state_cur[h_2d_i_j_offset] = temp_val; - } - } - } - } - -#else - for (int64_t t = 0; t < T; t++) { - size_t t_offset = t * t_stride; - size_t state_offset = head_size * C * (t / (T / n_seqs)); - float * state_cur = state + state_offset; - float * state_prev = t % (T / n_seqs) ? 
state_cur : (float *) dst->src[4]->data + state_offset; - - for (int64_t h = h_start; h < h_end; h++) { - size_t h_offset = h * h_stride; - size_t t_h_offset = t_offset + h_offset; - size_t h_2d_offset = h * h_stride_2d; - - for (int64_t i = 0; i < head_size; i++) { - size_t t_h_i_offset = t_h_offset + i; - size_t h_2d_i_offset = h_2d_offset + i * h_stride; - - float k_val = k[t_h_i_offset]; - float q_val = q[t_h_i_offset] * scale; - float g_val = g[t_h_i_offset]; - - for (int64_t j = 0; j < head_size; j++) { - size_t t_h_j_offset = t_h_offset + j; - size_t h_2d_i_j_offset = h_2d_i_offset + j; - - float v_val = v[t_h_j_offset]; - float kv_val = v_val * k_val; - float prev_state_val = state_prev[h_2d_i_j_offset]; - float temp_val = prev_state_val * g_val + kv_val; - dst_data[t_h_j_offset] += temp_val * q_val; - state_cur[h_2d_i_j_offset] = temp_val; - } - } - } - } -#endif -} - -void ggml_compute_forward_gla(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_gla_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -static void ggml_compute_forward_solve_tri_f32(const struct ggml_compute_params * params, struct ggml_tensor * dst) { - const struct ggml_tensor * src0 = dst->src[0]; // A (lower triangular) - const struct ggml_tensor * src1 = dst->src[1]; // B (RHS) - - GGML_TENSOR_BINARY_OP_LOCALS; - - GGML_ASSERT(src0->type == GGML_TYPE_F32); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); - - GGML_ASSERT(ne00 == ne01); // A must be square - GGML_ASSERT(ne0 == ne10); // solution cols == B cols - GGML_ASSERT(ne1 == ne11); // solution rows == B rows - - GGML_ASSERT(ne02 == ne12 && ne12 == ne2); - GGML_ASSERT(ne03 == ne13 && ne13 == ne3); - - const int ith = params->ith; - const int nth = params->nth; - - const int64_t k = ne10; // number of RHS columns - const int64_t n = ne11; // A is n×n - const int64_t nr = - ne02 * ne03 * k; // we're parallelizing on columns here, so seq x token x column will be the unit - - // chunks per thread - const int64_t dr = (nr + nth - 1) / nth; - - // chunk range for this thread - const int64_t ir0 = dr * ith; - const int64_t ir1 = MIN(ir0 + dr, nr); - - const float * A = (const float *) src0->data; // [n, n, B1, B2] - const float * B = (const float *) src1->data; // [n, k, B1, B2] - float * X = (float *) dst->data; // [n, k, B1, B2] - - for (int64_t ir = ir0; ir < ir1; ++ir) { - const int64_t i03 = ir / (ne02 * k); - const int64_t i02 = (ir - i03 * ne02 * k) / k; - const int64_t i01 = (ir - i03 * ne02 * k - i02 * k); - - const float * A_batch = A + i02 * nb02 / sizeof(float) + i03 * nb03 / sizeof(float); - const float * B_batch = B + i02 * nb12 / sizeof(float) + i03 * nb13 / sizeof(float); - - float * X_batch = X + i02 * nb2 / sizeof(float) + i03 * nb3 / sizeof(float); - - for (int64_t i00 = 0; i00 < n; ++i00) { - float sum = 0.0f; - for (int64_t t = 0; t < i00; ++t) { - sum += A_batch[i00 * n + t] * X_batch[t * k + i01]; - } - - const float diag = A_batch[i00 * n + i00]; - GGML_ASSERT(diag != 0.0f && "Zero diagonal in triangular matrix"); - X_batch[i00 * k + i01] = (B_batch[i00 * k + i01] - sum) / diag; - } - } -} - -void ggml_compute_forward_solve_tri(const struct ggml_compute_params * params, struct ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - if (src0->type == GGML_TYPE_F32 && src1->type == 
GGML_TYPE_F32) { - ggml_compute_forward_solve_tri_f32(params, dst); - } else { - GGML_ABORT("fatal error"); - } -} - -// ggml_compute_forward_rwkv_wkv7 - -static void ggml_compute_forward_rwkv_wkv7_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const int64_t T = dst->src[1]->ne[2]; - const int64_t C = dst->ne[0]; - const int64_t HEADS = dst->src[1]->ne[1]; - const int64_t n_seqs = dst->src[6]->ne[1]; - const int64_t head_size = C / HEADS; - - float * dst_data = (float *) dst->data; - float * state = ((float *) dst->data) + C * T; - - const int ith = params->ith; - const int nth = params->nth; - - if (ith >= HEADS) { - return; - } - - const int h_start = (HEADS * ith) / nth; - const int h_end = ((HEADS * (ith + 1)) / nth < HEADS) ? (HEADS * (ith + 1)) / nth : HEADS; - - float * r = (float *) dst->src[0]->data; - float * w = (float *) dst->src[1]->data; - float * k = (float *) dst->src[2]->data; - float * v = (float *) dst->src[3]->data; - float * a = (float *) dst->src[4]->data; - float * b = (float *) dst->src[5]->data; - - int64_t t_stride = HEADS * head_size; // Same to C - - int64_t h_stride = C / HEADS; - GGML_ASSERT(C % HEADS == 0); // C must be divisible by HEADS - int64_t h_stride_2d = head_size * head_size; - -#if defined(GGML_SIMD) -# if defined(__ARM_FEATURE_SVE) || defined(__riscv_v_intrinsic) - // scalar Route to scalar implementation //TODO: Write SVE code and RVV code - for (int64_t t = 0; t < T; t++) { - int64_t t_offset = t * t_stride; - int64_t state_offset = head_size * C * (t / (T / n_seqs)); - float * state_cur = state + state_offset; - float * state_prev = t % (T / n_seqs) ? state_cur : (float *) dst->src[6]->data + state_offset; - - for (int64_t h = h_start; h < h_end; h++) { - int64_t h_offset = h * h_stride; - int64_t t_h_offset = t_offset + h_offset; - int64_t h_2d_offset = h * h_stride_2d; - - for (int64_t i = 0; i < head_size; i++) { - int64_t t_h_i_offset = t_h_offset + i; - int64_t h_2d_i_offset = h_2d_offset + i * h_stride; - - float v_val = v[t_h_i_offset]; - - float sa = 0, result = 0; - for (int64_t j = 0; j < head_size; j++) { - sa += a[t_h_offset + j] * state_prev[h_2d_i_offset + j]; - } - - for (int64_t j = 0; j < head_size; j++) { - int64_t t_h_j_offset = t_h_offset + j; - int64_t h_2d_i_j_offset = h_2d_i_offset + j; - - float r_val = r[t_h_j_offset]; - float w_val = w[t_h_j_offset]; - float k_val = k[t_h_j_offset]; - float b_val = b[t_h_j_offset]; - float kv_val = v_val * k_val; - float prev_state_val = state_prev[h_2d_i_j_offset]; - state_cur[h_2d_i_j_offset] = prev_state_val * w_val + kv_val + sa * b_val; - result += state_cur[h_2d_i_j_offset] * r_val; - } - dst_data[t_h_i_offset] = result; - } - } - } -# else - for (int64_t t = 0; t < T; t++) { - int64_t t_offset = t * t_stride; - int64_t state_offset = head_size * C * (t / (T / n_seqs)); - float * state_cur = state + state_offset; - float * state_prev = t % (T / n_seqs) ? 
state_cur : (float *) dst->src[6]->data + state_offset; - - for (int64_t h = h_start; h < h_end; h++) { - int64_t h_offset = h * h_stride; - int64_t t_h_offset = t_offset + h_offset; - int64_t h_2d_offset = h * h_stride_2d; - - for (int64_t ii = 0; ii < head_size; ii++) { - int64_t t_h_i_offset = t_h_offset + ii; - int64_t h_2d_i_offset = h_2d_offset + ii * h_stride; - - GGML_F32_VEC v_vec = GGML_F32_VEC_SET1(v[t_h_i_offset]); - - float sa = 0; - { - GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; - GGML_F32_VEC ax[GGML_F32_ARR]; - GGML_F32_VEC ay[GGML_F32_ARR]; - for (int64_t j = 0; j < head_size; j += GGML_F32_STEP) { - for (int64_t kk = 0; kk < GGML_F32_ARR; kk++) { - ax[kk] = GGML_F32_VEC_LOAD(&a[t_h_offset + j + kk * GGML_F32_EPR]); - ay[kk] = GGML_F32_VEC_LOAD(&state_prev[h_2d_i_offset + j + kk * GGML_F32_EPR]); - sum[kk] = GGML_F32_VEC_FMA(sum[kk], ax[kk], ay[kk]); - } - } - GGML_F32_VEC_REDUCE(sa, sum); - } - - GGML_F32_VEC sa_vec = GGML_F32_VEC_SET1(sa); - - int64_t j = 0; - GGML_F32_VEC result_vec[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; - for (; j < head_size; j += GGML_F32_STEP) { - for (int64_t kk = 0; kk < GGML_F32_ARR; kk++) { - int64_t t_h_j_offset = t_h_offset + j + kk * GGML_F32_EPR; - int64_t h_2d_i_j_offset = h_2d_i_offset + j + kk * GGML_F32_EPR; - - GGML_F32_VEC r_vec = GGML_F32_VEC_LOAD(&r[t_h_j_offset]); - GGML_F32_VEC w_vec = GGML_F32_VEC_LOAD(&w[t_h_j_offset]); - GGML_F32_VEC k_vec = GGML_F32_VEC_LOAD(&k[t_h_j_offset]); - GGML_F32_VEC b_vec = GGML_F32_VEC_LOAD(&b[t_h_j_offset]); - - k_vec = GGML_F32_VEC_MUL(v_vec, k_vec); - - GGML_F32_VEC state_vec = GGML_F32_VEC_LOAD(&state_prev[h_2d_i_j_offset]); - // kv + s * decay + sa * b - state_vec = GGML_F32_VEC_FMA(k_vec, state_vec, w_vec); - state_vec = GGML_F32_VEC_FMA(state_vec, sa_vec, b_vec); - GGML_F32_VEC_STORE(&state_cur[h_2d_i_j_offset], state_vec); - - result_vec[kk] = GGML_F32_VEC_FMA(result_vec[kk], state_vec, r_vec); - } - } - GGML_F32_VEC_REDUCE(dst_data[t_h_i_offset], result_vec); - - // There shouldn't be left-overs though. - for (; j < head_size; j++) { - int64_t t_h_j_offset = t_h_offset + j; - int64_t h_2d_i_j_offset = h_2d_i_offset + j; - - float r_val = r[t_h_j_offset]; - float w_val = w[t_h_j_offset]; - float k_val = k[t_h_j_offset]; - float b_val = b[t_h_j_offset]; - float kv_val = v[t_h_i_offset] * k_val; - - float prev_state_val = state_prev[h_2d_i_j_offset]; - state_cur[h_2d_i_j_offset] = prev_state_val * w_val + kv_val + sa * b_val; - dst_data[t_h_i_offset] += state_cur[h_2d_i_j_offset] * r_val; - } - } - } - } -# endif -#else - for (int64_t t = 0; t < T; t++) { - int64_t t_offset = t * t_stride; - int64_t state_offset = head_size * C * (t / (T / n_seqs)); - float * state_cur = state + state_offset; - float * state_prev = t % (T / n_seqs) ? 
state_cur : (float *) dst->src[6]->data + state_offset; - - for (int64_t h = h_start; h < h_end; h++) { - int64_t h_offset = h * h_stride; - int64_t t_h_offset = t_offset + h_offset; - int64_t h_2d_offset = h * h_stride_2d; - - for (int64_t i = 0; i < head_size; i++) { - int64_t t_h_i_offset = t_h_offset + i; - int64_t h_2d_i_offset = h_2d_offset + i * h_stride; - - float v_val = v[t_h_i_offset]; - - float sa = 0, result = 0; - for (int64_t j = 0; j < head_size; j++) { - sa += a[t_h_offset + j] * state_prev[h_2d_i_offset + j]; - } - - for (int64_t j = 0; j < head_size; j++) { - int64_t t_h_j_offset = t_h_offset + j; - int64_t h_2d_i_j_offset = h_2d_i_offset + j; - - float r_val = r[t_h_j_offset]; - float w_val = w[t_h_j_offset]; - float k_val = k[t_h_j_offset]; - float b_val = b[t_h_j_offset]; - float kv_val = v_val * k_val; - float prev_state_val = state_prev[h_2d_i_j_offset]; - state_cur[h_2d_i_j_offset] = prev_state_val * w_val + kv_val + sa * b_val; - result += state_cur[h_2d_i_j_offset] * r_val; - } - dst_data[t_h_i_offset] = result; - } - } - } -#endif -} - -void ggml_compute_forward_rwkv_wkv7(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_rwkv_wkv7_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_map_custom1 - -void ggml_compute_forward_map_custom1(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * a = dst->src[0]; - - struct ggml_map_custom1_op_params p; - memcpy(&p, dst->op_params, sizeof(p)); - - p.fun(dst, a, params->ith, params->nth, p.userdata); -} - -// ggml_compute_forward_map_custom2 - -void ggml_compute_forward_map_custom2(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * a = dst->src[0]; - const ggml_tensor * b = dst->src[1]; - - struct ggml_map_custom2_op_params p; - memcpy(&p, dst->op_params, sizeof(p)); - - p.fun(dst, a, b, params->ith, params->nth, p.userdata); -} - -// ggml_compute_forward_map_custom3 - -void ggml_compute_forward_map_custom3(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * a = dst->src[0]; - const ggml_tensor * b = dst->src[1]; - const ggml_tensor * c = dst->src[2]; - - struct ggml_map_custom3_op_params p; - memcpy(&p, dst->op_params, sizeof(p)); - - p.fun(dst, a, b, c, params->ith, params->nth, p.userdata); -} - -// ggml_compute_forward_custom - -void ggml_compute_forward_custom(const struct ggml_compute_params * params, struct ggml_tensor * dst) { - struct ggml_custom_op_params p; - memcpy(&p, dst->op_params, sizeof(p)); - - p.fun(dst, params->ith, params->nth, p.userdata); -} - -// ggml_compute_forward_cross_entropy_loss - -static void ggml_compute_forward_cross_entropy_loss_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(src0->type == GGML_TYPE_F32); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT(src0->nb[0] == ggml_type_size(src0->type)); - GGML_ASSERT(src1->nb[0] == ggml_type_size(src1->type)); - GGML_ASSERT(ggml_are_same_shape(src0, src1)); - GGML_ASSERT(ggml_is_scalar(dst)); - GGML_ASSERT(dst->type == GGML_TYPE_F32); - - // TODO: handle transposed/permuted matrices - const int64_t nc = src0->ne[0]; - const int64_t nr = ggml_nrows(src0); - - const int ith = params->ith; - const int nth = params->nth; - - float * sums = (float *) params->wdata; - 
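// This computes   loss = -1/nr * sum over rows of sum_c src1[c] * log_softmax(src0)[c].
// wdata layout: sums[0..nth) holds one partial sum per thread, followed by nth scratch rows
// of nc floats (st) used to hold log_softmax of the current row; hence the
// wsize >= sizeof(float) * (nth + nth*nc) assertion below. Thread 0 reduces sums and applies
// the -1/nr factor at the end.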
float * st = ((float *) params->wdata) + nth + ith * nc; - float sum_thread = 0.0f; - - GGML_ASSERT(params->wsize >= sizeof(float) * (nth + nth * nc)); - - // rows per thread - const int64_t dr = (nr + nth - 1) / nth; - - // row range for this thread - const int64_t ir0 = dr * ith; - const int64_t ir1 = MIN(ir0 + dr, nr); - - for (int64_t i1 = ir0; i1 < ir1; ++i1) { - const float * s0 = (const float *) ((const char *) src0->data + i1 * src0->nb[1]); - const float * s1 = (const float *) ((const char *) src1->data + i1 * src1->nb[1]); - -#ifndef NDEBUG - for (int64_t i = 0; i < nc; ++i) { - //printf("p[%d] = %f\n", i, p[i]); - assert(!isnan(s0[i])); - assert(!isnan(s1[i])); - } -#endif - - float max = -INFINITY; - ggml_vec_max_f32(nc, &max, s0); - const ggml_float sum_softmax = ggml_vec_log_soft_max_f32(nc, st, s0, max); - assert(sum_softmax >= 0.0); - - ggml_vec_add1_f32(nc, st, st, -sum_softmax); - ggml_vec_mul_f32(nc, st, st, s1); - - float sum_st = 0.0f; - ggml_vec_sum_f32(nc, &sum_st, st); - sum_thread += sum_st; - -#ifndef NDEBUG - for (int64_t i = 0; i < nc; ++i) { - assert(!isnan(st[i])); - assert(!isinf(st[i])); - } -#endif - } - sums[ith] = sum_thread; - ggml_barrier(params->threadpool); - - if (ith == 0) { - float * dp = (float *) dst->data; - ggml_vec_sum_f32(nth, dp, sums); - dp[0] *= -1.0f / (float) nr; - } -} - -void ggml_compute_forward_cross_entropy_loss(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_cross_entropy_loss_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_cross_entropy_loss_back - -static void ggml_compute_forward_cross_entropy_loss_back_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * grad = dst->src[0]; // gradient of forward pass output - const ggml_tensor * src0f = dst->src[1]; // src0 of forward pass - const ggml_tensor * src1f = dst->src[2]; // src1 of forward pass - - GGML_ASSERT(ggml_is_contiguous(dst)); - GGML_ASSERT(ggml_is_contiguous(src0f)); - GGML_ASSERT(ggml_is_contiguous(src1f)); - GGML_ASSERT(ggml_is_contiguous(grad)); - GGML_ASSERT(ggml_are_same_shape(src0f, src1f) && ggml_are_same_shape(src0f, dst)); - - const int64_t ith = params->ith; - const int64_t nth = params->nth; - - // TODO: handle transposed/permuted matrices - const int64_t nc = src0f->ne[0]; - const int64_t nr = ggml_nrows(src0f); - - // rows per thread - const int64_t dr = (nr + nth - 1) / nth; - - // row range for this thread - const int64_t ir0 = dr * ith; - const int64_t ir1 = MIN(ir0 + dr, nr); - - const float d_by_nr = ((const float *) grad->data)[0] / (float) nr; - - for (int64_t i1 = ir0; i1 < ir1; i1++) { - float * ds0 = (float *) ((char *) dst->data + i1 * dst->nb[1]); - const float * s0 = (const float *) ((const char *) src0f->data + i1 * src0f->nb[1]); - const float * s1 = (const float *) ((const char *) src1f->data + i1 * src1f->nb[1]); - -#ifndef NDEBUG - for (int64_t i = 0; i < nc; ++i) { - //printf("p[%d] = %f\n", i, p[i]); - assert(!isnan(s0[i])); - assert(!isnan(s1[i])); - } -#endif - - // soft_max - float max = -INFINITY; - ggml_vec_max_f32(nc, &max, s0); - const ggml_float sum = ggml_vec_soft_max_f32(nc, ds0, s0, max); - assert(sum > 0.0); - ggml_vec_scale_f32(nc, ds0, 1.0 / sum); - - // grad(src0f) = (softmax(src0f) - src1f) * grad(cross_entropy_loss(src0f, src1f)) / nr - ggml_vec_sub_f32(nc, ds0, ds0, s1); - ggml_vec_scale_f32(nc, ds0, 
d_by_nr); - -#ifndef NDEBUG - for (int64_t i = 0; i < nc; ++i) { - assert(!isnan(ds0[i])); - assert(!isinf(ds0[i])); - } -#endif - } -} - -void ggml_compute_forward_cross_entropy_loss_back(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_cross_entropy_loss_back_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -static void ggml_compute_forward_opt_step_adamw_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src0_grad = dst->src[1]; - const ggml_tensor * src0_grad_m = dst->src[2]; - const ggml_tensor * src0_grad_v = dst->src[3]; - const ggml_tensor * adamw_params = dst->src[4]; - - GGML_ASSERT(ggml_are_same_shape(src0, src0_grad)); - GGML_ASSERT(ggml_are_same_shape(src0, src0_grad_m)); - GGML_ASSERT(ggml_are_same_shape(src0, src0_grad_v)); - GGML_ASSERT(ggml_nelements(adamw_params) == 7); - - const int ith = params->ith; - const int nth = params->nth; - - const int nr = ggml_nrows(src0); - - GGML_TENSOR_UNARY_OP_LOCALS - GGML_ASSERT(nb00 == sizeof(float)); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - const float * adamw_params_ptr = ggml_get_data_f32(adamw_params); - - const float alpha = adamw_params_ptr[0]; - const float beta1 = adamw_params_ptr[1]; - const float beta2 = adamw_params_ptr[2]; - const float eps = adamw_params_ptr[3]; - const float wd = adamw_params_ptr[4]; - const float beta1h = adamw_params_ptr[5]; - const float beta2h = adamw_params_ptr[6]; - const float keep = 1.f - alpha * wd; - for (int ir = ir0; ir < ir1; ++ir) { - const int64_t i03 = ir / (ne02 * ne01); - const int64_t i02 = (ir - i03 * ne02 * ne01) / ne01; - const int64_t i01 = (ir - i03 * ne02 * ne01 - i02 * ne01); - - const size_t offset = i03 * nb03 + i02 * nb02 + i01 * nb01; - - float * w = (float *) ((char *) src0->data + offset); // weight - const float * g = (const float *) ((const char *) src0_grad->data + offset); // grad - float * m = (float *) ((char *) src0_grad_m->data + offset); - float * v = (float *) ((char *) src0_grad_v->data + offset); - - for (int i00 = 0; i00 < ne00; ++i00) { - m[i00] = m[i00] * beta1 + g[i00] * (1.0f - beta1); - v[i00] = v[i00] * beta2 + g[i00] * g[i00] * (1.0f - beta2); - - const float mh = m[i00] * beta1h; - const float vh = sqrtf(v[i00] * beta2h) + eps; - - // The weight decay is applied independently of the Adam momenta m and v. - // This is NOT equivalent to l2 regularization that adds w[i00]*w[i00] to the loss. 
- // See: https://arxiv.org/pdf/1711.05101v3.pdf - w[i00] = w[i00] * keep - alpha * mh / vh; - } - } -} - -void ggml_compute_forward_opt_step_adamw(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_opt_step_adamw_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -static void ggml_compute_forward_opt_step_sgd_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src0_grad = dst->src[1]; - const ggml_tensor * sgd_params = dst->src[2]; - - GGML_ASSERT(ggml_are_same_shape(src0, src0_grad)); - GGML_ASSERT(ggml_nelements(sgd_params) == 2); - - const int ith = params->ith; - const int nth = params->nth; - - const int nr = ggml_nrows(src0); - - GGML_TENSOR_UNARY_OP_LOCALS - GGML_ASSERT(nb00 == sizeof(float)); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - // using adamw param subset we care about - alpha, wd - could have a separate struct - const float * sgd_params_ptr = ggml_get_data_f32(sgd_params); - const float alpha = sgd_params_ptr[0]; - const float keep = 1.f - alpha * sgd_params_ptr[1]; - - for (int ir = ir0; ir < ir1; ++ir) { - const int64_t i03 = ir / (ne02 * ne01); - const int64_t i02 = (ir - i03 * ne02 * ne01) / ne01; - const int64_t i01 = (ir - i03 * ne02 * ne01 - i02 * ne01); - - const size_t offset = i03 * nb03 + i02 * nb02 + i01 * nb01; - - float * w = (float *) ((char *) src0->data + offset); // weight - const float * g = (const float *) ((const char *) src0_grad->data + offset); // grad - - for (int i00 = 0; i00 < ne00; ++i00) { - w[i00] = w[i00] * keep - alpha * g[i00]; - } - } -} - -void ggml_compute_forward_opt_step_sgd(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_opt_step_sgd_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error - sgd is F32 only"); - } - } -} - -#include "binary-ops.h" -#include "ggml-cpu.h" -#include "ggml-impl.h" -#include "ggml.h" -#include "ops.h" -#include "unary-ops.h" -#include "vec.h" - -#include -#include -#include -#include - -// ggml_compute_forward_dup - -static void ggml_compute_forward_dup_same_cont(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); - GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0)); - GGML_ASSERT(src0->type == dst->type); - - const size_t nb0 = ggml_type_size(src0->type); - - const int ith = params->ith; // thread index - const int nth = params->nth; // number of threads - - // parallelize by blocks - const int nk = ggml_nelements(src0) / ggml_blck_size(src0->type); - const int dr = (nk + nth - 1) / nth; - const int k0 = dr * ith; - const int k1 = MIN(k0 + dr, nk); - - if (k0 < k1) { - memcpy(((char *) dst->data + k0 * nb0), ((char *) src0->data + k0 * nb0), (k1 - k0) * nb0); - } -} - -template -static void ggml_compute_forward_dup_flt(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); - GGML_ASSERT(!ggml_is_quantized(src0->type) && !ggml_is_quantized(dst->type)); - - GGML_TENSOR_UNARY_OP_LOCALS - - const 
int ith = params->ith; // thread index - const int nth = params->nth; // number of threads - - // parallelize by rows - const int nr = ne01; - // number of rows per thread - const int dr = (nr + nth - 1) / nth; - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - // case: type & row size equal - if (src0->type == dst->type && ne00 == ne0 && nb00 == ggml_type_size(src0->type) && - nb0 == ggml_type_size(dst->type)) { - // copy by rows - const size_t rs = ne00 * nb00; - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = ir0; i01 < ir1; i01++) { - memcpy(((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3), - ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03), rs); - } - } - } - return; - } - - // case: dst tensor is contiguous - if (ggml_is_contiguous(dst)) { - if (nb00 == sizeof(src_t)) { - if constexpr (std::is_same_v) { - // same type - size_t id = 0; - const size_t rs = ne00 * nb00; - char * dst_ptr = (char *) dst->data; - - for (int i03 = 0; i03 < ne03; i03++) { - for (int i02 = 0; i02 < ne02; i02++) { - id += rs * ir0; - for (int i01 = ir0; i01 < ir1; i01++) { - const char * src0_ptr = (char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03; - memcpy(dst_ptr + id, src0_ptr, rs); - id += rs; - } - id += rs * (ne01 - ir1); - } - } - } else { - // casting between non-quantized types - size_t id = 0; - dst_t * dst_ptr = (dst_t *) dst->data; - - for (int i03 = 0; i03 < ne03; i03++) { - for (int i02 = 0; i02 < ne02; i02++) { - id += ne00 * ir0; - for (int i01 = ir0; i01 < ir1; i01++) { - const src_t * src0_ptr = - (src_t *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); - for (int i00 = 0; i00 < ne00; i00++) { - float tmp = type_conversion_table::to_f32(src0_ptr[i00]); - dst_ptr[id] = type_conversion_table::from_f32(tmp); - id++; - } - } - id += ne00 * (ne01 - ir1); - } - } - } - } else { - //printf("%s: this is not optimal - fix me\n", __func__); - - size_t id = 0; - dst_t * dst_ptr = (dst_t *) dst->data; - - for (int i03 = 0; i03 < ne03; i03++) { - for (int i02 = 0; i02 < ne02; i02++) { - id += ne00 * ir0; - for (int i01 = ir0; i01 < ir1; i01++) { - for (int i00 = 0; i00 < ne00; i00++) { - const src_t * src0_ptr = - (src_t *) ((char *) src0->data + i00 * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03); - - float tmp = type_conversion_table::to_f32(*src0_ptr); - dst_ptr[id] = type_conversion_table::from_f32(tmp); - id++; - } - } - id += ne00 * (ne01 - ir1); - } - } - } - return; - } - - // dst counters - int64_t i10 = 0; - int64_t i11 = 0; - int64_t i12 = 0; - int64_t i13 = 0; - - if constexpr (std::is_same_v) { - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - i10 += ne00 * ir0; - while (i10 >= ne0) { - i10 -= ne0; - if (++i11 == ne1) { - i11 = 0; - if (++i12 == ne2) { - i12 = 0; - if (++i13 == ne3) { - i13 = 0; - } - } - } - } - for (int64_t i01 = ir0; i01 < ir1; i01++) { - for (int64_t i00 = 0; i00 < ne00; i00++) { - const char * src0_ptr = - ((char *) src0->data + i00 * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03); - char * dst_ptr = ((char *) dst->data + i10 * nb0 + i11 * nb1 + i12 * nb2 + i13 * nb3); - - memcpy(dst_ptr, src0_ptr, sizeof(dst_t)); - - if (++i10 == ne00) { - i10 = 0; - if (++i11 == ne01) { - i11 = 0; - if (++i12 == ne02) { - i12 = 0; - if (++i13 == ne03) { - i13 = 0; - } - } - } - } - } - } - i10 += ne00 * (ne01 - ir1); - while (i10 >= ne0) { - i10 -= ne0; - if (++i11 == ne1) { - i11 = 0; - if 
(++i12 == ne2) { - i12 = 0; - if (++i13 == ne3) { - i13 = 0; - } - } - } - } - } - } - - } else { - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - i10 += ne00 * ir0; - while (i10 >= ne0) { - i10 -= ne0; - if (++i11 == ne1) { - i11 = 0; - if (++i12 == ne2) { - i12 = 0; - if (++i13 == ne3) { - i13 = 0; - } - } - } - } - for (int64_t i01 = ir0; i01 < ir1; i01++) { - for (int64_t i00 = 0; i00 < ne00; i00++) { - const char * src0_ptr = - ((char *) src0->data + i00 * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03); - char * dst_ptr = ((char *) dst->data + i10 * nb0 + i11 * nb1 + i12 * nb2 + i13 * nb3); - - float tmp = type_conversion_table::to_f32(*(const src_t *) src0_ptr); - *(dst_t *) dst_ptr = type_conversion_table::from_f32(tmp); - - if (++i10 == ne0) { - i10 = 0; - if (++i11 == ne1) { - i11 = 0; - if (++i12 == ne2) { - i12 = 0; - if (++i13 == ne3) { - i13 = 0; - } - } - } - } - } - } - i10 += ne00 * (ne01 - ir1); - while (i10 >= ne0) { - i10 -= ne0; - if (++i11 == ne1) { - i11 = 0; - if (++i12 == ne2) { - i12 = 0; - if (++i13 == ne3) { - i13 = 0; - } - } - } - } - } - } - } -} - -template -static void ggml_compute_forward_dup_to_q(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); - GGML_ASSERT(!ggml_is_quantized(src0->type)); - - GGML_TENSOR_UNARY_OP_LOCALS - - const int ith = params->ith; // thread index - const int nth = params->nth; // number of threads - - // parallelize by rows - const int nr = ne01; - // number of rows per thread - const int dr = (nr + nth - 1) / nth; - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - if (ggml_is_contiguous(dst) && nb00 == sizeof(src_t) && ggml_get_type_traits_cpu(dst->type)->from_float) { - // casting non-quantized types --> intermediate f32 --> quantized - const ggml_from_float_t quantize_row_q = ggml_get_type_traits_cpu(dst->type)->from_float; - float * src0_f32 = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith; - - size_t id = 0; - size_t rs = nb0 * (ne00 / ggml_blck_size(dst->type)); - char * dst_ptr = (char *) dst->data; - - for (int i03 = 0; i03 < ne03; i03++) { - for (int i02 = 0; i02 < ne02; i02++) { - id += rs * ir0; - for (int i01 = ir0; i01 < ir1; i01++) { - const src_t * src0_ptr = (src_t *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); - - for (int i00 = 0; i00 < ne00; i00++) { - src0_f32[i00] = type_conversion_table::to_f32(src0_ptr[i00]); - } - - quantize_row_q(src0_f32, dst_ptr + id, ne00); - id += rs; - } - id += rs * (ne01 - ir1); - } - } - } else { - // printf("%s %s\n", ggml_type_name(src0->type), ggml_type_name(dst->type)); - GGML_ABORT("not implemented"); - } -} - -// A simplified version of ggml_compute_forward_dup that doesn't do float upcasting, and just plain old memcpy. 
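// It is only reached when src0->type == dst->type (see ggml_compute_forward_dup below) and
// picks, in order: a chunked memcpy when both tensors are fully contiguous, a per-row memcpy
// when both have contiguous rows of the same shape, and otherwise a generic element-by-element
// (block-by-block for quantized types) copy that tracks the dst indices explicitly.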
-static void ggml_compute_forward_dup_bytes(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); - GGML_ASSERT(src0->type == dst->type); - - GGML_TENSOR_UNARY_OP_LOCALS; - - if (ggml_is_contiguous(src0) && ggml_is_contiguous(dst)) { - ggml_compute_forward_dup_same_cont(params, dst); - return; - } - - const size_t type_size = ggml_type_size(src0->type); - - const int ith = params->ith; // thread index - const int nth = params->nth; // number of threads - - // parallelize by rows - const int nr = ne01; - // number of rows per thread - const int dr = (nr + nth - 1) / nth; - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - if (src0->type == dst->type && ggml_are_same_shape(src0, dst) && nb00 == type_size && nb0 == type_size) { - // copy by rows - const size_t rs = ggml_row_size(src0->type, ne00); - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = ir0; i01 < ir1; i01++) { - memcpy(((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3), - ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03), rs); - } - } - } - return; - } - - if (ggml_is_contiguous(dst)) { - size_t id = 0; - char * dst_ptr = (char *) dst->data; - const size_t rs = ne00 * type_size; - - if (nb00 == type_size) { - // src0 is contigous on first dimension, copy by rows - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - id += rs * ir0; - for (int64_t i01 = ir0; i01 < ir1; i01++) { - const char * src0_ptr = (char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03; - memcpy(dst_ptr + id, src0_ptr, rs); - id += rs; - } - id += rs * (ne01 - ir1); - } - } - } else { - //printf("%s: this is not optimal - fix me\n", __func__); - - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - id += rs * ir0; - for (int64_t i01 = ir0; i01 < ir1; i01++) { - for (int64_t i00 = 0; i00 < ne00; i00++) { - const char * src0_ptr = - (char *) src0->data + i00 * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03; - memcpy(dst_ptr + id, src0_ptr, type_size); - - id += type_size; - } - } - id += rs * (ne01 - ir1); - } - } - } - - return; - } - - // dst counters - int64_t k10 = 0; - int64_t i11 = 0; - int64_t i12 = 0; - int64_t i13 = 0; - - // number of blocks in a row - const int64_t nk00 = ne00 / ggml_blck_size(src0->type); - const int64_t nk0 = ne0 / ggml_blck_size(dst->type); - - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - k10 += nk00 * ir0; - while (k10 >= nk0) { - k10 -= nk0; - if (++i11 == ne1) { - i11 = 0; - if (++i12 == ne2) { - i12 = 0; - if (++i13 == ne3) { - i13 = 0; - } - } - } - } - for (int64_t i01 = ir0; i01 < ir1; i01++) { - for (int64_t k00 = 0; k00 < nk00; k00++) { - const char * src0_ptr = ((char *) src0->data + k00 * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03); - char * dst_ptr = ((char *) dst->data + k10 * nb0 + i11 * nb1 + i12 * nb2 + i13 * nb3); - - memcpy(dst_ptr, src0_ptr, type_size); - - if (++k10 == nk0) { - k10 = 0; - if (++i11 == ne1) { - i11 = 0; - if (++i12 == ne2) { - i12 = 0; - if (++i13 == ne3) { - i13 = 0; - } - } - } - } - } - } - k10 += nk00 * (ne01 - ir1); - while (k10 >= nk0) { - k10 -= nk0; - if (++i11 == ne1) { - i11 = 0; - if (++i12 == ne2) { - i12 = 0; - if (++i13 == ne3) { - i13 = 0; - } - } - } - } - } - } -} - -static void ggml_compute_forward_dup_from_q(const 
ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_TENSOR_BINARY_OP_LOCALS - - const ggml_type type = src0->type; - const ggml_to_float_t dequantize_row_q = ggml_get_type_traits(type)->to_float; - - size_t qk = ggml_blck_size(type); - const int64_t nr = ggml_nelements(src1) / qk; - - // destination must be contiguous in the first dimension - GGML_ASSERT(nb10 == ggml_type_size(dst->type)); - // must either have first dimension large enough to hold a row, or fully contiguous - GGML_ASSERT((ne10 % qk) == 0 || ggml_is_contiguous(dst)); - - const int ith = params->ith; - const int nth = params->nth; - - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int64_t ir = ir0; ir < ir1; ++ir) { - uint32_t i = ir * qk; - - const int64_t i03 = i / (ne00 * ne01 * ne02); - const int64_t i02 = (i - i03 * ne00 * ne01 * ne02) / (ne00 * ne01); - const int64_t i01 = (i - i03 * ne00 * ne01 * ne02 - i02 * ne01 * ne00) / ne00; - const int64_t i00 = i - i03 * ne00 * ne01 * ne02 - i02 * ne01 * ne00 - i01 * ne00; - const int64_t x_offset = (i00 / qk) * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03; - - const int64_t i13 = i / (ne10 * ne11 * ne12); - const int64_t i12 = (i - i13 * ne10 * ne11 * ne12) / (ne10 * ne11); - const int64_t i11 = (i - i13 * ne10 * ne11 * ne12 - i12 * ne10 * ne11) / ne10; - const int64_t i10 = i - i13 * ne10 * ne11 * ne12 - i12 * ne10 * ne11 - i11 * ne10; - const int64_t dst_offset = i10 * nb10 + i11 * nb11 + i12 * nb12 + i13 * nb13; - - dequantize_row_q((const void *) ((char *) src0->data + x_offset), (float *) ((char *) dst->data + dst_offset), - qk); - } -} - -void ggml_compute_forward_dup(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - if (src0->type == dst->type) { - ggml_compute_forward_dup_bytes(params, dst); - return; - } - - switch (src0->type) { - case GGML_TYPE_F16: - { - /**/ if (dst->type == GGML_TYPE_F16) { - ggml_compute_forward_dup_flt(params, dst); - } else if (dst->type == GGML_TYPE_BF16) { - ggml_compute_forward_dup_flt(params, dst); - } else if (dst->type == GGML_TYPE_F32) { - ggml_compute_forward_dup_flt(params, dst); - } else { - ggml_compute_forward_dup_to_q(params, dst); - } - } - break; - case GGML_TYPE_BF16: - { - /**/ if (dst->type == GGML_TYPE_F16) { - ggml_compute_forward_dup_flt(params, dst); - } else if (dst->type == GGML_TYPE_BF16) { - ggml_compute_forward_dup_flt(params, dst); - } else if (dst->type == GGML_TYPE_F32) { - ggml_compute_forward_dup_flt(params, dst); - } else { - ggml_compute_forward_dup_to_q(params, dst); - } - } - break; - case GGML_TYPE_F32: - { - /**/ if (dst->type == GGML_TYPE_F16) { - ggml_compute_forward_dup_flt(params, dst); - } else if (dst->type == GGML_TYPE_BF16) { - ggml_compute_forward_dup_flt(params, dst); - } else if (dst->type == GGML_TYPE_F32) { - ggml_compute_forward_dup_flt(params, dst); - } else if (dst->type == GGML_TYPE_I32) { - ggml_compute_forward_dup_flt(params, dst); - } else { - ggml_compute_forward_dup_to_q(params, dst); - } - } - break; - case GGML_TYPE_I32: - { - if (dst->type == GGML_TYPE_F32) { - ggml_compute_forward_dup_flt(params, dst); - } else { - GGML_ABORT("not implemented"); - } - } - break; - default: - { - if (ggml_is_quantized(src0->type) && dst->type == GGML_TYPE_F32) { - ggml_compute_forward_dup_from_q(params, dst); - break; - } - GGML_ABORT("fatal error"); - } - } -} 
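// Summary of the dispatch above:
//   - src type == dst type                           -> ggml_compute_forward_dup_bytes (raw copy)
//   - F16/BF16/F32 -> F16/BF16/F32 (plus F32<->I32)  -> ggml_compute_forward_dup_flt<src_t, dst_t>
//   - F16/BF16/F32 -> quantized                      -> ggml_compute_forward_dup_to_q (stage row as f32, then quantize)
//   - quantized    -> F32                            -> ggml_compute_forward_dup_from_q (dequantize per block)
// The float paths reduce to a to_f32/from_f32 round trip per element. A minimal sketch of that
// round trip for F32 -> F16, assuming only the fp16 helpers declared in ggml.h:
//
//     #include "ggml.h"
//
//     static void cast_row_f32_to_f16(const float * src, ggml_fp16_t * dst, int64_t n) {
//         for (int64_t i = 0; i < n; ++i) {
//             dst[i] = ggml_fp32_to_fp16(src[i]); // from_f32(to_f32(x)) with src_t == float
//         }
//     }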
- -// ggml_compute_forward_add - -static void ggml_compute_forward_add_q_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst)); - - const int nr = ggml_nrows(src0); - - GGML_TENSOR_BINARY_OP_LOCALS - - const int ith = params->ith; - const int nth = params->nth; - - const ggml_type type = src0->type; - const ggml_type dtype = dst->type; - const ggml_to_float_t dequantize_row_q = ggml_get_type_traits(type)->to_float; - const ggml_from_float_t quantize_row_q = ggml_get_type_traits_cpu(dtype)->from_float; - - // we don't support permuted src0 or src1 - GGML_ASSERT(nb00 == ggml_type_size(type)); - GGML_ASSERT(nb10 == sizeof(float)); - - // dst cannot be transposed or permuted - GGML_ASSERT(nb0 <= nb1); - GGML_ASSERT(nb1 <= nb2); - GGML_ASSERT(nb2 <= nb3); - - GGML_ASSERT(ggml_is_quantized(src0->type)); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - float * wdata = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith; - - for (int ir = ir0; ir < ir1; ++ir) { - // src0 indices - const int i03 = ir / (ne02 * ne01); - const int i02 = (ir - i03 * ne02 * ne01) / ne01; - const int i01 = (ir - i03 * ne02 * ne01 - i02 * ne01); - - // src1 and dst are same shape as src0 => same indices - const int i13 = i03; - const int i12 = i02; - const int i11 = i01; - - const int i3 = i03; - const int i2 = i02; - const int i1 = i01; - - void * src0_row = (void *) ((char *) src0->data + (i01 * nb01 + i02 * nb02 + i03 * nb03)); - float * src1_row = (float *) ((char *) src1->data + (i11 * nb11 + i12 * nb12 + i13 * nb13)); - void * dst_row = (void *) ((char *) dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb3)); - - assert(ne00 % 32 == 0); - - // unquantize row from src0 to temp buffer - dequantize_row_q(src0_row, wdata, ne00); - // add src1 - ggml_vec_acc_f32(ne00, wdata, src1_row); - // quantize row to dst - if (quantize_row_q != NULL) { - quantize_row_q(wdata, dst_row, ne00); - } else { - memcpy(dst_row, wdata, ne0 * nb0); - } - } -} - -void ggml_compute_forward_add(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - case GGML_TYPE_F16: - case GGML_TYPE_BF16: - { - ggml_compute_forward_add_non_quantized(params, dst); - } - break; - case GGML_TYPE_Q4_0: - case GGML_TYPE_Q4_1: - case GGML_TYPE_Q5_0: - case GGML_TYPE_Q5_1: - case GGML_TYPE_Q8_0: - case GGML_TYPE_MXFP4: - case GGML_TYPE_Q2_K: - case GGML_TYPE_Q3_K: - case GGML_TYPE_Q4_K: - case GGML_TYPE_Q5_K: - case GGML_TYPE_Q6_K: - case GGML_TYPE_TQ1_0: - case GGML_TYPE_TQ2_0: - case GGML_TYPE_IQ2_XXS: - case GGML_TYPE_IQ2_XS: - case GGML_TYPE_IQ3_XXS: - case GGML_TYPE_IQ1_S: - case GGML_TYPE_IQ1_M: - case GGML_TYPE_IQ4_NL: - case GGML_TYPE_IQ4_XS: - case GGML_TYPE_IQ3_S: - case GGML_TYPE_IQ2_S: - { - ggml_compute_forward_add_q_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_add_id - -static void ggml_compute_forward_add_id_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - const ggml_tensor * src2 = dst->src[2]; - - GGML_ASSERT(dst->type == GGML_TYPE_F32); - GGML_ASSERT(src0->type == 
GGML_TYPE_F32); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT(src2->type == GGML_TYPE_I32); - - GGML_ASSERT(src0->nb[0] == sizeof(float)); - GGML_ASSERT(src1->nb[0] == sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nr = ggml_nrows(src0); - - GGML_TENSOR_TERNARY_OP_LOCALS - - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb10 == sizeof(float)); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int ir = ir0; ir < ir1; ++ir) { - // src0 indices - const int i3 = ir / (ne2 * ne1); - const int i2 = (ir - i3 * ne2 * ne1) / ne1; - const int i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); - - // src1 indices - const int i11 = *(int32_t *) ((char *) src2->data + i1 * nb20 + i2 * nb21); - - GGML_ASSERT(i11 >= 0 && i11 < ne11); - - ggml_vec_add_f32(ne0, (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1), - (float *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01), - (float *) ((char *) src1->data + i11 * nb11)); - } -} - -void ggml_compute_forward_add_id(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_add_id_f32(params, dst); - } - break; - default: - { - GGML_ABORT("unsupported type for ggml_compute_forward_add_id: %s", ggml_type_name(src0->type)); - } - } -} - -// ggml_compute_forward_add1 - -static void ggml_compute_forward_add1_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - GGML_ASSERT(ggml_is_scalar(src1)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nr = ggml_nrows(src0); - - GGML_TENSOR_UNARY_OP_LOCALS - - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb00 == sizeof(float)); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int ir = ir0; ir < ir1; ++ir) { - // src0 and dst are same shape => same indices - const int i3 = ir / (ne2 * ne1); - const int i2 = (ir - i3 * ne2 * ne1) / ne1; - const int i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); - -#ifdef GGML_USE_ACCELERATE - GGML_UNUSED(ggml_vec_add1_f32); - - vDSP_vadd((float *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01), 1, - (float *) ((char *) src1->data), 0, (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1), - 1, ne0); -#else - ggml_vec_add1_f32(ne0, (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1), - (float *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01), *(float *) src1->data); -#endif - } -} - -static void ggml_compute_forward_add1_f16_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - GGML_ASSERT(ggml_is_scalar(src1)); - - // scalar to add - const float v = *(float *) src1->data; - - const int ith = params->ith; - const int nth = params->nth; - - const int nr = ggml_nrows(src0); - - GGML_TENSOR_UNARY_OP_LOCALS - - GGML_ASSERT(src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F16); - - GGML_ASSERT(nb0 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); - - // 
rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int ir = ir0; ir < ir1; ++ir) { - // src0 and dst are same shape => same indices - const int i3 = ir / (ne2 * ne1); - const int i2 = (ir - i3 * ne2 * ne1) / ne1; - const int i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); - - ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1); - ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01); - for (int i = 0; i < ne0; i++) { - dst_ptr[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(src0_ptr[i]) + v); - } - } -} - -static void ggml_compute_forward_add1_f16_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - GGML_ASSERT(ggml_is_scalar(src1)); - - // scalar to add - const float v = GGML_CPU_FP16_TO_FP32(*(ggml_fp16_t *) src1->data); - - const int ith = params->ith; - const int nth = params->nth; - - const int nr = ggml_nrows(src0); - - GGML_TENSOR_UNARY_OP_LOCALS - - GGML_ASSERT(src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_F16); - GGML_ASSERT(dst->type == GGML_TYPE_F16); - - GGML_ASSERT(nb0 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int ir = ir0; ir < ir1; ++ir) { - // src0 and dst are same shape => same indices - const int i3 = ir / (ne2 * ne1); - const int i2 = (ir - i3 * ne2 * ne1) / ne1; - const int i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); - - ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1); - ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01); - for (int i = 0; i < ne0; i++) { - dst_ptr[i] = GGML_CPU_FP32_TO_FP16(GGML_CPU_FP16_TO_FP32(src0_ptr[i]) + v); - } - } -} - -static void ggml_compute_forward_add1_q_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - GGML_ASSERT(ggml_is_scalar(src1)); - - // scalar to add - const float v = *(float *) src1->data; - - const int ith = params->ith; - const int nth = params->nth; - - const int nr = ggml_nrows(src0); - - GGML_TENSOR_UNARY_OP_LOCALS - - const ggml_type type = src0->type; - const ggml_to_float_t dequantize_row_q = ggml_get_type_traits(type)->to_float; - const ggml_from_float_t quantize_row_q = ggml_get_type_traits_cpu(type)->from_float; - - // we don't support permuted src0 - GGML_ASSERT(nb00 == ggml_type_size(type)); - - // dst cannot be transposed or permuted - GGML_ASSERT(nb0 <= nb1); - GGML_ASSERT(nb1 <= nb2); - GGML_ASSERT(nb2 <= nb3); - - GGML_ASSERT(ggml_is_quantized(src0->type)); - GGML_ASSERT(dst->type == src0->type); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - float * wdata = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32) * ith; - - for (int ir = ir0; ir < ir1; ++ir) { - // src0 and dst are same shape => same indices - const int i3 = ir / (ne2 * ne1); - const int i2 = (ir - i3 * ne2 * ne1) / ne1; 
- const int i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); - - void * src0_row = (void *) ((char *) src0->data + (i1 * nb01 + i2 * nb02 + i3 * nb03)); - void * dst_row = (void *) ((char *) dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb0)); - - assert(ne0 % 32 == 0); - - // unquantize row from src0 to temp buffer - dequantize_row_q(src0_row, wdata, ne0); - // add src1 - ggml_vec_acc1_f32(ne0, wdata, v); - // quantize row to dst - quantize_row_q(wdata, dst_row, ne0); - } -} - -static void ggml_compute_forward_add1_bf16_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - GGML_ASSERT(ggml_is_scalar(src1)); - - // scalar to add - const float v = *(float *) src1->data; - - const int ith = params->ith; - const int nth = params->nth; - - const int nr = ggml_nrows(src0); - - GGML_TENSOR_UNARY_OP_LOCALS - - GGML_ASSERT(src0->type == GGML_TYPE_BF16); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_BF16); - - GGML_ASSERT(nb0 == sizeof(ggml_bf16_t)); - GGML_ASSERT(nb00 == sizeof(ggml_bf16_t)); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int ir = ir0; ir < ir1; ++ir) { - // src0 and dst are same shape => same indices - const int i3 = ir / (ne2 * ne1); - const int i2 = (ir - i3 * ne2 * ne1) / ne1; - const int i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); - - ggml_bf16_t * dst_ptr = (ggml_bf16_t *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1); - ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01); - for (int i = 0; i < ne0; i++) { - dst_ptr[i] = GGML_FP32_TO_BF16(GGML_BF16_TO_FP32(src0_ptr[i]) + v); - } - } -} - -static void ggml_compute_forward_add1_bf16_bf16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - GGML_ASSERT(ggml_is_scalar(src1)); - - // scalar to add - const float v = GGML_BF16_TO_FP32(*(ggml_bf16_t *) src1->data); - - const int ith = params->ith; - const int nth = params->nth; - - const int nr = ggml_nrows(src0); - - GGML_TENSOR_UNARY_OP_LOCALS - - GGML_ASSERT(src0->type == GGML_TYPE_BF16); - GGML_ASSERT(src1->type == GGML_TYPE_BF16); - GGML_ASSERT(dst->type == GGML_TYPE_BF16); - - GGML_ASSERT(nb0 == sizeof(ggml_bf16_t)); - GGML_ASSERT(nb00 == sizeof(ggml_bf16_t)); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int ir = ir0; ir < ir1; ++ir) { - // src0 and dst are same shape => same indices - const int i3 = ir / (ne2 * ne1); - const int i2 = (ir - i3 * ne2 * ne1) / ne1; - const int i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); - - ggml_bf16_t * dst_ptr = (ggml_bf16_t *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1); - ggml_bf16_t * src0_ptr = (ggml_bf16_t *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01); - for (int i = 0; i < ne0; i++) { - dst_ptr[i] = GGML_FP32_TO_BF16(GGML_BF16_TO_FP32(src0_ptr[i]) + v); - } - } -} - -void ggml_compute_forward_add1(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_add1_f32(params, dst); - 
} - break; - case GGML_TYPE_F16: - { - if (src1->type == GGML_TYPE_F16) { - ggml_compute_forward_add1_f16_f16(params, dst); - } else if (src1->type == GGML_TYPE_F32) { - ggml_compute_forward_add1_f16_f32(params, dst); - } else { - GGML_ABORT("fatal error"); - } - } - break; - case GGML_TYPE_BF16: - { - if (src1->type == GGML_TYPE_BF16) { - ggml_compute_forward_add1_bf16_bf16(params, dst); - } else if (src1->type == GGML_TYPE_F32) { - ggml_compute_forward_add1_bf16_f32(params, dst); - } else { - GGML_ABORT("fatal error"); - } - } - break; - case GGML_TYPE_Q4_0: - case GGML_TYPE_Q4_1: - case GGML_TYPE_Q5_0: - case GGML_TYPE_Q5_1: - case GGML_TYPE_Q8_0: - case GGML_TYPE_Q8_1: - case GGML_TYPE_MXFP4: - case GGML_TYPE_Q2_K: - case GGML_TYPE_Q3_K: - case GGML_TYPE_Q4_K: - case GGML_TYPE_Q5_K: - case GGML_TYPE_Q6_K: - case GGML_TYPE_TQ1_0: - case GGML_TYPE_TQ2_0: - case GGML_TYPE_IQ2_XXS: - case GGML_TYPE_IQ2_XS: - case GGML_TYPE_IQ3_XXS: - case GGML_TYPE_IQ1_S: - case GGML_TYPE_IQ1_M: - case GGML_TYPE_IQ4_NL: - case GGML_TYPE_IQ4_XS: - case GGML_TYPE_IQ3_S: - case GGML_TYPE_IQ2_S: - { - ggml_compute_forward_add1_q_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_acc - -static void ggml_compute_forward_acc_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0)); - - // view src0 and dst with these strides and data offset inbytes during acc - // nb0 is implicitly element_size because src0 and dst are contiguous - size_t nb1 = ((int32_t *) dst->op_params)[0]; - size_t nb2 = ((int32_t *) dst->op_params)[1]; - size_t nb3 = ((int32_t *) dst->op_params)[2]; - size_t offset = ((int32_t *) dst->op_params)[3]; - bool inplace = (bool) ((int32_t *) dst->op_params)[4]; - - if (!inplace) { - if (params->ith == 0) { - // memcpy needs to be synchronized across threads to avoid race conditions. - // => do it in INIT phase - memcpy(((char *) dst->data), ((char *) src0->data), ggml_nbytes(dst)); - } - ggml_barrier(params->threadpool); - } - - const int ith = params->ith; - const int nth = params->nth; - - const int nr = ggml_nrows(src1); - const int nc = src1->ne[0]; - - GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) - GGML_TENSOR_LOCALS(size_t, nb1, src1, nb) - - // src0 and dst as viewed during acc - const size_t nb0 = ggml_element_size(src0); - - const size_t nb00 = nb0; - const size_t nb01 = nb1; - const size_t nb02 = nb2; - const size_t nb03 = nb3; - - GGML_ASSERT(offset + (ne10 == 0 ? 0 : ne10 - 1) * nb0 + (ne11 == 0 ? 0 : ne11 - 1) * nb1 + - (ne12 == 0 ? 0 : ne12 - 1) * nb2 + (ne13 == 0 ? 0 : ne13 - 1) * nb3 < - ggml_nbytes(dst)); - GGML_ASSERT(offset + (ne10 == 0 ? 0 : ne10 - 1) * nb00 + (ne11 == 0 ? 0 : ne11 - 1) * nb01 + - (ne12 == 0 ? 0 : ne12 - 1) * nb02 + (ne13 == 0 ? 
0 : ne13 - 1) * nb03 < - ggml_nbytes(src0)); - - GGML_ASSERT(nb10 == sizeof(float)); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int ir = ir0; ir < ir1; ++ir) { - // src0 and dst are viewed with shape of src1 and offset - // => same indices - const int i3 = ir / (ne12 * ne11); - const int i2 = (ir - i3 * ne12 * ne11) / ne11; - const int i1 = (ir - i3 * ne12 * ne11 - i2 * ne11); - -#ifdef GGML_USE_ACCELERATE - vDSP_vadd((float *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01 + offset), 1, - (float *) ((char *) src1->data + i3 * nb13 + i2 * nb12 + i1 * nb11), 1, - (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1 + offset), 1, nc); -#else - ggml_vec_add_f32(nc, (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1 + offset), - (float *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01 + offset), - (float *) ((char *) src1->data + i3 * nb13 + i2 * nb12 + i1 * nb11)); -#endif - } -} - -void ggml_compute_forward_acc(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_acc_f32(params, dst); - } - break; - case GGML_TYPE_F16: - case GGML_TYPE_BF16: - case GGML_TYPE_Q4_0: - case GGML_TYPE_Q4_1: - case GGML_TYPE_Q5_0: - case GGML_TYPE_Q5_1: - case GGML_TYPE_Q8_0: - case GGML_TYPE_Q8_1: - case GGML_TYPE_MXFP4: - case GGML_TYPE_Q2_K: - case GGML_TYPE_Q3_K: - case GGML_TYPE_Q4_K: - case GGML_TYPE_Q5_K: - case GGML_TYPE_Q6_K: - case GGML_TYPE_TQ1_0: - case GGML_TYPE_TQ2_0: - case GGML_TYPE_IQ2_XXS: - case GGML_TYPE_IQ2_XS: - case GGML_TYPE_IQ3_XXS: - case GGML_TYPE_IQ1_S: - case GGML_TYPE_IQ1_M: - case GGML_TYPE_IQ4_NL: - case GGML_TYPE_IQ4_XS: - case GGML_TYPE_IQ3_S: - case GGML_TYPE_IQ2_S: - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_sum - -static void ggml_compute_forward_sum_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - if (params->ith != 0) { - return; - } - - assert(ggml_is_scalar(dst)); - assert(src0->nb[0] == sizeof(float)); - - GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) - GGML_TENSOR_LOCALS(size_t, nb0, src0, nb) - - ggml_float sum = 0; - ggml_float row_sum = 0; - - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = 0; i01 < ne01; i01++) { - ggml_vec_sum_f32_ggf(ne00, &row_sum, - (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03)); - sum += row_sum; - } - } - } - ((float *) dst->data)[0] = sum; -} - -static void ggml_compute_forward_sum_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - if (params->ith != 0) { - return; - } - - assert(ggml_is_scalar(dst)); - - assert(src0->nb[0] == sizeof(ggml_fp16_t)); - - GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) - GGML_TENSOR_LOCALS(size_t, nb0, src0, nb) - - float sum = 0; - float row_sum = 0; - - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = 0; i01 < ne01; i01++) { - ggml_vec_sum_f16_ggf(ne00, &row_sum, - (ggml_fp16_t *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03)); - sum += row_sum; - } - } - } - ((ggml_fp16_t *) dst->data)[0] = GGML_CPU_FP32_TO_FP16(sum); -} - -static void ggml_compute_forward_sum_bf16(const ggml_compute_params * params, ggml_tensor * dst) { - 
const ggml_tensor * src0 = dst->src[0]; - - if (params->ith != 0) { - return; - } - - assert(ggml_is_scalar(dst)); - - assert(src0->nb[0] == sizeof(ggml_bf16_t)); - - GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) - GGML_TENSOR_LOCALS(size_t, nb0, src0, nb) - - float sum = 0; - float row_sum = 0; - - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = 0; i01 < ne01; i01++) { - ggml_vec_sum_bf16_ggf(ne00, &row_sum, - (ggml_bf16_t *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03)); - sum += row_sum; - } - } - } - ((ggml_bf16_t *) dst->data)[0] = GGML_FP32_TO_BF16(sum); -} - -void ggml_compute_forward_sum(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_sum_f32(params, dst); - } - break; - case GGML_TYPE_F16: - { - ggml_compute_forward_sum_f16(params, dst); - } - break; - case GGML_TYPE_BF16: - { - ggml_compute_forward_sum_bf16(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_cumsum - -static void ggml_compute_forward_cumsum_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_ASSERT(src0->nb[0] == sizeof(float)); - GGML_ASSERT(dst->nb[0] == sizeof(float)); - - GGML_TENSOR_UNARY_OP_LOCALS - - GGML_ASSERT(ne0 == ne00); - GGML_ASSERT(ne1 == ne01); - GGML_ASSERT(ne2 == ne02); - GGML_ASSERT(ne3 == ne03); - - const auto [ir0, ir1] = get_thread_range(params, src0); - - for (int64_t ir = ir0; ir < ir1; ++ir) { - const int64_t i03 = ir / (ne02 * ne01); - const int64_t i02 = (ir - i03 * ne02 * ne01) / ne01; - const int64_t i01 = (ir - i03 * ne02 * ne01 - i02 * ne01); - - float * src_row = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); - float * dst_row = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); - - ggml_vec_cumsum_f32(ne00, dst_row, src_row); - } -} - -void ggml_compute_forward_cumsum(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_cumsum_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_sum_rows - -static void ggml_compute_forward_sum_rows_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - if (params->ith != 0) { - return; - } - - GGML_ASSERT(src0->nb[0] == sizeof(float)); - GGML_ASSERT(dst->nb[0] == sizeof(float)); - - GGML_TENSOR_UNARY_OP_LOCALS - - GGML_ASSERT(ne0 == 1); - GGML_ASSERT(ne1 == ne01); - GGML_ASSERT(ne2 == ne02); - GGML_ASSERT(ne3 == ne03); - - for (int64_t i3 = 0; i3 < ne03; i3++) { - for (int64_t i2 = 0; i2 < ne02; i2++) { - for (int64_t i1 = 0; i1 < ne01; i1++) { - float * src_row = (float *) ((char *) src0->data + i1 * nb01 + i2 * nb02 + i3 * nb03); - float * dst_row = (float *) ((char *) dst->data + i1 * nb1 + i2 * nb2 + i3 * nb3); - float row_sum = 0; - ggml_vec_sum_f32(ne00, &row_sum, src_row); - dst_row[0] = row_sum; - } - } - } -} - -void ggml_compute_forward_sum_rows(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_sum_rows_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_mean - -static void 
ggml_compute_forward_mean_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - if (params->ith != 0) { - return; - } - - assert(src0->nb[0] == sizeof(float)); - - GGML_TENSOR_UNARY_OP_LOCALS - - assert(ne0 == 1); - assert(ne1 == ne01); - assert(ne2 == ne02); - assert(ne3 == ne03); - - GGML_UNUSED(ne0); - GGML_UNUSED(ne1); - GGML_UNUSED(ne2); - GGML_UNUSED(ne3); - - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = 0; i01 < ne01; i01++) { - ggml_vec_sum_f32(ne00, (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3), - (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03)); - - *(float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3) /= (float) ne00; - } - } - } -} - -void ggml_compute_forward_mean(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_mean_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_argmax - -static void ggml_compute_forward_argmax_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - if (params->ith != 0) { - return; - } - - assert(src0->nb[0] == sizeof(float)); - assert(dst->nb[0] == sizeof(float)); - - const int64_t ne00 = src0->ne[0]; - const int64_t ne01 = src0->ne[1]; - - const size_t nb01 = src0->nb[1]; - const size_t nb0 = dst->nb[0]; - - for (int64_t i1 = 0; i1 < ne01; i1++) { - float * src = (float *) ((char *) src0->data + i1 * nb01); - int32_t * dst_ = (int32_t *) ((char *) dst->data + i1 * nb0); - int v = 0; - ggml_vec_argmax_f32(ne00, &v, src); - dst_[0] = v; - } -} - -void ggml_compute_forward_argmax(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_argmax_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_count_equal - -static void ggml_compute_forward_count_equal_i32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_TENSOR_BINARY_OP_LOCALS; - - GGML_ASSERT(src0->type == GGML_TYPE_I32); - GGML_ASSERT(src1->type == GGML_TYPE_I32); - GGML_ASSERT(ggml_are_same_shape(src0, src1)); - GGML_ASSERT(ggml_is_scalar(dst)); - GGML_ASSERT(dst->type == GGML_TYPE_I64); - - const int64_t nr = ggml_nrows(src0); - - const int ith = params->ith; - const int nth = params->nth; - - int64_t * sums = (int64_t *) params->wdata; - int64_t sum_thread = 0; - - // rows per thread - const int64_t dr = (nr + nth - 1) / nth; - - // row range for this thread - const int64_t ir0 = dr * ith; - const int64_t ir1 = MIN(ir0 + dr, nr); - - for (int64_t ir = ir0; ir < ir1; ++ir) { - const int64_t i03 = ir / (ne02 * ne01); - const int64_t i02 = (ir - i03 * ne03) / ne01; - const int64_t i01 = ir - i03 * ne03 - i02 * ne02; - - const char * data0 = (const char *) src0->data + i03 * nb03 + i02 * nb02 + i01 * nb01; - const char * data1 = (const char *) src1->data + i03 * nb13 + i02 * nb12 + i01 * nb11; - - for (int64_t i00 = 0; i00 < ne00; ++i00) { - const int32_t val0 = *((const int32_t *) (data0 + i00 * nb00)); - const int32_t val1 = *((const int32_t *) (data1 + i00 * nb10)); - - sum_thread += val0 == val1; - } - } - if (ith != 0) 
{ - sums[ith] = sum_thread; - } - ggml_barrier(params->threadpool); - - if (ith != 0) { - return; - } - - for (int ith_other = 1; ith_other < nth; ++ith_other) { - sum_thread += sums[ith_other]; - } - *((int64_t *) dst->data) = sum_thread; -} - -void ggml_compute_forward_count_equal(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_I32: - { - ggml_compute_forward_count_equal_i32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_repeat - -static void ggml_compute_forward_repeat_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - if (params->ith != 0) { - return; - } - - GGML_ASSERT(ggml_can_repeat(src0, dst)); - - GGML_TENSOR_UNARY_OP_LOCALS - - // guaranteed to be an integer due to the check in ggml_can_repeat - const int nr0 = (int) (ne0 / ne00); - const int nr1 = (int) (ne1 / ne01); - const int nr2 = (int) (ne2 / ne02); - const int nr3 = (int) (ne3 / ne03); - - // TODO: support for transposed / permuted tensors - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb00 == sizeof(float)); - - // TODO: maybe this is not optimal? - for (int i3 = 0; i3 < nr3; i3++) { - for (int k3 = 0; k3 < ne03; k3++) { - for (int i2 = 0; i2 < nr2; i2++) { - for (int k2 = 0; k2 < ne02; k2++) { - for (int i1 = 0; i1 < nr1; i1++) { - for (int k1 = 0; k1 < ne01; k1++) { - for (int i0 = 0; i0 < nr0; i0++) { - ggml_vec_cpy_f32( - ne00, - (float *) ((char *) dst->data + (i3 * ne03 + k3) * nb3 + (i2 * ne02 + k2) * nb2 + - (i1 * ne01 + k1) * nb1 + (i0 * ne00) * nb0), - (float *) ((char *) src0->data + (k3) *nb03 + (k2) *nb02 + (k1) *nb01)); - } - } - } - } - } - } - } -} - -static void ggml_compute_forward_repeat_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - if (params->ith != 0) { - return; - } - - GGML_ASSERT(ggml_can_repeat(src0, dst)); - - GGML_TENSOR_UNARY_OP_LOCALS - - // guaranteed to be an integer due to the check in ggml_can_repeat - const int nr0 = (int) (ne0 / ne00); - const int nr1 = (int) (ne1 / ne01); - const int nr2 = (int) (ne2 / ne02); - const int nr3 = (int) (ne3 / ne03); - - // TODO: support for transposed / permuted tensors - GGML_ASSERT(nb0 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); - - // TODO: maybe this is not optimal? 
- for (int i3 = 0; i3 < nr3; i3++) { - for (int k3 = 0; k3 < ne03; k3++) { - for (int i2 = 0; i2 < nr2; i2++) { - for (int k2 = 0; k2 < ne02; k2++) { - for (int i1 = 0; i1 < nr1; i1++) { - for (int k1 = 0; k1 < ne01; k1++) { - for (int i0 = 0; i0 < nr0; i0++) { - ggml_fp16_t * y = (ggml_fp16_t *) ((char *) dst->data + (i3 * ne03 + k3) * nb3 + - (i2 * ne02 + k2) * nb2 + (i1 * ne01 + k1) * nb1 + - (i0 * ne00) * nb0); - ggml_fp16_t * x = - (ggml_fp16_t *) ((char *) src0->data + (k3) *nb03 + (k2) *nb02 + (k1) *nb01); - // ggml_vec_cpy_f16(ne00, y, x) - for (int i = 0; i < ne00; ++i) { - y[i] = x[i]; - } - } - } - } - } - } - } - } -} - -void ggml_compute_forward_repeat(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F16: - case GGML_TYPE_BF16: - case GGML_TYPE_I16: - { - ggml_compute_forward_repeat_f16(params, dst); - } - break; - case GGML_TYPE_F32: - case GGML_TYPE_I32: - { - ggml_compute_forward_repeat_f32(params, dst); - } - break; - // TODO: templateify the implemenation and support for I64 - // ref https://github.com/ggml-org/llama.cpp/pull/14274#discussion_r2169492225 - //case GGML_TYPE_I64: - // { - // ggml_compute_forward_repeat_i64(params, dst); - // } break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_repeat_back - -static void ggml_compute_forward_repeat_back_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - if (params->ith != 0) { - return; - } - - GGML_ASSERT(ggml_can_repeat(dst, src0)); - - GGML_TENSOR_UNARY_OP_LOCALS - - // guaranteed to be an integer due to the check in ggml_can_repeat - const int nr0 = (int) (ne00 / ne0); - const int nr1 = (int) (ne01 / ne1); - const int nr2 = (int) (ne02 / ne2); - const int nr3 = (int) (ne03 / ne3); - - // TODO: support for transposed / permuted tensors - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb00 == sizeof(float)); - - if (ggml_is_contiguous(dst)) { - ggml_vec_set_f32(ne0 * ne1 * ne2 * ne3, (float *) dst->data, 0); - } else { - for (int k3 = 0; k3 < ne3; k3++) { - for (int k2 = 0; k2 < ne2; k2++) { - for (int k1 = 0; k1 < ne1; k1++) { - ggml_vec_set_f32(ne0, (float *) ((char *) dst->data + k1 * nb1 + k2 * nb2 + k3 * nb3), 0); - } - } - } - } - - // TODO: maybe this is not optimal? 
- for (int i3 = 0; i3 < nr3; i3++) { - for (int k3 = 0; k3 < ne3; k3++) { - for (int i2 = 0; i2 < nr2; i2++) { - for (int k2 = 0; k2 < ne2; k2++) { - for (int i1 = 0; i1 < nr1; i1++) { - for (int k1 = 0; k1 < ne1; k1++) { - for (int i0 = 0; i0 < nr0; i0++) { - ggml_vec_acc_f32( - ne0, (float *) ((char *) dst->data + (k3) *nb3 + (k2) *nb2 + (k1) *nb1), - (float *) ((char *) src0->data + (i3 * ne3 + k3) * nb03 + (i2 * ne2 + k2) * nb02 + - (i1 * ne1 + k1) * nb01 + (i0 * ne0) * nb00)); - } - } - } - } - } - } - } -} - -void ggml_compute_forward_repeat_back(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_repeat_back_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_concat - -static void ggml_compute_forward_concat_any(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - const size_t len = ggml_type_size(src0->type); - - const int ith = params->ith; - const int nth = params->nth; - - GGML_TENSOR_BINARY_OP_LOCALS - - const int32_t dim = ggml_get_op_params_i32(dst, 0); - - GGML_ASSERT(dim >= 0 && dim < 4); - - int64_t o[4] = { 0, 0, 0, 0 }; - o[dim] = src0->ne[dim]; - - const char * x; - - // TODO: smarter multi-theading - for (int i3 = 0; i3 < ne3; i3++) { - for (int i2 = ith; i2 < ne2; i2 += nth) { - for (int i1 = 0; i1 < ne1; i1++) { - for (int i0 = 0; i0 < ne0; i0++) { - if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 < ne03) { - x = (const char *) src0->data + (i0) *nb00 + (i1) *nb01 + (i2) *nb02 + (i3) *nb03; - } else { - x = (const char *) src1->data + (i0 - o[0]) * nb10 + (i1 - o[1]) * nb11 + (i2 - o[2]) * nb12 + - (i3 - o[3]) * nb13; - } - - char * y = (char *) dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3; - - memcpy(y, x, len); - } - } - } - } -} - -static void ggml_compute_forward_concat_i8(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(ggml_type_size(src0->type) == sizeof(int8_t)); - - const int ith = params->ith; - const int nth = params->nth; - - GGML_TENSOR_BINARY_OP_LOCALS - - const int32_t dim = ggml_get_op_params_i32(dst, 0); - - GGML_ASSERT(dim >= 0 && dim < 4); - - int64_t o[4] = { 0, 0, 0, 0 }; - o[dim] = src0->ne[dim]; - - const int8_t * x; - - // TODO: smarter multi-theading - for (int i3 = 0; i3 < ne3; i3++) { - for (int i2 = ith; i2 < ne2; i2 += nth) { - for (int i1 = 0; i1 < ne1; i1++) { - for (int i0 = 0; i0 < ne0; i0++) { - if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 < ne03) { - x = (const int8_t *) ((const char *) src0->data + (i0) *nb00 + (i1) *nb01 + (i2) *nb02 + - (i3) *nb03); - } else { - x = (const int8_t *) ((const char *) src1->data + (i0 - o[0]) * nb10 + (i1 - o[1]) * nb11 + - (i2 - o[2]) * nb12 + (i3 - o[3]) * nb13); - } - - int8_t * y = (int8_t *) ((char *) dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3); - - *y = *x; - } - } - } - } -} - -static void ggml_compute_forward_concat_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(ggml_type_size(src0->type) == sizeof(ggml_fp16_t)); - - const int ith = params->ith; - const int nth = params->nth; - - GGML_TENSOR_BINARY_OP_LOCALS - - const int32_t dim = ggml_get_op_params_i32(dst, 0); - - GGML_ASSERT(dim >= 
0 && dim < 4); - - int64_t o[4] = { 0, 0, 0, 0 }; - o[dim] = src0->ne[dim]; - - const ggml_fp16_t * x; - - // TODO: smarter multi-theading - for (int i3 = 0; i3 < ne3; i3++) { - for (int i2 = ith; i2 < ne2; i2 += nth) { - for (int i1 = 0; i1 < ne1; i1++) { - for (int i0 = 0; i0 < ne0; i0++) { - if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 < ne03) { - x = (const ggml_fp16_t *) ((const char *) src0->data + (i0) *nb00 + (i1) *nb01 + (i2) *nb02 + - (i3) *nb03); - } else { - x = (const ggml_fp16_t *) ((const char *) src1->data + (i0 - o[0]) * nb10 + (i1 - o[1]) * nb11 + - (i2 - o[2]) * nb12 + (i3 - o[3]) * nb13); - } - - ggml_fp16_t * y = (ggml_fp16_t *) ((char *) dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3); - - *y = *x; - } - } - } - } -} - -static void ggml_compute_forward_concat_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(ggml_type_size(src0->type) == sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - GGML_TENSOR_BINARY_OP_LOCALS - - const int32_t dim = ggml_get_op_params_i32(dst, 0); - - GGML_ASSERT(dim >= 0 && dim < 4); - - int64_t o[4] = { 0, 0, 0, 0 }; - o[dim] = src0->ne[dim]; - - const float * x; - - // TODO: smarter multi-theading - for (int i3 = 0; i3 < ne3; i3++) { - for (int i2 = ith; i2 < ne2; i2 += nth) { - for (int i1 = 0; i1 < ne1; i1++) { - for (int i0 = 0; i0 < ne0; i0++) { - if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 < ne03) { - x = (const float *) ((const char *) src0->data + (i0) *nb00 + (i1) *nb01 + (i2) *nb02 + - (i3) *nb03); - } else { - x = (const float *) ((const char *) src1->data + (i0 - o[0]) * nb10 + (i1 - o[1]) * nb11 + - (i2 - o[2]) * nb12 + (i3 - o[3]) * nb13); - } - - float * y = (float *) ((char *) dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3); - - *y = *x; - } - } - } - } -} - -void ggml_compute_forward_concat(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F16: - case GGML_TYPE_BF16: - case GGML_TYPE_I16: - { - ggml_compute_forward_concat_f16(params, dst); - } - break; - case GGML_TYPE_I8: - { - ggml_compute_forward_concat_i8(params, dst); - } - break; - case GGML_TYPE_F32: - case GGML_TYPE_I32: - { - ggml_compute_forward_concat_f32(params, dst); - } - break; - default: - { - ggml_compute_forward_concat_any(params, dst); - } - } -} - -// ggml_compute_forward_gelu - -static void ggml_compute_forward_gelu_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - assert(ggml_is_contiguous_1(src0)); - assert(ggml_is_contiguous_1(dst)); - assert(ggml_are_same_shape(src0, dst)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_vec_gelu_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), - (float *) ((char *) src0->data + i1 * (src0->nb[1]))); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - GGML_UNUSED(x); - assert(!isnan(x)); - assert(!isinf(x)); - } -#endif - } -} - -static void ggml_compute_forward_gelu_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const 
ggml_tensor * src0 = dst->src[0]; - - assert(ggml_is_contiguous_1(src0)); - assert(ggml_is_contiguous_1(dst)); - assert(ggml_are_same_shape(src0, dst)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_vec_gelu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), - (ggml_fp16_t *) ((char *) src0->data + i1 * (src0->nb[1]))); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - const float v = GGML_CPU_FP16_TO_FP32(x); - GGML_UNUSED(v); - assert(!isnan(v)); - assert(!isinf(v)); - } -#endif - } -} - -static void ggml_compute_forward_gelu(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_gelu_f32(params, dst); - } - break; - case GGML_TYPE_F16: - { - ggml_compute_forward_gelu_f16(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_fill - -static void ggml_compute_forward_fill_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const float c = ggml_get_op_params_f32(dst, 0); - - GGML_TENSOR_LOCALS(int64_t, ne, dst, ne); - GGML_TENSOR_LOCALS(size_t, nb, dst, nb); - - const auto [ir0, ir1] = get_thread_range(params, dst); - - for (int64_t ir = ir0; ir < ir1; ++ir) { - const int64_t i03 = ir / (ne2 * ne1); - const int64_t i02 = (ir - i03 * ne2 * ne1) / ne1; - const int64_t i01 = (ir - i03 * ne2 * ne1 - i02 * ne1); - - float * dst_ptr = (float *) ((char *) dst->data + i03 * nb3 + i02 * nb2 + i01 * nb1); - - ggml_vec_set_f32(ne0, dst_ptr, c); - } -} - -void ggml_compute_forward_fill(const ggml_compute_params * params, ggml_tensor * dst) { - ggml_compute_forward_fill_f32(params, dst); -} - -// ggml_compute_tri - -static void ggml_compute_forward_tri_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - const ggml_tri_type ttype = (ggml_tri_type) ggml_get_op_params_i32(dst, 0); - - GGML_ASSERT(ggml_is_contiguous(src0)); - - GGML_TENSOR_UNARY_OP_LOCALS - - const auto [ir0, ir1] = get_thread_range(params, src0); - - bool (*bipred)(int, int); - - switch (ttype) { - case GGML_TRI_TYPE_LOWER: - bipred = [](int i, int r) { - return i < r; - }; - break; - case GGML_TRI_TYPE_LOWER_DIAG: - bipred = [](int i, int r) { - return i <= r; - }; - break; - case GGML_TRI_TYPE_UPPER: - bipred = [](int i, int r) { - return i > r; - }; - break; - case GGML_TRI_TYPE_UPPER_DIAG: - bipred = [](int i, int r) { - return i >= r; - }; - break; - default: - GGML_ABORT("invalid tri type"); - } - - for (int64_t ir = ir0; ir < ir1; ++ir) { - const int64_t i03 = ir / (ne02 * ne01); - const int64_t i02 = (ir - i03 * ne02 * ne01) / ne01; - const int64_t i01 = (ir - i03 * ne02 * ne01 - i02 * ne01); - - const float * src_ptr = (const float *) ((const char *) src0->data + i03 * nb03 + i02 * nb02 + i01 * nb01); - float * dst_ptr = (float *) ((char *) dst->data + i03 * nb3 + i02 * nb2 + i01 * nb1); - - for (int i0 = 0; i0 < ne0; ++i0) { - dst_ptr[i0] = bipred(i0, i01) ? 
src_ptr[i0] : 0.0f; - } - } -} - -void ggml_compute_forward_tri(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_tri_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_gelu_erf - -static void ggml_compute_forward_gelu_erf_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - assert(ggml_is_contiguous_1(src0)); - assert(ggml_is_contiguous_1(dst)); - assert(ggml_are_same_shape(src0, dst)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_vec_gelu_erf_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), - (float *) ((char *) src0->data + i1 * (src0->nb[1]))); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - GGML_UNUSED(x); - assert(!isnan(x)); - assert(!isinf(x)); - } -#endif - } -} - -static void ggml_compute_forward_gelu_erf_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - assert(ggml_is_contiguous_1(src0)); - assert(ggml_is_contiguous_1(dst)); - assert(ggml_are_same_shape(src0, dst)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_vec_gelu_erf_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), - (ggml_fp16_t *) ((char *) src0->data + i1 * (src0->nb[1]))); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - const float v = GGML_CPU_FP16_TO_FP32(x); - GGML_UNUSED(v); - assert(!isnan(v)); - assert(!isinf(v)); - } -#endif - } -} - -static void ggml_compute_forward_gelu_erf(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_gelu_erf_f32(params, dst); - } - break; - case GGML_TYPE_F16: - { - ggml_compute_forward_gelu_erf_f16(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_gelu_quick - -static void ggml_compute_forward_gelu_quick_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - assert(ggml_is_contiguous_1(src0)); - assert(ggml_is_contiguous_1(dst)); - assert(ggml_are_same_shape(src0, dst)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_vec_gelu_quick_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), - (float *) ((char *) src0->data + i1 * (src0->nb[1]))); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const 
float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - GGML_UNUSED(x); - assert(!isnan(x)); - assert(!isinf(x)); - } -#endif - } -} - -static void ggml_compute_forward_gelu_quick_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - assert(ggml_is_contiguous_1(src0)); - assert(ggml_is_contiguous_1(dst)); - assert(ggml_are_same_shape(src0, dst)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_vec_gelu_quick_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), - (ggml_fp16_t *) ((char *) src0->data + i1 * (src0->nb[1]))); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - const float v = GGML_CPU_FP16_TO_FP32(x); - GGML_UNUSED(v); - assert(!isnan(v)); - assert(!isinf(v)); - } -#endif - } -} - -static void ggml_compute_forward_gelu_quick(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_gelu_quick_f32(params, dst); - } - break; - case GGML_TYPE_F16: - { - ggml_compute_forward_gelu_quick_f16(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_silu - -static void ggml_compute_forward_silu_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - assert(ggml_is_contiguous_1(src0)); - assert(ggml_is_contiguous_1(dst)); - assert(ggml_are_same_shape(src0, dst)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_vec_silu_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), - (float *) ((char *) src0->data + i1 * (src0->nb[1]))); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - GGML_UNUSED(x); - assert(!isnan(x)); - assert(!isinf(x)); - } -#endif - } -} - -static void ggml_compute_forward_silu_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - assert(ggml_is_contiguous_1(src0)); - assert(ggml_is_contiguous_1(dst)); - assert(ggml_are_same_shape(src0, dst)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_vec_silu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), - (ggml_fp16_t *) ((char *) src0->data + i1 * (src0->nb[1]))); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - const float v = GGML_CPU_FP16_TO_FP32(x); - GGML_UNUSED(v); - assert(!isnan(v)); - assert(!isinf(v)); - } -#endif - } -} - -static void 
ggml_compute_forward_silu(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_silu_f32(params, dst); - } - break; - case GGML_TYPE_F16: - { - ggml_compute_forward_silu_f16(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_leaky_relu - -static void ggml_compute_forward_leaky_relu_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - if (params->ith != 0) { - return; - } - - assert(ggml_is_contiguous_1(src0)); - assert(ggml_is_contiguous_1(dst)); - assert(ggml_are_same_shape(src0, dst)); - - const int n = ggml_nrows(src0); - const int nc = src0->ne[0]; - - float negative_slope; - memcpy(&negative_slope, dst->op_params, sizeof(float)); - - assert(dst->nb[0] == sizeof(float)); - assert(src0->nb[0] == sizeof(float)); - - for (int i = 0; i < n; i++) { - ggml_vec_leaky_relu_f32(nc, (float *) ((char *) dst->data + i * (dst->nb[1])), - (float *) ((char *) src0->data + i * (src0->nb[1])), negative_slope); - } -} - -static void ggml_compute_forward_leaky_relu_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - if (params->ith != 0) { - return; - } - - assert(ggml_is_contiguous_1(src0)); - assert(ggml_is_contiguous_1(dst)); - assert(ggml_are_same_shape(src0, dst)); - - const int n = ggml_nrows(src0); - const int nc = src0->ne[0]; - - float negative_slope; - memcpy(&negative_slope, dst->op_params, sizeof(float)); - - assert(dst->nb[0] == sizeof(ggml_fp16_t)); - assert(src0->nb[0] == sizeof(ggml_fp16_t)); - - for (int i = 0; i < n; i++) { - ggml_vec_leaky_relu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i * (dst->nb[1])), - (ggml_fp16_t *) ((char *) src0->data + i * (src0->nb[1])), negative_slope); - } -} - -void ggml_compute_forward_leaky_relu(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_leaky_relu_f32(params, dst); - } - break; - case GGML_TYPE_F16: - { - ggml_compute_forward_leaky_relu_f16(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_silu_back - -static void ggml_compute_forward_silu_back_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * grad = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - assert(ggml_is_contiguous_1(grad)); - assert(ggml_is_contiguous_1(src1)); - assert(ggml_is_contiguous_1(dst)); - assert(ggml_are_same_shape(src1, dst)); - assert(ggml_are_same_shape(src1, grad)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src1->ne[0]; - const int nr = ggml_nrows(src1); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_vec_silu_backward_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), - (float *) ((char *) src1->data + i1 * (src1->nb[1])), - (float *) ((char *) grad->data + i1 * (grad->nb[1]))); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - GGML_UNUSED(x); - assert(!isnan(x)); - assert(!isinf(x)); - } -#endif - } -} - -static void ggml_compute_forward_silu_back_f16(const ggml_compute_params * 
params, ggml_tensor * dst) { - const ggml_tensor * grad = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - assert(ggml_is_contiguous_1(grad)); - assert(ggml_is_contiguous_1(src1)); - assert(ggml_is_contiguous_1(dst)); - assert(ggml_are_same_shape(src1, dst)); - assert(ggml_are_same_shape(src1, grad)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src1->ne[0]; - const int nr = ggml_nrows(src1); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_vec_silu_backward_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), - (ggml_fp16_t *) ((char *) src1->data + i1 * (src1->nb[1])), - (ggml_fp16_t *) ((char *) grad->data + i1 * (grad->nb[1]))); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const float x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - const float v = GGML_CPU_FP16_TO_FP32(x); - GGML_UNUSED(v); - assert(!isnan(v)); - assert(!isinf(v)); - } -#endif - } -} - -void ggml_compute_forward_silu_back(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_silu_back_f32(params, dst); - } - break; - case GGML_TYPE_F16: - { - ggml_compute_forward_silu_back_f16(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_reglu - -static void ggml_compute_forward_reglu_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - char * src0_d = (char *) src0->data; - char * src1_d = (char *) (src1 ? src1->data : src0->data); - const size_t src0_o = src0->nb[1]; - const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; - - GGML_ASSERT(ggml_is_contiguous_1(src0)); - GGML_ASSERT(ggml_is_contiguous_1(dst)); - - if (src1) { - GGML_ASSERT(ggml_is_contiguous_1(src1)); - GGML_ASSERT(src0->type == src1->type); - } - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; - const int nr = ggml_nrows(src0); - - GGML_ASSERT(dst->ne[0] == nc); - GGML_ASSERT(ggml_nrows(dst) == nr); - - const int32_t swapped = ggml_get_op_params_i32(dst, 1); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - float * src0_p = (float *) (src0_d + i1 * src0_o); - float * src1_p = (float *) (src1_d + i1 * src1_o); - - if (!src1) { - src0_p += swapped ? nc : 0; - src1_p += swapped ? 0 : nc; - } - - ggml_vec_reglu_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - GGML_UNUSED(x); - assert(!isnan(x)); - assert(!isinf(x)); - } -#endif - } -} - -static void ggml_compute_forward_reglu_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - char * src0_d = (char *) src0->data; - char * src1_d = (char *) (src1 ? src1->data : src0->data); - const size_t src0_o = src0->nb[1]; - const size_t src1_o = src1 ? 
src1->nb[1] : src0->nb[1]; - - GGML_ASSERT(ggml_is_contiguous_1(src0)); - GGML_ASSERT(ggml_is_contiguous_1(dst)); - - if (src1) { - GGML_ASSERT(ggml_is_contiguous_1(src1)); - GGML_ASSERT(src0->type == src1->type); - } - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; - const int nr = ggml_nrows(src0); - - GGML_ASSERT(dst->ne[0] == nc); - GGML_ASSERT(ggml_nrows(dst) == nr); - - const int32_t swapped = ggml_get_op_params_i32(dst, 1); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_fp16_t * src0_p = (ggml_fp16_t *) (src0_d + i1 * src0_o); - ggml_fp16_t * src1_p = (ggml_fp16_t *) (src1_d + i1 * src1_o); - - if (!src1) { - src0_p += swapped ? nc : 0; - src1_p += swapped ? 0 : nc; - } - - ggml_vec_reglu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - const float v = GGML_FP16_TO_FP32(x); - GGML_UNUSED(v); - assert(!isnan(v)); - assert(!isinf(v)); - } -#endif - } -} - -static void ggml_compute_forward_reglu(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_reglu_f32(params, dst); - } - break; - case GGML_TYPE_F16: - { - ggml_compute_forward_reglu_f16(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_geglu - -static void ggml_compute_forward_geglu_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - char * src0_d = (char *) src0->data; - char * src1_d = (char *) (src1 ? src1->data : src0->data); - const size_t src0_o = src0->nb[1]; - const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; - - GGML_ASSERT(ggml_is_contiguous_1(src0)); - GGML_ASSERT(ggml_is_contiguous_1(dst)); - - if (src1) { - GGML_ASSERT(ggml_is_contiguous_1(src1)); - GGML_ASSERT(src0->type == src1->type); - } - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; - const int nr = ggml_nrows(src0); - - GGML_ASSERT(dst->ne[0] == nc); - GGML_ASSERT(ggml_nrows(dst) == nr); - - const int32_t swapped = ggml_get_op_params_i32(dst, 1); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - float * src0_p = (float *) (src0_d + i1 * src0_o); - float * src1_p = (float *) (src1_d + i1 * src1_o); - - if (!src1) { - src0_p += swapped ? nc : 0; - src1_p += swapped ? 0 : nc; - } - - ggml_vec_geglu_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - GGML_UNUSED(x); - assert(!isnan(x)); - assert(!isinf(x)); - } -#endif - } -} - -static void ggml_compute_forward_geglu_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - char * src0_d = (char *) src0->data; - char * src1_d = (char *) (src1 ? 
src1->data : src0->data); - const size_t src0_o = src0->nb[1]; - const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; - - GGML_ASSERT(ggml_is_contiguous_1(src0)); - GGML_ASSERT(ggml_is_contiguous_1(dst)); - - if (src1) { - GGML_ASSERT(ggml_is_contiguous_1(src1)); - GGML_ASSERT(src0->type == src1->type); - } - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; - const int nr = ggml_nrows(src0); - - GGML_ASSERT(dst->ne[0] == nc); - GGML_ASSERT(ggml_nrows(dst) == nr); - - const int32_t swapped = ggml_get_op_params_i32(dst, 1); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_fp16_t * src0_p = (ggml_fp16_t *) (src0_d + i1 * src0_o); - ggml_fp16_t * src1_p = (ggml_fp16_t *) (src1_d + i1 * src1_o); - - if (!src1) { - src0_p += swapped ? nc : 0; - src1_p += swapped ? 0 : nc; - } - - ggml_vec_geglu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - const float v = GGML_FP16_TO_FP32(x); - GGML_UNUSED(v); - assert(!isnan(v)); - assert(!isinf(v)); - } -#endif - } -} - -static void ggml_compute_forward_geglu(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_geglu_f32(params, dst); - } - break; - case GGML_TYPE_F16: - { - ggml_compute_forward_geglu_f16(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_swiglu - -static void ggml_compute_forward_swiglu_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - char * src0_d = (char *) src0->data; - char * src1_d = (char *) (src1 ? src1->data : src0->data); - const size_t src0_o = src0->nb[1]; - const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; - - GGML_ASSERT(ggml_is_contiguous_1(src0)); - GGML_ASSERT(ggml_is_contiguous_1(dst)); - - if (src1) { - GGML_ASSERT(ggml_is_contiguous_1(src1)); - GGML_ASSERT(src0->type == src1->type); - } - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; - const int nr = ggml_nrows(src0); - - GGML_ASSERT(dst->ne[0] == nc); - GGML_ASSERT(ggml_nrows(dst) == nr); - - const int32_t swapped = ggml_get_op_params_i32(dst, 1); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - float * src0_p = (float *) (src0_d + i1 * src0_o); - float * src1_p = (float *) (src1_d + i1 * src1_o); - - if (!src1) { - src0_p += swapped ? nc : 0; - src1_p += swapped ? 
0 : nc; - } - - ggml_vec_swiglu_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - GGML_UNUSED(x); - assert(!isnan(x)); - assert(!isinf(x)); - } -#endif - } -} - -static void ggml_compute_forward_swiglu_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - char * src0_d = (char *) src0->data; - char * src1_d = (char *) (src1 ? src1->data : src0->data); - const size_t src0_o = src0->nb[1]; - const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; - - GGML_ASSERT(ggml_is_contiguous_1(src0)); - GGML_ASSERT(ggml_is_contiguous_1(dst)); - - if (src1) { - GGML_ASSERT(ggml_is_contiguous_1(src1)); - GGML_ASSERT(src0->type == src1->type); - } - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; - const int nr = ggml_nrows(src0); - - GGML_ASSERT(dst->ne[0] == nc); - GGML_ASSERT(ggml_nrows(dst) == nr); - - const int32_t swapped = ggml_get_op_params_i32(dst, 1); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_fp16_t * src0_p = (ggml_fp16_t *) (src0_d + i1 * src0_o); - ggml_fp16_t * src1_p = (ggml_fp16_t *) (src1_d + i1 * src1_o); - - if (!src1) { - src0_p += swapped ? nc : 0; - src1_p += swapped ? 0 : nc; - } - - ggml_vec_swiglu_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - const float v = GGML_FP16_TO_FP32(x); - GGML_UNUSED(v); - assert(!isnan(v)); - assert(!isinf(v)); - } -#endif - } -} - -static void ggml_compute_forward_swiglu(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_swiglu_f32(params, dst); - } - break; - case GGML_TYPE_F16: - { - ggml_compute_forward_swiglu_f16(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_swiglu_oai - -static void ggml_compute_forward_swiglu_oai_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - char * src0_d = (char *) src0->data; - char * src1_d = (char *) (src1 ? src1->data : src0->data); - const size_t src0_o = src0->nb[1]; - const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; - - GGML_ASSERT(ggml_is_contiguous_1(src0)); - GGML_ASSERT(ggml_is_contiguous_1(dst)); - - if (src1) { - GGML_ASSERT(ggml_is_contiguous_1(src1)); - GGML_ASSERT(src0->type == src1->type); - } - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src1 ? 
src0->ne[0] : src0->ne[0] / 2; - const int nr = ggml_nrows(src0); - - GGML_ASSERT(dst->ne[0] == nc); - GGML_ASSERT(ggml_nrows(dst) == nr); - - const int32_t swapped = ggml_get_op_params_i32(dst, 1); - const float alpha = ggml_get_op_params_f32(dst, 2); - const float limit = ggml_get_op_params_f32(dst, 3); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - float * src0_p = (float *) (src0_d + i1 * src0_o); - float * src1_p = (float *) (src1_d + i1 * src1_o); - float * dst_p = (float *) ((char *) dst->data + i1 * (dst->nb[1])); - - if (!src1) { - src0_p += swapped ? nc : 0; - src1_p += swapped ? 0 : nc; - } - - for (int k = 0; k < nc; k++) { - const float x = std::min(src0_p[k], limit); - const float y = std::clamp(src1_p[k], -limit, limit); - const float out_glu = x / (1.f + expf(alpha * (-x))); - dst_p[k] = out_glu * (y + 1.f); - } - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const float x = dst_p[k]; - GGML_UNUSED(x); - assert(!isnan(x)); - assert(!isinf(x)); - } -#endif - } -} - -static void ggml_compute_forward_swiglu_oai(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_swiglu_oai_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_geglu_erf - -static void ggml_compute_forward_geglu_erf_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - char * src0_d = (char *) src0->data; - char * src1_d = (char *) (src1 ? src1->data : src0->data); - const size_t src0_o = src0->nb[1]; - const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; - - GGML_ASSERT(ggml_is_contiguous_1(src0)); - GGML_ASSERT(ggml_is_contiguous_1(dst)); - - if (src1) { - GGML_ASSERT(ggml_is_contiguous_1(src1)); - GGML_ASSERT(src0->type == src1->type); - } - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; - const int nr = ggml_nrows(src0); - - GGML_ASSERT(dst->ne[0] == nc); - GGML_ASSERT(ggml_nrows(dst) == nr); - - const int32_t swapped = ggml_get_op_params_i32(dst, 1); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - float * src0_p = (float *) (src0_d + i1 * src0_o); - float * src1_p = (float *) (src1_d + i1 * src1_o); - - if (!src1) { - src0_p += swapped ? nc : 0; - src1_p += swapped ? 0 : nc; - } - - ggml_vec_geglu_erf_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - GGML_UNUSED(x); - assert(!isnan(x)); - assert(!isinf(x)); - } -#endif - } -} - -static void ggml_compute_forward_geglu_erf_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - char * src0_d = (char *) src0->data; - char * src1_d = (char *) (src1 ? src1->data : src0->data); - const size_t src0_o = src0->nb[1]; - const size_t src1_o = src1 ? 
src1->nb[1] : src0->nb[1]; - - GGML_ASSERT(ggml_is_contiguous_1(src0)); - GGML_ASSERT(ggml_is_contiguous_1(dst)); - - if (src1) { - GGML_ASSERT(ggml_is_contiguous_1(src1)); - GGML_ASSERT(src0->type == src1->type); - } - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; - const int nr = ggml_nrows(src0); - - GGML_ASSERT(dst->ne[0] == nc); - GGML_ASSERT(ggml_nrows(dst) == nr); - - const int32_t swapped = ggml_get_op_params_i32(dst, 1); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_fp16_t * src0_p = (ggml_fp16_t *) (src0_d + i1 * src0_o); - ggml_fp16_t * src1_p = (ggml_fp16_t *) (src1_d + i1 * src1_o); - - if (!src1) { - src0_p += swapped ? nc : 0; - src1_p += swapped ? 0 : nc; - } - - ggml_vec_geglu_erf_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - const float v = GGML_FP16_TO_FP32(x); - GGML_UNUSED(v); - assert(!isnan(v)); - assert(!isinf(v)); - } -#endif - } -} - -static void ggml_compute_forward_geglu_erf(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_geglu_erf_f32(params, dst); - } - break; - case GGML_TYPE_F16: - { - ggml_compute_forward_geglu_erf_f16(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_geglu_quick - -static void ggml_compute_forward_geglu_quick_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - char * src0_d = (char *) src0->data; - char * src1_d = (char *) (src1 ? src1->data : src0->data); - const size_t src0_o = src0->nb[1]; - const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; - - GGML_ASSERT(ggml_is_contiguous_1(src0)); - GGML_ASSERT(ggml_is_contiguous_1(dst)); - - if (src1) { - GGML_ASSERT(ggml_is_contiguous_1(src1)); - GGML_ASSERT(src0->type == src1->type); - } - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; - const int nr = ggml_nrows(src0); - - GGML_ASSERT(dst->ne[0] == nc); - GGML_ASSERT(ggml_nrows(dst) == nr); - - const int32_t swapped = ggml_get_op_params_i32(dst, 1); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - float * src0_p = (float *) (src0_d + i1 * src0_o); - float * src1_p = (float *) (src1_d + i1 * src1_o); - - if (!src1) { - src0_p += swapped ? nc : 0; - src1_p += swapped ? 
0 : nc; - } - - ggml_vec_geglu_quick_f32(nc, (float *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const float x = ((float *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - GGML_UNUSED(x); - assert(!isnan(x)); - assert(!isinf(x)); - } -#endif - } -} - -static void ggml_compute_forward_geglu_quick_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - char * src0_d = (char *) src0->data; - char * src1_d = (char *) (src1 ? src1->data : src0->data); - const size_t src0_o = src0->nb[1]; - const size_t src1_o = src1 ? src1->nb[1] : src0->nb[1]; - - GGML_ASSERT(ggml_is_contiguous_1(src0)); - GGML_ASSERT(ggml_is_contiguous_1(dst)); - - if (src1) { - GGML_ASSERT(ggml_is_contiguous_1(src1)); - GGML_ASSERT(src0->type == src1->type); - } - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src1 ? src0->ne[0] : src0->ne[0] / 2; - const int nr = ggml_nrows(src0); - - GGML_ASSERT(dst->ne[0] == nc); - GGML_ASSERT(ggml_nrows(dst) == nr); - - const int32_t swapped = ggml_get_op_params_i32(dst, 1); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_fp16_t * src0_p = (ggml_fp16_t *) (src0_d + i1 * src0_o); - ggml_fp16_t * src1_p = (ggml_fp16_t *) (src1_d + i1 * src1_o); - - if (!src1) { - src0_p += swapped ? nc : 0; - src1_p += swapped ? 0 : nc; - } - - ggml_vec_geglu_quick_f16(nc, (ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])), src0_p, src1_p); - -#ifndef NDEBUG - for (int k = 0; k < nc; k++) { - const ggml_fp16_t x = ((ggml_fp16_t *) ((char *) dst->data + i1 * (dst->nb[1])))[k]; - const float v = GGML_FP16_TO_FP32(x); - GGML_UNUSED(v); - assert(!isnan(v)); - assert(!isinf(v)); - } -#endif - } -} - -static void ggml_compute_forward_geglu_quick(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_geglu_quick_f32(params, dst); - } - break; - case GGML_TYPE_F16: - { - ggml_compute_forward_geglu_quick_f16(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_norm - -static void ggml_compute_forward_norm_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - - GGML_ASSERT(src0->nb[0] == sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - GGML_TENSOR_UNARY_OP_LOCALS - - float eps; - memcpy(&eps, dst->op_params, sizeof(float)); - - GGML_ASSERT(eps >= 0.0f); - - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = ith; i01 < ne01; i01 += nth) { - const float * x = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); - - float sum = 0.0; - ggml_vec_sum_f32(ne00, &sum, x); - float mean = sum / ne00; - - float * y = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); - float variance = 0; - -#ifdef GGML_USE_ACCELERATE - mean = -mean; - vDSP_vsadd(x, 1, &mean, y, 1, ne00); - vDSP_measqv(y, 1, &variance, ne00); -#else - variance = ggml_vec_cvar_f32(ne00, y, x, mean); -#endif //GGML_USE_ACCELERATE - - const float scale = 1.0f / sqrtf(variance + eps); - ggml_vec_scale_f32(ne00, y, 
scale); - } - } - } -} - -void ggml_compute_forward_norm(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_norm_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_group_rms_norm - -static void ggml_compute_forward_rms_norm_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - - GGML_ASSERT(src0->nb[0] == sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - GGML_TENSOR_UNARY_OP_LOCALS - - float eps; - memcpy(&eps, dst->op_params, sizeof(float)); - - GGML_ASSERT(eps >= 0.0f); - - // TODO: optimize - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = ith; i01 < ne01; i01 += nth) { - const float * x = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); - - ggml_float sum = 0.0; - for (int64_t i00 = 0; i00 < ne00; i00++) { - sum += (ggml_float) (x[i00] * x[i00]); - } - - const float mean = sum / ne00; - - float * y = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); - - memcpy(y, x, ne00 * sizeof(float)); - // for (int i00 = 0; i00 < ne00; i00++) { - // y[i00] = x[i00]; - // } - - const float scale = 1.0f / sqrtf(mean + eps); - - // if you hit this, likely you got an inf somewhere earlier - assert(scale > 0.0f); - - ggml_vec_scale_f32(ne00, y, scale); - } - } - } -} - -void ggml_compute_forward_rms_norm(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_rms_norm_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -static void ggml_compute_forward_rms_norm_back_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; // gradients from forward pass output - const ggml_tensor * src1 = dst->src[1]; // src1 from forward pass - - GGML_ASSERT(ggml_are_same_shape(src0, dst) && ggml_are_same_shape(src0, src1)); - - GGML_ASSERT(src0->nb[0] == sizeof(float)); - GGML_ASSERT(src1->nb[0] == sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - GGML_TENSOR_BINARY_OP_LOCALS - - float eps; - memcpy(&eps, dst->op_params, sizeof(float)); - - // TODO: optimize - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = ith; i01 < ne01; i01 += nth) { - // src1 is same shape as src0 => same indices - const int64_t i11 = i01; - const int64_t i12 = i02; - const int64_t i13 = i03; - - const float * dz = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); - const float * x = (float *) ((char *) src1->data + i11 * nb11 + i12 * nb12 + i13 * nb13); - - ggml_float sum_xx = 0.0; - ggml_float sum_xdz = 0.0; - - for (int64_t i00 = 0; i00 < ne00; i00++) { - sum_xx += (ggml_float) (x[i00] * x[i00]); - sum_xdz += (ggml_float) (x[i00] * dz[i00]); - } - - //const float mean = (float)(sum_xx)/ne00; - const float mean_eps = (float) (sum_xx) / ne00 + eps; - const float sum_eps = (float) (sum_xx) + eps * ne00; - //const float mean_xdz = (float)(sum_xdz)/ne00; - // we could cache rms from forward pass to improve performance. - // to do this implement ggml_rms and compose ggml_rms_norm using ggml_rms. 
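The per-row arithmetic removed above in ggml_compute_forward_rms_norm_f32 reduces to scaling each row by 1/sqrt(mean(x^2) + eps), after the usual dr/ir0/ir1 split of rows across threads. A minimal standalone sketch of that row update, in plain C++ and not part of the patch (rms_norm_row is a hypothetical helper, not a ggml symbol):

#include <cmath>
#include <cstddef>

// Scale one row of n floats by 1/sqrt(mean of squares + eps),
// mirroring the per-row math of the RMS-norm kernel above.
static void rms_norm_row(float * y, const float * x, size_t n, float eps) {
    double sum = 0.0; // accumulate in double, like ggml_float
    for (size_t i = 0; i < n; ++i) {
        sum += (double) x[i] * x[i];
    }
    const float mean  = (float) (sum / (double) n);
    const float scale = 1.0f / std::sqrt(mean + eps);
    for (size_t i = 0; i < n; ++i) {
        y[i] = x[i] * scale;
    }
}

The kernel itself memcpy's the row into dst first and then scales it in place, which is equivalent for contiguous rows.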
- //const float rms = sqrtf(mean_eps); - const float rrms = 1.0f / sqrtf(mean_eps); - //const float scale = -rrms/(ne00 * mean_eps); // -1/(n*rms**3) - - { - // z = rms_norm(x) - // - // rms_norm(src1) = - // scale( - // src1, - // div( - // 1, - // sqrt( - // add( - // scale( - // sum( - // sqr( - // src1)), - // (1.0/N)), - // eps)))); - - // postorder: - // ## op args grad - // 00 param src1 grad[#00] - // 01 const 1 - // 02 sqr (#00) grad[#02] - // 03 sum (#02) grad[#03] - // 04 const 1/N - // 05 scale (#03, #04) grad[#05] - // 06 const eps - // 07 add (#05, #06) grad[#07] - // 08 sqrt (#07) grad[#08] - // 09 div (#01,#08) grad[#09] - // 10 scale (#00,#09) grad[#10] - // - // backward pass, given grad[#10] - // #10: scale - // grad[#00] += scale(grad[#10],#09) - // grad[#09] += sum(mul(grad[#10],#00)) - // #09: div - // grad[#08] += neg(mul(grad[#09], div(#09,#08))) - // #08: sqrt - // grad[#07] += mul(grad[#08], div(0.5, #08)) - // #07: add - // grad[#05] += grad[#07] - // #05: scale - // grad[#03] += scale(grad[#05],#04) - // #03: sum - // grad[#02] += repeat(grad[#03], #02) - // #02: - // grad[#00] += scale(mul(#00, grad[#02]), 2.0) - // - // substitute and simplify: - // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, grad[#02]), 2.0) - // grad[#02] = repeat(grad[#03], #02) - // grad[#02] = repeat(scale(grad[#05],#04), #02) - // grad[#02] = repeat(scale(grad[#07],#04), #02) - // grad[#02] = repeat(scale(mul(grad[#08], div(0.5, #08)),#04), #02) - // grad[#02] = repeat(scale(mul(neg(mul(grad[#09], div(#09,#08))), div(0.5, #08)),#04), #02) - // grad[#02] = repeat(scale(mul(neg(mul(sum(mul(grad[#10],#00)), div(#09,#08))), div(0.5, #08)),#04), #02) - // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(#09,#08) * div(0.5, #08) * (1/N)), #02) - // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(div(#01,#08),#08) * div(0.5, #08) * (1/N)), #02) - // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(1,#08*#08) * div(0.5, #08) * (1/N)), #02) - // grad[#02] = repeat(-(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N)), #02) - // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, grad[#02]), 2.0) - // grad[#00] = scale(grad(#10), #09) + scale(mul(#00, repeat(-(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N)), #02)), 2.0) - // grad[#00] = scale(grad(#10), #09) + scale(scale(#00, -(sum(mul(grad[#10],#00)) * div(1,#07) * div(0.5, #08) * (1/N))), 2.0) - // grad[#00] = scale(grad(#10), #09) + scale(#00, -(sum(mul(grad[#10],#00)) * div(1,#07) * div(1,#08) * (1/N))) - // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,#07*#08) * (-1/N)) - // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,#07*#08) * (-1/N)) - // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(1,mean_eps*rms) * (-1/N)) - // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*mean_eps)) - // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*(sum_xx/N+eps))) - // grad[#00] = scale(grad(#10), #09) + scale(#00, sum(mul(grad[#10],#00)) * div(-1,rms*N*sum_xx+rms*N*eps)) - // grad[#00] = scale(dz, rrms) + scale(x, sum(mul(dz,x)) * div(-1,rms*N*mean_eps)) - // grad[#00] = scale(dz, rrms) + scale(x, sum_xdz * div(-1,rms*N*mean_eps)) - // a = b*c + d*e - // a = b*c*f/f + d*e*f/f - // a = (b*c*f + d*e*f)*(1/f) - // a = (b*c*(1/c) + d*e*(1/c))*(1/(1/c)) - // a = (b + d*e/c)*c - // b = dz, c = rrms, d = x, e = sum_xdz * div(-1,rms*N*mean_eps) - // a = (dz + 
x*sum_xdz * div(-1,rms*N*mean_eps)/rrms)*rrms - // a = (dz + x*sum_xdz * div(-1,rms*N*mean_eps)*rms)*rrms - // a = (dz + x*sum_xdz * div(-rms,rms*N*mean_eps))*rrms - // a = (dz + x*sum_xdz * div(-1,N*mean_eps))*rrms - // a = (dz + x*div(-sum_xdz,N*mean_eps))*rrms - // a = (dz + x*div(-mean_xdz,mean_eps))*rrms - // grad[#00] = scale(dz + scale(x, div(-mean_xdz,mean_eps)),rrms) - // grad[#00] = scale(dz + scale(x, -mean_xdz/mean_eps),rrms) - // dx = scale(dz + scale(x, -mean_xdz/mean_eps),rrms) - } - // dx = scale(dz + scale(x, -mean_xdz/mean_eps),rrms) - // post-order: - // dx := x - // dx := scale(dx,-mean_xdz/mean_eps) - // dx := add(dx, dz) - // dx := scale(dx, rrms) - float * dx = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); - - // dx[i00] = (x*(-sum_xdz/sum_eps) + dz) / sqrtf(mean_eps) - ggml_vec_cpy_f32(ne00, dx, x); - // ggml_vec_scale_f32(ne00, dx, -mean_xdz/mean_eps); - ggml_vec_scale_f32(ne00, dx, (float) (-sum_xdz) / sum_eps); - ggml_vec_acc_f32(ne00, dx, dz); - ggml_vec_scale_f32(ne00, dx, rrms); - } - } - } -} - -void ggml_compute_forward_rms_norm_back(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_rms_norm_back_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_group_norm - -static void ggml_compute_forward_group_norm_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - - GGML_ASSERT(src0->nb[0] == sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - GGML_TENSOR_UNARY_OP_LOCALS - - // TODO: optimize - - float eps; - memcpy(&eps, dst->op_params + 1, sizeof(float)); - - int n_channels = src0->ne[2]; - int n_groups = dst->op_params[0]; - int n_channels_per_group = (n_channels + n_groups - 1) / n_groups; - for (int i = ith; i < n_groups; i += nth) { - int start = i * n_channels_per_group; - int end = start + n_channels_per_group; - if (end > n_channels) { - end = n_channels; - } - int step = end - start; - - for (int64_t i03 = 0; i03 < ne03; i03++) { - ggml_float sum = 0.0; - for (int64_t i02 = start; i02 < end; i02++) { - for (int64_t i01 = 0; i01 < ne01; i01++) { - const float * x = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); - - ggml_float sumr = 0.0; - for (int64_t i00 = 0; i00 < ne00; i00++) { - sumr += (ggml_float) x[i00]; - } - sum += sumr; - } - } - const float mean = sum / (ne00 * ne01 * step); - - ggml_float sum2 = 0.0; - for (int64_t i02 = start; i02 < end; i02++) { - for (int64_t i01 = 0; i01 < ne01; i01++) { - const float * x = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); - - float * y = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); - - ggml_float sumr = 0.0; - for (int64_t i00 = 0; i00 < ne00; i00++) { - float v = x[i00] - mean; - y[i00] = v; - sumr += (ggml_float) (v * v); - } - sum2 += sumr; - } - } - const float variance = sum2 / (ne00 * ne01 * step); - const float scale = 1.0f / sqrtf(variance + eps); - - for (int64_t i02 = start; i02 < end; i02++) { - for (int64_t i01 = 0; i01 < ne01; i01++) { - float * y = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); - ggml_vec_scale_f32(ne00, y, scale); - } - } - } - } -} - -void ggml_compute_forward_group_norm(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * 
src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_group_norm_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_l2_norm - -static void ggml_compute_forward_l2_norm_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - - GGML_ASSERT(src0->nb[0] == sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - GGML_TENSOR_UNARY_OP_LOCALS - - float eps; - memcpy(&eps, dst->op_params, sizeof(float)); - - GGML_ASSERT(eps >= 0.0f); - - // TODO: optimize - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = ith; i01 < ne01; i01 += nth) { - const float * x = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); - - ggml_float sum = 0.0; - for (int64_t i00 = 0; i00 < ne00; i00++) { - sum += (ggml_float) (x[i00] * x[i00]); - } - - float * y = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); - - memcpy(y, x, ne00 * sizeof(float)); - - const float scale = 1.0f / fmaxf(sqrtf(sum), eps); - - ggml_vec_scale_f32(ne00, y, scale); - } - } - } -} - -void ggml_compute_forward_l2_norm(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_l2_norm_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_out_prod - -static void ggml_compute_forward_out_prod_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_TENSOR_BINARY_OP_LOCALS - - GGML_ASSERT(dst->type == GGML_TYPE_F32); - GGML_ASSERT(src0->type == GGML_TYPE_F32); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - - const int ith = params->ith; - const int nth = params->nth; - - GGML_ASSERT(ne0 == ne00); - GGML_ASSERT(ne1 == ne10); - GGML_ASSERT(ne2 == ne12); - GGML_ASSERT(ne3 == ne13); - - GGML_ASSERT(ne2 % ne02 == 0); - GGML_ASSERT(ne3 % ne03 == 0); - - // we don't support permuted src0 or src1 - GGML_ASSERT(nb00 == sizeof(float)); - - // dst cannot be transposed or permuted - GGML_ASSERT(nb0 == sizeof(float)); - // GGML_ASSERT(nb0 <= nb1); - // GGML_ASSERT(nb1 <= nb2); - // GGML_ASSERT(nb2 <= nb3); - - // nb01 >= nb00 - src0 is not transposed - // compute by src0 rows - - if (ith == 0) { - ggml_vec_set_f32(ne0 * ne1 * ne2 * ne3, (float *) dst->data, 0); - } - ggml_barrier(params->threadpool); - - // dst[:,:,:,:] = 0 - // for i2,i3: - // for i1: - // for i01: - // for i0: - // dst[i0,i1,i2,i3] += src0[i0,i01,i2,i3] * src1[i1,i01,i2,i3] - - // parallelize by last three dimensions - - // total rows in dst - const int64_t nr = ne1 * ne2 * ne3; - - // rows per thread - const int64_t dr = (nr + nth - 1) / nth; - - // row range for this thread - const int64_t ir0 = dr * ith; - const int64_t ir1 = MIN(ir0 + dr, nr); - - // block-tiling attempt - const int64_t blck_0 = MAX(GGML_VEC_MAD_UNROLL, 32); - const int64_t blck_1 = 16; - - // dps == dst per src0, used for group query attention - const int64_t dps2 = ne2 / ne02; - const int64_t dps3 = ne3 / ne03; - - for (int64_t bir = ir0; bir < ir1; bir += blck_1) { - const int64_t bir1 = MIN(bir + blck_1, ir1); - for (int64_t bi01 = 0; bi01 < ne01; bi01 += blck_0) { - const int64_t bne01 = MIN(bi01 + blck_0, ne01); - for (int64_t ir = 
bir; ir < bir1; ++ir) { - // dst indices - const int64_t i3 = ir / (ne2 * ne1); - const int64_t i2 = (ir - i3 * ne2 * ne1) / ne1; - const int64_t i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); - - const int64_t i02 = i2 / dps2; - const int64_t i03 = i3 / dps3; - - //const int64_t i10 = i1; - const int64_t i12 = i2; - const int64_t i13 = i3; - -#if GGML_VEC_MAD_UNROLL > 2 - const int64_t bne01_unroll = bne01 - (bne01 % GGML_VEC_MAD_UNROLL); - for (int64_t i01 = bi01; i01 < bne01_unroll; i01 += GGML_VEC_MAD_UNROLL) { - const int64_t i11 = i01; - - float * s0 = (float *) ((char *) src0->data + (i01 * nb01 + i02 * nb02 + i03 * nb03)); - float * s1 = (float *) ((char *) src1->data + (i1 * nb10 + i11 * nb11 + i12 * nb12 + i13 * nb13)); - float * d = (float *) ((char *) dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb3)); - - ggml_vec_mad_f32_unroll(ne0, nb01, nb11, d, s0, s1); - } - for (int64_t i01 = bne01_unroll; i01 < bne01; ++i01) { - const int64_t i11 = i01; - - float * s0 = (float *) ((char *) src0->data + (i01 * nb01 + i02 * nb02 + i03 * nb03)); - float * s1 = (float *) ((char *) src1->data + (i1 * nb10 + i11 * nb11 + i12 * nb12 + i13 * nb13)); - float * d = (float *) ((char *) dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb3)); - - ggml_vec_mad_f32(ne0, d, s0, *s1); - } -#else - for (int64_t i01 = bi01; i01 < bne01; ++i01) { - const int64_t i11 = i01; - - float * s0 = (float *) ((char *) src0->data + (i01 * nb01 + i02 * nb02 + i03 * nb03)); - float * s1 = (float *) ((char *) src1->data + (i1 * nb10 + i11 * nb11 + i12 * nb12 + i13 * nb13)); - float * d = (float *) ((char *) dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb3)); - - ggml_vec_mad_f32(ne0, d, s0, *s1); - } -#endif - } - } - } -} - -static void ggml_compute_forward_out_prod_q_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_TENSOR_BINARY_OP_LOCALS; - - const int ith = params->ith; - const int nth = params->nth; - - const ggml_type type = src0->type; - const ggml_to_float_t dequantize_row_q = ggml_get_type_traits(type)->to_float; - - GGML_ASSERT(ne02 == ne12); - GGML_ASSERT(ne03 == ne13); - GGML_ASSERT(ne2 == ne12); - GGML_ASSERT(ne3 == ne13); - - // we don't support permuted src0 dim0 - GGML_ASSERT(nb00 == ggml_type_size(type)); - - // dst dim0 cannot be transposed or permuted - GGML_ASSERT(nb0 == sizeof(float)); - // GGML_ASSERT(nb0 <= nb1); - // GGML_ASSERT(nb1 <= nb2); - // GGML_ASSERT(nb2 <= nb3); - - GGML_ASSERT(ne0 == ne00); - GGML_ASSERT(ne1 == ne10); - GGML_ASSERT(ne2 == ne02); - GGML_ASSERT(ne3 == ne03); - - // nb01 >= nb00 - src0 is not transposed - // compute by src0 rows - - if (ith == 0) { - ggml_vec_set_f32(ne0 * ne1 * ne2 * ne3, (float *) dst->data, 0); - } - ggml_barrier(params->threadpool); - - // parallelize by last three dimensions - - // total rows in dst - const int64_t nr = ne1 * ne2 * ne3; - - // rows per thread - const int64_t dr = (nr + nth - 1) / nth; - - // row range for this thread - const int64_t ir0 = dr * ith; - const int64_t ir1 = MIN(ir0 + dr, nr); - - // dst[:,:,:,:] = 0 - // for i2,i3: - // for i1: - // for i01: - // for i0: - // dst[i0,i1,i2,i3] += src0[i0,i01,i2,i3] * src1[i1,i01,i2,i3] - - float * wdata = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32) * ith; - - for (int64_t ir = ir0; ir < ir1; ++ir) { - // dst indices - const int64_t i3 = ir / (ne2 * ne1); - const int64_t i2 = (ir - i3 * ne2 * ne1) / ne1; - const int64_t i1 = (ir - i3 * ne2 * ne1 - i2 * ne1); - - const int64_t i02 = i2; - const int64_t 
i03 = i3; - - //const int64_t i10 = i1; - const int64_t i12 = i2; - const int64_t i13 = i3; - - for (int64_t i01 = 0; i01 < ne01; ++i01) { - const int64_t i11 = i01; - - float * s0 = (float *) ((char *) src0->data + (i01 * nb01 + i02 * nb02 + i03 * nb03)); - float * s1 = (float *) ((char *) src1->data + (i1 * nb10 + i11 * nb11 + i12 * nb12 + i13 * nb13)); - float * d = (float *) ((char *) dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb3)); - - dequantize_row_q(s0, wdata, ne0); - ggml_vec_mad_f32(ne0, d, wdata, *s1); - } - } -} - -void ggml_compute_forward_out_prod(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_Q4_0: - case GGML_TYPE_Q4_1: - case GGML_TYPE_Q5_0: - case GGML_TYPE_Q5_1: - case GGML_TYPE_Q8_0: - case GGML_TYPE_MXFP4: - case GGML_TYPE_Q2_K: - case GGML_TYPE_Q3_K: - case GGML_TYPE_Q4_K: - case GGML_TYPE_Q5_K: - case GGML_TYPE_Q6_K: - case GGML_TYPE_TQ1_0: - case GGML_TYPE_TQ2_0: - case GGML_TYPE_IQ2_XXS: - case GGML_TYPE_IQ2_XS: - case GGML_TYPE_IQ3_XXS: - case GGML_TYPE_IQ1_S: - case GGML_TYPE_IQ1_M: - case GGML_TYPE_IQ4_NL: - case GGML_TYPE_IQ4_XS: - case GGML_TYPE_IQ3_S: - case GGML_TYPE_IQ2_S: - { - ggml_compute_forward_out_prod_q_f32(params, dst); - } - break; - case GGML_TYPE_F16: - { - GGML_ABORT("fatal error"); // todo - // ggml_compute_forward_out_prod_f16_f32(params, dst); - } - case GGML_TYPE_F32: - { - ggml_compute_forward_out_prod_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_scale - -static void ggml_compute_forward_scale_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_ASSERT(ggml_is_contiguous(src0)); - GGML_ASSERT(ggml_is_contiguous(dst)); - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - - float s; // scale factor - float b; // bias - - memcpy(&s, (float *) dst->op_params + 0, sizeof(float)); - memcpy(&b, (float *) dst->op_params + 1, sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - const size_t nb01 = src0->nb[1]; - - const size_t nb1 = dst->nb[1]; - - if (b == 0.0f) { - for (int i1 = ir0; i1 < ir1; i1++) { - if (dst->data != src0->data) { - // src0 is same shape as dst => same indices - // TODO: add x parameter to ggml_vec_scale_f32 and remove this memcpy - memcpy((char *) dst->data + i1 * nb1, (char *) src0->data + i1 * nb01, nc * sizeof(float)); - } - ggml_vec_scale_f32(nc, (float *) ((char *) dst->data + i1 * nb1), s); - } - } else { - for (int i1 = ir0; i1 < ir1; i1++) { - ggml_vec_mad1_f32(nc, (float *) ((char *) dst->data + i1 * nb1), (float *) ((char *) src0->data + i1 * nb1), - s, b); - } - } -} - -void ggml_compute_forward_scale(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_scale_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_set - -static void ggml_compute_forward_set_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - 
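The scale kernel removed a few lines above computes y = s*x + b for every row, taking a memcpy-plus-ggml_vec_scale_f32 fast path when the bias is zero. A standalone sketch of that row update, under the assumption of contiguous float rows (scale_row is a hypothetical helper, not part of ggml):

#include <cstring>

// y = s*x + b over n contiguous floats; the b == 0 branch mirrors the
// copy-then-scale-in-place fast path of the kernel above.
static void scale_row(float * y, const float * x, int n, float s, float b) {
    if (b == 0.0f) {
        if (y != x) {
            std::memcpy(y, x, n * sizeof(float));
        }
        for (int i = 0; i < n; ++i) {
            y[i] *= s;
        }
    } else {
        for (int i = 0; i < n; ++i) {
            y[i] = x[i] * s + b;
        }
    }
}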
GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0)); - - // view src0 and dst with these strides and data offset inbytes during set - // nb0 is implicitly element_size because src0 and dst are contiguous - size_t nb1 = ((int32_t *) dst->op_params)[0]; - size_t nb2 = ((int32_t *) dst->op_params)[1]; - size_t nb3 = ((int32_t *) dst->op_params)[2]; - size_t offset = ((int32_t *) dst->op_params)[3]; - bool inplace = (bool) ((int32_t *) dst->op_params)[4]; - - if (!inplace) { - if (params->ith == 0) { - // memcpy needs to be synchronized across threads to avoid race conditions. - // => do it in INIT phase - memcpy(((char *) dst->data), ((char *) src0->data), ggml_nbytes(dst)); - } - ggml_barrier(params->threadpool); - } - - const int ith = params->ith; - const int nth = params->nth; - - const int nr = ggml_nrows(src1); - const int nc = src1->ne[0]; - - GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) - GGML_TENSOR_LOCALS(size_t, nb1, src1, nb) - - // src0 and dst as viewed during set - const size_t nb0 = ggml_element_size(src0); - - const int im0 = (ne10 == 0 ? 0 : ne10 - 1); - const int im1 = (ne11 == 0 ? 0 : ne11 - 1); - const int im2 = (ne12 == 0 ? 0 : ne12 - 1); - const int im3 = (ne13 == 0 ? 0 : ne13 - 1); - - GGML_ASSERT(offset + im0 * nb0 + im1 * nb1 + im2 * nb2 + im3 * nb3 <= ggml_nbytes(dst)); - - GGML_ASSERT(nb10 == sizeof(float)); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int ir = ir0; ir < ir1; ++ir) { - // src0 and dst are viewed with shape of src1 and offset - // => same indices - const int i3 = ir / (ne12 * ne11); - const int i2 = (ir - i3 * ne12 * ne11) / ne11; - const int i1 = (ir - i3 * ne12 * ne11 - i2 * ne11); - - ggml_vec_cpy_f32(nc, (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1 + offset), - (float *) ((char *) src1->data + i3 * nb13 + i2 * nb12 + i1 * nb11)); - } -} - -static void ggml_compute_forward_set_i32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0)); - - // view src0 and dst with these strides and data offset inbytes during set - // nb0 is implicitly element_size because src0 and dst are contiguous - size_t nb1 = ((int32_t *) dst->op_params)[0]; - size_t nb2 = ((int32_t *) dst->op_params)[1]; - size_t nb3 = ((int32_t *) dst->op_params)[2]; - size_t offset = ((int32_t *) dst->op_params)[3]; - bool inplace = (bool) ((int32_t *) dst->op_params)[4]; - - if (!inplace) { - if (params->ith == 0) { - // memcpy needs to be synchronized across threads to avoid race conditions. - // => do it in INIT phase - memcpy(((char *) dst->data), ((char *) src0->data), ggml_nbytes(dst)); - } - ggml_barrier(params->threadpool); - } - - const int ith = params->ith; - const int nth = params->nth; - - const int nr = ggml_nrows(src1); - const int nc = src1->ne[0]; - - GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne) - GGML_TENSOR_LOCALS(size_t, nb1, src1, nb) - - // src0 and dst as viewed during set - const size_t nb0 = ggml_element_size(src0); - - const int im0 = (ne10 == 0 ? 0 : ne10 - 1); - const int im1 = (ne11 == 0 ? 0 : ne11 - 1); - const int im2 = (ne12 == 0 ? 0 : ne12 - 1); - const int im3 = (ne13 == 0 ? 
0 : ne13 - 1); - - GGML_ASSERT(offset + im0 * nb0 + im1 * nb1 + im2 * nb2 + im3 * nb3 <= ggml_nbytes(dst)); - - GGML_ASSERT(nb10 == sizeof(int32_t)); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int ir = ir0; ir < ir1; ++ir) { - // src0 and dst are viewed with shape of src1 and offset - // => same indices - const int i3 = ir / (ne12 * ne11); - const int i2 = (ir - i3 * ne12 * ne11) / ne11; - const int i1 = (ir - i3 * ne12 * ne11 - i2 * ne11); - - ggml_vec_cpy_i32(nc, (int32_t *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1 + offset), - (int32_t *) ((char *) src1->data + i3 * nb13 + i2 * nb12 + i1 * nb11)); - } -} - -void ggml_compute_forward_set(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_set_f32(params, dst); - } - break; - case GGML_TYPE_I32: - { - ggml_compute_forward_set_i32(params, dst); - } - break; - case GGML_TYPE_F16: - case GGML_TYPE_BF16: - case GGML_TYPE_Q4_0: - case GGML_TYPE_Q4_1: - case GGML_TYPE_Q5_0: - case GGML_TYPE_Q5_1: - case GGML_TYPE_Q8_0: - case GGML_TYPE_Q8_1: - case GGML_TYPE_MXFP4: - case GGML_TYPE_Q2_K: - case GGML_TYPE_Q3_K: - case GGML_TYPE_Q4_K: - case GGML_TYPE_Q5_K: - case GGML_TYPE_Q6_K: - case GGML_TYPE_TQ1_0: - case GGML_TYPE_TQ2_0: - case GGML_TYPE_IQ2_XXS: - case GGML_TYPE_IQ2_XS: - case GGML_TYPE_IQ3_XXS: - case GGML_TYPE_IQ1_S: - case GGML_TYPE_IQ1_M: - case GGML_TYPE_IQ4_NL: - case GGML_TYPE_IQ4_XS: - case GGML_TYPE_IQ3_S: - case GGML_TYPE_IQ2_S: - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_cpy - -void ggml_compute_forward_cpy(const ggml_compute_params * params, ggml_tensor * dst) { - ggml_compute_forward_dup(params, dst); -} - -// ggml_compute_forward_cont - -void ggml_compute_forward_cont(const ggml_compute_params * params, ggml_tensor * dst) { - ggml_compute_forward_dup(params, dst); -} - -// ggml_compute_forward_get_rows - -static void ggml_compute_forward_get_rows_q(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_TENSOR_BINARY_OP_LOCALS - - const int64_t nc = ne00; - const int64_t nr = ggml_nelements(src1); - - const ggml_type type = src0->type; - const ggml_to_float_t dequantize_row_q = ggml_get_type_traits(type)->to_float; - - assert(ne0 == nc); - assert(ne02 == ne11); - assert(nb00 == ggml_type_size(type)); - assert(ggml_nrows(dst) == nr); - - const int ith = params->ith; - const int nth = params->nth; - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int64_t i = ir0; i < ir1; ++i) { - const int64_t i12 = i / (ne11 * ne10); - const int64_t i11 = (i - i12 * ne11 * ne10) / ne10; - const int64_t i10 = (i - i12 * ne11 * ne10 - i11 * ne10); - const int64_t i01 = *(int32_t *) ((char *) src1->data + i10 * nb10 + i11 * nb11 + i12 * nb12); - - GGML_ASSERT(i01 >= 0 && i01 < ne01); - - dequantize_row_q((const void *) ((char *) src0->data + i01 * nb01 + i11 * nb02 + i12 * nb03), - (float *) ((char *) dst->data + i10 * nb1 + i11 * nb2 + i12 * nb3), nc); - } -} - -static void ggml_compute_forward_get_rows_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = 
dst->src[1]; - - GGML_TENSOR_BINARY_OP_LOCALS - - const int64_t nc = ne00; - const int64_t nr = ggml_nelements(src1); - - assert(ne0 == nc); - assert(ne02 == ne11); - assert(nb00 == sizeof(ggml_fp16_t)); - assert(ggml_nrows(dst) == nr); - - const int ith = params->ith; - const int nth = params->nth; - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int64_t i = ir0; i < ir1; ++i) { - const int64_t i12 = i / (ne11 * ne10); - const int64_t i11 = (i - i12 * ne11 * ne10) / ne10; - const int64_t i10 = (i - i12 * ne11 * ne10 - i11 * ne10); - const int64_t i01 = *(int32_t *) ((char *) src1->data + i10 * nb10 + i11 * nb11 + i12 * nb12); - - GGML_ASSERT(i01 >= 0 && i01 < ne01); - - ggml_cpu_fp16_to_fp32((const ggml_fp16_t *) ((char *) src0->data + i01 * nb01 + i11 * nb02 + i12 * nb03), - (float *) ((char *) dst->data + i10 * nb1 + i11 * nb2 + i12 * nb3), nc); - } -} - -static void ggml_compute_forward_get_rows_bf16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_TENSOR_BINARY_OP_LOCALS - - const int64_t nc = ne00; - const int64_t nr = ggml_nelements(src1); - - assert(ne0 == nc); - assert(ne02 == ne11); - assert(nb00 == sizeof(ggml_bf16_t)); - assert(ggml_nrows(dst) == nr); - - const int ith = params->ith; - const int nth = params->nth; - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int64_t i = ir0; i < ir1; ++i) { - const int64_t i12 = i / (ne11 * ne10); - const int64_t i11 = (i - i12 * ne11 * ne10) / ne10; - const int64_t i10 = (i - i12 * ne11 * ne10 - i11 * ne10); - const int64_t i01 = *(int32_t *) ((char *) src1->data + i10 * nb10 + i11 * nb11 + i12 * nb12); - - GGML_ASSERT(i01 >= 0 && i01 < ne01); - - ggml_cpu_bf16_to_fp32((const ggml_bf16_t *) ((char *) src0->data + i01 * nb01 + i11 * nb02 + i12 * nb03), - (float *) ((char *) dst->data + i10 * nb1 + i11 * nb2 + i12 * nb3), nc); - } -} - -static void ggml_compute_forward_get_rows_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_TENSOR_BINARY_OP_LOCALS - - const int64_t nc = ne00; - const int64_t nr = ggml_nelements(src1); - - assert(ne0 == nc); - assert(ne02 == ne11); - assert(nb00 == sizeof(float)); - assert(ggml_nrows(dst) == nr); - - const int ith = params->ith; - const int nth = params->nth; - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int64_t i = ir0; i < ir1; ++i) { - const int64_t i12 = i / (ne11 * ne10); - const int64_t i11 = (i - i12 * ne11 * ne10) / ne10; - const int64_t i10 = (i - i12 * ne11 * ne10 - i11 * ne10); - const int64_t i01 = *(int32_t *) ((char *) src1->data + i10 * nb10 + i11 * nb11 + i12 * nb12); - - GGML_ASSERT(i01 >= 0 && i01 < ne01); - - ggml_vec_cpy_f32(nc, (float *) ((char *) dst->data + i10 * nb1 + i11 * nb2 + i12 * nb3), - (float *) ((char *) src0->data + i01 * nb01 + i11 * nb02 + i12 * nb03)); - } -} - -void ggml_compute_forward_get_rows(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_Q4_0: - case GGML_TYPE_Q4_1: - case GGML_TYPE_Q5_0: - case GGML_TYPE_Q5_1: - 
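The get_rows kernels above all follow the same pattern: decode each index element of src1, then copy (or dequantize) the selected row of src0 into dst. A much-simplified sketch with a flat 1-D index list instead of the full (i10, i11, i12) decomposition and ne02/ne03 broadcasting (gather_rows_f32 is a hypothetical helper):

#include <cstdint>
#include <cstring>

// Copy row idx[i] of a contiguous [n_rows x row_len] f32 matrix into output row i.
static void gather_rows_f32(float * dst, const float * src, const int32_t * idx,
                            int64_t n_idx, int64_t row_len) {
    for (int64_t i = 0; i < n_idx; ++i) {
        const int64_t r = idx[i]; // row selected by src1
        std::memcpy(dst + i * row_len, src + r * row_len, row_len * sizeof(float));
    }
}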
case GGML_TYPE_Q8_0: - case GGML_TYPE_Q8_1: - case GGML_TYPE_MXFP4: - case GGML_TYPE_Q2_K: - case GGML_TYPE_Q3_K: - case GGML_TYPE_Q4_K: - case GGML_TYPE_Q5_K: - case GGML_TYPE_Q6_K: - case GGML_TYPE_TQ1_0: - case GGML_TYPE_TQ2_0: - case GGML_TYPE_IQ2_XXS: - case GGML_TYPE_IQ2_XS: - case GGML_TYPE_IQ3_XXS: - case GGML_TYPE_IQ1_S: - case GGML_TYPE_IQ1_M: - case GGML_TYPE_IQ4_NL: - case GGML_TYPE_IQ4_XS: - case GGML_TYPE_IQ3_S: - case GGML_TYPE_IQ2_S: - { - ggml_compute_forward_get_rows_q(params, dst); - } - break; - case GGML_TYPE_F16: - { - ggml_compute_forward_get_rows_f16(params, dst); - } - break; - case GGML_TYPE_BF16: - { - ggml_compute_forward_get_rows_bf16(params, dst); - } - break; - case GGML_TYPE_F32: - case GGML_TYPE_I32: - { - ggml_compute_forward_get_rows_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } - - //static bool first = true; - //printf("ne0 = %d, ne1 = %d, ne2 = %d\n", dst->ne[0], dst->ne[1], dst->ne[2]); - //if (first) { - // first = false; - //} else { - // for (int k = 0; k < dst->ne[1]; ++k) { - // for (int j = 0; j < dst->ne[0]/16; ++j) { - // for (int i = 0; i < 16; ++i) { - // printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]); - // } - // printf("\n"); - // } - // printf("\n"); - // } - // printf("\n"); - // exit(0); - //} -} - -template -static void ggml_compute_forward_set_rows_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_TENSOR_BINARY_OP_LOCALS - - const int64_t nc = ne00; - const int64_t nr = ne01; - - assert(ne0 == nc); - assert(ne2 == ne02); - assert(ne3 == ne03); - assert(src0->type == GGML_TYPE_F32); - assert(ne02 % ne11 == 0); - assert(ne03 % ne12 == 0); - - const int ith = params->ith; - const int nth = params->nth; - - // rows per thread - const int64_t dr = (nr + nth - 1) / nth; - - // row range for this thread - const int64_t ir0 = dr * ith; - const int64_t ir1 = std::min(ir0 + dr, nr); - - const ggml_from_float_t from_float = ggml_get_type_traits_cpu(dst->type)->from_float; - - for (int64_t i03 = 0; i03 < ne03; ++i03) { - for (int64_t i02 = 0; i02 < ne02; ++i02) { - for (int64_t i = ir0; i < ir1; ++i) { - const int64_t i12 = i03 % ne12; - const int64_t i11 = i02 % ne11; - const int64_t i10 = i; - - const int64_t i1 = *(idx_t *) ((char *) src1->data + i10 * nb10 + i11 * nb11 + i12 * nb12); - - GGML_ASSERT(i1 >= 0 && i1 < ne1); - - from_float((const float *) ((char *) src0->data + i * nb01 + i02 * nb02 + i03 * nb03), - ((char *) dst->data + i1 * nb1 + i02 * nb2 + i03 * nb3), nc); - } - } - } -} - -void ggml_compute_forward_set_rows(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - if (src1->type == GGML_TYPE_I64) { - ggml_compute_forward_set_rows_f32(params, dst); - } else if (src1->type == GGML_TYPE_I32) { - ggml_compute_forward_set_rows_f32(params, dst); - } else { - GGML_ABORT("src1->type = %d (%s) not supported", src1->type, ggml_type_name(src1->type)); - } - } - break; - default: - { - GGML_ABORT("src0->type = %d (%s) not supported", src0->type, ggml_type_name(src0->type)); - } - } -} - -// ggml_compute_forward_get_rows_back - -static void ggml_compute_forward_get_rows_back_f32_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - if (params->ith != 0) 
{ - return; - } - - GGML_ASSERT(ggml_is_contiguous(dst)); - - // ggml_compute_forward_dup_same_cont(params, opt0, dst); - - memset(dst->data, 0, ggml_nbytes(dst)); - - const int nc = src0->ne[0]; - const int nr = ggml_nelements(src1); - - GGML_ASSERT(dst->ne[0] == nc); - GGML_ASSERT(src0->nb[0] == sizeof(ggml_fp16_t)); - - for (int i = 0; i < nr; ++i) { - const int r = ((int32_t *) src1->data)[i]; - - for (int j = 0; j < nc; ++j) { - ggml_fp16_t v = ((ggml_fp16_t *) ((char *) src0->data + i * src0->nb[1]))[j]; - ((float *) ((char *) dst->data + r * dst->nb[1]))[j] += GGML_CPU_FP16_TO_FP32(v); - } - } -} - -static void ggml_compute_forward_get_rows_back_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - if (params->ith != 0) { - return; - } - - GGML_ASSERT(ggml_is_contiguous(dst)); - - // ggml_compute_forward_dup_same_cont(params, opt0, dst); - - memset(dst->data, 0, ggml_nbytes(dst)); - - const int nc = src0->ne[0]; - const int nr = ggml_nelements(src1); - - GGML_ASSERT(dst->ne[0] == nc); - GGML_ASSERT(src0->nb[0] == sizeof(float)); - - for (int i = 0; i < nr; ++i) { - const int r = ((int32_t *) src1->data)[i]; - - ggml_vec_add_f32(nc, (float *) ((char *) dst->data + r * dst->nb[1]), - (float *) ((char *) dst->data + r * dst->nb[1]), - (float *) ((char *) src0->data + i * src0->nb[1])); - } -} - -void ggml_compute_forward_get_rows_back(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F16: - { - ggml_compute_forward_get_rows_back_f32_f16(params, dst); - } - break; - case GGML_TYPE_F32: - { - ggml_compute_forward_get_rows_back_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } - - //static bool first = true; - //printf("ne0 = %d, ne1 = %d, ne2 = %d\n", dst->ne[0], dst->ne[1], dst->ne[2]); - //if (first) { - // first = false; - //} else { - // for (int k = 0; k < dst->ne[1]; ++k) { - // for (int j = 0; j < dst->ne[0]/16; ++j) { - // for (int i = 0; i < 16; ++i) { - // printf("%8.4f ", ((float *) dst->data)[k*dst->ne[0] + j*16 + i]); - // } - // printf("\n"); - // } - // printf("\n"); - // } - // printf("\n"); - // exit(0); - //} -} - -// ggml_compute_forward_diag - -static void ggml_compute_forward_diag_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - if (params->ith != 0) { - return; - } - - // TODO: handle transposed/permuted matrices - - GGML_TENSOR_UNARY_OP_LOCALS - - GGML_ASSERT(ne00 == ne0); - GGML_ASSERT(ne00 == ne1); - GGML_ASSERT(ne01 == 1); - GGML_ASSERT(ne02 == ne2); - GGML_ASSERT(ne03 == ne3); - - GGML_ASSERT(nb00 == sizeof(float)); - GGML_ASSERT(nb0 == sizeof(float)); - - for (int i3 = 0; i3 < ne3; i3++) { - for (int i2 = 0; i2 < ne2; i2++) { - for (int i1 = 0; i1 < ne1; i1++) { - float * d = (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1); - float * s = (float *) ((char *) src0->data + i3 * nb03 + i2 * nb02); - for (int i0 = 0; i0 < i1; i0++) { - d[i0] = 0; - } - d[i1] = s[i1]; - for (int i0 = i1 + 1; i0 < ne0; i0++) { - d[i0] = 0; - } - } - } - } -} - -void ggml_compute_forward_diag(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_diag_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_diag_mask_inf - 
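ggml_compute_forward_get_rows_back, removed just above, is the gradient of that gather: incoming gradient rows are scatter-added back to the rows they were gathered from, after dst has been zeroed. A standalone single-threaded sketch (rows_back_acc is a hypothetical helper; dst is assumed zero-initialized, as the kernel's memset guarantees):

#include <cassert>
#include <cstdint>

// dst[rows[i]] += grad[i] for each gathered row; dst has dst_rows rows of row_len floats.
static void rows_back_acc(float * dst, int64_t dst_rows, const float * grad,
                          const int32_t * rows, int64_t n_rows, int64_t row_len) {
    for (int64_t i = 0; i < n_rows; ++i) {
        const int64_t r = rows[i];
        assert(r >= 0 && r < dst_rows);
        for (int64_t j = 0; j < row_len; ++j) {
            dst[r * row_len + j] += grad[i * row_len + j];
        }
    }
}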
-static void ggml_compute_forward_diag_mask_f32(const ggml_compute_params * params, - ggml_tensor * dst, - const float value) { - const ggml_tensor * src0 = dst->src[0]; - - const int ith = params->ith; - const int nth = params->nth; - - const int n_past = ((int32_t *) dst->op_params)[0]; - const bool inplace = src0->data == dst->data; - - GGML_ASSERT(n_past >= 0); - - if (!inplace) { - if (ith == 0) { - // memcpy needs to be synchronized across threads to avoid race conditions. - // => do it in INIT phase - GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0)); - GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0)); - memcpy(((char *) dst->data), ((char *) src0->data), ggml_nbytes(dst)); - } - ggml_barrier(params->threadpool); - } - - // TODO: handle transposed/permuted matrices - - const int n = ggml_nrows(src0); - const int nc = src0->ne[0]; - const int nr = src0->ne[1]; - const int nz = n / nr; - - GGML_ASSERT(dst->nb[0] == sizeof(float)); - GGML_ASSERT(src0->nb[0] == sizeof(float)); - - for (int k = 0; k < nz; k++) { - for (int j = ith; j < nr; j += nth) { - for (int i = n_past; i < nc; i++) { - if (i > n_past + j) { - *(float *) ((char *) dst->data + k * dst->nb[2] + j * dst->nb[1] + i * dst->nb[0]) = value; - } - } - } - } -} - -void ggml_compute_forward_diag_mask_inf(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_diag_mask_f32(params, dst, -INFINITY); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -void ggml_compute_forward_diag_mask_zero(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_diag_mask_f32(params, dst, 0); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_soft_max - -static void ggml_compute_forward_soft_max_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - const ggml_tensor * src2 = dst->src[2]; - - assert(ggml_is_contiguous(dst)); - assert(ggml_are_same_shape(src0, dst)); - - float scale = 1.0f; - float max_bias = 0.0f; - - memcpy(&scale, (float *) dst->op_params + 0, sizeof(float)); - memcpy(&max_bias, (float *) dst->op_params + 1, sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - GGML_TENSOR_UNARY_OP_LOCALS - - const int64_t nb11 = src1 ? src1->nb[1] : 1; - const int64_t nb12 = src1 ? src1->nb[2] : 1; - const int64_t nb13 = src1 ? src1->nb[3] : 1; - - const int64_t ne12 = src1 ? src1->ne[2] : 1; - const int64_t ne13 = src1 ? src1->ne[3] : 1; - - // TODO: is this supposed to be ceil instead of floor? - // https://huggingface.co/mosaicml/mpt-7b/blob/main/attention.py#L370 - const uint32_t n_head = ne02; - const uint32_t n_head_log2 = 1u << (uint32_t) floor(log2(n_head)); - - const float m0 = powf(2.0f, -(max_bias) / n_head_log2); - const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2); - - float * wp = (float *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith; - - const bool use_f16 = (src1 && src1->type == GGML_TYPE_F16); - - // sinks - const float * sk = src2 ? 
(float *) ((char *) src2->data) : nullptr; - - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = ith; i01 < ne01; i01 += nth) { - const int64_t i11 = i01; - const int64_t i12 = i02 % ne12; - const int64_t i13 = i03 % ne13; - - // ALiBi - const uint32_t h = i02; // head - const float slope = - (max_bias > 0.0f) ? h < n_head_log2 ? powf(m0, h + 1) : powf(m1, 2 * (h - n_head_log2) + 1) : 1.0f; - - float * sp = (float *) ((char *) src0->data + i01 * nb01 + i02 * nb02 + i03 * nb03); - float * dp = (float *) ((char *) dst->data + i01 * nb1 + i02 * nb2 + i03 * nb3); - - // broadcast the mask across rows - ggml_fp16_t * mp_f16 = - src1 ? (ggml_fp16_t *) ((char *) src1->data + i11 * nb11 + i12 * nb12 + i13 * nb13) : NULL; - float * mp_f32 = src1 ? (float *) ((char *) src1->data + i11 * nb11 + i12 * nb12 + i13 * nb13) : NULL; - - ggml_vec_cpy_f32(ne00, wp, sp); - ggml_vec_scale_f32(ne00, wp, scale); - if (mp_f32) { - if (use_f16) { - for (int i = 0; i < ne00; ++i) { - wp[i] += slope * GGML_CPU_FP16_TO_FP32(mp_f16[i]); - } - } else { - for (int i = 0; i < ne00; ++i) { - wp[i] += slope * mp_f32[i]; - } - } - } - -#ifndef NDEBUG - for (int i = 0; i < ne00; ++i) { - //printf("p[%d] = %f\n", i, p[i]); - assert(!isnan(wp[i])); - } -#endif - - float max = -INFINITY; - ggml_vec_max_f32(ne00, &max, wp); - - // if we have sinks, make a correction as if they were included in the softmax - if (sk) { - max = MAX(max, sk[i02]); - } - - ggml_float sum = ggml_vec_soft_max_f32(ne00, dp, wp, max); - assert(sum > 0.0); - - if (sk) { - sum += (ggml_float) expf(sk[i02] - max); - } - - sum = 1.0 / sum; - ggml_vec_scale_f32(ne00, dp, sum); - -#ifndef NDEBUG - for (int i = 0; i < ne00; ++i) { - assert(!isnan(dp[i])); - assert(!isinf(dp[i])); - } -#endif - } - } - } -} - -void ggml_compute_forward_soft_max(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_soft_max_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_soft_max_ext_back - -static void ggml_compute_forward_soft_max_ext_back_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(ggml_is_contiguous(src0)); - GGML_ASSERT(ggml_is_contiguous(src1)); - GGML_ASSERT(ggml_is_contiguous(dst)); - GGML_ASSERT(ggml_are_same_shape(src0, dst)); - GGML_ASSERT(ggml_are_same_shape(src1, dst)); - - float scale = 1.0f; - float max_bias = 0.0f; - - memcpy(&scale, (const float *) dst->op_params + 0, sizeof(float)); - memcpy(&max_bias, (const float *) dst->op_params + 1, sizeof(float)); - - GGML_ASSERT(max_bias == 0.0f); - - // TODO: handle transposed/permuted matrices - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src0->ne[0]; - const int nr = ggml_nrows(src0); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - for (int i1 = ir0; i1 < ir1; i1++) { - float * dy = (float *) ((char *) src0->data + i1 * src0->nb[1]); - float * y = (float *) ((char *) src1->data + i1 * src1->nb[1]); - float * dx = (float *) ((char *) dst->data + i1 * dst->nb[1]); - -#ifndef NDEBUG - for (int i = 0; i < nc; ++i) { - //printf("p[%d] = %f\n", i, p[i]); - assert(!isnan(dy[i])); - assert(!isnan(y[i])); - } -#endif - // 
Jii = yi - yi*yi - // Jij = -yi*yj - // J = diag(y)-y.T*y - // dx = J * dy - // dxk = sum_i(Jki * dyi) - // dxk = sum_i(-yk*yi * dyi) - (-yk*yk)*dyk + (yk - yk*yk)*dyk - // dxk = sum_i(-yk*yi * dyi) + yk*yk*dyk + yk*dyk - yk*yk*dyk - // dxk = sum_i(-yk*yi * dyi) + yk*dyk - // dxk = -yk * sum_i(yi * dyi) + yk*dyk - // dxk = -yk * dot(y, dy) + yk*dyk - // dxk = yk * (- dot(y, dy) + dyk) - // dxk = yk * (dyk - dot(y, dy)) - // - // post-order: - // dot_y_dy := dot(y, dy) - // dx := dy - // dx := dx - dot_y_dy - // dx := dx * y - - // linear runtime, no additional memory - float dot_y_dy = 0; - ggml_vec_dot_f32(nc, &dot_y_dy, 0, y, 0, dy, 0, 1); - ggml_vec_cpy_f32(nc, dx, dy); - ggml_vec_acc1_f32(nc, dx, -dot_y_dy); - ggml_vec_mul_f32(nc, dx, dx, y); - ggml_vec_scale_f32(nc, dx, scale); - -#ifndef NDEBUG - for (int i = 0; i < nc; ++i) { - assert(!isnan(dx[i])); - assert(!isinf(dx[i])); - } -#endif - } -} - -void ggml_compute_forward_soft_max_ext_back(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_soft_max_ext_back_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_clamp - -static void ggml_compute_forward_clamp_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - float min; - float max; - memcpy(&min, (float *) dst->op_params + 0, sizeof(float)); - memcpy(&max, (float *) dst->op_params + 1, sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - const int n = ggml_nrows(src0); - const int nc = src0->ne[0]; - - const size_t nb00 = src0->nb[0]; - const size_t nb01 = src0->nb[1]; - - const size_t nb0 = dst->nb[0]; - const size_t nb1 = dst->nb[1]; - - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb00 == sizeof(float)); - - for (int j = ith; j < n; j += nth) { - float * dst_ptr = (float *) ((char *) dst->data + j * nb1); - float * src0_ptr = (float *) ((char *) src0->data + j * nb01); - - for (int i = 0; i < nc; i++) { - dst_ptr[i] = MAX(MIN(src0_ptr[i], max), min); - } - } -} - -static void ggml_compute_forward_clamp_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - float min; - float max; - memcpy(&min, (float *) dst->op_params + 0, sizeof(float)); - memcpy(&max, (float *) dst->op_params + 1, sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - const int n = ggml_nrows(src0); - const int nc = src0->ne[0]; - - const size_t nb00 = src0->nb[0]; - const size_t nb01 = src0->nb[1]; - - const size_t nb0 = dst->nb[0]; - const size_t nb1 = dst->nb[1]; - - GGML_ASSERT(nb0 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); - - for (int j = ith; j < n; j += nth) { - ggml_fp16_t * dst_ptr = (ggml_fp16_t *) ((char *) dst->data + j * nb1); - ggml_fp16_t * src0_ptr = (ggml_fp16_t *) ((char *) src0->data + j * nb01); - - for (int i = 0; i < nc; i++) { - float v = GGML_CPU_FP16_TO_FP32(src0_ptr[i]); - dst_ptr[i] = GGML_CPU_FP32_TO_FP16(MAX(MIN(v, max), min)); - } - } -} - -void ggml_compute_forward_clamp(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_clamp_f32(params, dst); - } - break; - case GGML_TYPE_F16: - { - ggml_compute_forward_clamp_f16(params, dst); - } - break; - case GGML_TYPE_BF16: - case GGML_TYPE_Q4_0: 
- case GGML_TYPE_Q4_1: - case GGML_TYPE_Q5_0: - case GGML_TYPE_Q5_1: - case GGML_TYPE_Q8_0: - case GGML_TYPE_Q8_1: - case GGML_TYPE_MXFP4: - case GGML_TYPE_Q2_K: - case GGML_TYPE_Q3_K: - case GGML_TYPE_Q4_K: - case GGML_TYPE_Q5_K: - case GGML_TYPE_Q6_K: - case GGML_TYPE_TQ1_0: - case GGML_TYPE_TQ2_0: - case GGML_TYPE_IQ2_XXS: - case GGML_TYPE_IQ2_XS: - case GGML_TYPE_IQ3_XXS: - case GGML_TYPE_IQ1_S: - case GGML_TYPE_IQ1_M: - case GGML_TYPE_IQ4_NL: - case GGML_TYPE_IQ4_XS: - case GGML_TYPE_IQ3_S: - case GGML_TYPE_IQ2_S: - case GGML_TYPE_Q8_K: - case GGML_TYPE_I8: - case GGML_TYPE_I16: - case GGML_TYPE_I32: - case GGML_TYPE_I64: - case GGML_TYPE_F64: - case GGML_TYPE_COUNT: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_rope - -static float rope_yarn_ramp(const float low, const float high, const int i0) { - const float y = (i0 / 2 - low) / MAX(0.001f, high - low); - return 1 - MIN(1, MAX(0, y)); -} - -// YaRN algorithm based on LlamaYaRNScaledRotaryEmbedding.py from https://github.com/jquesnelle/yarn -// MIT licensed. Copyright (c) 2023 Jeffrey Quesnelle and Bowen Peng. -static void rope_yarn(float theta_extrap, - float freq_scale, - float corr_dims[2], - int64_t i0, - float ext_factor, - float mscale, - float * cos_theta, - float * sin_theta) { - // Get n-d rotational scaling corrected for extrapolation - float theta_interp = freq_scale * theta_extrap; - float theta = theta_interp; - if (ext_factor != 0.0f) { - float ramp_mix = rope_yarn_ramp(corr_dims[0], corr_dims[1], i0) * ext_factor; - theta = theta_interp * (1 - ramp_mix) + theta_extrap * ramp_mix; - - // Get n-d magnitude scaling corrected for interpolation - mscale *= 1.0f + 0.1f * logf(1.0f / freq_scale); - } - *cos_theta = cosf(theta) * mscale; - *sin_theta = sinf(theta) * mscale; -} - -static void ggml_rope_cache_init(float theta_base, - float freq_scale, - const float * freq_factors, - float corr_dims[2], - int64_t ne0, - float ext_factor, - float mscale, - float * cache, - float sin_sign, - float theta_scale) { - // ref: https://github.com/jquesnelle/yarn/blob/master/scaled_rope/LlamaYaRNScaledRotaryEmbedding.py - float theta = theta_base; - for (int64_t i0 = 0; i0 < ne0; i0 += 2) { - const float ff = freq_factors ? freq_factors[i0 / 2] : 1.0f; - rope_yarn(theta / ff, freq_scale, corr_dims, i0, ext_factor, mscale, &cache[i0 + 0], &cache[i0 + 1]); - cache[i0 + 1] *= sin_sign; - - theta *= theta_scale; - } -} - -static void ggml_mrope_cache_init(float theta_base_t, - float theta_base_h, - float theta_base_w, - float theta_base_e, - int sections[4], - bool is_imrope, - bool indep_sects, - float freq_scale, - const float * freq_factors, - float corr_dims[2], - int64_t ne0, - float ext_factor, - float mscale, - float * cache, - float sin_sign, - float theta_scale) { - // ref: https://github.com/jquesnelle/yarn/blob/master/scaled_rope/LlamaYaRNScaledRotaryEmbedding.py - float theta_t = theta_base_t; - float theta_h = theta_base_h; - float theta_w = theta_base_w; - float theta_e = theta_base_e; // extra position id for vision encoder - int sect_dims = sections[0] + sections[1] + sections[2] + sections[3]; - int sec_w = sections[1] + sections[0]; - int sec_e = sections[2] + sec_w; - GGML_ASSERT(sect_dims <= ne0); - - for (int64_t i0 = 0; i0 < ne0; i0 += 2) { - const float ff = freq_factors ? freq_factors[i0 / 2] : 1.0f; - - int sector = (i0 / 2) % sect_dims; - if (indep_sects) { - // compute theta independently for each dim sections - // (i.e. 
reset corresponding theta when `i0` go from one section to another) - if (sector == 0) { - theta_t = theta_base_t; - } else if (sector == sections[0]) { - theta_h = theta_base_h; - ; - } else if (sector == sec_w) { - theta_w = theta_base_w; - } else if (sector == sec_e) { - theta_e = theta_base_e; - } - } - - float theta = theta_t; - if (is_imrope) { // qwen3vl apply interleaved mrope - if (sector % 3 == 1 && sector < 3 * sections[1]) { - theta = theta_h; - } else if (sector % 3 == 2 && sector < 3 * sections[2]) { - theta = theta_w; - } else if (sector % 3 == 0 && sector < 3 * sections[0]) { - theta = theta_t; - } else { - theta = theta_e; - } - } else { - if (sector >= sections[0] && sector < sec_w) { - theta = theta_h; - } else if (sector >= sec_w && sector < sec_w + sections[2]) { - theta = theta_w; - } else if (sector >= sec_w + sections[2]) { - theta = theta_e; - } - } - - rope_yarn(theta / ff, freq_scale, corr_dims, i0, ext_factor, mscale, &cache[i0 + 0], &cache[i0 + 1]); - cache[i0 + 1] *= sin_sign; - - theta_t *= theta_scale; - theta_w *= theta_scale; - theta_h *= theta_scale; - theta_e *= theta_scale; - } -} - -template <typename T> -static void rotate_pairs(const int64_t n, - const int64_t n_offset, - const float * cache, - const T * src_data, - T * dst_data, - const int scale = 2) { - for (int64_t i0 = 0; i0 < n; i0 += 2) { - const int64_t ic = - i0 / scale; // hack for GGML_ROPE_TYPE_NORMAL, where we need ic = i0; for all other cases, ic = i0/2 - - const float cos_theta = cache[i0 + 0]; - const float sin_theta = cache[i0 + 1]; - - const T * const src = src_data + ic; - T * dst = dst_data + ic; - - const float x0 = type_conversion_table<T>::to_f32(src[0]); - const float x1 = type_conversion_table<T>::to_f32(src[n_offset]); - - dst[0] = type_conversion_table<T>::from_f32(x0 * cos_theta - x1 * sin_theta); - dst[n_offset] = type_conversion_table<T>::from_f32(x0 * sin_theta + x1 * cos_theta); - } -} - -template <typename T> //float or ggml_fp16_t -static void ggml_compute_forward_rope_flt(const ggml_compute_params * params, ggml_tensor * dst, const bool forward) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - const ggml_tensor * src2 = dst->src[2]; - - GGML_ASSERT(src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_I32); - - float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow; - int sections[4]; - - //const int n_past = ((int32_t *) dst->op_params)[0]; - const int n_dims = ((int32_t *) dst->op_params)[1]; - const int mode = ((int32_t *) dst->op_params)[2]; - //const int n_ctx = ((int32_t *) dst->op_params)[3]; - const int n_ctx_orig = ((int32_t *) dst->op_params)[4]; - - memcpy(&freq_base, (int32_t *) dst->op_params + 5, sizeof(float)); - memcpy(&freq_scale, (int32_t *) dst->op_params + 6, sizeof(float)); - memcpy(&ext_factor, (int32_t *) dst->op_params + 7, sizeof(float)); - memcpy(&attn_factor, (int32_t *) dst->op_params + 8, sizeof(float)); - memcpy(&beta_fast, (int32_t *) dst->op_params + 9, sizeof(float)); - memcpy(&beta_slow, (int32_t *) dst->op_params + 10, sizeof(float)); - memcpy(&sections, (int32_t *) dst->op_params + 11, sizeof(int) * 4); - - GGML_TENSOR_UNARY_OP_LOCALS - - //printf("ne0: %d, ne1: %d, ne2: %d, ne3: %d\n", ne0, ne1, ne2, ne3); - //printf("n_past = %d, ne2 = %d\n", n_past, ne2); - - GGML_ASSERT(nb0 == nb00); - GGML_ASSERT(nb0 == sizeof(T)); - - const int ith = params->ith; - const int nth = params->nth; - - const int nr = ggml_nrows(dst); - - GGML_ASSERT(n_dims <= ne0); - GGML_ASSERT(n_dims
% 2 == 0); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - // row index used to determine which thread to use - int ir = 0; - - const float theta_scale = powf(freq_base, -2.0f / n_dims); - - float corr_dims[2]; - ggml_rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow, corr_dims); - - const bool is_imrope = mode == GGML_ROPE_TYPE_IMROPE; // qwen3vl apply interleaved mrope - const bool mrope_used = - mode & GGML_ROPE_TYPE_MROPE; // ggml_rope_multi, note: also true for vision (24 & 8 == true) and for imrope - const bool is_vision = mode == GGML_ROPE_TYPE_VISION; - - if (mrope_used) { - GGML_ASSERT(sections[0] > 0 || sections[1] > 0 || sections[2] > 0); - } - - if (is_vision) { - GGML_ASSERT(n_dims == ne0 / 2); - } - - const float * freq_factors = NULL; - if (src2 != NULL) { - GGML_ASSERT(src2->type == GGML_TYPE_F32); - GGML_ASSERT(src2->ne[0] >= n_dims / 2); - freq_factors = (const float *) src2->data; - } - - // backward process uses inverse rotation by cos and sin. - // cos and sin build a rotation matrix, where the inverse is the transpose. - // this essentially just switches the sign of sin. - const float sin_sign = forward ? 1.0f : -1.0f; - - const int32_t * pos = (const int32_t *) src1->data; - - for (int64_t i3 = 0; i3 < ne3; i3++) { // batch - for (int64_t i2 = 0; i2 < ne2; i2++) { // seq-len - - float * cache = (float *) params->wdata + (ne0 + CACHE_LINE_SIZE_F32) * ith; - if (!mrope_used) { - const int64_t p = pos[i2]; - ggml_rope_cache_init(p, freq_scale, freq_factors, corr_dims, ne0, ext_factor, attn_factor, cache, - sin_sign, theta_scale); - } else { - const int64_t p_t = pos[i2]; - const int64_t p_h = pos[i2 + ne2]; - const int64_t p_w = pos[i2 + ne2 * 2]; - const int64_t p_e = pos[i2 + ne2 * 3]; - ggml_mrope_cache_init(p_t, p_h, p_w, p_e, sections, is_imrope, is_vision, freq_scale, freq_factors, - corr_dims, ne0, ext_factor, attn_factor, cache, sin_sign, theta_scale); - } - - for (int64_t i1 = 0; i1 < ne1; i1++) { // attn-heads - if (ir++ < ir0) { - continue; - } - if (ir > ir1) { - break; - } - - T * src = (T *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01); - T * dst_data = (T *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1); - - switch (mode) { - case GGML_ROPE_TYPE_NORMAL: - rotate_pairs(n_dims, 1, cache, src, dst_data, 1); - break; - case GGML_ROPE_TYPE_NEOX: - case GGML_ROPE_TYPE_MROPE: - case GGML_ROPE_TYPE_IMROPE: - rotate_pairs(n_dims, n_dims / 2, cache, src, dst_data); - break; - case GGML_ROPE_TYPE_VISION: - rotate_pairs(ne0, n_dims, cache, src, dst_data); - break; - default: - GGML_ABORT("rope type not supported"); - } - - if (!is_vision) { - // fill the remain channels with data from src tensor - for (int64_t i0 = n_dims; i0 < ne0; i0 += 2) { - const T * const src = - (T *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01 + i0 * nb00); - T * dst_data = (T *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1 + i0 * nb0); - - dst_data[0] = src[0]; - dst_data[1] = src[1]; - } - } - } //attn-heads - } - } -} - -void ggml_compute_forward_rope(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F16: - { - ggml_compute_forward_rope_flt(params, dst, true); - } - break; - case GGML_TYPE_F32: - { - ggml_compute_forward_rope_flt(params, dst, true); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// 
ggml_compute_forward_rope_back - -void ggml_compute_forward_rope_back(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F16: - { - ggml_compute_forward_rope_flt(params, dst, false); - } - break; - case GGML_TYPE_F32: - { - ggml_compute_forward_rope_flt(params, dst, false); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_conv_transpose_1d - -static void ggml_compute_forward_conv_transpose_1d_f16_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); - - GGML_TENSOR_BINARY_OP_LOCALS - - const int ith = params->ith; - const int nth = params->nth; - - const int nk = ne00 * ne01 * ne02; - - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb10 == sizeof(float)); - - if (ith == 0) { - memset(params->wdata, 0, params->wsize); - - // permute kernel data (src0) from (K x Cout x Cin) to (Cin x K x Cout) - { - ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; - - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = 0; i01 < ne01; i01++) { - const ggml_fp16_t * const src = (ggml_fp16_t *) ((char *) src0->data + i02 * nb02 + i01 * nb01); - ggml_fp16_t * dst_data = wdata + i01 * ne00 * ne02; - for (int64_t i00 = 0; i00 < ne00; i00++) { - dst_data[i00 * ne02 + i02] = src[i00]; - } - } - } - } - - // permute source data (src1) from (L x Cin) to (Cin x L) - { - ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + nk; - ggml_fp16_t * dst_data = wdata; - - for (int64_t i11 = 0; i11 < ne11; i11++) { - const float * const src = (float *) ((char *) src1->data + i11 * nb11); - for (int64_t i10 = 0; i10 < ne10; i10++) { - dst_data[i10 * ne11 + i11] = GGML_CPU_FP32_TO_FP16(src[i10]); - } - } - } - - // need to zero dst since we are accumulating into it - memset(dst->data, 0, ggml_nbytes(dst)); - } - ggml_barrier(params->threadpool); - - const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; - - // total rows in dst - const int nr = ne1; - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; - ggml_fp16_t * const wdata_src = wdata + nk; - - for (int i1 = ir0; i1 < ir1; i1++) { - float * dst_data = (float *) ((char *) dst->data + i1 * nb1); - ggml_fp16_t * wdata_kernel = wdata + i1 * ne02 * ne00; - for (int i10 = 0; i10 < ne10; i10++) { - const int i1n = i10 * ne11; - for (int i00 = 0; i00 < ne00; i00++) { - float v = 0; - ggml_vec_dot_f16(ne02, &v, 0, (ggml_fp16_t *) wdata_src + i1n, 0, - (ggml_fp16_t *) wdata_kernel + i00 * ne02, 0, 1); - dst_data[i10 * s0 + i00] += v; - } - } - } -} - -static void ggml_compute_forward_conv_transpose_1d_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(src0->type == GGML_TYPE_F32); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); - - GGML_TENSOR_BINARY_OP_LOCALS - - const int ith = params->ith; - const int nth = params->nth; - - const int nk = ne00 * ne01 * ne02; - - GGML_ASSERT(nb00 == sizeof(float)); - GGML_ASSERT(nb10 == sizeof(float)); - - if (ith == 0) { - 
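        // Single-threaded setup: thread 0 stages permuted copies of the kernel (src0) and
        // of src1 in wdata and zeroes dst (the accumulation loop below uses +=); the
        // barrier right after this block publishes the staged data to every thread.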
memset(params->wdata, 0, params->wsize); - - // prepare kernel data (src0) from (K x Cout x Cin) to (Cin x K x Cout) - { - float * const wdata = (float *) params->wdata + 0; - - for (int64_t i02 = 0; i02 < ne02; i02++) { - for (int64_t i01 = 0; i01 < ne01; i01++) { - const float * const src = (float *) ((char *) src0->data + i02 * nb02 + i01 * nb01); - float * dst_data = wdata + i01 * ne00 * ne02; - for (int64_t i00 = 0; i00 < ne00; i00++) { - dst_data[i00 * ne02 + i02] = src[i00]; - } - } - } - } - - // prepare source data (src1) - { - float * const wdata = (float *) params->wdata + nk; - float * dst_data = wdata; - - for (int64_t i11 = 0; i11 < ne11; i11++) { - const float * const src = (float *) ((char *) src1->data + i11 * nb11); - for (int64_t i10 = 0; i10 < ne10; i10++) { - dst_data[i10 * ne11 + i11] = src[i10]; - } - } - } - - // need to zero dst since we are accumulating into it - memset(dst->data, 0, ggml_nbytes(dst)); - } - ggml_barrier(params->threadpool); - - const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; - - // total rows in dst - const int nr = ne1; - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - float * const wdata = (float *) params->wdata + 0; - float * const wdata_src = wdata + nk; - - for (int i1 = ir0; i1 < ir1; i1++) { - float * dst_data = (float *) ((char *) dst->data + i1 * nb1); - float * wdata_kernel = wdata + i1 * ne02 * ne00; - for (int i10 = 0; i10 < ne10; i10++) { - const int i1n = i10 * ne11; - for (int i00 = 0; i00 < ne00; i00++) { - float v = 0; - ggml_vec_dot_f32(ne02, &v, 0, wdata_src + i1n, 0, wdata_kernel + i00 * ne02, 0, 1); - dst_data[i10 * s0 + i00] += v; - } - } - } -} - -void ggml_compute_forward_conv_transpose_1d(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F16: - { - ggml_compute_forward_conv_transpose_1d_f16_f32(params, dst); - } - break; - case GGML_TYPE_F32: - { - ggml_compute_forward_conv_transpose_1d_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_im2col_f32 -// src0: kernel [OC, IC, KH, KW] -// src1: image [N, IC, IH, IW] -// dst: result [N, OH, OW, IC*KH*KW] -static void ggml_compute_forward_im2col_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); - - GGML_TENSOR_BINARY_OP_LOCALS; - - const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; - const int32_t s1 = ((const int32_t *) (dst->op_params))[1]; - const int32_t p0 = ((const int32_t *) (dst->op_params))[2]; - const int32_t p1 = ((const int32_t *) (dst->op_params))[3]; - const int32_t d0 = ((const int32_t *) (dst->op_params))[4]; - const int32_t d1 = ((const int32_t *) (dst->op_params))[5]; - const bool is_2D = ((const int32_t *) (dst->op_params))[6] == 1; - - const int ith = params->ith; - const int nth = params->nth; - - const int64_t N = is_2D ? ne13 : ne12; - const int64_t IC = is_2D ? ne12 : ne11; - const int64_t IH = is_2D ? ne11 : 1; - const int64_t IW = ne10; - - const int64_t KH = is_2D ? ne01 : 1; - const int64_t KW = ne00; - - const int64_t OH = is_2D ? ne2 : 1; - const int64_t OW = ne1; - - int ofs0 = is_2D ? nb13 : nb12; - int ofs1 = is_2D ? 
nb12 : nb11; - - GGML_ASSERT(nb10 == sizeof(float)); - - // im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW] - { - float * const wdata = (float *) dst->data; - - for (int64_t in = 0; in < N; in++) { - for (int64_t ioh = 0; ioh < OH; ioh++) { // 1 - for (int64_t iow = 0; iow < OW; iow++) { - for (int64_t iic = ith; iic < IC; iic += nth) { - // micro kernel - float * dst_data = wdata + (in * OH * OW + ioh * OW + iow) * (IC * KH * KW); // [IC, KH, KW] - const float * const src_data = - (float *) ((char *) src1->data + in * ofs0 + iic * ofs1); // [IH, IW] - - for (int64_t ikh = 0; ikh < KH; ikh++) { // 1 - for (int64_t ikw = 0; ikw < KW; ikw++) { - const int64_t iiw = iow * s0 + ikw * d0 - p0; - const int64_t iih = ioh * s1 + ikh * d1 - p1; - - if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) { - dst_data[iic * (KH * KW) + ikh * KW + ikw] = 0; - } else { - dst_data[iic * (KH * KW) + ikh * KW + ikw] = (src_data[iih * IW + iiw]); - } - } - } - } - } - } - } - } -} - -// ggml_compute_forward_im2col_f16 -// src0: kernel [OC, IC, KH, KW] -// src1: image [N, IC, IH, IW] -// dst: result [N, OH, OW, IC*KH*KW] -static void ggml_compute_forward_im2col_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F16); - - GGML_TENSOR_BINARY_OP_LOCALS; - - const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; - const int32_t s1 = ((const int32_t *) (dst->op_params))[1]; - const int32_t p0 = ((const int32_t *) (dst->op_params))[2]; - const int32_t p1 = ((const int32_t *) (dst->op_params))[3]; - const int32_t d0 = ((const int32_t *) (dst->op_params))[4]; - const int32_t d1 = ((const int32_t *) (dst->op_params))[5]; - const bool is_2D = ((const int32_t *) (dst->op_params))[6] == 1; - - const int ith = params->ith; - const int nth = params->nth; - - const int64_t N = is_2D ? ne13 : ne12; - const int64_t IC = is_2D ? ne12 : ne11; - const int64_t IH = is_2D ? ne11 : 1; - const int64_t IW = ne10; - - const int64_t KH = is_2D ? ne01 : 1; - const int64_t KW = ne00; - - const int64_t OH = is_2D ? ne2 : 1; - const int64_t OW = ne1; - - int ofs0 = is_2D ? nb13 : nb12; - int ofs1 = is_2D ? 
nb12 : nb11; - - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb10 == sizeof(float)); - - // im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW] - { - ggml_fp16_t * const wdata = (ggml_fp16_t *) dst->data; - - for (int64_t in = 0; in < N; in++) { - for (int64_t ioh = 0; ioh < OH; ioh++) { // 1 - for (int64_t iow = 0; iow < OW; iow++) { - for (int64_t iic = ith; iic < IC; iic += nth) { - // micro kernel - ggml_fp16_t * dst_data = - wdata + (in * OH * OW + ioh * OW + iow) * (IC * KH * KW); // [IC, KH, KW] - const float * const src_data = - (float *) ((char *) src1->data + in * ofs0 + iic * ofs1); // [IH, IW] - - for (int64_t ikh = 0; ikh < KH; ikh++) { // 1 - for (int64_t ikw = 0; ikw < KW; ikw++) { - const int64_t iiw = iow * s0 + ikw * d0 - p0; - const int64_t iih = ioh * s1 + ikh * d1 - p1; - - if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) { - dst_data[iic * (KH * KW) + ikh * KW + ikw] = 0; - } else { - dst_data[iic * (KH * KW) + ikh * KW + ikw] = - GGML_CPU_FP32_TO_FP16(src_data[iih * IW + iiw]); - } - } - } - } - } - } - } - } -} - -void ggml_compute_forward_im2col(const ggml_compute_params * params, ggml_tensor * dst) { - switch (dst->type) { - case GGML_TYPE_F16: - { - ggml_compute_forward_im2col_f16(params, dst); - } - break; - case GGML_TYPE_F32: - { - ggml_compute_forward_im2col_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_im2col_back_f32 - -void ggml_compute_forward_im2col_back_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; // gradients of forward pass output - const ggml_tensor * src1 = dst->src[1]; // convolution kernel - - GGML_ASSERT(src0->type == GGML_TYPE_F32); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); - - GGML_TENSOR_BINARY_OP_LOCALS; - - const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; - const int32_t s1 = ((const int32_t *) (dst->op_params))[1]; - const int32_t p0 = ((const int32_t *) (dst->op_params))[2]; - const int32_t p1 = ((const int32_t *) (dst->op_params))[3]; - const int32_t d0 = ((const int32_t *) (dst->op_params))[4]; - const int32_t d1 = ((const int32_t *) (dst->op_params))[5]; - const bool is_2D = ((const int32_t *) (dst->op_params))[6] == 1; - - const int ith = params->ith; - const int nth = params->nth; - - const int64_t N = is_2D ? ne3 : ne2; - const int64_t IC = is_2D ? ne2 : ne1; - const int64_t IH = is_2D ? ne1 : 1; - const int64_t IW = ne0; - - const int64_t KH = is_2D ? ne11 : 1; - const int64_t KW = ne10; - - const int64_t OH = is_2D ? ne02 : 1; - const int64_t OW = ne01; - - int ofs0 = is_2D ? nb3 : nb2; - int ofs1 = is_2D ? nb2 : nb1; - - GGML_ASSERT(nb0 == sizeof(float)); - - // im2col: [N, IC, IH, IW] => [N, OH, OW, IC*KH*KW] - { - float * const wdata = (float *) dst->data; - - for (int64_t in = 0; in < N; in++) { - for (int64_t iic = ith; iic < IC; iic += nth) { - for (int64_t iih = 0; iih < IH; iih++) { - for (int64_t iiw = 0; iiw < IW; iiw++) { - // micro kernel - float grad = 0.0f; - for (int64_t ikh = 0; ikh < KH; ikh++) { - for (int64_t ikw = 0; ikw < KW; ikw++) { - // For s0 > 1 some values were skipped over in the forward pass. - // These values have tmpw % s0 != 0 and need to be skipped in the backwards pass as well. - const int64_t tmpw = (iiw + p0 - ikw * d0); - if (tmpw % s0 != 0) { - continue; - } - const int64_t iow = tmpw / s0; - - // Equivalent logic as above except for s1. 
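                        // Illustration with example values: for s0 = 2, d0 = 1, p0 = 0 the forward
                        // pass read column iiw = iow*2 + ikw, so tmpw = iiw - ikw is even whenever a
                        // matching output column exists; an odd tmpw has no integer iow and is
                        // skipped, otherwise iow = tmpw/2 locates the contributing gradient. The
                        // tmph/s1 check just below applies the same rule to rows.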
- int64_t ioh; - if (is_2D) { - const int64_t tmph = iih + p1 - ikh * d1; - - if (tmph % s1 != 0) { - continue; - } - - ioh = tmph / s1; - } else { - ioh = 0; - } - - if (iow < 0 || iow >= OW || ioh < 0 || ioh >= OH) { - continue; - } - - const float * const grad_in = - (const float *) src0->data + - (in * OH * OW + ioh * OW + iow) * (IC * KH * KW); // [IC, KH, KW] - grad += grad_in[iic * (KH * KW) + ikh * KW + ikw]; - } - } - float * dst_data = (float *) ((char *) wdata + (in * ofs0 + iic * ofs1)); // [IH, IW] - dst_data[iih * IW + iiw] = grad; - } - } - } - } - } -} - -// ggml_compute_forward_im2col_3d_f16 -// src0: kernel [OC*IC, KD, KH, KW] -// src1: image [N*IC, ID, IH, IW] -// dst: result [N*OD, OH, OW, IC * KD * KH * KW] -static void ggml_compute_forward_im2col_3d_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F16); - - GGML_TENSOR_BINARY_OP_LOCALS; - - const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; - const int32_t s1 = ((const int32_t *) (dst->op_params))[1]; - const int32_t s2 = ((const int32_t *) (dst->op_params))[2]; - const int32_t p0 = ((const int32_t *) (dst->op_params))[3]; - const int32_t p1 = ((const int32_t *) (dst->op_params))[4]; - const int32_t p2 = ((const int32_t *) (dst->op_params))[5]; - const int32_t d0 = ((const int32_t *) (dst->op_params))[6]; - const int32_t d1 = ((const int32_t *) (dst->op_params))[7]; - const int32_t d2 = ((const int32_t *) (dst->op_params))[8]; - const int32_t IC = ((const int32_t *) (dst->op_params))[9]; - - const int ith = params->ith; - const int nth = params->nth; - - const int64_t N = ne13 / IC; - const int64_t ID = ne12; - const int64_t IH = ne11; - const int64_t IW = ne10; - - const int64_t OC = ne03 / IC; - GGML_UNUSED(OC); - const int64_t KD = ne02; - const int64_t KH = ne01; - const int64_t KW = ne00; - - const int64_t OD = ne3 / N; - const int64_t OH = ne2; - const int64_t OW = ne1; - const int64_t OH_OW = OH * OW; - const int64_t KD_KH_KW = KD * KH * KW; - const int64_t KH_KW = KH * KW; - const int64_t IC_KD_KH_KW = IC * KD * KH * KW; - - GGML_ASSERT(nb10 == sizeof(float)); - - // im2col: [N*IC, ID, IH, IW] => [N*OD, OH, OW, IC * KD * KH * KW] - { - ggml_fp16_t * const wdata = (ggml_fp16_t *) dst->data; - - for (int64_t in = 0; in < N; in++) { - for (int64_t iod = 0; iod < OD; iod++) { - for (int64_t ioh = 0; ioh < OH; ioh++) { - for (int64_t iow = 0; iow < OW; iow++) { - for (int64_t iic = ith; iic < IC; iic += nth) { - // micro kernel - ggml_fp16_t * dst_data = wdata + (in * OD * OH_OW + iod * OH_OW + ioh * OW + iow) * - IC_KD_KH_KW; // [IC, KD, KH, KW] - const float * const src_data = - (const float *) ((const char *) src1->data + (in * IC + iic) * nb13); // [ID, IH, IW] - - for (int64_t ikd = 0; ikd < KD; ikd++) { - for (int64_t ikh = 0; ikh < KH; ikh++) { - for (int64_t ikw = 0; ikw < KW; ikw++) { - const int64_t iiw = iow * s0 + ikw * d0 - p0; - const int64_t iih = ioh * s1 + ikh * d1 - p1; - const int64_t iid = iod * s2 + ikd * d2 - p2; - - if (iid < 0 || iid >= ID || iih < 0 || iih >= IH || iiw < 0 || iiw >= IW || - iid < 0 || iid >= ID) { - dst_data[iic * KD_KH_KW + ikd * KH_KW + ikh * KW + ikw] = 0; - } else { - const float * const s = - (const float *) ((const char *) src_data + iid * nb12 + iih * nb11 + - iiw * nb10); // [ID, IH, IW] - dst_data[iic * KD_KH_KW + ikd * KH_KW + ikh * KW 
+ ikw] = - GGML_CPU_FP32_TO_FP16(*s); - } - } - } - } - } - } - } - } - } - } -} - -// ggml_compute_forward_im2col_3d_f32 -// src0: kernel [OC*IC, KD, KH, KW] -// src1: image [N*IC, ID, IH, IW] -// dst: result [N*OD, OH, OW, IC * KD * KH * KW] -static void ggml_compute_forward_im2col_3d_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); - - GGML_TENSOR_BINARY_OP_LOCALS; - - const int32_t s0 = ((const int32_t *) (dst->op_params))[0]; - const int32_t s1 = ((const int32_t *) (dst->op_params))[1]; - const int32_t s2 = ((const int32_t *) (dst->op_params))[2]; - const int32_t p0 = ((const int32_t *) (dst->op_params))[3]; - const int32_t p1 = ((const int32_t *) (dst->op_params))[4]; - const int32_t p2 = ((const int32_t *) (dst->op_params))[5]; - const int32_t d0 = ((const int32_t *) (dst->op_params))[6]; - const int32_t d1 = ((const int32_t *) (dst->op_params))[7]; - const int32_t d2 = ((const int32_t *) (dst->op_params))[8]; - const int32_t IC = ((const int32_t *) (dst->op_params))[9]; - - const int ith = params->ith; - const int nth = params->nth; - - const int64_t N = ne13 / IC; - const int64_t ID = ne12; - const int64_t IH = ne11; - const int64_t IW = ne10; - - const int64_t OC = ne03 / IC; - GGML_UNUSED(OC); - const int64_t KD = ne02; - const int64_t KH = ne01; - const int64_t KW = ne00; - - const int64_t OD = ne3 / N; - const int64_t OH = ne2; - const int64_t OW = ne1; - - const int64_t OH_OW = OH * OW; - const int64_t KD_KH_KW = KD * KH * KW; - const int64_t KH_KW = KH * KW; - const int64_t IC_KD_KH_KW = IC * KD * KH * KW; - - GGML_ASSERT(nb10 == sizeof(float)); - - // im2col: [N*IC, ID, IH, IW] => [N*OD, OH, OW, IC * KD * KH * KW] - { - float * const wdata = (float *) dst->data; - - for (int64_t in = 0; in < N; in++) { - for (int64_t iod = 0; iod < OD; iod++) { - for (int64_t ioh = 0; ioh < OH; ioh++) { - for (int64_t iow = 0; iow < OW; iow++) { - for (int64_t iic = ith; iic < IC; iic += nth) { - // micro kernel - float * dst_data = wdata + (in * OD * OH_OW + iod * OH_OW + ioh * OW + iow) * - IC_KD_KH_KW; // [IC, KD, KH, KW] - const float * const src_data = - (const float *) ((const char *) src1->data + (in * IC + iic) * nb13); // [ID, IH, IW] - - for (int64_t ikd = 0; ikd < KD; ikd++) { - for (int64_t ikh = 0; ikh < KH; ikh++) { - for (int64_t ikw = 0; ikw < KW; ikw++) { - const int64_t iiw = iow * s0 + ikw * d0 - p0; - const int64_t iih = ioh * s1 + ikh * d1 - p1; - const int64_t iid = iod * s2 + ikd * d2 - p2; - - if (iid < 0 || iid >= ID || iih < 0 || iih >= IH || iiw < 0 || iiw >= IW || - iid < 0 || iid >= ID) { - dst_data[iic * KD_KH_KW + ikd * KH_KW + ikh * KW + ikw] = 0; - } else { - const float * const s = - (const float *) ((const char *) src_data + iid * nb12 + iih * nb11 + - iiw * nb10); // [ID, IH, IW] - dst_data[iic * KD_KH_KW + ikd * KH_KW + ikh * KW + ikw] = *s; - } - } - } - } - } - } - } - } - } - } -} - -void ggml_compute_forward_im2col_3d(const ggml_compute_params * params, ggml_tensor * dst) { - switch (dst->type) { - case GGML_TYPE_F16: - { - ggml_compute_forward_im2col_3d_f16(params, dst); - } - break; - case GGML_TYPE_F32: - { - ggml_compute_forward_im2col_3d_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -static void ggml_call_mul_mat(ggml_type type, - const ggml_compute_params * params, - int64_t m, - int64_t n, - int64_t k, - void * a, - void 
* b, - float * c) { - const ggml_type_traits * traits = ggml_get_type_traits(type); - struct ggml_tensor src1 = {}; - src1.type = type; - src1.ne[0] = k; - src1.ne[1] = m; - src1.ne[2] = 1; - src1.ne[3] = 1; - src1.nb[0] = traits->type_size; - src1.nb[1] = k * traits->type_size; - src1.nb[2] = src1.nb[1]; - src1.nb[3] = src1.nb[2]; - src1.data = a; - - struct ggml_tensor src0 = {}; - src0.type = type; - src0.ne[0] = k; - src0.ne[1] = n; - src0.ne[2] = 1; - src0.ne[3] = 1; - src0.nb[0] = traits->type_size; - src0.nb[1] = k * traits->type_size; - src0.nb[2] = src0.nb[1]; - src0.nb[3] = src0.nb[2]; - src0.data = b; - - struct ggml_tensor dst = {}; - dst.ne[0] = n; - dst.ne[1] = m; - dst.ne[2] = 1; - dst.ne[3] = 1; - dst.nb[0] = sizeof(float); - dst.nb[1] = n * sizeof(float); - dst.nb[2] = dst.nb[1]; - dst.nb[3] = dst.nb[2]; - dst.data = c; - dst.src[0] = &src0; - dst.src[1] = &src1; - - ggml_compute_forward_mul_mat(params, &dst); -} - -static inline int64_t ggml_wrap_around(int64_t coord, int64_t size) { - return (coord + size) % size; // adding size avoids negative number weirdness -} - -// ggml_compute_forward_conv_2d - -static void ggml_compute_forward_conv_2d_impl(const ggml_compute_params * params, - const ggml_tensor * kernel, // [KW, KH, IC, OC] - const ggml_tensor * src, // [W, H, C, N] - ggml_tensor * dst, // [OW, OH, OC, N] - ggml_type kernel_type) { - GGML_ASSERT(ggml_is_contiguous(kernel)); - GGML_ASSERT(kernel_type == GGML_TYPE_F16 || kernel_type == GGML_TYPE_F32); - GGML_ASSERT(kernel->type == kernel_type); - - const ggml_type_traits * traits = ggml_get_type_traits(kernel_type); - - const int32_t stride_x = dst->op_params[0]; - const int32_t stride_y = dst->op_params[1]; - const int32_t pad_x = dst->op_params[2]; - const int32_t pad_y = dst->op_params[3]; - const int32_t dilation_x = dst->op_params[4]; - const int32_t dilation_y = dst->op_params[5]; - - const int64_t c_in = src->ne[2]; - const int64_t c_out = kernel->ne[3]; - GGML_ASSERT(c_in == kernel->ne[2]); - - const int64_t src_w = src->ne[0]; - const int64_t src_h = src->ne[1]; - const int64_t knl_w = kernel->ne[0]; - const int64_t knl_h = kernel->ne[1]; - const int64_t dst_w = dst->ne[0]; - const int64_t dst_h = dst->ne[1]; - - const float * src_data = (float *) src->data; - void * knl_data = kernel->data; - float * dst_data = (float *) dst->data; - - const int64_t knl_n = knl_w * knl_h * c_in; - const int64_t patch_total = dst->ne[3] * dst_w * dst_h; - - const int64_t space_per_patch = knl_n * traits->type_size + c_out * sizeof(float); - const int64_t batch_size = params->wsize / space_per_patch; - const int64_t patches_per_batch = batch_size > 8 ? 
(batch_size / 8) * 8 : batch_size; - const int64_t batch_n = (patch_total + patches_per_batch - 1) / patches_per_batch; - - GGML_ASSERT(patches_per_batch > 0 && batch_size >= 1); - - void * tmp = params->wdata; - - for (int64_t batch_i = 0; batch_i < batch_n; ++batch_i) { - const int64_t patch_start_batch = batch_i * patches_per_batch; - const int64_t patch_end_batch = std::min(patch_start_batch + patches_per_batch, patch_total); - const int64_t patch_n = patch_end_batch - patch_start_batch; - - const int64_t patch_per_thread = (patch_n + params->nth - 1) / params->nth; - const int64_t patch_start = patch_start_batch + params->ith * patch_per_thread; - const int64_t patch_end = std::min(patch_start + patch_per_thread, patch_end_batch); - - //im2col for a patch - for (int64_t p = patch_start; p < patch_end; ++p) { - const int64_t batch_n = p / (dst_w * dst_h); - const int64_t src_x = (p / dst_w) % dst_h; - const int64_t src_y = p % dst_w; - - const float * src_base = (const float *) ((const char *) src_data + batch_n * src->nb[3]); - char * dst_row = (char *) tmp + (p % patches_per_batch) * knl_n * traits->type_size; - - for (int64_t ic = 0; ic < c_in; ++ic) { - for (int64_t ky = 0; ky < knl_h; ++ky) { - for (int64_t kx = 0; kx < knl_w; ++kx) { - const int64_t sy = src_x * stride_y + ky * dilation_y - pad_y; - const int64_t sx = src_y * stride_x + kx * dilation_x - pad_x; - - int64_t dst_idx = ic * (knl_h * knl_w) + ky * knl_w + kx; - - float src_val; - if (sy < 0 || sy >= src_h || sx < 0 || sx >= src_w) { - src_val = 0.0f; - } else { - const float * src_ptr = (const float *) ((const char *) src_base + sx * src->nb[0] + - sy * src->nb[1] + ic * src->nb[2]); - src_val = *src_ptr; - } - - char * element_ptr = dst_row + dst_idx * traits->type_size; - if (kernel_type == GGML_TYPE_F32) { - *(float *) element_ptr = src_val; - } else if (kernel_type == GGML_TYPE_F16) { - *(ggml_fp16_t *) element_ptr = GGML_CPU_FP32_TO_FP16(src_val); - } - } - } - } - } // patches handled by this thread - - ggml_barrier(params->threadpool); - - float * gemm_output = (float *) ((char *) tmp + patches_per_batch * knl_n * traits->type_size); - - GGML_ASSERT(gemm_output + patch_n * c_out <= (float *) tmp + params->wsize); - - // GEMM: patches[patch_n, knl_n] × kernel[knl_n, c_out] = output[patch_n, c_out] - ggml_call_mul_mat(kernel_type, params, patch_n, c_out, knl_n, tmp, knl_data, gemm_output); - - ggml_barrier(params->threadpool); - - //permute back [OC, N, OH, OW] to [N, OC, OH, OW] - const int64_t permute_per_thread = (patch_n + params->nth - 1) / params->nth; - const int64_t permute_start = params->ith * permute_per_thread; - const int64_t permute_end = std::min(permute_start + permute_per_thread, patch_n); - - for (int64_t i = permute_start; i < permute_end; ++i) { - const int64_t p = patch_start_batch + i; - const int64_t batch_n = p / (dst_w * dst_h); - const int64_t dst_y = (p / dst_w) % dst_h; - const int64_t dst_x = p % dst_w; - - for (int64_t oc = 0; oc < c_out; ++oc) { - const float value = gemm_output[i * c_out + oc]; - float * dst_ptr = (float *) ((char *) dst_data + dst_x * dst->nb[0] + dst_y * dst->nb[1] + - oc * dst->nb[2] + batch_n * dst->nb[3]); - *dst_ptr = value; - } - } - } -} - -void ggml_compute_forward_conv_2d(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - ggml_compute_forward_conv_2d_impl(params, src0, src1, dst, src0->type); -} - -// ggml_compute_forward_conv_3d - -static void 
ggml_compute_forward_conv_3d_impl(const ggml_compute_params * params, - const ggml_tensor * kernel, - const ggml_tensor * src, - ggml_tensor * dst, - ggml_type kernel_type) { - GGML_ASSERT(ggml_is_contiguous(kernel)); - GGML_ASSERT(kernel_type == GGML_TYPE_F16 || kernel_type == GGML_TYPE_F32); - GGML_ASSERT(kernel->type == kernel_type); - - const ggml_type_traits * traits = ggml_get_type_traits(kernel_type); - - const int32_t s0 = dst->op_params[0]; - const int32_t s1 = dst->op_params[1]; - const int32_t s2 = dst->op_params[2]; - const int32_t p0 = dst->op_params[3]; - const int32_t p1 = dst->op_params[4]; - const int32_t p2 = dst->op_params[5]; - const int32_t d0 = dst->op_params[6]; - const int32_t d1 = dst->op_params[7]; - const int32_t d2 = dst->op_params[8]; - const int32_t c = dst->op_params[9]; - const int32_t n = dst->op_params[10]; - const int32_t oc = dst->op_params[11]; - - const int64_t src_w = src->ne[0]; - const int64_t src_h = src->ne[1]; - const int64_t src_d = src->ne[2]; - const int64_t knl_w = kernel->ne[0]; - const int64_t knl_h = kernel->ne[1]; - const int64_t knl_d = kernel->ne[2]; - const int64_t dst_w = dst->ne[0]; - const int64_t dst_h = dst->ne[1]; - const int64_t dst_d = dst->ne[2]; - - const float * src_data = (float *) src->data; - void * knl_data = kernel->data; - float * dst_data = (float *) dst->data; - - const int64_t knl_n_per_channel = knl_w * knl_h * knl_d; - const int64_t knl_n_total = knl_n_per_channel * c; - const int64_t patch_total = n * dst_w * dst_h * dst_d; - - const int64_t space_per_patch = knl_n_total * traits->type_size + oc * sizeof(float); - const int64_t batch_size = params->wsize / space_per_patch; - const int64_t patches_per_batch = batch_size > 8 ? (batch_size / 8) * 8 : batch_size; - const int64_t batch_n = (patch_total + patches_per_batch - 1) / patches_per_batch; - - GGML_ASSERT(patches_per_batch > 0 && batch_size >= 1); - - void * tmp = params->wdata; - - for (int64_t batch_i = 0; batch_i < batch_n; ++batch_i) { - const int64_t patch_start_batch = batch_i * patches_per_batch; - const int64_t patch_end_batch = std::min(patch_start_batch + patches_per_batch, patch_total); - const int64_t patch_n_in_batch = patch_end_batch - patch_start_batch; - - const int64_t patch_per_thread = (patch_n_in_batch + params->nth - 1) / params->nth; - const int64_t patch_start = patch_start_batch + params->ith * patch_per_thread; - const int64_t patch_end = std::min(patch_start + patch_per_thread, patch_end_batch); - - for (int64_t p = patch_start; p < patch_end; ++p) { - const int64_t p_in_batch = p % (dst_w * dst_h * dst_d); - const int64_t p_in_depth = p_in_batch % (dst_w * dst_h); - const int64_t batch_idx = p / (dst_w * dst_h * dst_d); - const int64_t dst_z = p_in_batch / (dst_w * dst_h); - const int64_t dst_y = p_in_depth / dst_w; - const int64_t dst_x = p_in_depth % dst_w; - - char * dst_row = (char *) tmp + (p % patches_per_batch) * knl_n_total * traits->type_size; - - for (int64_t ic = 0; ic < c; ++ic) { - for (int64_t kz = 0; kz < knl_d; ++kz) { - for (int64_t ky = 0; ky < knl_h; ++ky) { - for (int64_t kx = 0; kx < knl_w; ++kx) { - const int64_t sz = dst_z * s2 + kz * d2 - p2; - const int64_t sy = dst_y * s1 + ky * d1 - p1; - const int64_t sx = dst_x * s0 + kx * d0 - p0; - - int64_t dst_idx = ic * knl_n_per_channel + kz * (knl_h * knl_w) + ky * knl_w + kx; - - float src_val; - if (sz < 0 || sz >= src_d || sy < 0 || sy >= src_h || sx < 0 || sx >= src_w) { - src_val = 0.0f; - } else { - const int64_t cn_idx = batch_idx * c + ic; - const float * 
src_ptr = - (const float *) ((const char *) src_data + sx * src->nb[0] + sy * src->nb[1] + - sz * src->nb[2] + cn_idx * src->nb[3]); - src_val = *src_ptr; - } - - char * element_ptr = dst_row + dst_idx * traits->type_size; - if (kernel_type == GGML_TYPE_F32) { - *(float *) element_ptr = src_val; - } else if (kernel_type == GGML_TYPE_F16) { - *(ggml_fp16_t *) element_ptr = GGML_CPU_FP32_TO_FP16(src_val); - } - } - } - } - } - } - - ggml_barrier(params->threadpool); - - float * gemm_output = (float *) ((char *) tmp + patches_per_batch * knl_n_total * traits->type_size); - ggml_call_mul_mat(kernel_type, params, patch_n_in_batch, oc, knl_n_total, tmp, knl_data, gemm_output); - - ggml_barrier(params->threadpool); - - const int64_t permute_per_thread = (patch_n_in_batch + params->nth - 1) / params->nth; - const int64_t permute_start = params->ith * permute_per_thread; - const int64_t permute_end = std::min(permute_start + permute_per_thread, patch_n_in_batch); - - for (int64_t i = permute_start; i < permute_end; ++i) { - const int64_t p = patch_start_batch + i; - const int64_t p_in_batch = p % (dst_w * dst_h * dst_d); - const int64_t p_in_depth = p_in_batch % (dst_w * dst_h); - const int64_t batch_idx = p / (dst_w * dst_h * dst_d); - const int64_t dst_z = p_in_batch / (dst_w * dst_h); - const int64_t dst_y = p_in_depth / dst_w; - const int64_t dst_x = p_in_depth % dst_w; - - for (int64_t ioc = 0; ioc < oc; ++ioc) { - const float value = gemm_output[i * oc + ioc]; - const int64_t ocn_idx = batch_idx * oc + ioc; - float * dst_ptr = (float *) ((char *) dst_data + dst_x * dst->nb[0] + dst_y * dst->nb[1] + - dst_z * dst->nb[2] + ocn_idx * dst->nb[3]); - *dst_ptr = value; - } - } - } -} - -void ggml_compute_forward_conv_3d(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - ggml_compute_forward_conv_3d_impl(params, src0, src1, dst, src0->type); -} - -// ggml_compute_forward_conv_transpose_2d - -void ggml_compute_forward_conv_transpose_2d(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(src0->type == GGML_TYPE_F16); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); - - GGML_TENSOR_BINARY_OP_LOCALS - - const int ith = params->ith; - const int nth = params->nth; - - const int nk = ne00 * ne01 * ne02 * ne03; - - GGML_ASSERT(nb00 == sizeof(ggml_fp16_t)); - GGML_ASSERT(nb10 == sizeof(float)); - - if (ith == 0) { - memset(params->wdata, 0, params->wsize); - - // permute kernel data (src0) from (Kw x Kh x Cout x Cin) to (Cin x Kw x Kh x Cout) - { - ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; - - for (int64_t i03 = 0; i03 < ne03; i03++) { - for (int64_t i02 = 0; i02 < ne02; i02++) { - const ggml_fp16_t * const src = (ggml_fp16_t *) ((char *) src0->data + i03 * nb03 + i02 * nb02); - ggml_fp16_t * dst_data = wdata + i02 * ne01 * ne00 * ne03; - for (int64_t i01 = 0; i01 < ne01; i01++) { - for (int64_t i00 = 0; i00 < ne00; i00++) { - dst_data[i01 * ne00 * ne03 + i00 * ne03 + i03] = src[i01 * ne00 + i00]; - } - } - } - } - } - - // permute source data (src1) from (Sw x Sh x Cin) to (Cin x Sw x Sh) - { - ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + nk; - for (int i12 = 0; i12 < ne12; i12++) { - for (int i11 = 0; i11 < ne11; i11++) { - const float * const src = (float *) ((char *) src1->data + i12 * nb12 + i11 * nb11); - ggml_fp16_t * dst_data = wdata 
+ i11 * ne10 * ne12; - for (int i10 = 0; i10 < ne10; i10++) { - dst_data[i10 * ne12 + i12] = GGML_CPU_FP32_TO_FP16(src[i10]); - } - } - } - } - - memset(dst->data, 0, ggml_nbytes(dst)); - } - ggml_barrier(params->threadpool); - - const int32_t stride = ggml_get_op_params_i32(dst, 0); - - // total patches in dst - const int np = ne2; - - // patches per thread - const int dp = (np + nth - 1) / nth; - - // patch range for this thread - const int ip0 = dp * ith; - const int ip1 = MIN(ip0 + dp, np); - - ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0; - ggml_fp16_t * const wdata_src = wdata + nk; - - for (int i2 = ip0; i2 < ip1; i2++) { // Cout - float * dst_data = (float *) ((char *) dst->data + i2 * nb2); - ggml_fp16_t * wdata_kernel = wdata + i2 * ne01 * ne00 * ne03; - for (int i11 = 0; i11 < ne11; i11++) { - for (int i10 = 0; i10 < ne10; i10++) { - const int i1n = i11 * ne10 * ne12 + i10 * ne12; - for (int i01 = 0; i01 < ne01; i01++) { - for (int i00 = 0; i00 < ne00; i00++) { - float v = 0; - ggml_vec_dot_f16(ne03, &v, 0, wdata_src + i1n, 0, wdata_kernel + i01 * ne00 * ne03 + i00 * ne03, - 0, 1); - dst_data[(i11 * stride + i01) * ne0 + i10 * stride + i00] += v; - } - } - } - } - } -} - -// ggml_compute_forward_conv_2d_dw - -struct ggml_conv_2d_dw_params { - int64_t channels; - int64_t batch; - int64_t src_w; - int64_t src_h; - int64_t dst_w; - int64_t dst_h; - int64_t knl_w; - int64_t knl_h; - int stride_x; - int stride_y; - int pad_x; - int pad_y; - int dilation_x; - int dilation_y; -}; - -static void ggml_compute_forward_conv_2d_dw_cwhn(const ggml_compute_params * params, - const ggml_tensor * src, - const ggml_tensor * kernel, - ggml_tensor * dst, - const ggml_conv_2d_dw_params & p) { - const int64_t c = p.channels; - const float * knl_data = (const float *) kernel->data; - - const int64_t rows_total = p.dst_h * p.batch; - const int64_t rows_per_thread = (rows_total + params->nth - 1) / params->nth; - const int64_t row_start = params->ith * rows_per_thread; - const int64_t row_end = MIN(row_start + rows_per_thread, rows_total); - -#ifdef GGML_SIMD -# if defined(__ARM_FEATURE_SVE) - const int64_t pkg_size = svcntw(); -# else - const int64_t pkg_size = GGML_F32_EPR; -# endif - const int64_t pkg_count = c / pkg_size; - const int64_t c_pkg_end = pkg_count * pkg_size; -#else - const int64_t c_pkg_end = 0; -#endif - - for (int64_t row = row_start; row < row_end; ++row) { - const int64_t dst_y = row % p.dst_h; - const float * src_data = (const float *) src->data + (row / p.dst_h) * p.src_w * p.src_h * c; - for (int64_t dst_x = 0; dst_x < p.dst_w; ++dst_x) { - float * dst_data = (float *) dst->data + (row * p.dst_w + dst_x) * c; - const int64_t src_y_base = dst_y * p.stride_y - p.pad_y; - const int64_t src_x_base = dst_x * p.stride_x - p.pad_x; - -#ifdef GGML_SIMD - // Vectorized loop - for (int64_t c_i = 0; c_i < c_pkg_end; c_i += pkg_size) { - GGML_F32_VEC sum = GGML_F32_VEC_ZERO; - for (int64_t knl_y = 0; knl_y < p.knl_h; ++knl_y) { - const int64_t src_y = src_y_base + knl_y * p.dilation_y; - if (src_y < 0 || src_y >= p.src_h) { - continue; - } - for (int64_t knl_x = 0; knl_x < p.knl_w; ++knl_x) { - const int64_t src_x = src_x_base + knl_x * p.dilation_x; - if (src_x < 0 || src_x >= p.src_w) { - continue; - } - GGML_F32_VEC k = GGML_F32_VEC_LOAD(knl_data + (knl_y * p.knl_w + knl_x) * c + c_i); - GGML_F32_VEC s = GGML_F32_VEC_LOAD(src_data + (src_y * p.src_w + src_x) * c + c_i); - sum = GGML_F32_VEC_FMA(sum, k, s); - } - } - GGML_F32_VEC_STORE(dst_data + c_i, sum); - } -#endif - 
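            // The vectorized loop above covers channels [0, c_pkg_end) in chunks of
            // pkg_size (the SIMD register width); because the CWHN layout keeps channels
            // innermost, consecutive channel values are contiguous in memory. The scalar
            // loop below handles the remaining c - c_pkg_end channels, and all channels
            // when GGML_SIMD is not defined (c_pkg_end is 0 in that case).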
// Scalar loop - for (int64_t c_i = c_pkg_end; c_i < c; ++c_i) { - float sum = 0.0f; - for (int64_t knl_y = 0; knl_y < p.knl_h; ++knl_y) { - const int64_t src_y = src_y_base + knl_y * p.dilation_y; - if (src_y < 0 || src_y >= p.src_h) { - continue; - } - for (int64_t knl_x = 0; knl_x < p.knl_w; ++knl_x) { - const int64_t src_x = src_x_base + knl_x * p.dilation_x; - if (src_x < 0 || src_x >= p.src_w) { - continue; - } - sum += knl_data[(knl_y * p.knl_w + knl_x) * c + c_i] * - src_data[(src_y * p.src_w + src_x) * c + c_i]; - } - } - dst_data[c_i] = sum; - } - } - } -} - -static void ggml_compute_forward_conv_2d_dw_whcn(const ggml_compute_params * params, - const ggml_tensor * src, - const ggml_tensor * kernel, - ggml_tensor * dst, - const ggml_conv_2d_dw_params & p) { - const int64_t n = p.channels * p.batch; - const int64_t per_thread = (n + params->nth - 1) / params->nth; - const int64_t start = params->ith * per_thread; - const int64_t end = MIN(start + per_thread, n); - - for (int64_t i = start; i < end; ++i) { - const float * knl_data = (const float *) kernel->data + (i % p.channels) * p.knl_w * p.knl_h; - const float * src_data = (const float *) src->data + i * p.src_w * p.src_h; - float * dst_data = (float *) dst->data + i * p.dst_w * p.dst_h; - - for (int64_t dst_y = 0; dst_y < p.dst_h; ++dst_y) { - for (int64_t dst_x = 0; dst_x < p.dst_w; ++dst_x) { - float sum = 0.0f; - for (int64_t knl_y = 0; knl_y < p.knl_h; ++knl_y) { - const int64_t src_y = dst_y * p.stride_y + knl_y * p.dilation_y - p.pad_y; - if (src_y < 0 || src_y >= p.src_h) { - continue; - } - for (int64_t knl_x = 0; knl_x < p.knl_w; ++knl_x) { - const int64_t src_x = dst_x * p.stride_x + knl_x * p.dilation_x - p.pad_x; - if (src_x < 0 || src_x >= p.src_w) { - continue; - } - sum += knl_data[knl_y * p.knl_w + knl_x] * src_data[src_y * p.src_w + src_x]; - } - } - dst_data[dst_y * p.dst_w + dst_x] = sum; - } - } - } -} - -void ggml_compute_forward_conv_2d_dw(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * kernel = dst->src[0]; - const ggml_tensor * src = dst->src[1]; - ggml_conv_2d_dw_params p; - p.channels = src->ne[2]; - p.batch = src->ne[3]; - p.src_w = src->ne[0]; - p.src_h = src->ne[1]; - p.dst_w = dst->ne[0]; - p.dst_h = dst->ne[1]; - p.knl_w = kernel->ne[0]; - p.knl_h = kernel->ne[1]; - p.stride_x = dst->op_params[0]; - p.stride_y = dst->op_params[1]; - p.pad_x = dst->op_params[2]; - p.pad_y = dst->op_params[3]; - p.dilation_x = dst->op_params[4]; - p.dilation_y = dst->op_params[5]; - - GGML_ASSERT(kernel->ne[3] == p.channels); - GGML_ASSERT(dst->ne[3] == p.batch); - - if (ggml_is_contiguous(src)) { - ggml_compute_forward_conv_2d_dw_whcn(params, src, kernel, dst, p); - } else if (ggml_is_contiguous_channels(src)) { - // kernel should also have channels most contiguous in memory - GGML_ASSERT(kernel->nb[0] >= kernel->nb[2] && kernel->nb[1] >= kernel->nb[0]); - ggml_compute_forward_conv_2d_dw_cwhn(params, src, kernel, dst, p); - } else { - GGML_ABORT("non-contiguous memory layout not supported"); - } -} - -// ggml_compute_forward_pool_1d_sk_p0 - -static void ggml_compute_forward_pool_1d_sk_p0(const ggml_compute_params * params, - const ggml_op_pool op, - const int k, - ggml_tensor * dst) { - const ggml_tensor * src = dst->src[0]; - - assert(src->type == GGML_TYPE_F32 || src->type == GGML_TYPE_F16); - - if (params->ith != 0) { - return; - } - - const char * cdata = (const char *) src->data; - const char * const data_end = cdata + ggml_nbytes(src); - float * drow = (float *) dst->data; - - 
const int64_t rs = dst->ne[0]; - - while (cdata < data_end) { - const void * srow = (const void *) cdata; - int j = 0; - for (int64_t i = 0; i < rs; ++i) { - switch (op) { - case GGML_OP_POOL_AVG: - drow[i] = 0; - break; - case GGML_OP_POOL_MAX: - drow[i] = -FLT_MAX; - break; - case GGML_OP_POOL_COUNT: - GGML_ABORT("fatal error"); - } - for (int ki = 0; ki < k; ++ki) { - const float srow_j = (src->type == GGML_TYPE_F32) ? - ((const float *) srow)[j] : - GGML_CPU_FP16_TO_FP32(((const ggml_fp16_t *) srow)[j]); - switch (op) { - case GGML_OP_POOL_AVG: - drow[i] += srow_j; - break; - case GGML_OP_POOL_MAX: - if (srow_j > drow[i]) { - drow[i] = srow_j; - } - break; - case GGML_OP_POOL_COUNT: - GGML_ABORT("fatal error"); - } - ++j; - } - switch (op) { - case GGML_OP_POOL_AVG: - drow[i] /= k; - break; - case GGML_OP_POOL_MAX: - break; - case GGML_OP_POOL_COUNT: - GGML_ABORT("fatal error"); - } - } - - cdata += src->nb[1]; - drow += rs; - } -} - -// ggml_compute_forward_pool_1d - -void ggml_compute_forward_pool_1d(const ggml_compute_params * params, ggml_tensor * dst) { - const int32_t * opts = (const int32_t *) dst->op_params; - ggml_op_pool op = static_cast<ggml_op_pool>(opts[0]); - const int k0 = opts[1]; - const int s0 = opts[2]; - const int p0 = opts[3]; - GGML_ASSERT(p0 == 0); // padding not supported - GGML_ASSERT(k0 == s0); // only s = k supported - - ggml_compute_forward_pool_1d_sk_p0(params, op, k0, dst); -} - -// ggml_compute_forward_pool_2d - -void ggml_compute_forward_pool_2d(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src = dst->src[0]; - - assert(src->type == GGML_TYPE_F32 || src->type == GGML_TYPE_F16); - - if (params->ith != 0) { - return; - } - - const int32_t * opts = (const int32_t *) dst->op_params; - ggml_op_pool op = static_cast<ggml_op_pool>(opts[0]); - const int k0 = opts[1]; - const int k1 = opts[2]; - const int s0 = opts[3]; - const int s1 = opts[4]; - const int p0 = opts[5]; - const int p1 = opts[6]; - const char * cdata = (const char *) src->data; - const char * const data_end = cdata + ggml_nbytes(src); - - const int64_t px = dst->ne[0]; - const int64_t py = dst->ne[1]; - const int64_t pa = px * py; - - float * dplane = (float *) dst->data; - - const int ka = k0 * k1; - const int offset0 = -p0; - const int offset1 = -p1; - - while (cdata < data_end) { - for (int oy = 0; oy < py; ++oy) { - float * const drow = dplane + oy * px; - for (int ox = 0; ox < px; ++ox) { - float * const out = drow + ox; - switch (op) { - case GGML_OP_POOL_AVG: - *out = 0; - break; - case GGML_OP_POOL_MAX: - *out = -FLT_MAX; - break; - case GGML_OP_POOL_COUNT: - GGML_ABORT("fatal error"); - } - - const int ix = offset0 + ox * s0; - const int iy = offset1 + oy * s1; - - for (int ky = 0; ky < k1; ++ky) { - if (iy + ky < 0 || iy + ky >= src->ne[1]) { - continue; - } - const void * srow = (const void *) (cdata + src->nb[1] * (iy + ky)); - for (int kx = 0; kx < k0; ++kx) { - int j = ix + kx; - if (j < 0 || j >= src->ne[0]) { - continue; - } - const float srow_j = (src->type == GGML_TYPE_F32) ?
- ((const float *) srow)[j] : - GGML_CPU_FP16_TO_FP32(((const ggml_fp16_t *) srow)[j]); - switch (op) { - case GGML_OP_POOL_AVG: - *out += srow_j; - break; - case GGML_OP_POOL_MAX: - if (srow_j > *out) { - *out = srow_j; - } - break; - case GGML_OP_POOL_COUNT: - GGML_ABORT("fatal error"); - } - } - } - switch (op) { - case GGML_OP_POOL_AVG: - *out /= ka; - break; - case GGML_OP_POOL_MAX: - break; - case GGML_OP_POOL_COUNT: - GGML_ABORT("fatal error"); - } - } - } - - cdata += src->nb[2]; - dplane += pa; - } -} - -// ggml_compute_forward_pool_2d_back - -void ggml_compute_forward_pool_2d_back(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src = dst->src[0]; - const ggml_tensor * dstf = dst->src[1]; // forward tensor of dst - - assert(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16); - - if (params->ith != 0) { - return; - } - - const int32_t * opts = (const int32_t *) dst->op_params; - ggml_op_pool op = static_cast<ggml_op_pool>(opts[0]); - const int k0 = opts[1]; - const int k1 = opts[2]; - const int s0 = opts[3]; - const int s1 = opts[4]; - const int p0 = opts[5]; - const int p1 = opts[6]; - - char * cdata = (char *) dst->data; - const char * cdataf = (const char *) dstf->data; - const char * const data_end = cdata + ggml_nbytes(dst); - - GGML_ASSERT(params->ith == 0); - memset(cdata, 0, ggml_nbytes(dst)); - - const int64_t px = src->ne[0]; - const int64_t py = src->ne[1]; - const int64_t pa = px * py; - - const float * splane = (const float *) src->data; - - const int ka = k0 * k1; - const int offset0 = -p0; - const int offset1 = -p1; - - while (cdata < data_end) { - for (int oy = 0; oy < py; ++oy) { - const float * const srow = splane + oy * px; - for (int ox = 0; ox < px; ++ox) { - const float grad0 = srow[ox]; - - const int ix = offset0 + ox * s0; - const int iy = offset1 + oy * s1; - - if (op == GGML_OP_POOL_MAX) { - float maxval = -FLT_MAX; - int kxmax = -1; - int kymax = -1; - - for (int ky = 0; ky < k1; ++ky) { - if (iy + ky < 0 || iy + ky >= dst->ne[1]) { - continue; - } - const void * drowf = (const void *) (cdataf + dst->nb[1] * (iy + ky)); - for (int kx = 0; kx < k0; ++kx) { - int j = ix + kx; - if (j < 0 || j >= dst->ne[0]) { - continue; - } - - const float val = dst->type == GGML_TYPE_F32 ?
- ((const float *) drowf)[j] : - GGML_CPU_FP16_TO_FP32(((const ggml_fp16_t *) drowf)[j]); - if (val <= maxval) { - continue; - } - - maxval = val; - kxmax = kx; - kymax = ky; - } - } - - if (kxmax == -1 || kymax == -1) { - continue; - } - - void * drow = (void *) (cdata + dst->nb[1] * (iy + kymax)); - const int j = ix + kxmax; - if (dst->type == GGML_TYPE_F32) { - ((float *) drow)[j] += grad0; - } else { - ((ggml_fp16_t *) drow)[j] = - GGML_CPU_FP32_TO_FP16(grad0 + GGML_CPU_FP16_TO_FP32(((const ggml_fp16_t *) drow)[j])); - } - } else if (op == GGML_OP_POOL_AVG) { - const float grad = grad0 / ka; - - for (int ky = 0; ky < k1; ++ky) { - if (iy + ky < 0 || iy + ky >= dst->ne[1]) { - continue; - } - void * drow = (void *) (cdata + dst->nb[1] * (iy + ky)); - for (int kx = 0; kx < k0; ++kx) { - int j = ix + kx; - if (j < 0 || j >= dst->ne[0]) { - continue; - } - - if (dst->type == GGML_TYPE_F32) { - ((float *) drow)[j] += grad; - } else { - ((ggml_fp16_t *) drow)[j] += GGML_CPU_FP32_TO_FP16(grad); - } - } - } - } else { - GGML_ASSERT(false); - } - } - } - - cdata += dst->nb[2]; - cdataf += dst->nb[2]; - splane += pa; - } -} - -// ggml_compute_forward_upscale - -static void ggml_compute_forward_upscale_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_ASSERT(src0->type == GGML_TYPE_F32); - - const int ith = params->ith; - const int nth = params->nth; - - GGML_TENSOR_UNARY_OP_LOCALS - - float sf0 = (float) ne0 / src0->ne[0]; - float sf1 = (float) ne1 / src0->ne[1]; - float sf2 = (float) ne2 / src0->ne[2]; - float sf3 = (float) ne3 / src0->ne[3]; - float pixel_offset = 0.5f; - - const int32_t mode_flags = ggml_get_op_params_i32(dst, 0); - const ggml_scale_mode mode = (ggml_scale_mode) (mode_flags & 0xFF); - - if (mode_flags & GGML_SCALE_FLAG_ALIGN_CORNERS) { - pixel_offset = 0.0f; - sf0 = ne0 > 1 && ne00 > 1 ? (float) (ne0 - 1) / (ne00 - 1) : sf0; - sf1 = ne1 > 1 && ne01 > 1 ? 
(float) (ne1 - 1) / (ne01 - 1) : sf1; - } - - if (mode == GGML_SCALE_MODE_NEAREST) { - for (int64_t i3 = 0; i3 < ne3; i3++) { - const int64_t i03 = i3 / sf3; - for (int64_t i2 = ith; i2 < ne2; i2 += nth) { - const int64_t i02 = i2 / sf2; - for (int64_t i1 = 0; i1 < ne1; i1++) { - const int64_t i01 = i1 / sf1; - for (int64_t i0 = 0; i0 < ne0; i0++) { - const int64_t i00 = i0 / sf0; - - const float * x = - (float *) ((char *) src0->data + i00 * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03); - float * y = (float *) ((char *) dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3); - - *y = *x; - } - } - } - } - } else if (mode == GGML_SCALE_MODE_BILINEAR) { - for (int64_t i3 = 0; i3 < ne3; i3++) { - const int64_t i03 = i3 / sf3; - for (int64_t i2 = ith; i2 < ne2; i2 += nth) { - const int64_t i02 = i2 / sf2; - for (int64_t i1 = 0; i1 < ne1; i1++) { - const float y = ((float) i1 + pixel_offset) / sf1 - pixel_offset; - int64_t y0 = (int64_t) floorf(y); - int64_t y1 = y0 + 1; - - y0 = std::max(int64_t(0), std::min(y0, ne01 - 1)); - y1 = std::max(int64_t(0), std::min(y1, ne01 - 1)); - - float dy = y - (float) y0; - dy = std::max(0.0f, std::min(dy, 1.0f)); - - for (int64_t i0 = 0; i0 < ne0; i0++) { - const float x = ((float) i0 + pixel_offset) / sf0 - pixel_offset; - int64_t x0 = (int64_t) floorf(x); - int64_t x1 = x0 + 1; - - x0 = std::max(int64_t(0), std::min(x0, ne00 - 1)); - x1 = std::max(int64_t(0), std::min(x1, ne00 - 1)); - - float dx = x - (float) x0; - dx = std::max(0.0f, std::min(dx, 1.0f)); - - // fetch the four surrounding pixel values and interpolate - const float a = *(const float *) ((const char *) src0->data + x0 * nb00 + y0 * nb01 + - i02 * nb02 + i03 * nb03); - const float b = *(const float *) ((const char *) src0->data + x1 * nb00 + y0 * nb01 + - i02 * nb02 + i03 * nb03); - const float c = *(const float *) ((const char *) src0->data + x0 * nb00 + y1 * nb01 + - i02 * nb02 + i03 * nb03); - const float d = *(const float *) ((const char *) src0->data + x1 * nb00 + y1 * nb01 + - i02 * nb02 + i03 * nb03); - - const float val = a * (1 - dx) * (1 - dy) + b * dx * (1 - dy) + c * (1 - dx) * dy + d * dx * dy; - - float * y_dst = (float *) ((char *) dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3); - *y_dst = val; - } - } - } - } - } else if (mode == GGML_SCALE_MODE_BICUBIC) { - // https://en.wikipedia.org/wiki/Bicubic_interpolation#Bicubic_convolution_algorithm - const float a = -0.75f; // use alpha = -0.75 (same as PyTorch) - auto weight1 = [a](float x) { - return ((a + 2) * x - (a + 3)) * x * x + 1; - }; - auto weight2 = [a](float x) { - return ((a * x - 5 * a) * x + 8 * a) * x - 4 * a; - }; - auto bicubic = [=](float p0, float p1, float p2, float p3, float x) { - const float w0 = weight2(x + 1); - const float w1 = weight1(x + 0); - const float w2 = weight1(1 - x); - const float w3 = weight2(2 - x); - return p0 * w0 + p1 * w1 + p2 * w2 + p3 * w3; - }; - - for (int64_t i3 = 0; i3 < ne3; i3++) { - const int64_t i03 = i3 / sf3; - for (int64_t i2 = ith; i2 < ne2; i2 += nth) { - const int64_t i02 = i2 / sf2; - for (int64_t i1 = 0; i1 < ne1; i1++) { - const float y = ((float) i1 + pixel_offset) / sf1 - pixel_offset; - const int64_t y0 = (int64_t) floorf(y); - const float dy = y - (float) y0; - - for (int64_t i0 = 0; i0 < ne0; i0++) { - const float x = ((float) i0 + pixel_offset) / sf0 - pixel_offset; - const int64_t x0 = (int64_t) floorf(x); - const float dx = x - (float) x0; - - auto p = [=](int64_t x_off, int64_t y_off) -> float { - int64_t i00 = std::max(int64_t(0), std::min(x0 + x_off, 
ne00 - 1)); - int64_t i01 = std::max(int64_t(0), std::min(y0 + y_off, ne01 - 1)); - return *(const float *) ((const char *) src0->data + i00 * nb00 + i01 * nb01 + i02 * nb02 + - i03 * nb03); - }; - - const float val = bicubic(bicubic(p(-1, -1), p(0, -1), p(1, -1), p(2, -1), dx), - bicubic(p(-1, 0), p(0, 0), p(1, 0), p(2, 0), dx), - bicubic(p(-1, 1), p(0, 1), p(1, 1), p(2, 1), dx), - bicubic(p(-1, 2), p(0, 2), p(1, 2), p(2, 2), dx), dy); - - float * y_dst = (float *) ((char *) dst->data + i0 * nb0 + i1 * nb1 + i2 * nb2 + i3 * nb3); - *y_dst = val; - } - } - } - } - } else { - GGML_ABORT("unsupported upscale mode"); - } -} - -void ggml_compute_forward_upscale(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_upscale_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_pad - -static void ggml_compute_forward_pad_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_ASSERT(src0->nb[0] == sizeof(float)); - GGML_ASSERT(dst->nb[0] == sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - GGML_TENSOR_UNARY_OP_LOCALS - - float * dst_ptr = (float *) dst->data; - const int32_t lp0 = ggml_get_op_params_i32(dst, 0); - const int32_t rp0 = ggml_get_op_params_i32(dst, 1); - const int32_t lp1 = ggml_get_op_params_i32(dst, 2); - const int32_t rp1 = ggml_get_op_params_i32(dst, 3); - const int32_t lp2 = ggml_get_op_params_i32(dst, 4); - const int32_t rp2 = ggml_get_op_params_i32(dst, 5); - const int32_t lp3 = ggml_get_op_params_i32(dst, 6); - const int32_t rp3 = ggml_get_op_params_i32(dst, 7); - const int32_t circular = ggml_get_op_params_i32(dst, 8); - - // TODO: optimize - - if (circular == 0) { - for (int64_t i2 = 0; i2 < ne2; ++i2) { - for (int64_t i1 = ith; i1 < ne1; i1 += nth) { - for (int64_t i0 = 0; i0 < ne0; ++i0) { - for (int64_t i3 = 0; i3 < ne3; ++i3) { - const int64_t dst_idx = i3 * (ne0 * ne1 * ne2) + i2 * (ne0 * ne1) + i1 * ne0 + i0; - if ((i0 >= lp0 && i0 < ne0 - rp0) && (i1 >= lp1 && i1 < ne1 - rp1) && - (i2 >= lp2 && i2 < ne2 - rp2) && (i3 >= lp3 && i3 < ne3 - rp3)) { - const int64_t src_idx = - (i3 - lp3) * nb03 + (i2 - lp2) * nb02 + (i1 - lp1) * nb01 + (i0 - lp0) * nb00; - const float * src_ptr = (const float *) ((char *) src0->data + src_idx); - dst_ptr[dst_idx] = *src_ptr; - } else { - dst_ptr[dst_idx] = 0; - } - } - } - } - } - } else { - for (int64_t i2 = 0; i2 < ne2; ++i2) { - for (int64_t i1 = ith; i1 < ne1; i1 += nth) { - for (int64_t i0 = 0; i0 < ne0; ++i0) { - for (int64_t i3 = 0; i3 < ne3; ++i3) { - const int64_t dst_idx = i3 * (ne0 * ne1 * ne2) + i2 * (ne0 * ne1) + i1 * ne0 + i0; - const int64_t src_i0 = ggml_wrap_around(i0 - lp0, ne00); - const int64_t src_i1 = ggml_wrap_around(i1 - lp1, ne01); - const int64_t src_i2 = ggml_wrap_around(i2 - lp2, ne02); - const int64_t src_i3 = ggml_wrap_around(i3 - lp3, ne03); - - const int64_t src_idx = src_i3 * nb03 + src_i2 * nb02 + src_i1 * nb01 + src_i0 * nb00; - - const float * src_ptr = (const float *) ((char *) src0->data + src_idx); - dst_ptr[dst_idx] = *src_ptr; - } - } - } - } - } -} - -void ggml_compute_forward_pad(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_pad_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } 
- } -} - -// ggml_compute_forward_pad_reflect_1d - -void ggml_compute_forward_pad_reflect_1d(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_ASSERT(src0->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); - - const int ith = params->ith; - const int nth = params->nth; - - const int32_t * opts = (const int32_t *) dst->op_params; - const int p0 = opts[0]; - const int p1 = opts[1]; - - GGML_TENSOR_UNARY_OP_LOCALS - - for (int64_t i3 = 0; i3 < ne3; i3++) { - for (int64_t i2 = 0; i2 < ne2; i2++) { - for (int64_t i1 = ith; i1 < ne1; i1 += nth) { - float * left = (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1 + p0 * nb0); - float * right = (float *) ((char *) dst->data + i3 * nb3 + i2 * nb2 + i1 * nb1 + (ne0 - p1 - 1) * nb0); - - ggml_vec_cpy_f32(ne00, left, (float *) ((char *) src0->data + i3 * nb03 + i2 * nb02 + i1 * nb01)); - - for (int i0 = 1; i0 <= p0; i0++) { - left[-i0] = left[i0]; - } - for (int i0 = 1; i0 <= p1; i0++) { - right[i0] = right[-i0]; - } - } - } - } -} - -// ggml_compute_forward_roll - -static int64_t ggml_wrap_index(int64_t i, int64_t ne) { - if (i < 0) { - return i + ne; - } else if (i >= ne) { - return i - ne; - } - return i; -} - -static void ggml_compute_forward_roll_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const float * src_data = (const float *) src0->data; - float * dst_data = (float *) dst->data; - - GGML_TENSOR_UNARY_OP_LOCALS - - const int s0 = ggml_get_op_params_i32(dst, 0); - const int s1 = ggml_get_op_params_i32(dst, 1); - const int s2 = ggml_get_op_params_i32(dst, 2); - const int s3 = ggml_get_op_params_i32(dst, 3); - - const int64_t total = ne1 * ne2 * ne3; - const int64_t per_thread = (total + params->nth) / params->nth; - const int64_t start = params->ith * per_thread; - const int64_t end = std::min(start + per_thread, total); - - for (int64_t i = start; i < end; ++i) { - const int64_t i1 = i % ne1; - const int64_t i2 = (i / ne1) % ne2; - const int64_t i3 = i / (ne2 * ne1); - float * dst_row = dst_data + (i3 * nb3 + i2 * nb2 + i1 * nb1) / sizeof(float); - - const int64_t i01 = ggml_wrap_index(i1 - s1, ne01); - const int64_t i02 = ggml_wrap_index(i2 - s2, ne02); - const int64_t i03 = ggml_wrap_index(i3 - s3, ne03); - const float * src_row = src_data + (i03 * nb03 + i02 * nb02 + i01 * nb01) / sizeof(float); - - const int64_t s = ggml_wrap_index(-s0, ne00); - const int64_t n = ne00 - s; - ggml_vec_cpy_f32(n, dst_row, src_row + s); - ggml_vec_cpy_f32(s, dst_row + n, src_row); - } -} - -void ggml_compute_forward_roll(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_roll_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_arange - -static void ggml_compute_forward_arange_f32(const ggml_compute_params * params, ggml_tensor * dst) { - GGML_ASSERT(dst->nb[0] == sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - const float start = ggml_get_op_params_f32(dst, 0); - const float stop = ggml_get_op_params_f32(dst, 1); - const float step = ggml_get_op_params_f32(dst, 2); - - const int64_t steps = (int64_t) ceilf((stop - start) / step); - - GGML_ASSERT(ggml_nelements(dst) == steps); - - for (int64_t i = ith; i < steps; i += nth) { - float value = start + step * i; - ((float *) dst->data)[i] = value; - } 
-} - -void ggml_compute_forward_arange(const ggml_compute_params * params, ggml_tensor * dst) { - switch (dst->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_arange_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -static void ggml_compute_forward_timestep_embedding_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_ASSERT(src0->nb[0] == sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - GGML_TENSOR_UNARY_OP_LOCALS - - const int dim = ggml_get_op_params_i32(dst, 0); - const int max_period = ggml_get_op_params_i32(dst, 1); - - int half = dim / 2; - - for (int64_t i = 0; i < ne00; i++) { - float * embed_data = (float *) ((char *) dst->data + i * nb1); - for (int64_t j = ith; j < half; j += nth) { - float timestep = ((float *) src0->data)[i]; - float freq = (float) expf(-logf(max_period) * j / half); - float arg = timestep * freq; - embed_data[j] = cosf(arg); - embed_data[j + half] = sinf(arg); - } - if (dim % 2 != 0 && ith == 0) { - embed_data[2 * half] = 0.f; - } - } -} - -void ggml_compute_forward_timestep_embedding(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_timestep_embedding_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_argsort - -template <ggml_sort_order order> struct cmp_argsort { - const float * data; - - bool operator()(int32_t a, int32_t b) const { - if constexpr (order == GGML_SORT_ORDER_ASC) { - return data[a] < data[b]; - } else { - return data[a] > data[b]; - } - } -}; - -static void ggml_compute_forward_argsort_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_TENSOR_UNARY_OP_LOCALS - - GGML_ASSERT(nb0 == sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - const int64_t nr = ggml_nrows(src0); - - ggml_sort_order order = (ggml_sort_order) ggml_get_op_params_i32(dst, 0); - - for (int64_t i = ith; i < nr; i += nth) { - const float * src_data = (float *) ((char *) src0->data + i * nb01); - - int32_t * dst_data = (int32_t *) ((char *) dst->data + i * nb1); - - for (int64_t j = 0; j < ne0; j++) { - dst_data[j] = j; - } - - switch (order) { - case GGML_SORT_ORDER_ASC: - std::sort(dst_data, dst_data + ne0, cmp_argsort<GGML_SORT_ORDER_ASC>{ src_data }); - break; - - case GGML_SORT_ORDER_DESC: - std::sort(dst_data, dst_data + ne0, cmp_argsort<GGML_SORT_ORDER_DESC>{ src_data }); - break; - - default: - GGML_ABORT("invalid sort order"); - } - } -} - -void ggml_compute_forward_argsort(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_argsort_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_top_k - -struct cmp_top_k { - const float * data; - - bool operator()(int32_t a, int32_t b) const { return data[a] > data[b]; } -}; - -static void ggml_compute_forward_top_k_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - GGML_TENSOR_UNARY_OP_LOCALS - - GGML_ASSERT(nb0 == sizeof(float)); - - const int ith = params->ith; - const int nth = params->nth; - - const int64_t nr = ggml_nrows(src0); - - const int top_k = ne0; - - int32_t * tmp = (int32_t *) params->wdata + (ne00 + CACHE_LINE_SIZE_F32) * ith; -
- for (int64_t i = ith; i < nr; i += nth) { - const float * src_data = (float *) ((char *) src0->data + i * nb01); - - for (int64_t j = 0; j < ne00; j++) { - tmp[j] = j; - } - - std::partial_sort(tmp, tmp + top_k, tmp + ne00, cmp_top_k{ src_data }); - - int32_t * dst_data = (int32_t *) ((char *) dst->data + i * nb1); - - std::copy(tmp, tmp + top_k, dst_data); - - // emphasize that the order is not important - if (top_k > 1) { - std::swap(dst_data[0], dst_data[1]); - } - } -} - -void ggml_compute_forward_top_k(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_top_k_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_flash_attn_ext - -static void ggml_compute_forward_flash_attn_ext_f16_one_chunk(const ggml_compute_params * params, - ggml_tensor * dst, - int ir0, - int ir1) { - const ggml_tensor * q = dst->src[0]; - const ggml_tensor * k = dst->src[1]; - const ggml_tensor * v = dst->src[2]; - const ggml_tensor * mask = dst->src[3]; - const ggml_tensor * sinks = dst->src[4]; - - GGML_TENSOR_LOCALS(int64_t, neq, q, ne) - GGML_TENSOR_LOCALS(size_t, nbq, q, nb) - GGML_TENSOR_LOCALS(int64_t, nek, k, ne) - GGML_TENSOR_LOCALS(size_t, nbk, k, nb) - GGML_TENSOR_LOCALS(int64_t, nev, v, ne) - GGML_TENSOR_LOCALS(size_t, nbv, v, nb) - GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) - GGML_TENSOR_LOCALS(size_t, nb, dst, nb) - - const int64_t DK = nek0; - const int64_t DV = nev0; - const int64_t N = neq1; - - GGML_ASSERT(ne0 == DV); - GGML_ASSERT(ne2 == N); - - // input tensor rows must be contiguous - GGML_ASSERT(nbq0 == ggml_type_size(q->type)); - GGML_ASSERT(nbk0 == ggml_type_size(k->type)); - GGML_ASSERT(nbv0 == ggml_type_size(v->type)); - - GGML_ASSERT(neq0 == DK); - GGML_ASSERT(nek0 == DK); - GGML_ASSERT(nev0 == DV); - - GGML_ASSERT(neq1 == N); - - // dst cannot be transposed or permuted - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb0 <= nb1); - GGML_ASSERT(nb1 <= nb2); - GGML_ASSERT(nb2 <= nb3); - - // broadcast factors - const int64_t rk2 = neq2 / nek2; - const int64_t rk3 = neq3 / nek3; - - const int64_t rv2 = neq2 / nev2; - const int64_t rv3 = neq3 / nev3; - - // parallelize by q rows using ggml_vec_dot_f32 - - float scale = 1.0f; - float max_bias = 0.0f; - float logit_softcap = 0.0f; - - memcpy(&scale, (float *) dst->op_params + 0, sizeof(float)); - memcpy(&max_bias, (float *) dst->op_params + 1, sizeof(float)); - memcpy(&logit_softcap, (float *) dst->op_params + 2, sizeof(float)); - - if (logit_softcap != 0) { - scale /= logit_softcap; - } - - const uint32_t n_head = neq2; - const uint32_t n_head_log2 = 1u << (uint32_t) floor(log2(n_head)); - - const float m0 = powf(2.0f, -(max_bias) / n_head_log2); - const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2); - - const ggml_type k_vec_dot_type = ggml_get_type_traits_cpu(k->type)->vec_dot_type; - const ggml_from_float_t q_to_vec_dot = ggml_get_type_traits_cpu(k_vec_dot_type)->from_float; - const ggml_vec_dot_t kq_vec_dot = ggml_get_type_traits_cpu(k->type)->vec_dot; - const ggml_to_float_t v_to_float = ggml_get_type_traits(v->type)->to_float; - - GGML_ASSERT((q_to_vec_dot) && "fattn: unsupported K-type"); - GGML_ASSERT((v->type == GGML_TYPE_F32 || v_to_float) && "fattn: unsupported V-type"); - - int ith = params->ith; - - // loop over n_batch and n_head - for (int ir = ir0; ir < ir1; ++ir) { - // q indices - const int iq3 = ir / (neq2 * neq1); - const int iq2 = 
(ir - iq3 * neq2 * neq1) / neq1; - const int iq1 = (ir - iq3 * neq2 * neq1 - iq2 * neq1); - - const uint32_t h = iq2; // head index - const float slope = - (max_bias > 0.0f) ? h < n_head_log2 ? powf(m0, h + 1) : powf(m1, 2 * (h - n_head_log2) + 1) : 1.0f; - - float S = 0.0f; // sum - float M = -INFINITY; // maximum KQ value - - float * VKQ32 = - (float *) params->wdata + ith * (1 * DK + 2 * DV + CACHE_LINE_SIZE_F32); // FP32 VKQ accumulator - float * V32 = (VKQ32 + 1 * DV); // (temporary) FP32 V buffer - ggml_fp16_t * VKQ16 = (ggml_fp16_t *) (VKQ32 + 1 * DV); // (temporary) FP16 VKQ accumulator - ggml_fp16_t * Q_q = (ggml_fp16_t *) (VKQ32 + 2 * DV); // (temporary) buffer for Q converted to quantized/FP16 - - if (v->type == GGML_TYPE_F16) { - memset(VKQ16, 0, DV * sizeof(ggml_fp16_t)); - } else { - memset(VKQ32, 0, DV * sizeof(float)); - } - - const ggml_fp16_t * mp = - mask ? (ggml_fp16_t *) ((char *) mask->data + iq1 * mask->nb[1] + (iq2 % mask->ne[2]) * mask->nb[2] + - (iq3 % mask->ne[3]) * mask->nb[3]) : - NULL; - - // k indices - const int ik3 = iq3 / rk3; - const int ik2 = iq2 / rk2; - - // v indices - const int iv3 = iq3 / rv3; - const int iv2 = iq2 / rv2; - - const float * pq = (const float *) ((char *) q->data + (iq1 * nbq1 + iq2 * nbq2 + iq3 * nbq3)); - q_to_vec_dot(pq, Q_q, DK); - - // online softmax / attention - // loop over n_kv and n_head_kv - // ref: https://arxiv.org/pdf/2112.05682.pdf - for (int64_t ic = 0; ic < nek1; ++ic) { - const float mv = mp ? slope * GGML_CPU_FP16_TO_FP32(mp[ic]) : 0.0f; - if (mv == -INFINITY) { - continue; - } - - float s; // KQ value - - const char * k_data = (const char *) k->data + (ic * nbk1 + ik2 * nbk2 + ik3 * nbk3); - kq_vec_dot(DK, &s, 0, k_data, 0, Q_q, 0, 1); - - s = s * scale; // scale KQ value - - if (logit_softcap != 0.0f) { - s = logit_softcap * tanhf(s); - } - - s += mv; // apply mask - - const float Mold = M; - - float ms = 1.0f; // upon new higher max val, scale VKQ and KQ sum with this value - float vs = 1.0f; // post-softmax KQ value, expf(s - M) - - const char * v_data = ((const char *) v->data + (ic * nbv1 + iv2 * nbv2 + iv3 * nbv3)); - - if (v->type == GGML_TYPE_F16) { - if (s > M) { - // s is new maximum, ms < 1.0f, vs == expf(s - s) == 1.0f - M = s; - ms = expf(Mold - M); - - // V = V*expf(Mold - M) - ggml_vec_scale_f16(DV, VKQ16, ms); - } else { - // no new maximum, ms == 1.0f, vs != 1.0f - vs = expf(s - M); - } - - // V += v*expf(s - M) - ggml_vec_mad_f16(DV, VKQ16, (const ggml_fp16_t *) v_data, vs); - } else { - if (s > M) { - // s is new maximum, ms < 1.0f, vs == expf(s - s) == 1.0f - M = s; - ms = expf(Mold - M); - - // V = V*expf(Mold - M) - ggml_vec_scale_f32(DV, VKQ32, ms); - } else { - // no new maximum, ms == 1.0f, vs != 1.0f - vs = expf(s - M); - } - - // V += v*expf(s - M) - if (v_to_float) { - v_to_float(v_data, V32, DV); - ggml_vec_mad_f32(DV, VKQ32, V32, vs); - } else { - // V is F32 - ggml_vec_mad_f32(DV, VKQ32, (const float *) v_data, vs); - } - } - - S = S * ms + vs; // scale and increment sum with partial sum - } - - if (v->type == GGML_TYPE_F16) { - for (int64_t d = 0; d < DV; ++d) { - VKQ32[d] = GGML_CPU_FP16_TO_FP32(VKQ16[d]); - } - } - - // sinks - if (sinks) { - const float s = ((float *) ((char *) sinks->data))[h]; - - float ms = 1.0f; - float vs = 1.0f; - - if (s > M) { - ms = expf(M - s); - ggml_vec_scale_f32(DV, VKQ32, ms); - } else { - vs = expf(s - M); - } - - S = S * ms + vs; - } - - // V /= S - const float S_inv = S == 0.0f ? 
0.0f : 1.0f / S; - ggml_vec_scale_f32(DV, VKQ32, S_inv); - - // dst indices - const int i1 = iq1; - const int i2 = iq2; - const int i3 = iq3; - - // original - //memcpy((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3), V, nev0*sizeof(float)); - - // permute(0, 2, 1, 3) - memcpy((char *) dst->data + (i3 * ne2 * ne1 + i2 + i1 * ne1) * nb1, VKQ32, nb1); - } -} - -static void ggml_compute_forward_flash_attn_ext_f16(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * q = dst->src[0]; - const ggml_tensor * k = dst->src[1]; - const ggml_tensor * v = dst->src[2]; - - GGML_TENSOR_LOCALS(int64_t, neq, q, ne) - GGML_TENSOR_LOCALS(size_t, nbq, q, nb) - GGML_TENSOR_LOCALS(int64_t, nek, k, ne) - GGML_TENSOR_LOCALS(size_t, nbk, k, nb) - GGML_TENSOR_LOCALS(int64_t, nev, v, ne) - GGML_TENSOR_LOCALS(size_t, nbv, v, nb) - GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) - GGML_TENSOR_LOCALS(size_t, nb, dst, nb) - - const int64_t DK = nek0; - const int64_t DV = nev0; - const int64_t N = neq1; - - GGML_ASSERT(ne0 == DV); - GGML_ASSERT(ne2 == N); - - // input tensor rows must be contiguous - GGML_ASSERT(nbq0 == ggml_type_size(q->type)); - GGML_ASSERT(nbk0 == ggml_type_size(k->type)); - GGML_ASSERT(nbv0 == ggml_type_size(v->type)); - - GGML_ASSERT(neq0 == DK); - GGML_ASSERT(nek0 == DK); - GGML_ASSERT(nev0 == DV); - - GGML_ASSERT(neq1 == N); - - // dst cannot be transposed or permuted - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb0 <= nb1); - GGML_ASSERT(nb1 <= nb2); - GGML_ASSERT(nb2 <= nb3); - - // parallelize by q rows using ggml_vec_dot_f32 - - // total rows in q - const int64_t nr = neq1 * neq2 * neq3; - - // rows per thread - const int ith = params->ith; - const int nth = params->nth; - - // disable for NUMA - const bool disable_chunking = ggml_is_numa(); - - // 4x chunks per thread - int nth_scaled = nth * 4; - int64_t chunk_size = (nr + nth_scaled - 1) / nth_scaled; - int64_t nchunk = (nr + chunk_size - 1) / chunk_size; - - if (nth == 1 || nchunk < nth || disable_chunking) { - nchunk = nth; - } - - if (ith == 0) { - // Every thread starts at ith, so the first unprocessed chunk is nth. This save a bit of coordination right at the start. - ggml_threadpool_chunk_set(params->threadpool, nth); - } - - ggml_barrier(params->threadpool); - - // The number of elements in each chunk - const int64_t dr = (nr + nchunk - 1) / nchunk; - - // The first chunk comes from our thread_id, the rest will get auto-assigned. 
- int current_chunk = ith; - - while (current_chunk < nchunk) { - const int64_t ir0 = dr * current_chunk; - const int64_t ir1 = MIN(ir0 + dr, nr); - - ggml_compute_forward_flash_attn_ext_f16_one_chunk(params, dst, ir0, ir1); - - current_chunk = ggml_threadpool_chunk_add(params->threadpool, 1); - } -} - -void ggml_compute_forward_flash_attn_ext(const ggml_compute_params * params, ggml_tensor * dst) { - switch (dst->op_params[3]) { - case GGML_PREC_DEFAULT: - case GGML_PREC_F32: - { - // uses F32 accumulators - ggml_compute_forward_flash_attn_ext_f16(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_flash_attn_back - -static void ggml_compute_forward_flash_attn_back_f32(const ggml_compute_params * params, - const bool masked, - ggml_tensor * dst) { - const ggml_tensor * q = dst->src[0]; - const ggml_tensor * k = dst->src[1]; - const ggml_tensor * v = dst->src[2]; - const ggml_tensor * d = dst->src[3]; - - GGML_TENSOR_LOCALS(int64_t, neq, q, ne) - GGML_TENSOR_LOCALS(size_t, nbq, q, nb) - GGML_TENSOR_LOCALS(int64_t, nek, k, ne) - GGML_TENSOR_LOCALS(size_t, nbk, k, nb) - GGML_TENSOR_LOCALS(int64_t, nev, v, ne) - GGML_TENSOR_LOCALS(size_t, nbv, v, nb) - GGML_TENSOR_LOCALS(int64_t, ned, d, ne) - GGML_TENSOR_LOCALS(size_t, nbd, d, nb) - GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) - GGML_TENSOR_LOCALS(size_t, nb, dst, nb) - - const int ith = params->ith; - const int nth = params->nth; - - const int64_t D = neq0; - const int64_t N = neq1; - const int64_t P = nek1 - N; - const int64_t M = P + N; - - const int Mup = ggml_up(M, GGML_SOFT_MAX_UNROLL); - const int mxDM = MAX(D, Mup); - - // GGML_ASSERT(ne0 == D); - // GGML_ASSERT(ne1 == N); - GGML_ASSERT(P >= 0); - - GGML_ASSERT(nbq0 == sizeof(float)); - GGML_ASSERT(nbk0 == sizeof(float)); - GGML_ASSERT(nbv0 == sizeof(float)); - - GGML_ASSERT(neq0 == D); - GGML_ASSERT(nek0 == D); - GGML_ASSERT(nev1 == D); - GGML_ASSERT(ned0 == D); - - GGML_ASSERT(neq1 == N); - GGML_ASSERT(nek1 == N + P); - GGML_ASSERT(nev1 == D); - GGML_ASSERT(ned1 == N); - - // dst cannot be transposed or permuted - GGML_ASSERT(nb0 == sizeof(float)); - GGML_ASSERT(nb0 <= nb1); - GGML_ASSERT(nb1 <= nb2); - GGML_ASSERT(nb2 <= nb3); - - if (ith == 0) { - memset(dst->data, 0, nb0 * ne0 * ne1 * ne2 * ne3); - } - ggml_barrier(params->threadpool); - - const int64_t elem_q = ggml_nelements(q); - const int64_t elem_k = ggml_nelements(k); - - ggml_type result_type = dst->type; - GGML_ASSERT(ggml_blck_size(result_type) == 1); - const size_t tsize = ggml_type_size(result_type); - - const size_t offs_q = 0; - const size_t offs_k = offs_q + GGML_PAD(elem_q * tsize, GGML_MEM_ALIGN); - const size_t offs_v = offs_k + GGML_PAD(elem_k * tsize, GGML_MEM_ALIGN); - - void * grad_q = (char *) dst->data; - void * grad_k = (char *) dst->data + offs_k; - void * grad_v = (char *) dst->data + offs_v; - - const size_t nbgq1 = nb0 * neq0; - const size_t nbgq2 = nb0 * neq0 * neq1; - const size_t nbgq3 = nb0 * neq0 * neq1 * neq2; - - const size_t nbgk1 = nb0 * nek0; - const size_t nbgk2 = nb0 * nek0 * nek1; - const size_t nbgk3 = nb0 * nek0 * nek1 * neq2; - - const size_t nbgv1 = nb0 * nev0; - const size_t nbgv2 = nb0 * nev0 * nev1; - const size_t nbgv3 = nb0 * nev0 * nev1 * neq2; - - // parallelize by k rows using ggml_vec_dot_f32 - - // total rows in k - const int nr = nek2 * nek3; - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - const float scale 
= 1.0f / sqrtf(D); - - //printf("P=%d N=%d D=%d ir0=%d ir1=%d scale = %f\n", P, N, D, ir0, ir1, scale); - - // how often k2 (and v2) is repeated in q2 - int nrep = neq2 / nek2; - - for (int ir = ir0; ir < ir1; ++ir) { - // q indices - const int ik3 = ir / (nek2); - const int ik2 = ir - ik3 * nek2; - - const int iq3 = ik3; - const int id3 = ik3; - const int iv3 = ik3; - const int iv2 = ik2; - - for (int irep = 0; irep < nrep; ++irep) { - const int iq2 = ik2 + irep * nek2; - const int id2 = iq2; - - // (ik2 + irep*nek2) % nek2 == ik2 - for (int iq1 = 0; iq1 < neq1; ++iq1) { - const int id1 = iq1; - - // not sure about CACHE_LINE_SIZE_F32.. - // - maybe it must not be multiplied by 2 and excluded from .. in SM 1*(..) offset? - float * S = - (float *) params->wdata + ith * 2 * (mxDM + CACHE_LINE_SIZE_F32) + 0 * (mxDM + CACHE_LINE_SIZE_F32); - float * SM = - (float *) params->wdata + ith * 2 * (mxDM + CACHE_LINE_SIZE_F32) + 1 * (mxDM + CACHE_LINE_SIZE_F32); - - for (int i = M; i < Mup; ++i) { - S[i] = -INFINITY; - } - - const int64_t masked_begin = masked ? (P + iq1 + 1) : M; - for (int64_t ic = 0; ic < masked_begin; ++ic) { - // k indices - const int ik1 = ic; - - // S indices - const int i1 = ik1; - - ggml_vec_dot_f32(neq0, S + i1, 0, - (float *) ((char *) k->data + (ik1 * nbk1 + ik2 * nbk2 + ik3 * nbk3)), 0, - (float *) ((char *) q->data + (iq1 * nbq1 + iq2 * nbq2 + iq3 * nbq3)), 0, 1); - } - - // scale - ggml_vec_scale_f32(masked_begin, S, scale); - - for (int64_t i = masked_begin; i < M; i++) { - S[i] = -INFINITY; - } - - // softmax - // exclude known -INF S[..] values from max and loop - // dont forget to set their SM values to zero - { - float max = -INFINITY; - ggml_vec_max_f32(masked_begin, &max, S); - - ggml_float sum = 0.0; - { -#ifdef GGML_SOFT_MAX_ACCELERATE - max = -max; - vDSP_vsadd(SM, 1, &max, SM, 1, Mup); - vvexpf(SM, SM, &Mup); - ggml_vec_sum_f32(Mup, &sum, SM); -#else - sum = ggml_vec_soft_max_f32(Mup, SM, S, max); -#endif - } - - assert(sum > 0.0); - - sum = 1.0 / sum; - ggml_vec_scale_f32(masked_begin, SM, sum); - } - - // step-by-step explanation - { - // forward-process shape grads from backward process - // parallel_for ik2,ik3: - // for irep: - // iq2 = ik2 + irep*nek2 - // k[:D,:M,:,:] [D,M,:,:] grad[k][:D,:M,ik2,ik3] += grad[kcur] - // q[:D,:N,:,:] [D,N,:,:] grad[q][:D,iq1,iq2,iq3] += grad[qcur] - // v[:M,:D,:,:] [M,D,:,:] grad[v][:M,:D,iv2,iv3] += grad[vcur] - // for iq1: - // kcur = k[:D,:M,ik2,ik3] [D,M,1,1] grad[kcur] = grad[S1].T @ qcur - // qcur = q[:D,iq1,iq2,iq3] [D,1,1,1] grad[qcur] = grad[S1] @ kcur - // vcur = v[:M,:D,iv2,iv3] [M,D,1,1] grad[vcur] = grad[S5].T @ S4 - // S0 = -Inf [D,1,1,1] - // ~S1[i] = dot(kcur[:D,i], qcur) - // S1 = qcur @ kcur.T [M,1,1,1] grad[S1] = grad[S2] * scale - // S2 = S1 * scale [M,1,1,1] grad[S2] = diag_mask_zero(grad[S3], P) - // S3 = diag_mask_inf(S2, P) [M,1,1,1] grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4])) - // S4 = softmax(S3) [M,1,1,1] grad[S4] = grad[S5] @ vcur - // ~S5[i] = dot(vcur[:,i], S4) - // S5 = S4 @ vcur.T [D,1,1,1] grad[S5] = d[:D,id1,id2,id3] - // ~dst[i,iq1,iq2,iq3] = S5[i] ^ - // dst[:D,iq1,iq2,iq3] = S5 | grad[dst[:D,iq1,iq2,iq3]] = d[:D,id1,id2,id3] - // dst backward-/ grad[dst] = d - // - // output gradients with their dependencies: - // - // grad[kcur] = grad[S1].T @ qcur - // grad[S1] = diag_mask_zero(grad[S3], P) * scale - // grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4])) - // grad[S4] = grad[S5] @ vcur - // grad[S4] = d[:D,id1,id2,id3] @ vcur - // grad[qcur] = grad[S1] @ kcur - // grad[vcur] = 
grad[S5].T @ S4 - // grad[vcur] = d[:D,id1,id2,id3].T @ S4 - // - // in post-order: - // - // S1 = qcur @ kcur.T - // S2 = S1 * scale - // S3 = diag_mask_inf(S2, P) - // S4 = softmax(S3) - // grad[S4] = d[:D,id1,id2,id3] @ vcur - // grad[S3] = S4 * (grad[S4] - dot(S4, grad[S4])) - // grad[S1] = diag_mask_zero(grad[S3], P) * scale - // grad[qcur] = grad[S1] @ kcur - // grad[kcur] = grad[S1].T @ qcur - // grad[vcur] = d[:D,id1,id2,id3].T @ S4 - // - // using less variables (SM=S4): - // - // S = diag_mask_inf(qcur @ kcur.T * scale, P) - // SM = softmax(S) - // S = d[:D,iq1,iq2,iq3] @ vcur - // dot_SM_gradSM = dot(SM, S) - // S = SM * (S - dot(SM, S)) - // S = diag_mask_zero(S, P) * scale - // - // grad[q][:D,iq1,iq2,iq3] += S @ kcur - // grad[k][:D,:M,ik2,ik3] += S.T @ qcur - // grad[v][:M,:D,iv2,iv3] += d[:D,id1,id2,id3].T @ SM - } - - // S = gradSM = d[:D,id1,id2,id3] @ vcur[:,:,iv2,iv3] - // S = d[:D,id1,id2,id3] @ vcur[:,:,iv2,iv3] - // for ic: - // S[:M] += vcur[:M,ic,iv2,iv3] * d[ic,id1,id2,id3] - // exclude known future zero S[..] values from operation - ggml_vec_set_f32(masked_begin, S, 0); - for (int64_t ic = 0; ic < D; ++ic) { - ggml_vec_mad_f32( - masked_begin, S, (float *) ((char *) v->data + (ic * nbv1 + iv2 * nbv2 + iv3 * nbv3)), - *(float *) ((char *) d->data + (ic * nbd0 + id1 * nbd1 + id2 * nbd2 + id3 * nbd3))); - } - - // S = SM * (S - dot(SM, S)) - float dot_SM_gradSM = 0; - ggml_vec_dot_f32(masked_begin, &dot_SM_gradSM, 0, SM, 0, S, 0, 1); - ggml_vec_acc1_f32(M, S, -dot_SM_gradSM); - ggml_vec_mul_f32(masked_begin, S, S, SM); - - // S = diag_mask_zero(S, P) * scale - // already done by above ggml_vec_set_f32 - - // exclude known zero S[..] values from operation - ggml_vec_scale_f32(masked_begin, S, scale); - - // S shape [M,1] - // SM shape [M,1] - // kcur shape [D,M] - // qcur shape [D,1] - // vcur shape [M,D] - - // grad[q][:D,iq1,iq2,iq3] += S @ kcur - // grad[q][:D,iq1,iq2,iq3] += shape[M,1] @ shape[D,M] - // for ic: - // grad[q][:D,iq1,iq2,iq3] += S[ic] * kcur[:D,ic,ik2,ik3] - // exclude known zero S[..] values from loop - for (int64_t ic = 0; ic < masked_begin; ++ic) { - ggml_vec_mad_f32(D, (float *) ((char *) grad_q + (iq1 * nbgq1 + iq2 * nbgq2 + iq3 * nbgq3)), - (float *) ((char *) k->data + (ic * nbk1 + ik2 * nbk2 + ik3 * nbk3)), S[ic]); - } - - // grad[k][:D,:M,iq2,iq3] += S.T @ qcur - // for ic: - // grad[k][:D,ic,iq2,iq3] += S.T[0,ic] * qcur[:D,0] - // grad[k][:D,ic,iq2,iq3] += S[ic] * qcur[:D,0] - // exclude known zero S[..] values from loop - for (int64_t ic = 0; ic < masked_begin; ++ic) { - ggml_vec_mad_f32(D, (float *) ((char *) grad_k + (ic * nbgk1 + ik2 * nbgk2 + ik3 * nbgk3)), - (float *) ((char *) q->data + (iq1 * nbq1 + iq2 * nbq2 + iq3 * nbq3)), S[ic]); - } - - // grad[v][:M,:D,iv2,iv3] += d[:D,id1,id2,id3].T @ SM - // for ic: - // grad[v][:M,ic,iv2,iv3] += d[:D,id1,id2,id3].T[0,ic] * SM[:M] - // grad[v][:M,ic,iv2,iv3] += d[ic,id1,id2,id3] * SM[:M] - // exclude known zero SM[..] 
values from mad - for (int64_t ic = 0; ic < D; ++ic) { - ggml_vec_mad_f32( - masked_begin, (float *) ((char *) grad_v + (ic * nbgv1 + iv2 * nbgv2 + iv3 * nbgv3)), SM, - *(float *) ((char *) d->data + (ic * nbd0 + id1 * nbd1 + id2 * nbd2 + id3 * nbd3))); - } - } - } - } -} - -void ggml_compute_forward_flash_attn_back(const ggml_compute_params * params, const bool masked, ggml_tensor * dst) { - const ggml_tensor * q = dst->src[0]; - - switch (q->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_flash_attn_back_f32(params, masked, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_ssm_conv - -static void ggml_compute_forward_ssm_conv_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; // conv_x - const ggml_tensor * src1 = dst->src[1]; // conv1d.weight - - const int ith = params->ith; - const int nth = params->nth; - - const int nc = src1->ne[0]; // d_conv - const int ncs = src0->ne[0]; // d_conv - 1 + n_t - const int nr = src0->ne[1]; // d_inner - const int n_t = dst->ne[1]; // tokens per sequence - const int n_s = dst->ne[2]; // number of sequences in the batch - - GGML_ASSERT(dst->ne[0] == nr); - GGML_ASSERT(src0->nb[0] == sizeof(float)); - GGML_ASSERT(src1->nb[0] == sizeof(float)); - GGML_ASSERT(src0->nb[1] == src0->ne[0] * sizeof(float)); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - const int ir = ir1 - ir0; - - for (int i3 = 0; i3 < n_s; ++i3) { - for (int i2 = 0; i2 < n_t; ++i2) { - // {d_conv - 1 + n_t, d_inner, n_seqs} - // sliding window - const float * s = (const float *) ((const char *) src0->data + ir0 * (src0->nb[1]) + i2 * (src0->nb[0]) + - i3 * (src0->nb[2])); // {d_conv, d_inner, n_s} - const float * c = (const float *) ((const char *) src1->data + ir0 * (src1->nb[1])); // {d_conv, d_inner} - float * x = (float *) ((char *) dst->data + ir0 * (dst->nb[0]) + i2 * (dst->nb[1]) + - i3 * (dst->nb[2])); // {d_inner, n_t, n_s} - - // TODO: transpose the output for smaller strides for big batches? 
- // d_inner - for (int i1 = 0; i1 < ir; ++i1) { - // rowwise dot product - // NOTE: not using ggml_vec_dot_f32, because its sum is in double precision - float sumf = 0.0f; - - // d_conv - for (int i0 = 0; i0 < nc; ++i0) { - sumf += s[i0 + i1 * ncs] * c[i0 + i1 * nc]; - } - x[i1] = sumf; - } - } - } -} - -void ggml_compute_forward_ssm_conv(const ggml_compute_params * params, ggml_tensor * dst) { - switch (dst->src[0]->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_ssm_conv_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_ssm_scan - -static void ggml_compute_forward_ssm_scan_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; // s {d_state, dim, n_head, n_seqs+} - const ggml_tensor * src1 = dst->src[1]; // x {dim, n_head, n_seq_tokens, n_seqs} - const ggml_tensor * src2 = dst->src[2]; // dt {n_head, n_seq_tokens, n_seqs} - const ggml_tensor * src3 = dst->src[3]; // A {d_state, n_head} or {1, n_head} - const ggml_tensor * src4 = dst->src[4]; // B {d_state, n_group, n_seq_tokens, n_seqs} - const ggml_tensor * src5 = dst->src[5]; // C {d_state, n_group, n_seq_tokens, n_seqs} - const ggml_tensor * src6 = dst->src[6]; // ids {n_seqs} - - const int ith = params->ith; - const int nth = params->nth; - - const int64_t nc = src0->ne[0]; // d_state - const int64_t nr = src0->ne[1]; // dim - const int64_t nh = src1->ne[1]; // n_head - const int64_t ng = src4->ne[1]; - const int64_t nt = src1->ne[2]; // number of tokens per sequence - const int64_t ns = src1->ne[3]; // number of sequences in the batch - - // can't use ggml_nbytes because src1 is not necessarily contiguous - const int64_t s_off = ggml_nelements(src1) * ggml_element_size(src1); - - GGML_ASSERT(ggml_nelements(src1) + nc * nr * nh * ns == ggml_nelements(dst)); - GGML_ASSERT(src0->nb[0] == sizeof(float)); - GGML_ASSERT(src1->nb[0] == sizeof(float)); - GGML_ASSERT(src2->nb[0] == sizeof(float)); - GGML_ASSERT(src3->nb[0] == sizeof(float)); - GGML_ASSERT(src4->nb[0] == sizeof(float)); - GGML_ASSERT(src5->nb[0] == sizeof(float)); - GGML_ASSERT(src6->nb[0] == sizeof(int32_t)); - GGML_ASSERT(nh % ng == 0); - - // heads per thread - const int dh = (nh + nth - 1) / nth; - - // head range for this thread - const int ih0 = dh * ith; - const int ih1 = MIN(ih0 + dh, nh); - - const int32_t * ids = (const int32_t *) src6->data; - - for (int i3 = 0; i3 < ns; ++i3) { - const float * s0 = - (const float *) ((const char *) src0->data + ids[i3] * (src0->nb[3])); // {d_state, dim, nh, ns} - float * s = (float *) ((char *) dst->data + i3 * (src0->nb[3]) + s_off); // {d_state, dim, nh, ns} - - for (int i2 = 0; i2 < nt; ++i2) { - const float * x = (const float *) ((const char *) src1->data + i2 * (src1->nb[2]) + - i3 * (src1->nb[3])); // {dim, nh, nt, ns} - const float * dt = - (const float *) ((const char *) src2->data + i2 * (src2->nb[1]) + i3 * (src2->nb[2])); // {nh, nt, ns} - const float * A = (const float *) ((const char *) src3->data); // {d_state, nh} or {1, nh} - const float * B = (const float *) ((const char *) src4->data + i2 * (src4->nb[2]) + - i3 * (src4->nb[3])); // {d_state, ng, nt, ns} - const float * C = (const float *) ((const char *) src5->data + i2 * (src5->nb[2]) + - i3 * (src5->nb[3])); // {d_state, ng, nt, ns} - float * y = (float *) ((char *) dst->data + i2 * (nh * nr * sizeof(float)) + - i3 * (nt * nh * nr * sizeof(float))); // {dim, nh, nt, ns} - - if (src3->ne[0] == 1) { - // Mamba-2 has a scalar decay factor per 
head; dA can be outside the state-wise loop - - // n_head - for (int h = ih0; h < ih1; ++h) { - // ref: https://github.com/state-spaces/mamba/blob/62db608da60f6fc790b8ed9f4b3225e95ca15fde/mamba_ssm/ops/triton/softplus.py#L16 - const float dt_soft_plus = ggml_compute_softplus_f32(dt[h]); - const float dA = expf(dt_soft_plus * A[h]); - const int g = h / (nh / ng); // repeat_interleave - - // dim - for (int i1 = 0; i1 < nr; ++i1) { - const int ii = i1 + h * nr; - const float x_dt = x[ii] * dt_soft_plus; - float sumf = 0.0f; -#if defined(GGML_SIMD) -# if defined(__ARM_FEATURE_SVE) - const int ggml_f32_epr = svcntw(); - const int ggml_f32_step = 1 * ggml_f32_epr; - - const int np = (nc & ~(ggml_f32_step - 1)); - - GGML_F32_VEC sum = GGML_F32_VEC_ZERO; - - GGML_F32_VEC adA = GGML_F32_VEC_SET1(dA); - GGML_F32_VEC axdt = GGML_F32_VEC_SET1(x_dt); - - for (int i = 0; i < np; i += ggml_f32_step) { - // TODO: maybe unroll more? - for (int j = 0; j < 1; j++) { - GGML_F32_VEC t0 = GGML_F32_VEC_LOAD(s0 + i + j * ggml_f32_epr + ii * nc); - GGML_F32_VEC t1 = GGML_F32_VEC_LOAD(B + i + j * ggml_f32_epr + g * nc); - GGML_F32_VEC t2 = GGML_F32_VEC_LOAD(C + i + j * ggml_f32_epr + g * nc); - - t0 = GGML_F32_VEC_MUL(t0, adA); - t1 = GGML_F32_VEC_MUL(t1, axdt); - - t0 = GGML_F32_VEC_ADD(t0, t1); - - sum = GGML_F32_VEC_FMA(sum, t0, t2); - - GGML_F32_VEC_STORE(s + i + j * ggml_f32_epr + ii * nc, t0); - } - } - - sumf = GGML_F32xt_REDUCE_ONE(sum); -# elif defined(__riscv_v_intrinsic) - // todo: RVV implementation - const int np = 0; -# else - const int np = (nc & ~(GGML_F32_STEP - 1)); - - GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; - - GGML_F32_VEC adA = GGML_F32_VEC_SET1(dA); - GGML_F32_VEC axdt = GGML_F32_VEC_SET1(x_dt); - - GGML_F32_VEC ax[GGML_F32_ARR]; - GGML_F32_VEC ay[GGML_F32_ARR]; - GGML_F32_VEC az[GGML_F32_ARR]; - - for (int i = 0; i < np; i += GGML_F32_STEP) { - for (int j = 0; j < GGML_F32_ARR; j++) { - ax[j] = GGML_F32_VEC_LOAD(s0 + i + j * GGML_F32_EPR + ii * nc); - ay[j] = GGML_F32_VEC_LOAD(B + i + j * GGML_F32_EPR + g * nc); - az[j] = GGML_F32_VEC_LOAD(C + i + j * GGML_F32_EPR + g * nc); - - ax[j] = GGML_F32_VEC_MUL(ax[j], adA); - ay[j] = GGML_F32_VEC_MUL(ay[j], axdt); - - ax[j] = GGML_F32_VEC_ADD(ax[j], ay[j]); - - sum[j] = GGML_F32_VEC_FMA(sum[j], ax[j], az[j]); - - GGML_F32_VEC_STORE(s + i + j * GGML_F32_EPR + ii * nc, ax[j]); - } - } - - // reduce sum0..sum3 to sum0 - GGML_F32_VEC_REDUCE(sumf, sum); -# endif -#else - const int np = 0; -#endif - // d_state - for (int i0 = np; i0 < nc; ++i0) { - const int i = i0 + ii * nc; - const int ig = i0 + g * nc; - // state = prev_state * dA + dB * x - const float state = (s0[i] * dA) + (B[ig] * x_dt); - // y = rowwise_dotprod(state, C) - sumf += state * C[ig]; - s[i] = state; - } - y[ii] = sumf; - } - } - } else { - // Mamba-1 has an element-wise decay factor for the states - - // n_head - for (int h = ih0; h < ih1; ++h) { - // ref: https://github.com/state-spaces/mamba/blob/62db608da60f6fc790b8ed9f4b3225e95ca15fde/mamba_ssm/ops/triton/softplus.py#L16 - const float dt_soft_plus = ggml_compute_softplus_f32(dt[h]); - const int g = h / (nh / ng); // repeat_interleave - - // dim - for (int i1 = 0; i1 < nr; ++i1) { - const int ii = i1 + h * nr; - const float x_dt = x[ii] * dt_soft_plus; -#if defined(__ARM_FEATURE_SVE) - svfloat32_t vx_dt = GGML_F32_VEC_SET1(x_dt); - svfloat32_t vdt_soft_plus = GGML_F32_VEC_SET1(dt_soft_plus); - svfloat32_t r1_vector = GGML_F32_VEC_ZERO; - - // d_state - // TODO: what happens when (d_state % svcntw()) != 0? 
- for (int64_t k = 0; k < nc; k += svcntw()) { - svfloat32_t vA = GGML_F32_VEC_LOAD(&A[h * nc + k]); - svfloat32_t vB = GGML_F32_VEC_LOAD(&B[k + g * nc]); - svfloat32_t vC = GGML_F32_VEC_LOAD(&C[k + g * nc]); - svfloat32_t vs0 = GGML_F32_VEC_LOAD(&s0[ii * nc + k]); - - svfloat32_t t1 = GGML_F32_VEC_MUL(vdt_soft_plus, vA); - t1 = exp_ps_sve(svptrue_b32(), t1); - svfloat32_t t2 = GGML_F32_VEC_MUL(vx_dt, vB); - - vs0 = GGML_F32_VEC_FMA(t2, vs0, t1); - r1_vector = GGML_F32_VEC_ADD(GGML_F32_VEC_MUL(vs0, vC), r1_vector); - - GGML_F32_VEC_STORE(&s[ii * nc + k], vs0); - } - y[ii] = GGML_F32xt_REDUCE_ONE(r1_vector); -#else - float sumf = 0.0f; - // NOTE: can't really use GGML_SIMD here because d_state is usually 16 - // and also because expf is used within the loop. - // d_state - for (int i0 = 0; i0 < nc; ++i0) { - const int i = i0 + ii * nc; - const int ig = i0 + g * nc; - // state = prev_state * dA + dB * x - const float state = (s0[i] * expf(dt_soft_plus * A[i0 + h * nc])) + (B[ig] * x_dt); - // y = rowwise_dotprod(state, C) - sumf += state * C[ig]; - s[i] = state; - } - y[ii] = sumf; -#endif - } - } - } - // use the output as the source when it's not the first token-wise iteration - s0 = s; - } - } -} - -void ggml_compute_forward_ssm_scan(const ggml_compute_params * params, ggml_tensor * dst) { - switch (dst->src[0]->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_ssm_scan_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_win_part - -static void ggml_compute_forward_win_part_f32(const ggml_compute_params * params, ggml_tensor * dst) { - GGML_UNUSED(params); - - const ggml_tensor * src0 = dst->src[0]; - - GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) - GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) - - const int32_t nep0 = ((const int32_t *) (dst->op_params))[0]; - const int32_t nep1 = ((const int32_t *) (dst->op_params))[1]; - const int32_t w = ((const int32_t *) (dst->op_params))[2]; - - assert(ne00 == ne0); - assert(ne3 == nep0 * nep1); - - // TODO: optimize / multi-thread - for (int py = 0; py < nep1; ++py) { - for (int px = 0; px < nep0; ++px) { - const int64_t i3 = py * nep0 + px; - for (int64_t i2 = 0; i2 < ne2; ++i2) { - for (int64_t i1 = 0; i1 < ne1; ++i1) { - for (int64_t i0 = 0; i0 < ne0; ++i0) { - const int64_t i02 = py * w + i2; - const int64_t i01 = px * w + i1; - const int64_t i00 = i0; - - const int64_t i = i3 * ne2 * ne1 * ne0 + i2 * ne1 * ne0 + i1 * ne0 + i0; - const int64_t j = i02 * ne01 * ne00 + i01 * ne00 + i00; - - if (py * w + i2 >= ne02 || px * w + i1 >= ne01) { - ((float *) dst->data)[i] = 0.0f; - } else { - ((float *) dst->data)[i] = ((float *) src0->data)[j]; - } - } - } - } - } - } -} - -void ggml_compute_forward_win_part(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_win_part_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_win_unpart - -static void ggml_compute_forward_win_unpart_f32(const ggml_compute_params * params, ggml_tensor * dst) { - GGML_UNUSED(params); - - const ggml_tensor * src0 = dst->src[0]; - - GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne) - GGML_TENSOR_LOCALS(int64_t, ne, dst, ne) - - const int32_t w = ((const int32_t *) (dst->op_params))[0]; - - // padding - const int px = (w - ne1 % w) % w; - //const int py = (w - ne2%w)%w; - - const int npx = (px + ne1) / w; - //const int npy = (py + ne2)/w; - - 
assert(ne0 == ne00); - - // TODO: optimize / multi-thread - for (int64_t i2 = 0; i2 < ne2; ++i2) { - for (int64_t i1 = 0; i1 < ne1; ++i1) { - for (int64_t i0 = 0; i0 < ne0; ++i0) { - const int ip2 = i2 / w; - const int ip1 = i1 / w; - - const int64_t i02 = i2 % w; - const int64_t i01 = i1 % w; - const int64_t i00 = i0; - - const int64_t i = (ip2 * npx + ip1) * ne02 * ne01 * ne00 + i02 * ne01 * ne00 + i01 * ne00 + i00; - const int64_t j = i2 * ne1 * ne0 + i1 * ne0 + i0; - - ((float *) dst->data)[j] = ((float *) src0->data)[i]; - } - } - } -} - -void ggml_compute_forward_win_unpart(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_win_unpart_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -//gmml_compute_forward_unary - -void ggml_compute_forward_unary(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_unary_op op = ggml_get_unary_op(dst); - - switch (op) { - case GGML_UNARY_OP_ABS: - { - ggml_compute_forward_abs(params, dst); - } - break; - case GGML_UNARY_OP_SGN: - { - ggml_compute_forward_sgn(params, dst); - } - break; - case GGML_UNARY_OP_NEG: - { - ggml_compute_forward_neg(params, dst); - } - break; - case GGML_UNARY_OP_STEP: - { - ggml_compute_forward_step(params, dst); - } - break; - case GGML_UNARY_OP_TANH: - { - ggml_compute_forward_tanh(params, dst); - } - break; - case GGML_UNARY_OP_ELU: - { - ggml_compute_forward_elu(params, dst); - } - break; - case GGML_UNARY_OP_RELU: - { - ggml_compute_forward_relu(params, dst); - } - break; - case GGML_UNARY_OP_SIGMOID: - { - ggml_compute_forward_sigmoid(params, dst); - } - break; - case GGML_UNARY_OP_GELU: - { - ggml_compute_forward_gelu(params, dst); - } - break; - case GGML_UNARY_OP_GELU_ERF: - { - ggml_compute_forward_gelu_erf(params, dst); - } - break; - case GGML_UNARY_OP_GELU_QUICK: - { - ggml_compute_forward_gelu_quick(params, dst); - } - break; - case GGML_UNARY_OP_SILU: - { - ggml_compute_forward_silu(params, dst); - } - break; - case GGML_UNARY_OP_HARDSWISH: - { - ggml_compute_forward_hardswish(params, dst); - } - break; - case GGML_UNARY_OP_HARDSIGMOID: - { - ggml_compute_forward_hardsigmoid(params, dst); - } - break; - case GGML_UNARY_OP_EXP: - { - ggml_compute_forward_exp(params, dst); - } - break; - case GGML_UNARY_OP_FLOOR: - { - ggml_compute_forward_floor(params, dst); - } - break; - case GGML_UNARY_OP_CEIL: - { - ggml_compute_forward_ceil(params, dst); - } - break; - case GGML_UNARY_OP_ROUND: - { - ggml_compute_forward_round(params, dst); - } - break; - case GGML_UNARY_OP_TRUNC: - { - ggml_compute_forward_trunc(params, dst); - } - break; - case GGML_UNARY_OP_XIELU: - { - ggml_compute_forward_xielu(params, dst); - } - break; - case GGML_UNARY_OP_EXPM1: - { - ggml_compute_forward_expm1(params, dst); - } - break; - case GGML_UNARY_OP_SOFTPLUS: - { - ggml_compute_forward_softplus(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -//ggml_compute_forward_glu - -void ggml_compute_forward_glu(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_glu_op op = ggml_get_glu_op(dst); - - switch (op) { - case GGML_GLU_OP_REGLU: - { - ggml_compute_forward_reglu(params, dst); - } - break; - case GGML_GLU_OP_GEGLU: - { - ggml_compute_forward_geglu(params, dst); - } - break; - case GGML_GLU_OP_SWIGLU: - { - ggml_compute_forward_swiglu(params, dst); - } - break; - case GGML_GLU_OP_SWIGLU_OAI: - { - 
ggml_compute_forward_swiglu_oai(params, dst); - } - break; - case GGML_GLU_OP_GEGLU_ERF: - { - ggml_compute_forward_geglu_erf(params, dst); - } - break; - case GGML_GLU_OP_GEGLU_QUICK: - { - ggml_compute_forward_geglu_quick(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_get_rel_pos - -static void ggml_compute_forward_get_rel_pos_f16(const ggml_compute_params * params, ggml_tensor * dst) { - GGML_UNUSED(params); - - const ggml_tensor * src0 = dst->src[0]; - - // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/image_encoder.py#L292-L322 - - GGML_TENSOR_UNARY_OP_LOCALS - - const int64_t w = ne1; - - ggml_fp16_t * src0_data = (ggml_fp16_t *) src0->data; - ggml_fp16_t * dst_data = (ggml_fp16_t *) dst->data; - - for (int64_t i2 = 0; i2 < ne2; ++i2) { - for (int64_t i1 = 0; i1 < ne1; ++i1) { - const int64_t pos = (w - i1 - 1) + i2; - for (int64_t i0 = 0; i0 < ne0; ++i0) { - dst_data[i2 * ne1 * ne0 + i1 * ne0 + i0] = src0_data[pos * ne00 + i0]; - } - } - } -} - -void ggml_compute_forward_get_rel_pos(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F16: - case GGML_TYPE_BF16: - { - ggml_compute_forward_get_rel_pos_f16(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_add_rel_pos - -static void ggml_compute_forward_add_rel_pos_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - const ggml_tensor * src2 = dst->src[2]; - - const bool inplace = (bool) ((int32_t *) dst->op_params)[0]; - if (!inplace) { - if (params->ith == 0) { - memcpy((char *) dst->data, (char *) src0->data, ggml_nbytes(dst)); - } - ggml_barrier(params->threadpool); - } - // ref: https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/modeling/image_encoder.py#L357-L359 - - float * src1_data = (float *) src1->data; - float * src2_data = (float *) src2->data; - float * dst_data = (float *) dst->data; - - const int64_t ne10 = src1->ne[0]; - const int64_t ne11 = src1->ne[1]; - const int64_t ne12 = src1->ne[2]; - const int64_t ne13 = src1->ne[3]; - - const int ith = params->ith; - const int nth = params->nth; - - // total patches in dst - const int np = ne13; - - // patches per thread - const int dp = (np + nth - 1) / nth; - - // patch range for this thread - const int ip0 = dp * ith; - const int ip1 = MIN(ip0 + dp, np); - - for (int64_t i13 = ip0; i13 < ip1; ++i13) { - for (int64_t i12 = 0; i12 < ne12; ++i12) { - for (int64_t i11 = 0; i11 < ne11; ++i11) { - const int64_t jp1 = i13 * ne12 * ne11 * ne10 + i12 * ne11 * ne10 + i11 * ne10; - for (int64_t i10 = 0; i10 < ne10; ++i10) { - const int64_t jp0 = jp1 + i10; - const float src1_e = src1_data[jp0]; - const float src2_e = src2_data[jp0]; - - const int64_t jdh = jp0 * ne10; - const int64_t jdw = jdh - (ne10 - 1) * i10; - - for (int64_t j = 0; j < ne10; ++j) { - dst_data[jdh + j] += src2_e; - dst_data[jdw + j * ne10] += src1_e; - } - } - } - } - } -} - -void ggml_compute_forward_add_rel_pos(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_add_rel_pos_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_rwkv_wkv6 - -static void 
ggml_compute_forward_rwkv_wkv6_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const int64_t T = dst->src[1]->ne[2]; - const int64_t C = dst->ne[0]; - const int64_t HEADS = dst->src[1]->ne[1]; - const int64_t n_seqs = dst->src[5]->ne[1]; - const int64_t head_size = C / HEADS; - - float * dst_data = (float *) dst->data; - float * state = ((float *) dst->data) + C * T; - - const int ith = params->ith; - const int nth = params->nth; - - if (ith >= HEADS) { - return; - } - - const int h_start = (HEADS * ith) / nth; - const int h_end = ((HEADS * (ith + 1)) / nth < HEADS) ? (HEADS * (ith + 1)) / nth : HEADS; - - float * k = (float *) dst->src[0]->data; - float * v = (float *) dst->src[1]->data; - float * r = (float *) dst->src[2]->data; - float * time_faaaa = (float *) dst->src[3]->data; - float * time_decay = (float *) dst->src[4]->data; - - size_t t_stride = HEADS * head_size; // Same to C - - size_t h_stride = C / HEADS; - GGML_ASSERT(C % HEADS == 0); // C must be divisible by HEADS - size_t h_stride_2d = head_size * head_size; - - if (ith == 0) { - memset(dst_data, 0, T * C * sizeof(float)); - } - ggml_barrier(params->threadpool); - -#if defined(__AVX__) && !defined(__AVX512F__) -# define GGML_F32X GGML_F32x8 -# define GGML_F32X_SET1 GGML_F32x8_SET1 -# define GGML_F32X_LOAD GGML_F32x8_LOAD -# define GGML_F32X_STORE GGML_F32x8_STORE -# define GGML_F32X_MUL GGML_F32x8_MUL -# define GGML_F32X_FMA GGML_F32x8_FMA -# define WKV_VECTOR_SIZE 8 -#elif defined(__AVX512F__) -# define GGML_F32X GGML_F32x16 -# define GGML_F32X_SET1 GGML_F32x16_SET1 -# define GGML_F32X_LOAD GGML_F32x16_LOAD -# define GGML_F32X_STORE GGML_F32x16_STORE -# define GGML_F32X_MUL GGML_F32x16_MUL -# define GGML_F32X_FMA GGML_F32x16_FMA -# define WKV_VECTOR_SIZE 16 -#elif defined(__ARM_FEATURE_SVE) && defined(__aarch64__) -# define GGML_F32X GGML_F32xt -# define GGML_F32X_SET1 GGML_F32xt_SET1 -# define GGML_F32X_LOAD GGML_F32xt_LOAD -# define GGML_F32X_STORE GGML_F32xt_STORE -# define GGML_F32X_MUL GGML_F32xt_MUL -# define GGML_F32X_FMA GGML_F32xt_FMA -# define WKV_VECTOR_SIZE 8 -#elif defined(__ARM_NEON) && defined(__aarch64__) -# define GGML_F32X GGML_F32x4 -# define GGML_F32X_SET1 GGML_F32x4_SET1 -# define GGML_F32X_LOAD GGML_F32x4_LOAD -# define GGML_F32X_STORE GGML_F32x4_STORE -# define GGML_F32X_MUL GGML_F32x4_MUL -# define GGML_F32X_FMA GGML_F32x4_FMA -# define WKV_VECTOR_SIZE 4 -#endif - -#ifdef WKV_VECTOR_SIZE - int wkv_vector_size; -# if defined(__ARM_FEATURE_SVE) - wkv_vector_size = svcntw(); -# else - wkv_vector_size = WKV_VECTOR_SIZE; -# endif - const int64_t vec_count = head_size / wkv_vector_size; - - for (int64_t t = 0; t < T; t++) { - size_t t_offset = t * t_stride; - size_t state_offset = head_size * C * (t / (T / n_seqs)); - float * state_cur = state + state_offset; - float * state_prev = t % (T / n_seqs) ? 
state_cur : (float *) dst->src[5]->data + state_offset; - - for (int64_t h = h_start; h < h_end; h++) { - size_t h_offset = h * h_stride; - size_t t_h_offset = t_offset + h_offset; - size_t h_2d_offset = h * h_stride_2d; - - for (int64_t i = 0; i < head_size; i++) { - size_t t_h_i_offset = t_h_offset + i; - size_t h_i_offset = h_offset + i; - size_t h_2d_i_offset = h_2d_offset + i * h_stride; - - float k_val = k[t_h_i_offset]; - float r_val = r[t_h_i_offset]; - float time_faaaa_val = time_faaaa[h_i_offset]; - float time_decay_val = time_decay[t_h_i_offset]; - - // Broadcast scalar values to vectors - GGML_F32X k_vec = GGML_F32X_SET1(k_val); - GGML_F32X r_vec = GGML_F32X_SET1(r_val); - GGML_F32X time_faaaa_vec = GGML_F32X_SET1(time_faaaa_val); - GGML_F32X time_decay_vec = GGML_F32X_SET1(time_decay_val); - - for (int64_t j = 0; j < vec_count; j++) { - size_t base_j = j * wkv_vector_size; - size_t t_h_j_offset = t_h_offset + base_j; - size_t h_2d_i_j_offset = h_2d_i_offset + base_j; - - // Load x elements at once - GGML_F32X v_vec = GGML_F32X_LOAD(&v[t_h_j_offset]); - GGML_F32X prev_state_vec = GGML_F32X_LOAD(&state_prev[h_2d_i_j_offset]); - GGML_F32X dst_vec = GGML_F32X_LOAD(&dst_data[t_h_j_offset]); - - // Compute kv = v * k - GGML_F32X kv_vec = GGML_F32X_MUL(v_vec, k_vec); - - // Compute temp = kv * time_faaaa + prev_state - GGML_F32X temp_vec = GGML_F32X_FMA(prev_state_vec, kv_vec, time_faaaa_vec); - - // Update dst: dst += temp * r - dst_vec = GGML_F32X_FMA(dst_vec, temp_vec, r_vec); - GGML_F32X_STORE(&dst_data[t_h_j_offset], dst_vec); - - // Update state: state = prev_state * time_decay + kv - GGML_F32X new_state_vec = GGML_F32X_FMA(kv_vec, prev_state_vec, time_decay_vec); - GGML_F32X_STORE(&state_cur[h_2d_i_j_offset], new_state_vec); - } - - // Handle remaining elements, this will not be used. - for (int64_t j = vec_count * wkv_vector_size; j < head_size; j++) { - size_t t_h_j_offset = t_h_offset + j; - size_t h_2d_i_j_offset = h_2d_i_offset + j; - float v_val = v[t_h_j_offset]; - float kv_val = v_val * k_val; - float prev_state_val = state_prev[h_2d_i_j_offset]; - float temp_val = kv_val * time_faaaa_val + prev_state_val; - dst_data[t_h_j_offset] += temp_val * r_val; - state_cur[h_2d_i_j_offset] = prev_state_val * time_decay_val + kv_val; - } - } - } - } - -#else - // basically fused operations: - // dst = r @ (time_faaaa * (k @ v) + state), - // state = time_decay * state + (k @ v), - // recursive through each token - for (int64_t t = 0; t < T; t++) { - size_t t_offset = t * t_stride; - size_t state_offset = head_size * C * (t / (T / n_seqs)); - float * state_cur = state + state_offset; - float * state_prev = t % (T / n_seqs) ? state_cur : (float *) dst->src[5]->data + state_offset; - - for (int64_t h = h_start; h < h_end; h++) { - size_t h_offset = h * h_stride; - size_t t_h_offset = t_offset + h_offset; - size_t h_2d_offset = h * h_stride_2d; - - for (int64_t i = 0; i < head_size; i++) { - size_t t_h_i_offset = t_h_offset + i; - size_t h_i_offset = h_offset + i; - size_t h_2d_i_offset = h_2d_offset + i * h_stride; - - float k_val = k[t_h_i_offset]; - float r_val = r[t_h_i_offset]; - float time_faaaa_val = time_faaaa[h_i_offset]; - // RWKV v6: different time_decay for each token. 
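                // Per element, with i fixed by the outer loop and j running over head_size,
                // the loop below computes
                //   dst[j]       += r[i] * (time_faaaa[i] * k[i] * v[j] + state_prev[i][j])
                //   state[i][j]   = state_prev[i][j] * time_decay[i] + k[i] * v[j]
                // i.e. the fused "dst = r @ (time_faaaa * (k @ v) + state)" and
                // "state = time_decay * state + (k @ v)" described above: the output term
                // uses the previous state, and the per-token decay only enters the state update.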
- float time_decay_val = time_decay[t_h_i_offset]; - - for (int64_t j = 0; j < head_size; j++) { - size_t t_h_j_offset = t_h_offset + j; - size_t h_2d_i_j_offset = h_2d_i_offset + j; - - float v_val = v[t_h_j_offset]; - float kv_val = v_val * k_val; - float prev_state_val = state_prev[h_2d_i_j_offset]; - float temp_val = kv_val * time_faaaa_val + prev_state_val; - dst_data[t_h_j_offset] += temp_val * r_val; - state_cur[h_2d_i_j_offset] = prev_state_val * time_decay_val + kv_val; - } - } - } - } -#endif -} - -void ggml_compute_forward_rwkv_wkv6(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_rwkv_wkv6_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_gla - -static void ggml_compute_forward_gla_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const int64_t T = dst->src[1]->ne[2]; - const int64_t C = dst->ne[0]; - const int64_t HEADS = dst->src[1]->ne[1]; - const int64_t n_seqs = dst->src[4]->ne[1]; - const int64_t head_size = C / HEADS; - const float scale = ggml_get_op_params_f32(dst, 0); - - float * dst_data = (float *) dst->data; - float * state = ((float *) dst->data) + C * T; - - const int ith = params->ith; - const int nth = params->nth; - - if (ith >= HEADS) { - return; - } - - const int h_start = (HEADS * ith) / nth; - const int h_end = ((HEADS * (ith + 1)) / nth < HEADS) ? (HEADS * (ith + 1)) / nth : HEADS; - - float * k = (float *) dst->src[0]->data; - float * v = (float *) dst->src[1]->data; - float * q = (float *) dst->src[2]->data; - float * g = (float *) dst->src[3]->data; - - size_t t_stride = HEADS * head_size; // Same to C - - size_t h_stride = C / HEADS; - GGML_ASSERT(C % HEADS == 0); // C must be divisible by HEADS - size_t h_stride_2d = head_size * head_size; - - if (ith == 0) { - memset(dst_data, 0, T * C * sizeof(float)); - } - ggml_barrier(params->threadpool); - -#if defined(__AVX__) && !defined(__AVX512F__) -# define GGML_F32X GGML_F32x8 -# define GGML_F32X_SET1 GGML_F32x8_SET1 -# define GGML_F32X_LOAD GGML_F32x8_LOAD -# define GGML_F32X_STORE GGML_F32x8_STORE -# define GGML_F32X_MUL GGML_F32x8_MUL -# define GGML_F32X_FMA GGML_F32x8_FMA -# define GLA_VECTOR_SIZE 8 -#elif defined(__AVX512F__) -# define GGML_F32X GGML_F32x16 -# define GGML_F32X_SET1 GGML_F32x16_SET1 -# define GGML_F32X_LOAD GGML_F32x16_LOAD -# define GGML_F32X_STORE GGML_F32x16_STORE -# define GGML_F32X_MUL GGML_F32x16_MUL -# define GGML_F32X_FMA GGML_F32x16_FMA -# define GLA_VECTOR_SIZE 16 -#elif defined(__ARM_FEATURE_SVE) && defined(__aarch64__) -# define GGML_F32X GGML_F32xt -# define GGML_F32X_SET1 GGML_F32xt_SET1 -# define GGML_F32X_LOAD GGML_F32xt_LOAD -# define GGML_F32X_STORE GGML_F32xt_STORE -# define GGML_F32X_MUL GGML_F32xt_MUL -# define GGML_F32X_FMA GGML_F32xt_FMA -# define GLA_VECTOR_SIZE 8 -#elif defined(__ARM_NEON) && defined(__aarch64__) -# define GGML_F32X GGML_F32x4 -# define GGML_F32X_SET1 GGML_F32x4_SET1 -# define GGML_F32X_LOAD GGML_F32x4_LOAD -# define GGML_F32X_STORE GGML_F32x4_STORE -# define GGML_F32X_MUL GGML_F32x4_MUL -# define GGML_F32X_FMA GGML_F32x4_FMA -# define GLA_VECTOR_SIZE 4 -#endif - -#ifdef GLA_VECTOR_SIZE - int gla_vector_size; -# if defined(__ARM_FEATURE_SVE) - gla_vector_size = svcntw(); -# else - gla_vector_size = GLA_VECTOR_SIZE; -# endif - const int64_t vec_count = head_size / gla_vector_size; - - for (int64_t t = 0; t < T; t++) { - size_t t_offset = t * 
t_stride; - size_t state_offset = head_size * C * (t / (T / n_seqs)); - float * state_cur = state + state_offset; - float * state_prev = t % (T / n_seqs) ? state_cur : (float *) dst->src[4]->data + state_offset; - - for (int64_t h = h_start; h < h_end; h++) { - size_t h_offset = h * h_stride; - size_t t_h_offset = t_offset + h_offset; - size_t h_2d_offset = h * h_stride_2d; - - for (int64_t i = 0; i < head_size; i++) { - size_t t_h_i_offset = t_h_offset + i; - size_t h_2d_i_offset = h_2d_offset + i * h_stride; - - float k_val = k[t_h_i_offset]; - float q_val = q[t_h_i_offset] * scale; - float g_val = g[t_h_i_offset]; - - // Broadcast scalar values to vectors - GGML_F32X k_vec = GGML_F32X_SET1(k_val); - GGML_F32X q_vec = GGML_F32X_SET1(q_val); - GGML_F32X g_vec = GGML_F32X_SET1(g_val); - - for (int64_t j = 0; j < vec_count; j++) { - size_t base_j = j * gla_vector_size; - size_t t_h_j_offset = t_h_offset + base_j; - size_t h_2d_i_j_offset = h_2d_i_offset + base_j; - - // Load x elements at once - GGML_F32X v_vec = GGML_F32X_LOAD(&v[t_h_j_offset]); - GGML_F32X prev_state_vec = GGML_F32X_LOAD(&state_prev[h_2d_i_j_offset]); - GGML_F32X dst_vec = GGML_F32X_LOAD(&dst_data[t_h_j_offset]); - - // Compute kv = v * k - GGML_F32X kv_vec = GGML_F32X_MUL(v_vec, k_vec); - - // Compute temp = prev_state * g + kv - GGML_F32X temp_vec = GGML_F32X_FMA(kv_vec, prev_state_vec, g_vec); - - // Update dst: dst += temp * q - dst_vec = GGML_F32X_FMA(dst_vec, temp_vec, q_vec); - GGML_F32X_STORE(&dst_data[t_h_j_offset], dst_vec); - - // Update state - GGML_F32X_STORE(&state_cur[h_2d_i_j_offset], temp_vec); - } - - // Handle remaining elements, this will not be used. - for (int64_t j = vec_count * gla_vector_size; j < head_size; j++) { - size_t t_h_j_offset = t_h_offset + j; - size_t h_2d_i_j_offset = h_2d_i_offset + j; - float v_val = v[t_h_j_offset]; - float kv_val = v_val * k_val; - float prev_state_val = state_prev[h_2d_i_j_offset]; - float temp_val = kv_val + prev_state_val * g_val; - dst_data[t_h_j_offset] += temp_val * q_val; - state_cur[h_2d_i_j_offset] = temp_val; - } - } - } - } - -#else - for (int64_t t = 0; t < T; t++) { - size_t t_offset = t * t_stride; - size_t state_offset = head_size * C * (t / (T / n_seqs)); - float * state_cur = state + state_offset; - float * state_prev = t % (T / n_seqs) ? 
state_cur : (float *) dst->src[4]->data + state_offset; - - for (int64_t h = h_start; h < h_end; h++) { - size_t h_offset = h * h_stride; - size_t t_h_offset = t_offset + h_offset; - size_t h_2d_offset = h * h_stride_2d; - - for (int64_t i = 0; i < head_size; i++) { - size_t t_h_i_offset = t_h_offset + i; - size_t h_2d_i_offset = h_2d_offset + i * h_stride; - - float k_val = k[t_h_i_offset]; - float q_val = q[t_h_i_offset] * scale; - float g_val = g[t_h_i_offset]; - - for (int64_t j = 0; j < head_size; j++) { - size_t t_h_j_offset = t_h_offset + j; - size_t h_2d_i_j_offset = h_2d_i_offset + j; - - float v_val = v[t_h_j_offset]; - float kv_val = v_val * k_val; - float prev_state_val = state_prev[h_2d_i_j_offset]; - float temp_val = prev_state_val * g_val + kv_val; - dst_data[t_h_j_offset] += temp_val * q_val; - state_cur[h_2d_i_j_offset] = temp_val; - } - } - } - } -#endif -} - -void ggml_compute_forward_gla(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_gla_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -static void ggml_compute_forward_solve_tri_f32(const struct ggml_compute_params * params, struct ggml_tensor * dst) { - const struct ggml_tensor * src0 = dst->src[0]; // A (lower triangular) - const struct ggml_tensor * src1 = dst->src[1]; // B (RHS) - - GGML_TENSOR_BINARY_OP_LOCALS; - - GGML_ASSERT(src0->type == GGML_TYPE_F32); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); - - GGML_ASSERT(ne00 == ne01); // A must be square - GGML_ASSERT(ne0 == ne10); // solution cols == B cols - GGML_ASSERT(ne1 == ne11); // solution rows == B rows - - GGML_ASSERT(ne02 == ne12 && ne12 == ne2); - GGML_ASSERT(ne03 == ne13 && ne13 == ne3); - - const int ith = params->ith; - const int nth = params->nth; - - const int64_t k = ne10; // number of RHS columns - const int64_t n = ne11; // A is n×n - const int64_t nr = - ne02 * ne03 * k; // we're parallelizing on columns here, so seq x token x column will be the unit - - // chunks per thread - const int64_t dr = (nr + nth - 1) / nth; - - // chunk range for this thread - const int64_t ir0 = dr * ith; - const int64_t ir1 = MIN(ir0 + dr, nr); - - const float * A = (const float *) src0->data; // [n, n, B1, B2] - const float * B = (const float *) src1->data; // [n, k, B1, B2] - float * X = (float *) dst->data; // [n, k, B1, B2] - - for (int64_t ir = ir0; ir < ir1; ++ir) { - const int64_t i03 = ir / (ne02 * k); - const int64_t i02 = (ir - i03 * ne02 * k) / k; - const int64_t i01 = (ir - i03 * ne02 * k - i02 * k); - - const float * A_batch = A + i02 * nb02 / sizeof(float) + i03 * nb03 / sizeof(float); - const float * B_batch = B + i02 * nb12 / sizeof(float) + i03 * nb13 / sizeof(float); - - float * X_batch = X + i02 * nb2 / sizeof(float) + i03 * nb3 / sizeof(float); - - for (int64_t i00 = 0; i00 < n; ++i00) { - float sum = 0.0f; - for (int64_t t = 0; t < i00; ++t) { - sum += A_batch[i00 * n + t] * X_batch[t * k + i01]; - } - - const float diag = A_batch[i00 * n + i00]; - GGML_ASSERT(diag != 0.0f && "Zero diagonal in triangular matrix"); - X_batch[i00 * k + i01] = (B_batch[i00 * k + i01] - sum) / diag; - } - } -} - -void ggml_compute_forward_solve_tri(const struct ggml_compute_params * params, struct ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - if (src0->type == GGML_TYPE_F32 && src1->type == 
GGML_TYPE_F32) { - ggml_compute_forward_solve_tri_f32(params, dst); - } else { - GGML_ABORT("fatal error"); - } -} - -// ggml_compute_forward_rwkv_wkv7 - -static void ggml_compute_forward_rwkv_wkv7_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const int64_t T = dst->src[1]->ne[2]; - const int64_t C = dst->ne[0]; - const int64_t HEADS = dst->src[1]->ne[1]; - const int64_t n_seqs = dst->src[6]->ne[1]; - const int64_t head_size = C / HEADS; - - float * dst_data = (float *) dst->data; - float * state = ((float *) dst->data) + C * T; - - const int ith = params->ith; - const int nth = params->nth; - - if (ith >= HEADS) { - return; - } - - const int h_start = (HEADS * ith) / nth; - const int h_end = ((HEADS * (ith + 1)) / nth < HEADS) ? (HEADS * (ith + 1)) / nth : HEADS; - - float * r = (float *) dst->src[0]->data; - float * w = (float *) dst->src[1]->data; - float * k = (float *) dst->src[2]->data; - float * v = (float *) dst->src[3]->data; - float * a = (float *) dst->src[4]->data; - float * b = (float *) dst->src[5]->data; - - int64_t t_stride = HEADS * head_size; // Same to C - - int64_t h_stride = C / HEADS; - GGML_ASSERT(C % HEADS == 0); // C must be divisible by HEADS - int64_t h_stride_2d = head_size * head_size; - -#if defined(GGML_SIMD) -# if defined(__ARM_FEATURE_SVE) || defined(__riscv_v_intrinsic) - // scalar Route to scalar implementation //TODO: Write SVE code and RVV code - for (int64_t t = 0; t < T; t++) { - int64_t t_offset = t * t_stride; - int64_t state_offset = head_size * C * (t / (T / n_seqs)); - float * state_cur = state + state_offset; - float * state_prev = t % (T / n_seqs) ? state_cur : (float *) dst->src[6]->data + state_offset; - - for (int64_t h = h_start; h < h_end; h++) { - int64_t h_offset = h * h_stride; - int64_t t_h_offset = t_offset + h_offset; - int64_t h_2d_offset = h * h_stride_2d; - - for (int64_t i = 0; i < head_size; i++) { - int64_t t_h_i_offset = t_h_offset + i; - int64_t h_2d_i_offset = h_2d_offset + i * h_stride; - - float v_val = v[t_h_i_offset]; - - float sa = 0, result = 0; - for (int64_t j = 0; j < head_size; j++) { - sa += a[t_h_offset + j] * state_prev[h_2d_i_offset + j]; - } - - for (int64_t j = 0; j < head_size; j++) { - int64_t t_h_j_offset = t_h_offset + j; - int64_t h_2d_i_j_offset = h_2d_i_offset + j; - - float r_val = r[t_h_j_offset]; - float w_val = w[t_h_j_offset]; - float k_val = k[t_h_j_offset]; - float b_val = b[t_h_j_offset]; - float kv_val = v_val * k_val; - float prev_state_val = state_prev[h_2d_i_j_offset]; - state_cur[h_2d_i_j_offset] = prev_state_val * w_val + kv_val + sa * b_val; - result += state_cur[h_2d_i_j_offset] * r_val; - } - dst_data[t_h_i_offset] = result; - } - } - } -# else - for (int64_t t = 0; t < T; t++) { - int64_t t_offset = t * t_stride; - int64_t state_offset = head_size * C * (t / (T / n_seqs)); - float * state_cur = state + state_offset; - float * state_prev = t % (T / n_seqs) ? 
state_cur : (float *) dst->src[6]->data + state_offset; - - for (int64_t h = h_start; h < h_end; h++) { - int64_t h_offset = h * h_stride; - int64_t t_h_offset = t_offset + h_offset; - int64_t h_2d_offset = h * h_stride_2d; - - for (int64_t ii = 0; ii < head_size; ii++) { - int64_t t_h_i_offset = t_h_offset + ii; - int64_t h_2d_i_offset = h_2d_offset + ii * h_stride; - - GGML_F32_VEC v_vec = GGML_F32_VEC_SET1(v[t_h_i_offset]); - - float sa = 0; - { - GGML_F32_VEC sum[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; - GGML_F32_VEC ax[GGML_F32_ARR]; - GGML_F32_VEC ay[GGML_F32_ARR]; - for (int64_t j = 0; j < head_size; j += GGML_F32_STEP) { - for (int64_t kk = 0; kk < GGML_F32_ARR; kk++) { - ax[kk] = GGML_F32_VEC_LOAD(&a[t_h_offset + j + kk * GGML_F32_EPR]); - ay[kk] = GGML_F32_VEC_LOAD(&state_prev[h_2d_i_offset + j + kk * GGML_F32_EPR]); - sum[kk] = GGML_F32_VEC_FMA(sum[kk], ax[kk], ay[kk]); - } - } - GGML_F32_VEC_REDUCE(sa, sum); - } - - GGML_F32_VEC sa_vec = GGML_F32_VEC_SET1(sa); - - int64_t j = 0; - GGML_F32_VEC result_vec[GGML_F32_ARR] = { GGML_F32_VEC_ZERO }; - for (; j < head_size; j += GGML_F32_STEP) { - for (int64_t kk = 0; kk < GGML_F32_ARR; kk++) { - int64_t t_h_j_offset = t_h_offset + j + kk * GGML_F32_EPR; - int64_t h_2d_i_j_offset = h_2d_i_offset + j + kk * GGML_F32_EPR; - - GGML_F32_VEC r_vec = GGML_F32_VEC_LOAD(&r[t_h_j_offset]); - GGML_F32_VEC w_vec = GGML_F32_VEC_LOAD(&w[t_h_j_offset]); - GGML_F32_VEC k_vec = GGML_F32_VEC_LOAD(&k[t_h_j_offset]); - GGML_F32_VEC b_vec = GGML_F32_VEC_LOAD(&b[t_h_j_offset]); - - k_vec = GGML_F32_VEC_MUL(v_vec, k_vec); - - GGML_F32_VEC state_vec = GGML_F32_VEC_LOAD(&state_prev[h_2d_i_j_offset]); - // kv + s * decay + sa * b - state_vec = GGML_F32_VEC_FMA(k_vec, state_vec, w_vec); - state_vec = GGML_F32_VEC_FMA(state_vec, sa_vec, b_vec); - GGML_F32_VEC_STORE(&state_cur[h_2d_i_j_offset], state_vec); - - result_vec[kk] = GGML_F32_VEC_FMA(result_vec[kk], state_vec, r_vec); - } - } - GGML_F32_VEC_REDUCE(dst_data[t_h_i_offset], result_vec); - - // There shouldn't be left-overs though. - for (; j < head_size; j++) { - int64_t t_h_j_offset = t_h_offset + j; - int64_t h_2d_i_j_offset = h_2d_i_offset + j; - - float r_val = r[t_h_j_offset]; - float w_val = w[t_h_j_offset]; - float k_val = k[t_h_j_offset]; - float b_val = b[t_h_j_offset]; - float kv_val = v[t_h_i_offset] * k_val; - - float prev_state_val = state_prev[h_2d_i_j_offset]; - state_cur[h_2d_i_j_offset] = prev_state_val * w_val + kv_val + sa * b_val; - dst_data[t_h_i_offset] += state_cur[h_2d_i_j_offset] * r_val; - } - } - } - } -# endif -#else - for (int64_t t = 0; t < T; t++) { - int64_t t_offset = t * t_stride; - int64_t state_offset = head_size * C * (t / (T / n_seqs)); - float * state_cur = state + state_offset; - float * state_prev = t % (T / n_seqs) ? 
state_cur : (float *) dst->src[6]->data + state_offset; - - for (int64_t h = h_start; h < h_end; h++) { - int64_t h_offset = h * h_stride; - int64_t t_h_offset = t_offset + h_offset; - int64_t h_2d_offset = h * h_stride_2d; - - for (int64_t i = 0; i < head_size; i++) { - int64_t t_h_i_offset = t_h_offset + i; - int64_t h_2d_i_offset = h_2d_offset + i * h_stride; - - float v_val = v[t_h_i_offset]; - - float sa = 0, result = 0; - for (int64_t j = 0; j < head_size; j++) { - sa += a[t_h_offset + j] * state_prev[h_2d_i_offset + j]; - } - - for (int64_t j = 0; j < head_size; j++) { - int64_t t_h_j_offset = t_h_offset + j; - int64_t h_2d_i_j_offset = h_2d_i_offset + j; - - float r_val = r[t_h_j_offset]; - float w_val = w[t_h_j_offset]; - float k_val = k[t_h_j_offset]; - float b_val = b[t_h_j_offset]; - float kv_val = v_val * k_val; - float prev_state_val = state_prev[h_2d_i_j_offset]; - state_cur[h_2d_i_j_offset] = prev_state_val * w_val + kv_val + sa * b_val; - result += state_cur[h_2d_i_j_offset] * r_val; - } - dst_data[t_h_i_offset] = result; - } - } - } -#endif -} - -void ggml_compute_forward_rwkv_wkv7(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_rwkv_wkv7_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_map_custom1 - -void ggml_compute_forward_map_custom1(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * a = dst->src[0]; - - struct ggml_map_custom1_op_params p; - memcpy(&p, dst->op_params, sizeof(p)); - - p.fun(dst, a, params->ith, params->nth, p.userdata); -} - -// ggml_compute_forward_map_custom2 - -void ggml_compute_forward_map_custom2(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * a = dst->src[0]; - const ggml_tensor * b = dst->src[1]; - - struct ggml_map_custom2_op_params p; - memcpy(&p, dst->op_params, sizeof(p)); - - p.fun(dst, a, b, params->ith, params->nth, p.userdata); -} - -// ggml_compute_forward_map_custom3 - -void ggml_compute_forward_map_custom3(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * a = dst->src[0]; - const ggml_tensor * b = dst->src[1]; - const ggml_tensor * c = dst->src[2]; - - struct ggml_map_custom3_op_params p; - memcpy(&p, dst->op_params, sizeof(p)); - - p.fun(dst, a, b, c, params->ith, params->nth, p.userdata); -} - -// ggml_compute_forward_custom - -void ggml_compute_forward_custom(const struct ggml_compute_params * params, struct ggml_tensor * dst) { - struct ggml_custom_op_params p; - memcpy(&p, dst->op_params, sizeof(p)); - - p.fun(dst, params->ith, params->nth, p.userdata); -} - -// ggml_compute_forward_cross_entropy_loss - -static void ggml_compute_forward_cross_entropy_loss_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src1 = dst->src[1]; - - GGML_ASSERT(src0->type == GGML_TYPE_F32); - GGML_ASSERT(src1->type == GGML_TYPE_F32); - GGML_ASSERT(src0->nb[0] == ggml_type_size(src0->type)); - GGML_ASSERT(src1->nb[0] == ggml_type_size(src1->type)); - GGML_ASSERT(ggml_are_same_shape(src0, src1)); - GGML_ASSERT(ggml_is_scalar(dst)); - GGML_ASSERT(dst->type == GGML_TYPE_F32); - - // TODO: handle transposed/permuted matrices - const int64_t nc = src0->ne[0]; - const int64_t nr = ggml_nrows(src0); - - const int ith = params->ith; - const int nth = params->nth; - - float * sums = (float *) params->wdata; - 
float * st = ((float *) params->wdata) + nth + ith * nc; - float sum_thread = 0.0f; - - GGML_ASSERT(params->wsize >= sizeof(float) * (nth + nth * nc)); - - // rows per thread - const int64_t dr = (nr + nth - 1) / nth; - - // row range for this thread - const int64_t ir0 = dr * ith; - const int64_t ir1 = MIN(ir0 + dr, nr); - - for (int64_t i1 = ir0; i1 < ir1; ++i1) { - const float * s0 = (const float *) ((const char *) src0->data + i1 * src0->nb[1]); - const float * s1 = (const float *) ((const char *) src1->data + i1 * src1->nb[1]); - -#ifndef NDEBUG - for (int64_t i = 0; i < nc; ++i) { - //printf("p[%d] = %f\n", i, p[i]); - assert(!isnan(s0[i])); - assert(!isnan(s1[i])); - } -#endif - - float max = -INFINITY; - ggml_vec_max_f32(nc, &max, s0); - const ggml_float sum_softmax = ggml_vec_log_soft_max_f32(nc, st, s0, max); - assert(sum_softmax >= 0.0); - - ggml_vec_add1_f32(nc, st, st, -sum_softmax); - ggml_vec_mul_f32(nc, st, st, s1); - - float sum_st = 0.0f; - ggml_vec_sum_f32(nc, &sum_st, st); - sum_thread += sum_st; - -#ifndef NDEBUG - for (int64_t i = 0; i < nc; ++i) { - assert(!isnan(st[i])); - assert(!isinf(st[i])); - } -#endif - } - sums[ith] = sum_thread; - ggml_barrier(params->threadpool); - - if (ith == 0) { - float * dp = (float *) dst->data; - ggml_vec_sum_f32(nth, dp, sums); - dp[0] *= -1.0f / (float) nr; - } -} - -void ggml_compute_forward_cross_entropy_loss(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_cross_entropy_loss_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -// ggml_compute_forward_cross_entropy_loss_back - -static void ggml_compute_forward_cross_entropy_loss_back_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * grad = dst->src[0]; // gradient of forward pass output - const ggml_tensor * src0f = dst->src[1]; // src0 of forward pass - const ggml_tensor * src1f = dst->src[2]; // src1 of forward pass - - GGML_ASSERT(ggml_is_contiguous(dst)); - GGML_ASSERT(ggml_is_contiguous(src0f)); - GGML_ASSERT(ggml_is_contiguous(src1f)); - GGML_ASSERT(ggml_is_contiguous(grad)); - GGML_ASSERT(ggml_are_same_shape(src0f, src1f) && ggml_are_same_shape(src0f, dst)); - - const int64_t ith = params->ith; - const int64_t nth = params->nth; - - // TODO: handle transposed/permuted matrices - const int64_t nc = src0f->ne[0]; - const int64_t nr = ggml_nrows(src0f); - - // rows per thread - const int64_t dr = (nr + nth - 1) / nth; - - // row range for this thread - const int64_t ir0 = dr * ith; - const int64_t ir1 = MIN(ir0 + dr, nr); - - const float d_by_nr = ((const float *) grad->data)[0] / (float) nr; - - for (int64_t i1 = ir0; i1 < ir1; i1++) { - float * ds0 = (float *) ((char *) dst->data + i1 * dst->nb[1]); - const float * s0 = (const float *) ((const char *) src0f->data + i1 * src0f->nb[1]); - const float * s1 = (const float *) ((const char *) src1f->data + i1 * src1f->nb[1]); - -#ifndef NDEBUG - for (int64_t i = 0; i < nc; ++i) { - //printf("p[%d] = %f\n", i, p[i]); - assert(!isnan(s0[i])); - assert(!isnan(s1[i])); - } -#endif - - // soft_max - float max = -INFINITY; - ggml_vec_max_f32(nc, &max, s0); - const ggml_float sum = ggml_vec_soft_max_f32(nc, ds0, s0, max); - assert(sum > 0.0); - ggml_vec_scale_f32(nc, ds0, 1.0 / sum); - - // grad(src0f) = (softmax(src0f) - src1f) * grad(cross_entropy_loss(src0f, src1f)) / nr - ggml_vec_sub_f32(nc, ds0, ds0, s1); - ggml_vec_scale_f32(nc, ds0, 
d_by_nr); - -#ifndef NDEBUG - for (int64_t i = 0; i < nc; ++i) { - assert(!isnan(ds0[i])); - assert(!isinf(ds0[i])); - } -#endif - } -} - -void ggml_compute_forward_cross_entropy_loss_back(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_cross_entropy_loss_back_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -static void ggml_compute_forward_opt_step_adamw_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src0_grad = dst->src[1]; - const ggml_tensor * src0_grad_m = dst->src[2]; - const ggml_tensor * src0_grad_v = dst->src[3]; - const ggml_tensor * adamw_params = dst->src[4]; - - GGML_ASSERT(ggml_are_same_shape(src0, src0_grad)); - GGML_ASSERT(ggml_are_same_shape(src0, src0_grad_m)); - GGML_ASSERT(ggml_are_same_shape(src0, src0_grad_v)); - GGML_ASSERT(ggml_nelements(adamw_params) == 7); - - const int ith = params->ith; - const int nth = params->nth; - - const int nr = ggml_nrows(src0); - - GGML_TENSOR_UNARY_OP_LOCALS - GGML_ASSERT(nb00 == sizeof(float)); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - const float * adamw_params_ptr = ggml_get_data_f32(adamw_params); - - const float alpha = adamw_params_ptr[0]; - const float beta1 = adamw_params_ptr[1]; - const float beta2 = adamw_params_ptr[2]; - const float eps = adamw_params_ptr[3]; - const float wd = adamw_params_ptr[4]; - const float beta1h = adamw_params_ptr[5]; - const float beta2h = adamw_params_ptr[6]; - const float keep = 1.f - alpha * wd; - for (int ir = ir0; ir < ir1; ++ir) { - const int64_t i03 = ir / (ne02 * ne01); - const int64_t i02 = (ir - i03 * ne02 * ne01) / ne01; - const int64_t i01 = (ir - i03 * ne02 * ne01 - i02 * ne01); - - const size_t offset = i03 * nb03 + i02 * nb02 + i01 * nb01; - - float * w = (float *) ((char *) src0->data + offset); // weight - const float * g = (const float *) ((const char *) src0_grad->data + offset); // grad - float * m = (float *) ((char *) src0_grad_m->data + offset); - float * v = (float *) ((char *) src0_grad_v->data + offset); - - for (int i00 = 0; i00 < ne00; ++i00) { - m[i00] = m[i00] * beta1 + g[i00] * (1.0f - beta1); - v[i00] = v[i00] * beta2 + g[i00] * g[i00] * (1.0f - beta2); - - const float mh = m[i00] * beta1h; - const float vh = sqrtf(v[i00] * beta2h) + eps; - - // The weight decay is applied independently of the Adam momenta m and v. - // This is NOT equivalent to l2 regularization that adds w[i00]*w[i00] to the loss. 
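            // Decoupled form (keep = 1 - alpha*wd):  w <- w*(1 - alpha*wd) - alpha*mh/vh,
            // i.e. the decay shrinks w directly. Plain l2 regularization would instead add
            // wd*w to the gradient, and that term would then pass through m, v and be
            // rescaled by the adaptive 1/vh denominator.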
- // See: https://arxiv.org/pdf/1711.05101v3.pdf - w[i00] = w[i00] * keep - alpha * mh / vh; - } - } -} - -void ggml_compute_forward_opt_step_adamw(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_opt_step_adamw_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error"); - } - } -} - -static void ggml_compute_forward_opt_step_sgd_f32(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const ggml_tensor * src0_grad = dst->src[1]; - const ggml_tensor * sgd_params = dst->src[2]; - - GGML_ASSERT(ggml_are_same_shape(src0, src0_grad)); - GGML_ASSERT(ggml_nelements(sgd_params) == 2); - - const int ith = params->ith; - const int nth = params->nth; - - const int nr = ggml_nrows(src0); - - GGML_TENSOR_UNARY_OP_LOCALS - GGML_ASSERT(nb00 == sizeof(float)); - - // rows per thread - const int dr = (nr + nth - 1) / nth; - - // row range for this thread - const int ir0 = dr * ith; - const int ir1 = MIN(ir0 + dr, nr); - - // using adamw param subset we care about - alpha, wd - could have a separate struct - const float * sgd_params_ptr = ggml_get_data_f32(sgd_params); - const float alpha = sgd_params_ptr[0]; - const float keep = 1.f - alpha * sgd_params_ptr[1]; - - for (int ir = ir0; ir < ir1; ++ir) { - const int64_t i03 = ir / (ne02 * ne01); - const int64_t i02 = (ir - i03 * ne02 * ne01) / ne01; - const int64_t i01 = (ir - i03 * ne02 * ne01 - i02 * ne01); - - const size_t offset = i03 * nb03 + i02 * nb02 + i01 * nb01; - - float * w = (float *) ((char *) src0->data + offset); // weight - const float * g = (const float *) ((const char *) src0_grad->data + offset); // grad - - for (int i00 = 0; i00 < ne00; ++i00) { - w[i00] = w[i00] * keep - alpha * g[i00]; - } - } -} - -void ggml_compute_forward_opt_step_sgd(const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - - switch (src0->type) { - case GGML_TYPE_F32: - { - ggml_compute_forward_opt_step_sgd_f32(params, dst); - } - break; - default: - { - GGML_ABORT("fatal error - sgd is F32 only"); - } - } -} From b794da86bb197ea58effb1e484b84bb7f2ac4f6e Mon Sep 17 00:00:00 2001 From: Phylliida Date: Sat, 29 Nov 2025 19:00:45 -0800 Subject: [PATCH 23/39] clang format and update to bool --- ggml/src/ggml-cuda/pad.cu | 247 +++++++++++++++++++++++++++++++++++++- 1 file changed, 245 insertions(+), 2 deletions(-) diff --git a/ggml/src/ggml-cuda/pad.cu b/ggml/src/ggml-cuda/pad.cu index 4361c0596d9..13049db114c 100644 --- a/ggml/src/ggml-cuda/pad.cu +++ b/ggml/src/ggml-cuda/pad.cu @@ -71,7 +71,7 @@ static void pad_f32_cuda(const float * src, float * dst, const int lp0, const int rp0, const int lp1, const int rp1, const int lp2, const int rp2, const int lp3, const int rp3, const int ne0, const int ne1, const int ne2, const int ne3, - const int circular, cudaStream_t stream) { + const bool circular, cudaStream_t stream) { int num_blocks = (ne0 + CUDA_PAD_BLOCK_SIZE - 1) / CUDA_PAD_BLOCK_SIZE; dim3 gridDim(num_blocks, ne1, ne2*ne3); pad_f32<<>>(src, dst, lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, ne0, ne1, ne2, ne3, circular); @@ -191,7 +191,250 @@ static void pad_f32_cuda(const float * src, const int ne1, const int ne2, const int ne3, - const int circular, + const bool circular, + cudaStream_t stream) { + int num_blocks = (ne0 + CUDA_PAD_BLOCK_SIZE - 1) / CUDA_PAD_BLOCK_SIZE; + dim3 gridDim(num_blocks, ne1, ne2 * ne3); + pad_f32<<>>(src, dst, lp0, 
rp0, lp1, rp1, lp2, rp2, lp3, rp3, ne0, ne1, + ne2, ne3, circular); +} + +void ggml_cuda_op_pad(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const float * src0_d = (const float *) src0->data; + float * dst_d = (float *) dst->data; + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_F32); + GGML_ASSERT(ggml_is_contiguous(src0)); + + const int32_t lp0 = ((const int32_t *) (dst->op_params))[0]; + const int32_t rp0 = ((const int32_t *) (dst->op_params))[1]; + const int32_t lp1 = ((const int32_t *) (dst->op_params))[2]; + const int32_t rp1 = ((const int32_t *) (dst->op_params))[3]; + const int32_t lp2 = ((const int32_t *) (dst->op_params))[4]; + const int32_t rp2 = ((const int32_t *) (dst->op_params))[5]; + const int32_t lp3 = ((const int32_t *) (dst->op_params))[6]; + const int32_t rp3 = ((const int32_t *) (dst->op_params))[7]; + const int32_t circular = ((const int32_t *) (dst->op_params))[8]; + + pad_f32_cuda(src0_d, dst_d, lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], + (bool)circular, stream); +} +#include "pad.cuh" + +#include + +__device__ __forceinline__ int64_t wrap_around(int64_t coord, int64_t size) { + // + size ensures negatives are handled properly + return (coord + size) % size; +} + +static __global__ void pad_f32(const float * src, + float * dst, + const int lp0, + const int rp0, + const int lp1, + const int rp1, + const int lp2, + const int rp2, + const int lp3, + const int rp3, + const int ne0, + const int ne1, + const int ne2, + const int ne3, + const bool circular) { + // blockIdx.z: i3*ne2+i2 + // blockIdx.y: i1 + // blockIDx.x: i0 / CUDA_PAD_BLOCK_SIZE + // gridDim.y: ne1 + int i0 = threadIdx.x + blockIdx.x * blockDim.x; + int i1 = blockIdx.y; + int i2 = blockIdx.z % ne2; + int i3 = blockIdx.z / ne2; + + if (i0 >= ne0 || i1 >= ne1 || i2 >= ne2 || i3 >= ne3) { + return; + } + + const int64_t dst_idx = i3 * (ne0 * ne1 * ne2) + i2 * (ne0 * ne1) + i1 * ne0 + i0; + + if (!circular) { + // operation + if ((i0 >= lp0 && i0 < ne0 - rp0) && (i1 >= lp1 && i1 < ne1 - rp1) && (i2 >= lp2 && i2 < ne2 - rp2) && + (i3 >= lp3 && i3 < ne3 - rp3)) { + const int64_t i00 = i0 - lp0; + const int64_t i01 = i1 - lp1; + const int64_t i02 = i2 - lp2; + const int64_t i03 = i3 - lp3; + const int64_t ne02 = ne2 - lp2 - rp2; + const int64_t ne01 = ne1 - lp1 - rp1; + const int64_t ne00 = ne0 - lp0 - rp0; + + const int64_t src_idx = i03 * (ne00 * ne01 * ne02) + i02 * (ne00 * ne01) + i01 * ne00 + i00; + + dst[dst_idx] = src[src_idx]; + } else { + dst[dst_idx] = 0.0f; + } + } + // circular means on a torus, so x and y wrap around + else { + const int64_t ne00 = ne0 - lp0 - rp0; + const int64_t ne01 = ne1 - lp1 - rp1; + const int64_t ne02 = ne2 - lp2 - rp2; + const int64_t ne03 = ne3 - lp3 - rp3; + + const int64_t i00 = wrap_around(i0 - lp0, ne00); + const int64_t i01 = wrap_around(i1 - lp1, ne01); + const int64_t i02 = wrap_around(i2 - lp2, ne02); + const int64_t i03 = wrap_around(i3 - lp3, ne03); + + const int64_t src_idx = i03 * (ne00 * ne01 * ne02) + i02 * (ne00 * ne01) + i01 * ne00 + i00; + + dst[dst_idx] = src[src_idx]; + } +} + +static void pad_f32_cuda(const float * src, + float * dst, + const int lp0, + const int rp0, + const int lp1, + const int rp1, + const int lp2, + const int rp2, + const int lp3, + const int rp3, + const int ne0, + const int ne1, + const int ne2, + const int ne3, + const bool circular, + cudaStream_t stream) { + 
int num_blocks = (ne0 + CUDA_PAD_BLOCK_SIZE - 1) / CUDA_PAD_BLOCK_SIZE; + dim3 gridDim(num_blocks, ne1, ne2 * ne3); + pad_f32<<>>(src, dst, lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, ne0, ne1, + ne2, ne3, circular); +} + +void ggml_cuda_op_pad(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const float * src0_d = (const float *) src0->data; + float * dst_d = (float *) dst->data; + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_F32); + GGML_ASSERT(ggml_is_contiguous(src0)); + + const int32_t lp0 = ((const int32_t *) (dst->op_params))[0]; + const int32_t rp0 = ((const int32_t *) (dst->op_params))[1]; + const int32_t lp1 = ((const int32_t *) (dst->op_params))[2]; + const int32_t rp1 = ((const int32_t *) (dst->op_params))[3]; + const int32_t lp2 = ((const int32_t *) (dst->op_params))[4]; + const int32_t rp2 = ((const int32_t *) (dst->op_params))[5]; + const int32_t lp3 = ((const int32_t *) (dst->op_params))[6]; + const int32_t rp3 = ((const int32_t *) (dst->op_params))[7]; + const int32_t circular = ((const int32_t *) (dst->op_params))[8]; + + pad_f32_cuda(src0_d, dst_d, lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], + (bool) circular, stream); +} + +#include "pad.cuh" + +#include + +__device__ __forceinline__ int64_t wrap_around(int64_t coord, int64_t size) { + // + size ensures negatives are handled properly + return (coord + size) % size; +} + +static __global__ void pad_f32(const float * src, + float * dst, + const int lp0, + const int rp0, + const int lp1, + const int rp1, + const int lp2, + const int rp2, + const int lp3, + const int rp3, + const int ne0, + const int ne1, + const int ne2, + const int ne3, + const bool circular) { + // blockIdx.z: i3*ne2+i2 + // blockIdx.y: i1 + // blockIDx.x: i0 / CUDA_PAD_BLOCK_SIZE + // gridDim.y: ne1 + int i0 = threadIdx.x + blockIdx.x * blockDim.x; + int i1 = blockIdx.y; + int i2 = blockIdx.z % ne2; + int i3 = blockIdx.z / ne2; + + if (i0 >= ne0 || i1 >= ne1 || i2 >= ne2 || i3 >= ne3) { + return; + } + + const int64_t dst_idx = i3 * (ne0 * ne1 * ne2) + i2 * (ne0 * ne1) + i1 * ne0 + i0; + + if (!circular) { + // operation + if ((i0 >= lp0 && i0 < ne0 - rp0) && (i1 >= lp1 && i1 < ne1 - rp1) && (i2 >= lp2 && i2 < ne2 - rp2) && + (i3 >= lp3 && i3 < ne3 - rp3)) { + const int64_t i00 = i0 - lp0; + const int64_t i01 = i1 - lp1; + const int64_t i02 = i2 - lp2; + const int64_t i03 = i3 - lp3; + const int64_t ne02 = ne2 - lp2 - rp2; + const int64_t ne01 = ne1 - lp1 - rp1; + const int64_t ne00 = ne0 - lp0 - rp0; + + const int64_t src_idx = i03 * (ne00 * ne01 * ne02) + i02 * (ne00 * ne01) + i01 * ne00 + i00; + + dst[dst_idx] = src[src_idx]; + } else { + dst[dst_idx] = 0.0f; + } + } + // circular means on a torus, so x and y wrap around + else { + const int64_t ne00 = ne0 - lp0 - rp0; + const int64_t ne01 = ne1 - lp1 - rp1; + const int64_t ne02 = ne2 - lp2 - rp2; + const int64_t ne03 = ne3 - lp3 - rp3; + + const int64_t i00 = wrap_around(i0 - lp0, ne00); + const int64_t i01 = wrap_around(i1 - lp1, ne01); + const int64_t i02 = wrap_around(i2 - lp2, ne02); + const int64_t i03 = wrap_around(i3 - lp3, ne03); + + const int64_t src_idx = i03 * (ne00 * ne01 * ne02) + i02 * (ne00 * ne01) + i01 * ne00 + i00; + + dst[dst_idx] = src[src_idx]; + } +} + +static void pad_f32_cuda(const float * src, + float * dst, + const int lp0, + const int rp0, + const int lp1, + const int rp1, + const int lp2, + const int rp2, + 
const int lp3, + const int rp3, + const int ne0, + const int ne1, + const int ne2, + const int ne3, + const bool circular, cudaStream_t stream) { int num_blocks = (ne0 + CUDA_PAD_BLOCK_SIZE - 1) / CUDA_PAD_BLOCK_SIZE; dim3 gridDim(num_blocks, ne1, ne2 * ne3); From e315fcf364ea83bad930eb8aa83fdb155fb02ddb Mon Sep 17 00:00:00 2001 From: Phylliida Date: Sat, 29 Nov 2025 19:03:04 -0800 Subject: [PATCH 24/39] fix duplicates --- ggml/src/ggml-cuda/pad.cu | 346 -------------------------------------- 1 file changed, 346 deletions(-) diff --git a/ggml/src/ggml-cuda/pad.cu b/ggml/src/ggml-cuda/pad.cu index 13049db114c..2347146205c 100644 --- a/ggml/src/ggml-cuda/pad.cu +++ b/ggml/src/ggml-cuda/pad.cu @@ -1,106 +1,3 @@ -#include - -#include "pad.cuh" - - -__device__ __forceinline__ int64_t wrap_around(int64_t coord, int64_t size) { - // + size ensures negatives are handled properly - return (coord + size) % size; -} - -static __global__ void pad_f32(const float * src, float * dst, - const int lp0, const int rp0, const int lp1, const int rp1, - const int lp2, const int rp2, const int lp3, const int rp3, - const int ne0, const int ne1, const int ne2, const int ne3, - const bool circular) { - // blockIdx.z: i3*ne2+i2 - // blockIdx.y: i1 - // blockIDx.x: i0 / CUDA_PAD_BLOCK_SIZE - // gridDim.y: ne1 - int i0 = threadIdx.x + blockIdx.x * blockDim.x; - int i1 = blockIdx.y; - int i2 = blockIdx.z % ne2; - int i3 = blockIdx.z / ne2; - - if (i0 >= ne0 || i1 >= ne1 || i2 >= ne2 || i3 >= ne3) { - return; - } - - const int64_t dst_idx = i3*(ne0*ne1*ne2) + i2*(ne0*ne1) + i1*ne0 + i0; - - if (!circular) { - // operation - if ((i0 >= lp0 && i0 < ne0 - rp0) && - (i1 >= lp1 && i1 < ne1 - rp1) && - (i2 >= lp2 && i2 < ne2 - rp2) && - (i3 >= lp3 && i3 < ne3 - rp3)) { - const int64_t i00 = i0 - lp0; - const int64_t i01 = i1 - lp1; - const int64_t i02 = i2 - lp2; - const int64_t i03 = i3 - lp3; - const int64_t ne02 = ne2 - lp2 - rp2; - const int64_t ne01 = ne1 - lp1 - rp1; - const int64_t ne00 = ne0 - lp0 - rp0; - - const int64_t src_idx = i03*(ne00*ne01*ne02) + i02*(ne00*ne01) + i01*ne00 + i00; - - dst[dst_idx] = src[src_idx]; - } else { - dst[dst_idx] = 0.0f; - } - } - // circular means on a torus, so x and y wrap around - else { - const int64_t ne00 = ne0 - lp0 - rp0; - const int64_t ne01 = ne1 - lp1 - rp1; - const int64_t ne02 = ne2 - lp2 - rp2; - const int64_t ne03 = ne3 - lp3 - rp3; - - const int64_t i00 = wrap_around(i0 - lp0, ne00); - const int64_t i01 = wrap_around(i1 - lp1, ne01); - const int64_t i02 = wrap_around(i2 - lp2, ne02); - const int64_t i03 = wrap_around(i3 - lp3, ne03); - - const int64_t src_idx = i03*(ne00*ne01*ne02) + i02*(ne00*ne01) + i01*ne00 + i00; - - dst[dst_idx] = src[src_idx]; - } -} - -static void pad_f32_cuda(const float * src, float * dst, - const int lp0, const int rp0, const int lp1, const int rp1, - const int lp2, const int rp2, const int lp3, const int rp3, - const int ne0, const int ne1, const int ne2, const int ne3, - const bool circular, cudaStream_t stream) { - int num_blocks = (ne0 + CUDA_PAD_BLOCK_SIZE - 1) / CUDA_PAD_BLOCK_SIZE; - dim3 gridDim(num_blocks, ne1, ne2*ne3); - pad_f32<<>>(src, dst, lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, ne0, ne1, ne2, ne3, circular); -} - -void ggml_cuda_op_pad(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const float * src0_d = (const float *)src0->data; - float * dst_d = (float *)dst->data; - cudaStream_t stream = ctx.stream(); - - GGML_ASSERT(src0->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type 
== GGML_TYPE_F32); - GGML_ASSERT(ggml_is_contiguous(src0)); - - const int32_t lp0 = ((const int32_t*)(dst->op_params))[0]; - const int32_t rp0 = ((const int32_t*)(dst->op_params))[1]; - const int32_t lp1 = ((const int32_t*)(dst->op_params))[2]; - const int32_t rp1 = ((const int32_t*)(dst->op_params))[3]; - const int32_t lp2 = ((const int32_t*)(dst->op_params))[4]; - const int32_t rp2 = ((const int32_t*)(dst->op_params))[5]; - const int32_t lp3 = ((const int32_t*)(dst->op_params))[6]; - const int32_t rp3 = ((const int32_t*)(dst->op_params))[7]; - const int32_t circular = ((const int32_t*)(dst->op_params))[8]; - - pad_f32_cuda(src0_d, dst_d, - lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, - dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], (bool)circular, stream); -} #include "pad.cuh" #include @@ -222,246 +119,3 @@ void ggml_cuda_op_pad(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { pad_f32_cuda(src0_d, dst_d, lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], (bool)circular, stream); } -#include "pad.cuh" - -#include - -__device__ __forceinline__ int64_t wrap_around(int64_t coord, int64_t size) { - // + size ensures negatives are handled properly - return (coord + size) % size; -} - -static __global__ void pad_f32(const float * src, - float * dst, - const int lp0, - const int rp0, - const int lp1, - const int rp1, - const int lp2, - const int rp2, - const int lp3, - const int rp3, - const int ne0, - const int ne1, - const int ne2, - const int ne3, - const bool circular) { - // blockIdx.z: i3*ne2+i2 - // blockIdx.y: i1 - // blockIDx.x: i0 / CUDA_PAD_BLOCK_SIZE - // gridDim.y: ne1 - int i0 = threadIdx.x + blockIdx.x * blockDim.x; - int i1 = blockIdx.y; - int i2 = blockIdx.z % ne2; - int i3 = blockIdx.z / ne2; - - if (i0 >= ne0 || i1 >= ne1 || i2 >= ne2 || i3 >= ne3) { - return; - } - - const int64_t dst_idx = i3 * (ne0 * ne1 * ne2) + i2 * (ne0 * ne1) + i1 * ne0 + i0; - - if (!circular) { - // operation - if ((i0 >= lp0 && i0 < ne0 - rp0) && (i1 >= lp1 && i1 < ne1 - rp1) && (i2 >= lp2 && i2 < ne2 - rp2) && - (i3 >= lp3 && i3 < ne3 - rp3)) { - const int64_t i00 = i0 - lp0; - const int64_t i01 = i1 - lp1; - const int64_t i02 = i2 - lp2; - const int64_t i03 = i3 - lp3; - const int64_t ne02 = ne2 - lp2 - rp2; - const int64_t ne01 = ne1 - lp1 - rp1; - const int64_t ne00 = ne0 - lp0 - rp0; - - const int64_t src_idx = i03 * (ne00 * ne01 * ne02) + i02 * (ne00 * ne01) + i01 * ne00 + i00; - - dst[dst_idx] = src[src_idx]; - } else { - dst[dst_idx] = 0.0f; - } - } - // circular means on a torus, so x and y wrap around - else { - const int64_t ne00 = ne0 - lp0 - rp0; - const int64_t ne01 = ne1 - lp1 - rp1; - const int64_t ne02 = ne2 - lp2 - rp2; - const int64_t ne03 = ne3 - lp3 - rp3; - - const int64_t i00 = wrap_around(i0 - lp0, ne00); - const int64_t i01 = wrap_around(i1 - lp1, ne01); - const int64_t i02 = wrap_around(i2 - lp2, ne02); - const int64_t i03 = wrap_around(i3 - lp3, ne03); - - const int64_t src_idx = i03 * (ne00 * ne01 * ne02) + i02 * (ne00 * ne01) + i01 * ne00 + i00; - - dst[dst_idx] = src[src_idx]; - } -} - -static void pad_f32_cuda(const float * src, - float * dst, - const int lp0, - const int rp0, - const int lp1, - const int rp1, - const int lp2, - const int rp2, - const int lp3, - const int rp3, - const int ne0, - const int ne1, - const int ne2, - const int ne3, - const bool circular, - cudaStream_t stream) { - int num_blocks = (ne0 + CUDA_PAD_BLOCK_SIZE - 1) / CUDA_PAD_BLOCK_SIZE; - dim3 gridDim(num_blocks, ne1, ne2 * ne3); - pad_f32<<>>(src, dst, 
lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, ne0, ne1, - ne2, ne3, circular); -} - -void ggml_cuda_op_pad(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const float * src0_d = (const float *) src0->data; - float * dst_d = (float *) dst->data; - cudaStream_t stream = ctx.stream(); - - GGML_ASSERT(src0->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); - GGML_ASSERT(ggml_is_contiguous(src0)); - - const int32_t lp0 = ((const int32_t *) (dst->op_params))[0]; - const int32_t rp0 = ((const int32_t *) (dst->op_params))[1]; - const int32_t lp1 = ((const int32_t *) (dst->op_params))[2]; - const int32_t rp1 = ((const int32_t *) (dst->op_params))[3]; - const int32_t lp2 = ((const int32_t *) (dst->op_params))[4]; - const int32_t rp2 = ((const int32_t *) (dst->op_params))[5]; - const int32_t lp3 = ((const int32_t *) (dst->op_params))[6]; - const int32_t rp3 = ((const int32_t *) (dst->op_params))[7]; - const int32_t circular = ((const int32_t *) (dst->op_params))[8]; - - pad_f32_cuda(src0_d, dst_d, lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], - (bool) circular, stream); -} - -#include "pad.cuh" - -#include - -__device__ __forceinline__ int64_t wrap_around(int64_t coord, int64_t size) { - // + size ensures negatives are handled properly - return (coord + size) % size; -} - -static __global__ void pad_f32(const float * src, - float * dst, - const int lp0, - const int rp0, - const int lp1, - const int rp1, - const int lp2, - const int rp2, - const int lp3, - const int rp3, - const int ne0, - const int ne1, - const int ne2, - const int ne3, - const bool circular) { - // blockIdx.z: i3*ne2+i2 - // blockIdx.y: i1 - // blockIDx.x: i0 / CUDA_PAD_BLOCK_SIZE - // gridDim.y: ne1 - int i0 = threadIdx.x + blockIdx.x * blockDim.x; - int i1 = blockIdx.y; - int i2 = blockIdx.z % ne2; - int i3 = blockIdx.z / ne2; - - if (i0 >= ne0 || i1 >= ne1 || i2 >= ne2 || i3 >= ne3) { - return; - } - - const int64_t dst_idx = i3 * (ne0 * ne1 * ne2) + i2 * (ne0 * ne1) + i1 * ne0 + i0; - - if (!circular) { - // operation - if ((i0 >= lp0 && i0 < ne0 - rp0) && (i1 >= lp1 && i1 < ne1 - rp1) && (i2 >= lp2 && i2 < ne2 - rp2) && - (i3 >= lp3 && i3 < ne3 - rp3)) { - const int64_t i00 = i0 - lp0; - const int64_t i01 = i1 - lp1; - const int64_t i02 = i2 - lp2; - const int64_t i03 = i3 - lp3; - const int64_t ne02 = ne2 - lp2 - rp2; - const int64_t ne01 = ne1 - lp1 - rp1; - const int64_t ne00 = ne0 - lp0 - rp0; - - const int64_t src_idx = i03 * (ne00 * ne01 * ne02) + i02 * (ne00 * ne01) + i01 * ne00 + i00; - - dst[dst_idx] = src[src_idx]; - } else { - dst[dst_idx] = 0.0f; - } - } - // circular means on a torus, so x and y wrap around - else { - const int64_t ne00 = ne0 - lp0 - rp0; - const int64_t ne01 = ne1 - lp1 - rp1; - const int64_t ne02 = ne2 - lp2 - rp2; - const int64_t ne03 = ne3 - lp3 - rp3; - - const int64_t i00 = wrap_around(i0 - lp0, ne00); - const int64_t i01 = wrap_around(i1 - lp1, ne01); - const int64_t i02 = wrap_around(i2 - lp2, ne02); - const int64_t i03 = wrap_around(i3 - lp3, ne03); - - const int64_t src_idx = i03 * (ne00 * ne01 * ne02) + i02 * (ne00 * ne01) + i01 * ne00 + i00; - - dst[dst_idx] = src[src_idx]; - } -} - -static void pad_f32_cuda(const float * src, - float * dst, - const int lp0, - const int rp0, - const int lp1, - const int rp1, - const int lp2, - const int rp2, - const int lp3, - const int rp3, - const int ne0, - const int ne1, - const int ne2, - const int ne3, - const bool circular, - cudaStream_t 
stream) { - int num_blocks = (ne0 + CUDA_PAD_BLOCK_SIZE - 1) / CUDA_PAD_BLOCK_SIZE; - dim3 gridDim(num_blocks, ne1, ne2 * ne3); - pad_f32<<>>(src, dst, lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, ne0, ne1, - ne2, ne3, circular); -} - -void ggml_cuda_op_pad(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const float * src0_d = (const float *) src0->data; - float * dst_d = (float *) dst->data; - cudaStream_t stream = ctx.stream(); - - GGML_ASSERT(src0->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); - GGML_ASSERT(ggml_is_contiguous(src0)); - - const int32_t lp0 = ((const int32_t *) (dst->op_params))[0]; - const int32_t rp0 = ((const int32_t *) (dst->op_params))[1]; - const int32_t lp1 = ((const int32_t *) (dst->op_params))[2]; - const int32_t rp1 = ((const int32_t *) (dst->op_params))[3]; - const int32_t lp2 = ((const int32_t *) (dst->op_params))[4]; - const int32_t rp2 = ((const int32_t *) (dst->op_params))[5]; - const int32_t lp3 = ((const int32_t *) (dst->op_params))[6]; - const int32_t rp3 = ((const int32_t *) (dst->op_params))[7]; - const int32_t circular = ((const int32_t *) (dst->op_params))[8]; - - pad_f32_cuda(src0_d, dst_d, lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], - (bool) circular, stream); -} From 80915a1618eba07b00c4cfe26808f8767af43c0b Mon Sep 17 00:00:00 2001 From: Phylliida Date: Sat, 29 Nov 2025 19:05:08 -0800 Subject: [PATCH 25/39] don't need to fix the padding --- ggml/src/ggml-cuda/pad.cu | 147 +++++++++++++++++++++++++++++--------- 1 file changed, 115 insertions(+), 32 deletions(-) diff --git a/ggml/src/ggml-cuda/pad.cu b/ggml/src/ggml-cuda/pad.cu index 2347146205c..1d6ca27e2bc 100644 --- a/ggml/src/ggml-cuda/pad.cu +++ b/ggml/src/ggml-cuda/pad.cu @@ -1,3 +1,106 @@ +#include + +#include "pad.cuh" + + +__device__ __forceinline__ int64_t wrap_around(int64_t coord, int64_t size) { + // + size ensures negatives are handled properly + return (coord + size) % size; +} + +static __global__ void pad_f32(const float * src, float * dst, + const int lp0, const int rp0, const int lp1, const int rp1, + const int lp2, const int rp2, const int lp3, const int rp3, + const int ne0, const int ne1, const int ne2, const int ne3, + const bool circular) { + // blockIdx.z: i3*ne2+i2 + // blockIdx.y: i1 + // blockIDx.x: i0 / CUDA_PAD_BLOCK_SIZE + // gridDim.y: ne1 + int i0 = threadIdx.x + blockIdx.x * blockDim.x; + int i1 = blockIdx.y; + int i2 = blockIdx.z % ne2; + int i3 = blockIdx.z / ne2; + + if (i0 >= ne0 || i1 >= ne1 || i2 >= ne2 || i3 >= ne3) { + return; + } + + const int64_t dst_idx = i3*(ne0*ne1*ne2) + i2*(ne0*ne1) + i1*ne0 + i0; + + if (!circular) { + // operation + if ((i0 >= lp0 && i0 < ne0 - rp0) && + (i1 >= lp1 && i1 < ne1 - rp1) && + (i2 >= lp2 && i2 < ne2 - rp2) && + (i3 >= lp3 && i3 < ne3 - rp3)) { + const int64_t i00 = i0 - lp0; + const int64_t i01 = i1 - lp1; + const int64_t i02 = i2 - lp2; + const int64_t i03 = i3 - lp3; + const int64_t ne02 = ne2 - lp2 - rp2; + const int64_t ne01 = ne1 - lp1 - rp1; + const int64_t ne00 = ne0 - lp0 - rp0; + + const int64_t src_idx = i03*(ne00*ne01*ne02) + i02*(ne00*ne01) + i01*ne00 + i00; + + dst[dst_idx] = src[src_idx]; + } else { + dst[dst_idx] = 0.0f; + } + } + // circular means on a torus, so x and y wrap around + else { + const int64_t ne00 = ne0 - lp0 - rp0; + const int64_t ne01 = ne1 - lp1 - rp1; + const int64_t ne02 = ne2 - lp2 - rp2; + const int64_t ne03 = ne3 - lp3 - rp3; + + const int64_t i00 = wrap_around(i0 - lp0, 
ne00); + const int64_t i01 = wrap_around(i1 - lp1, ne01); + const int64_t i02 = wrap_around(i2 - lp2, ne02); + const int64_t i03 = wrap_around(i3 - lp3, ne03); + + const int64_t src_idx = i03*(ne00*ne01*ne02) + i02*(ne00*ne01) + i01*ne00 + i00; + + dst[dst_idx] = src[src_idx]; + } +} + +static void pad_f32_cuda(const float * src, float * dst, + const int lp0, const int rp0, const int lp1, const int rp1, + const int lp2, const int rp2, const int lp3, const int rp3, + const int ne0, const int ne1, const int ne2, const int ne3, + const int circular, cudaStream_t stream) { + int num_blocks = (ne0 + CUDA_PAD_BLOCK_SIZE - 1) / CUDA_PAD_BLOCK_SIZE; + dim3 gridDim(num_blocks, ne1, ne2*ne3); + pad_f32<<>>(src, dst, lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, ne0, ne1, ne2, ne3, circular); +} + +void ggml_cuda_op_pad(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * src0 = dst->src[0]; + const float * src0_d = (const float *)src0->data; + float * dst_d = (float *)dst->data; + cudaStream_t stream = ctx.stream(); + + GGML_ASSERT(src0->type == GGML_TYPE_F32); + GGML_ASSERT(dst->type == GGML_TYPE_F32); + GGML_ASSERT(ggml_is_contiguous(src0)); + + const int32_t lp0 = ((const int32_t*)(dst->op_params))[0]; + const int32_t rp0 = ((const int32_t*)(dst->op_params))[1]; + const int32_t lp1 = ((const int32_t*)(dst->op_params))[2]; + const int32_t rp1 = ((const int32_t*)(dst->op_params))[3]; + const int32_t lp2 = ((const int32_t*)(dst->op_params))[4]; + const int32_t rp2 = ((const int32_t*)(dst->op_params))[5]; + const int32_t lp3 = ((const int32_t*)(dst->op_params))[6]; + const int32_t rp3 = ((const int32_t*)(dst->op_params))[7]; + const int32_t circular = ((const int32_t*)(dst->op_params))[8]; + + pad_f32_cuda(src0_d, dst_d, + lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, + dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], (bool)circular, stream); +} #include "pad.cuh" #include @@ -7,21 +110,11 @@ __device__ __forceinline__ int64_t wrap_around(int64_t coord, int64_t size) { return (coord + size) % size; } -static __global__ void pad_f32(const float * src, - float * dst, - const int lp0, - const int rp0, - const int lp1, - const int rp1, - const int lp2, - const int rp2, - const int lp3, - const int rp3, - const int ne0, - const int ne1, - const int ne2, - const int ne3, - const bool circular) { +static __global__ void pad_f32(const float * src, float * dst, + const int lp0, const int rp0, const int lp1, const int rp1, + const int lp2, const int rp2, const int lp3, const int rp3, + const int ne0, const int ne1, const int ne2, const int ne3, + const bool circular) { // blockIdx.z: i3*ne2+i2 // blockIdx.y: i1 // blockIDx.x: i0 / CUDA_PAD_BLOCK_SIZE @@ -74,22 +167,12 @@ static __global__ void pad_f32(const float * src, } } -static void pad_f32_cuda(const float * src, - float * dst, - const int lp0, - const int rp0, - const int lp1, - const int rp1, - const int lp2, - const int rp2, - const int lp3, - const int rp3, - const int ne0, - const int ne1, - const int ne2, - const int ne3, - const bool circular, - cudaStream_t stream) { + +static void pad_f32_cuda(const float * src, float * dst, + const int lp0, const int rp0, const int lp1, const int rp1, + const int lp2, const int rp2, const int lp3, const int rp3, + const int ne0, const int ne1, const int ne2, const int ne3, + const bool circular, cudaStream_t stream) { int num_blocks = (ne0 + CUDA_PAD_BLOCK_SIZE - 1) / CUDA_PAD_BLOCK_SIZE; dim3 gridDim(num_blocks, ne1, ne2 * ne3); pad_f32<<>>(src, dst, lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, ne0, ne1, 
@@ -117,5 +200,5 @@ void ggml_cuda_op_pad(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const int32_t circular = ((const int32_t *) (dst->op_params))[8]; pad_f32_cuda(src0_d, dst_d, lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], - (bool)circular, stream); + (bool) circular, stream); } From 1721a2b49b39dee0272940d62be28f5de4a40836 Mon Sep 17 00:00:00 2001 From: Phylliida Date: Sat, 29 Nov 2025 19:06:22 -0800 Subject: [PATCH 26/39] make circular bool --- ggml/src/ggml-cuda/pad.cu | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ggml/src/ggml-cuda/pad.cu b/ggml/src/ggml-cuda/pad.cu index 1d6ca27e2bc..d4ec1472370 100644 --- a/ggml/src/ggml-cuda/pad.cu +++ b/ggml/src/ggml-cuda/pad.cu @@ -71,7 +71,7 @@ static void pad_f32_cuda(const float * src, float * dst, const int lp0, const int rp0, const int lp1, const int rp1, const int lp2, const int rp2, const int lp3, const int rp3, const int ne0, const int ne1, const int ne2, const int ne3, - const int circular, cudaStream_t stream) { + const bool circular, cudaStream_t stream) { int num_blocks = (ne0 + CUDA_PAD_BLOCK_SIZE - 1) / CUDA_PAD_BLOCK_SIZE; dim3 gridDim(num_blocks, ne1, ne2*ne3); pad_f32<<>>(src, dst, lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, ne0, ne1, ne2, ne3, circular); From 89559a11eb4b63ceaf0a9c92440606426c3d677b Mon Sep 17 00:00:00 2001 From: Phylliida Date: Sat, 29 Nov 2025 19:07:54 -0800 Subject: [PATCH 27/39] duplicate again --- ggml/src/ggml-cuda/pad.cu | 105 +------------------------------------- 1 file changed, 1 insertion(+), 104 deletions(-) diff --git a/ggml/src/ggml-cuda/pad.cu b/ggml/src/ggml-cuda/pad.cu index d4ec1472370..a183e28aac1 100644 --- a/ggml/src/ggml-cuda/pad.cu +++ b/ggml/src/ggml-cuda/pad.cu @@ -1,106 +1,3 @@ -#include - -#include "pad.cuh" - - -__device__ __forceinline__ int64_t wrap_around(int64_t coord, int64_t size) { - // + size ensures negatives are handled properly - return (coord + size) % size; -} - -static __global__ void pad_f32(const float * src, float * dst, - const int lp0, const int rp0, const int lp1, const int rp1, - const int lp2, const int rp2, const int lp3, const int rp3, - const int ne0, const int ne1, const int ne2, const int ne3, - const bool circular) { - // blockIdx.z: i3*ne2+i2 - // blockIdx.y: i1 - // blockIDx.x: i0 / CUDA_PAD_BLOCK_SIZE - // gridDim.y: ne1 - int i0 = threadIdx.x + blockIdx.x * blockDim.x; - int i1 = blockIdx.y; - int i2 = blockIdx.z % ne2; - int i3 = blockIdx.z / ne2; - - if (i0 >= ne0 || i1 >= ne1 || i2 >= ne2 || i3 >= ne3) { - return; - } - - const int64_t dst_idx = i3*(ne0*ne1*ne2) + i2*(ne0*ne1) + i1*ne0 + i0; - - if (!circular) { - // operation - if ((i0 >= lp0 && i0 < ne0 - rp0) && - (i1 >= lp1 && i1 < ne1 - rp1) && - (i2 >= lp2 && i2 < ne2 - rp2) && - (i3 >= lp3 && i3 < ne3 - rp3)) { - const int64_t i00 = i0 - lp0; - const int64_t i01 = i1 - lp1; - const int64_t i02 = i2 - lp2; - const int64_t i03 = i3 - lp3; - const int64_t ne02 = ne2 - lp2 - rp2; - const int64_t ne01 = ne1 - lp1 - rp1; - const int64_t ne00 = ne0 - lp0 - rp0; - - const int64_t src_idx = i03*(ne00*ne01*ne02) + i02*(ne00*ne01) + i01*ne00 + i00; - - dst[dst_idx] = src[src_idx]; - } else { - dst[dst_idx] = 0.0f; - } - } - // circular means on a torus, so x and y wrap around - else { - const int64_t ne00 = ne0 - lp0 - rp0; - const int64_t ne01 = ne1 - lp1 - rp1; - const int64_t ne02 = ne2 - lp2 - rp2; - const int64_t ne03 = ne3 - lp3 - rp3; - - const int64_t i00 = wrap_around(i0 - lp0, ne00); - const int64_t i01 = wrap_around(i1 - 
lp1, ne01); - const int64_t i02 = wrap_around(i2 - lp2, ne02); - const int64_t i03 = wrap_around(i3 - lp3, ne03); - - const int64_t src_idx = i03*(ne00*ne01*ne02) + i02*(ne00*ne01) + i01*ne00 + i00; - - dst[dst_idx] = src[src_idx]; - } -} - -static void pad_f32_cuda(const float * src, float * dst, - const int lp0, const int rp0, const int lp1, const int rp1, - const int lp2, const int rp2, const int lp3, const int rp3, - const int ne0, const int ne1, const int ne2, const int ne3, - const bool circular, cudaStream_t stream) { - int num_blocks = (ne0 + CUDA_PAD_BLOCK_SIZE - 1) / CUDA_PAD_BLOCK_SIZE; - dim3 gridDim(num_blocks, ne1, ne2*ne3); - pad_f32<<>>(src, dst, lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, ne0, ne1, ne2, ne3, circular); -} - -void ggml_cuda_op_pad(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - const float * src0_d = (const float *)src0->data; - float * dst_d = (float *)dst->data; - cudaStream_t stream = ctx.stream(); - - GGML_ASSERT(src0->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); - GGML_ASSERT(ggml_is_contiguous(src0)); - - const int32_t lp0 = ((const int32_t*)(dst->op_params))[0]; - const int32_t rp0 = ((const int32_t*)(dst->op_params))[1]; - const int32_t lp1 = ((const int32_t*)(dst->op_params))[2]; - const int32_t rp1 = ((const int32_t*)(dst->op_params))[3]; - const int32_t lp2 = ((const int32_t*)(dst->op_params))[4]; - const int32_t rp2 = ((const int32_t*)(dst->op_params))[5]; - const int32_t lp3 = ((const int32_t*)(dst->op_params))[6]; - const int32_t rp3 = ((const int32_t*)(dst->op_params))[7]; - const int32_t circular = ((const int32_t*)(dst->op_params))[8]; - - pad_f32_cuda(src0_d, dst_d, - lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, - dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], (bool)circular, stream); -} #include "pad.cuh" #include @@ -201,4 +98,4 @@ void ggml_cuda_op_pad(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { pad_f32_cuda(src0_d, dst_d, lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], (bool) circular, stream); -} +} \ No newline at end of file From af56c820eaec4d4622508a59ebbc4b56733d9a97 Mon Sep 17 00:00:00 2001 From: Phylliida Date: Sat, 29 Nov 2025 19:10:24 -0800 Subject: [PATCH 28/39] rename vulkan to wrap around --- ggml/src/ggml-vulkan/vulkan-shaders/pad.comp | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/ggml/src/ggml-vulkan/vulkan-shaders/pad.comp b/ggml/src/ggml-vulkan/vulkan-shaders/pad.comp index 7edc524490e..5abd2f6fc69 100644 --- a/ggml/src/ggml-vulkan/vulkan-shaders/pad.comp +++ b/ggml/src/ggml-vulkan/vulkan-shaders/pad.comp @@ -19,7 +19,7 @@ layout (push_constant) uniform parameter uint get_aoffset() { return p.misalign_offsets >> 16; } uint get_doffset() { return p.misalign_offsets & 0xFFFF; } -uint wrap_coord(int coord, uint size) { +uint wrap_around(int coord, uint size) { return (uint(coord + int(size))) % size; // add size to avoid issues with negative } @@ -46,10 +46,10 @@ void main() { const uint dst_idx = i3*p.nb13 + i2*p.nb12 + i1*p.nb11 + i0*p.nb10; if (p.circular != 0u) { - const uint ci0 = wrap_coord(int(i0) - int(p.lp0), p.ne00); - const uint ci1 = wrap_coord(int(i1) - int(p.lp1), p.ne01); - const uint ci2 = wrap_coord(int(i2) - int(p.lp2), p.ne02); - const uint ci3 = wrap_coord(int(i3) - int(p.lp3), p.ne03); + const uint ci0 = wrap_around(int(i0) - int(p.lp0), p.ne00); + const uint ci1 = wrap_around(int(i1) - int(p.lp1), p.ne01); + const uint ci2 = wrap_around(int(i2) - int(p.lp2), 
p.ne02); + const uint ci3 = wrap_around(int(i3) - int(p.lp3), p.ne03); const uint circular_src_idx = ci3*p.nb03 + ci2*p.nb02 + ci1*p.nb01 + ci0*p.nb00; data_d[get_doffset() + dst_idx] = D_TYPE(data_a[get_aoffset() + circular_src_idx]); } else { From ec892eceddef3fc28696473e6b00a3e660dea975 Mon Sep 17 00:00:00 2001 From: Phylliida Date: Sat, 29 Nov 2025 19:13:40 -0800 Subject: [PATCH 29/39] Don't need indent --- ggml/src/ggml-cuda/pad.cu | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ggml/src/ggml-cuda/pad.cu b/ggml/src/ggml-cuda/pad.cu index a183e28aac1..2355335dd9d 100644 --- a/ggml/src/ggml-cuda/pad.cu +++ b/ggml/src/ggml-cuda/pad.cu @@ -72,8 +72,7 @@ static void pad_f32_cuda(const float * src, float * dst, const bool circular, cudaStream_t stream) { int num_blocks = (ne0 + CUDA_PAD_BLOCK_SIZE - 1) / CUDA_PAD_BLOCK_SIZE; dim3 gridDim(num_blocks, ne1, ne2 * ne3); - pad_f32<<<gridDim, CUDA_PAD_BLOCK_SIZE, 0, stream>>>(src, dst, lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, ne0, ne1, - ne2, ne3, circular); + pad_f32<<<gridDim, CUDA_PAD_BLOCK_SIZE, 0, stream>>>(src, dst, lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, ne0, ne1, ne2, ne3, circular); } void ggml_cuda_op_pad(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { From f295d28425f604fef15cdaa90ee21a648c29ab88 Mon Sep 17 00:00:00 2001 From: Phylliida Date: Sat, 29 Nov 2025 19:25:57 -0800 Subject: [PATCH 30/39] moved to const expr --- ggml/src/ggml-cpu/ops.cpp | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/ggml/src/ggml-cpu/ops.cpp b/ggml/src/ggml-cpu/ops.cpp index c72789b10ec..9064fe08041 100644 --- a/ggml/src/ggml-cpu/ops.cpp +++ b/ggml/src/ggml-cpu/ops.cpp @@ -7537,6 +7537,7 @@ void ggml_compute_forward_upscale( // ggml_compute_forward_pad +template <bool CIRCULAR> static void ggml_compute_forward_pad_f32( const ggml_compute_params * params, ggml_tensor * dst) { @@ -7560,8 +7561,6 @@ static void ggml_compute_forward_pad_f32( const int32_t rp2 = ggml_get_op_params_i32(dst, 5); const int32_t lp3 = ggml_get_op_params_i32(dst, 6); const int32_t rp3 = ggml_get_op_params_i32(dst, 7); - const bool circular = (bool)ggml_get_op_params_i32(dst, 8); - // TODO: optimize @@ -7570,7 +7569,7 @@ static void ggml_compute_forward_pad_f32( for (int64_t i0 = 0; i0 < ne0; ++i0) { for (int64_t i3 = 0; i3 < ne3; ++i3) { // circular means wrap around on a torus, so x and y loop around - if (circular) { + if constexpr (CIRCULAR) { const int64_t dst_idx = i3*(ne0*ne1*ne2) + i2*(ne0*ne1) + i1*ne0 + i0; const int64_t src_i0 = ggml_wrap_around(i0 - lp0, ne00); const int64_t src_i1 = ggml_wrap_around(i1 - lp1, ne01); @@ -7585,8 +7584,7 @@ static void ggml_compute_forward_pad_f32( const float * src_ptr = (const float *)((char *) src0->data + src_idx); dst_ptr[dst_idx] = *src_ptr; - } - else { + } else { const int64_t dst_idx = i3*(ne0*ne1*ne2) + i2*(ne0*ne1) + i1*ne0 + i0; if ((i0 >= lp0 && i0 < ne0 - rp0) \ && (i1 >= lp1 && i1 < ne1 - rp1) \ && (i2 >= lp2 && i2 < ne2 - rp2) \ && (i3 >= lp3 && i3 < ne3 - rp3)) { @@ -7609,13 +7607,16 @@ static void ggml_compute_forward_pad_f32( void ggml_compute_forward_pad( const ggml_compute_params * params, ggml_tensor * dst) { - const ggml_tensor * src0 = dst->src[0]; - + const bool circular = (bool) ggml_get_op_params_i32(dst, 8); switch (src0->type) { case GGML_TYPE_F32: { - ggml_compute_forward_pad_f32(params, dst); + if (circular) { + ggml_compute_forward_pad_f32<true>(params, dst); + } else { + ggml_compute_forward_pad_f32<false>(params, dst); + } } break; default: { From bb8ecadf994670e5e4b2d979d33ff1f5ab073f75 Mon Sep 17 00:00:00 2001 From: Phylliida Date: Sat, 29 Nov 2025 19:30:22 -0800 Subject: [PATCH 31/39] removed unneeded extra line break --- 
ggml/src/ggml-cuda/pad.cu | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ggml/src/ggml-cuda/pad.cu b/ggml/src/ggml-cuda/pad.cu index 2355335dd9d..8596f654c46 100644 --- a/ggml/src/ggml-cuda/pad.cu +++ b/ggml/src/ggml-cuda/pad.cu @@ -95,6 +95,5 @@ void ggml_cuda_op_pad(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const int32_t rp3 = ((const int32_t *) (dst->op_params))[7]; const int32_t circular = ((const int32_t *) (dst->op_params))[8]; - pad_f32_cuda(src0_d, dst_d, lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], - (bool) circular, stream); + pad_f32_cuda(src0_d, dst_d, lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], (bool) circular, stream); } \ No newline at end of file From 4d208564cd2db7f5a89b14e8126aab9c56be7cc3 Mon Sep 17 00:00:00 2001 From: Phylliida Date: Sat, 29 Nov 2025 19:32:22 -0800 Subject: [PATCH 32/39] More readable method calls --- ggml/src/ggml-cuda/pad.cu | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/ggml/src/ggml-cuda/pad.cu b/ggml/src/ggml-cuda/pad.cu index 8596f654c46..14827e2686a 100644 --- a/ggml/src/ggml-cuda/pad.cu +++ b/ggml/src/ggml-cuda/pad.cu @@ -72,7 +72,9 @@ static void pad_f32_cuda(const float * src, float * dst, const bool circular, cudaStream_t stream) { int num_blocks = (ne0 + CUDA_PAD_BLOCK_SIZE - 1) / CUDA_PAD_BLOCK_SIZE; dim3 gridDim(num_blocks, ne1, ne2 * ne3); - pad_f32<<<gridDim, CUDA_PAD_BLOCK_SIZE, 0, stream>>>(src, dst, lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, ne0, ne1, ne2, ne3, circular); + pad_f32<<<gridDim, CUDA_PAD_BLOCK_SIZE, 0, stream>>>(src, dst, + lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, + ne0, ne1, ne2, ne3, circular); } void ggml_cuda_op_pad(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { @@ -95,5 +97,8 @@ void ggml_cuda_op_pad(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { const int32_t rp3 = ((const int32_t *) (dst->op_params))[7]; const int32_t circular = ((const int32_t *) (dst->op_params))[8]; - pad_f32_cuda(src0_d, dst_d, lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], (bool) circular, stream); + pad_f32_cuda(src0_d, dst_d, + lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, + dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], + (bool) circular, stream); } \ No newline at end of file From b850c04292e99bceb88b5f74ce61f6a8471dc263 Mon Sep 17 00:00:00 2001 From: Phylliida Date: Sat, 29 Nov 2025 21:11:05 -0800 Subject: [PATCH 33/39] Minor wording changes --- ggml/src/ggml-cpu/ops.cpp | 4 ++-- ggml/src/ggml-cuda/pad.cu | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/ggml/src/ggml-cpu/ops.cpp b/ggml/src/ggml-cpu/ops.cpp index 9064fe08041..f0e1ef32eec 100644 --- a/ggml/src/ggml-cpu/ops.cpp +++ b/ggml/src/ggml-cpu/ops.cpp @@ -7537,7 +7537,7 @@ void ggml_compute_forward_upscale( // ggml_compute_forward_pad -template <bool CIRCULAR> +template <bool circular_t> static void ggml_compute_forward_pad_f32( const ggml_compute_params * params, ggml_tensor * dst) { @@ -7569,7 +7569,7 @@ static void ggml_compute_forward_pad_f32( for (int64_t i0 = 0; i0 < ne0; ++i0) { for (int64_t i3 = 0; i3 < ne3; ++i3) { // circular means wrap around on a torus, so x and y loop around - if constexpr (CIRCULAR) { + if constexpr (circular_t) { const int64_t dst_idx = i3*(ne0*ne1*ne2) + i2*(ne0*ne1) + i1*ne0 + i0; const int64_t src_i0 = ggml_wrap_around(i0 - lp0, ne00); const int64_t src_i1 = ggml_wrap_around(i1 - lp1, ne01); diff --git a/ggml/src/ggml-cuda/pad.cu b/ggml/src/ggml-cuda/pad.cu index 14827e2686a..de1c8759290 100644 --- a/ggml/src/ggml-cuda/pad.cu +++ 
b/ggml/src/ggml-cuda/pad.cu @@ -28,7 +28,6 @@ static __global__ void pad_f32(const float * src, float * dst, const int64_t dst_idx = i3 * (ne0 * ne1 * ne2) + i2 * (ne0 * ne1) + i1 * ne0 + i0; if (!circular) { - // operation if ((i0 >= lp0 && i0 < ne0 - rp0) && (i1 >= lp1 && i1 < ne1 - rp1) && (i2 >= lp2 && i2 < ne2 - rp2) && (i3 >= lp3 && i3 < ne3 - rp3)) { const int64_t i00 = i0 - lp0; From 801cd847e96d6424974ed77f977f76ed2de1a4a5 Mon Sep 17 00:00:00 2001 From: Phylliida Date: Sat, 29 Nov 2025 21:34:26 -0800 Subject: [PATCH 34/39] Added final newline --- ggml/src/ggml-cuda/pad.cu | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ggml/src/ggml-cuda/pad.cu b/ggml/src/ggml-cuda/pad.cu index de1c8759290..660c192e48a 100644 --- a/ggml/src/ggml-cuda/pad.cu +++ b/ggml/src/ggml-cuda/pad.cu @@ -100,4 +100,4 @@ void ggml_cuda_op_pad(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], (bool) circular, stream); -} \ No newline at end of file +} From 7fd9ea3fd8c4edbab6dcdb8c1f64d4de40749b4c Mon Sep 17 00:00:00 2001 From: Phylliida Dev Date: Mon, 1 Dec 2025 16:02:29 -0800 Subject: [PATCH 35/39] Update ggml/include/ggml.h Co-authored-by: Georgi Gerganov --- ggml/include/ggml.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ggml/include/ggml.h b/ggml/include/ggml.h index 4a7d1e61e79..9d1e38fd9dc 100644 --- a/ggml/include/ggml.h +++ b/ggml/include/ggml.h @@ -2195,10 +2195,10 @@ extern "C" { GGML_API struct ggml_tensor * ggml_pad_circular( struct ggml_context * ctx, struct ggml_tensor * a, - int p0, - int p1, - int p2, - int p3); + int p0, + int p1, + int p2, + int p3); GGML_API struct ggml_tensor * ggml_pad_ext( struct ggml_context * ctx, From b29544d1bba51631f92050e1e5e609fead42b65b Mon Sep 17 00:00:00 2001 From: Phylliida Dev Date: Mon, 1 Dec 2025 16:02:43 -0800 Subject: [PATCH 36/39] Update ggml/include/ggml.h Co-authored-by: Georgi Gerganov --- ggml/include/ggml.h | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/ggml/include/ggml.h b/ggml/include/ggml.h index 9d1e38fd9dc..dc7160c40a1 100644 --- a/ggml/include/ggml.h +++ b/ggml/include/ggml.h @@ -2217,15 +2217,14 @@ extern "C" { GGML_API struct ggml_tensor * ggml_pad_ext_circular( struct ggml_context * ctx, struct ggml_tensor * a, - int lp0, - int rp0, - int lp1, - int rp1, - int lp2, - int rp2, - int lp3, - int rp3 - ); + int lp0, + int rp0, + int lp1, + int rp1, + int lp2, + int rp2, + int lp3, + int rp3); // pad each dimension with reflection: [a, b, c, d] -> [b, a, b, c, d, c] GGML_API struct ggml_tensor * ggml_pad_reflect_1d( From 2f3d4ba6035f36af9c4121d43789300db8653d67 Mon Sep 17 00:00:00 2001 From: Phylliida Date: Mon, 1 Dec 2025 16:07:44 -0800 Subject: [PATCH 37/39] Added circular pad ext tests --- tests/test-backend-ops.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp index 0a8a3f0255a..ae534608995 100644 --- a/tests/test-backend-ops.cpp +++ b/tests/test-backend-ops.cpp @@ -7741,8 +7741,10 @@ static std::vector> make_test_cases_eval() { test_cases.emplace_back(new test_solve_tri(GGML_TYPE_F32, { 100, 100, 4, 4 }, { 41, 100, 4, 4 })); for (bool v : {false, true}) { - test_cases.emplace_back(new test_pad_ext(GGML_TYPE_F32, {512, 512, 1, 1}, 0, 1, 0, 1, 0, 0, 0, 0, v)); - test_cases.emplace_back(new test_pad_ext(GGML_TYPE_F32, {11, 22, 33, 44}, 1, 2, 3, 4, 5, 6, 7, 8, v)); + for (bool circular : 
{false, true}) { + test_cases.emplace_back(new test_pad_ext(GGML_TYPE_F32, {512, 512, 1, 1}, 0, 1, 0, 1, 0, 0, 0, 0, v, circular)); + test_cases.emplace_back(new test_pad_ext(GGML_TYPE_F32, {11, 22, 33, 44}, 1, 2, 3, 4, 5, 6, 7, 8, v, circular)); + } } for (int hsk : { 40, 64, 72, 80, 96, 128, 192, 256, 576 }) { From 624433d2722c3d4fe9bab5ef3e22a4e398b077f1 Mon Sep 17 00:00:00 2001 From: Phylliida Date: Thu, 4 Dec 2025 13:30:48 -0800 Subject: [PATCH 38/39] Gate non circular pad devices --- ggml/src/ggml-cann/ggml-cann.cpp | 3 ++- ggml/src/ggml-metal/ggml-metal-device.m | 5 +++++ ggml/src/ggml-opencl/ggml-opencl.cpp | 3 +++ ggml/src/ggml-sycl/ggml-sycl.cpp | 3 +++ 4 files changed, 13 insertions(+), 1 deletion(-) diff --git a/ggml/src/ggml-cann/ggml-cann.cpp b/ggml/src/ggml-cann/ggml-cann.cpp index 8995a5c121f..740a718fcf6 100644 --- a/ggml/src/ggml-cann/ggml-cann.cpp +++ b/ggml/src/ggml-cann/ggml-cann.cpp @@ -2554,7 +2554,6 @@ static bool ggml_backend_cann_supports_op(ggml_backend_dev_t dev, const ggml_ten case GGML_OP_ARGSORT: case GGML_OP_ACC: case GGML_OP_GROUP_NORM: - case GGML_OP_PAD: case GGML_OP_ARANGE: case GGML_OP_TIMESTEP_EMBEDDING: case GGML_OP_LEAKY_RELU: @@ -2566,6 +2565,8 @@ static bool ggml_backend_cann_supports_op(ggml_backend_dev_t dev, const ggml_ten case GGML_OP_PAD_REFLECT_1D: case GGML_OP_COUNT_EQUAL: return true; + case GGML_OP_PAD: + return ggml_get_op_params_i32(op, 8) == 0; case GGML_OP_OUT_PROD: { switch (op->src[0]->type) { diff --git a/ggml/src/ggml-metal/ggml-metal-device.m b/ggml/src/ggml-metal/ggml-metal-device.m index 09b1b503118..5ba128e3e04 100644 --- a/ggml/src/ggml-metal/ggml-metal-device.m +++ b/ggml/src/ggml-metal/ggml-metal-device.m @@ -898,6 +898,11 @@ bool ggml_metal_device_supports_op(ggml_metal_device_t dev, const struct ggml_te case GGML_OP_POOL_2D: return op->src[0]->type == GGML_TYPE_F32; case GGML_OP_PAD: + // TODO: add circular padding support https://github.com/ggml-org/llama.cpp/pull/16985 + if (ggml_get_op_params_i32(op, 8) != 0) { + return false; + } + return (ggml_get_op_params_i32(op, 0) == 0) && (ggml_get_op_params_i32(op, 2) == 0) && (ggml_get_op_params_i32(op, 4) == 0) && (ggml_get_op_params_i32(op, 6) == 0); case GGML_OP_PAD_REFLECT_1D: diff --git a/ggml/src/ggml-opencl/ggml-opencl.cpp b/ggml/src/ggml-opencl/ggml-opencl.cpp index 2319f7a9e25..15ee4bbee1f 100644 --- a/ggml/src/ggml-opencl/ggml-opencl.cpp +++ b/ggml/src/ggml-opencl/ggml-opencl.cpp @@ -2997,6 +2997,9 @@ static bool ggml_opencl_supports_op(ggml_backend_dev_t dev, const struct ggml_te case GGML_OP_REPEAT: return op->src[0]->type == GGML_TYPE_F32 && op->type == GGML_TYPE_F32; // Assuming F32 for now, can be expanded case GGML_OP_PAD: + if (ggml_get_op_params_i32(op, 8) != 0) { + return false; + } return op->src[0]->type == GGML_TYPE_F32 && op->type == GGML_TYPE_F32; case GGML_OP_UPSCALE: { ggml_scale_mode mode = (ggml_scale_mode)(ggml_get_op_params_i32(op, 0) & 0xFF); diff --git a/ggml/src/ggml-sycl/ggml-sycl.cpp b/ggml/src/ggml-sycl/ggml-sycl.cpp index 3f1bdfb9f1b..8cf895f21f4 100644 --- a/ggml/src/ggml-sycl/ggml-sycl.cpp +++ b/ggml/src/ggml-sycl/ggml-sycl.cpp @@ -4607,6 +4607,9 @@ static bool ggml_backend_sycl_device_supports_op(ggml_backend_dev_t dev, const g case GGML_OP_ACC: return true; case GGML_OP_PAD: + if (ggml_get_op_params_i32(op, 8) != 0) { + return false; + } return ggml_is_contiguous(op->src[0]); case GGML_OP_LEAKY_RELU: case GGML_OP_TIMESTEP_EMBEDDING: From 8515811c502b0ef64975af1586a996e51200489b Mon Sep 17 00:00:00 2001 From: Phylliida Date: Thu, 4 Dec 
2025 13:38:12 -0800 Subject: [PATCH 39/39] Cleaned gating of non-circular pad devices --- ggml/src/ggml-cann/ggml-cann.cpp | 5 +++-- ggml/src/ggml-metal/ggml-metal-device.m | 2 +- ggml/src/ggml-opencl/ggml-opencl.cpp | 1 + ggml/src/ggml-sycl/ggml-sycl.cpp | 1 + 4 files changed, 6 insertions(+), 3 deletions(-) diff --git a/ggml/src/ggml-cann/ggml-cann.cpp b/ggml/src/ggml-cann/ggml-cann.cpp index 740a718fcf6..45a01371f18 100644 --- a/ggml/src/ggml-cann/ggml-cann.cpp +++ b/ggml/src/ggml-cann/ggml-cann.cpp @@ -2554,6 +2554,9 @@ static bool ggml_backend_cann_supports_op(ggml_backend_dev_t dev, const ggml_ten case GGML_OP_ARGSORT: case GGML_OP_ACC: case GGML_OP_GROUP_NORM: + case GGML_OP_PAD: + // TODO: add circular padding support for cann, see https://github.com/ggml-org/llama.cpp/pull/16985 + return ggml_get_op_params_i32(op, 8) == 0; case GGML_OP_ARANGE: case GGML_OP_TIMESTEP_EMBEDDING: case GGML_OP_LEAKY_RELU: @@ -2565,8 +2568,6 @@ static bool ggml_backend_cann_supports_op(ggml_backend_dev_t dev, const ggml_ten case GGML_OP_PAD_REFLECT_1D: case GGML_OP_COUNT_EQUAL: return true; - case GGML_OP_PAD: - return ggml_get_op_params_i32(op, 8) == 0; case GGML_OP_OUT_PROD: { switch (op->src[0]->type) { diff --git a/ggml/src/ggml-metal/ggml-metal-device.m b/ggml/src/ggml-metal/ggml-metal-device.m index 5ba128e3e04..ff2d295953a 100644 --- a/ggml/src/ggml-metal/ggml-metal-device.m +++ b/ggml/src/ggml-metal/ggml-metal-device.m @@ -898,7 +898,7 @@ bool ggml_metal_device_supports_op(ggml_metal_device_t dev, const struct ggml_te case GGML_OP_POOL_2D: return op->src[0]->type == GGML_TYPE_F32; case GGML_OP_PAD: - // TODO: add circular padding support https://github.com/ggml-org/llama.cpp/pull/16985 + // TODO: add circular padding support for metal, see https://github.com/ggml-org/llama.cpp/pull/16985 if (ggml_get_op_params_i32(op, 8) != 0) { return false; } diff --git a/ggml/src/ggml-opencl/ggml-opencl.cpp b/ggml/src/ggml-opencl/ggml-opencl.cpp index 15ee4bbee1f..ec80857675a 100644 --- a/ggml/src/ggml-opencl/ggml-opencl.cpp +++ b/ggml/src/ggml-opencl/ggml-opencl.cpp @@ -2997,6 +2997,7 @@ static bool ggml_opencl_supports_op(ggml_backend_dev_t dev, const struct ggml_te case GGML_OP_REPEAT: return op->src[0]->type == GGML_TYPE_F32 && op->type == GGML_TYPE_F32; // Assuming F32 for now, can be expanded case GGML_OP_PAD: + // TODO: add circular padding support for opencl, see https://github.com/ggml-org/llama.cpp/pull/16985 if (ggml_get_op_params_i32(op, 8) != 0) { return false; } diff --git a/ggml/src/ggml-sycl/ggml-sycl.cpp b/ggml/src/ggml-sycl/ggml-sycl.cpp index 8cf895f21f4..44edc3fd1a2 100644 --- a/ggml/src/ggml-sycl/ggml-sycl.cpp +++ b/ggml/src/ggml-sycl/ggml-sycl.cpp @@ -4607,6 +4607,7 @@ static bool ggml_backend_sycl_device_supports_op(ggml_backend_dev_t dev, const g case GGML_OP_ACC: return true; case GGML_OP_PAD: + // TODO: add circular padding support for sycl, see https://github.com/ggml-org/llama.cpp/pull/16985 if (ggml_get_op_params_i32(op, 8) != 0) { return false; }
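
Usage sketch (illustrative, not part of the patches above): the circular pad added by this series is driven like any other ggml op. The snippet below only builds a graph with ggml_pad_ext_circular as declared in PATCH 36; executing it is left to a backend whose supports_op accepts op_params[8] != 0 (CPU, CUDA and Vulkan in this series). The main() wrapper, context size, tensor shape and pad amounts are arbitrary example values, not taken from the patches.

#include "ggml.h"

// Sketch: circular-pad a 4x3 F32 tensor by one element on each side of
// dim0 and dim1. With circular padding the new border cells wrap around
// to the opposite side of the source instead of being filled with zeros.
int main(void) {
    struct ggml_init_params ip = {
        /*.mem_size   =*/ 16*1024*1024,   // example size, plenty for this graph
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ false,
    };
    struct ggml_context * ctx = ggml_init(ip);

    struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4, 3);

    // lp0/rp0 pad dim0, lp1/rp1 pad dim1; dims 2 and 3 stay unpadded.
    struct ggml_tensor * padded = ggml_pad_ext_circular(ctx, a,
            /*lp0=*/1, /*rp0=*/1, /*lp1=*/1, /*rp1=*/1,
            /*lp2=*/0, /*rp2=*/0, /*lp3=*/0, /*rp3=*/0);

    // padded->ne is {6, 5, 1, 1}; element (i0, i1) of padded reads element
    // ((i0 - 1 + 4) % 4, (i1 - 1 + 3) % 3) of a, matching wrap_around() above.
    struct ggml_cgraph * gf = ggml_new_graph(ctx);
    ggml_build_forward_expand(gf, padded);

    // ... hand gf to ggml-cpu or a backend scheduler that supports the circular flag ...

    ggml_free(ctx);
    return 0;
}

ggml_pad_circular from PATCH 35 is the shorthand taking one pad amount per dimension; both calls are expected to lower to a GGML_OP_PAD node carrying the circular flag in op_params[8], which is what the CPU kernel reads and what the supports_op gating added in PATCHes 38 and 39 checks.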