ggml : add ggml_scale_bias (llama/14417)

* ggml : add ggml_scale_bias
* ggml_vec_mad1_f32
* add more simd
* add CUDA
* sycl
* vulkan
* cann (placeholder)
* opencl
* will this fix cpu?
* fix cuda
* suggestions from coderabbit
* fix cann compile error
* vDSP_vsmsa
* rm __ARM_FEATURE_SVE
* use memcpy for op params
* make code looks more consistent
* use scalar for __ARM_FEATURE_SVE
* add x param to ggml_vec_mad1_f32
- ggml/include/ggml.h +13 -0
- ggml/src/ggml-cann/ggml-cann.cpp +4 -1
- ggml/src/ggml-cpu/ops.cpp +20 -8
- ggml/src/ggml-cpu/vec.h +39 -0
- ggml/src/ggml-cuda/scale.cu +8 -6
- ggml/src/ggml-metal/ggml-metal.m +4 -1
- ggml/src/ggml-metal/ggml-metal.metal +4 -2
- ggml/src/ggml-opencl/ggml-opencl.cpp +4 -1
- ggml/src/ggml-opencl/kernels/scale.cl +3 -2
- ggml/src/ggml-sycl/ggml-sycl.cpp +8 -6
- ggml/src/ggml-vulkan/ggml-vulkan.cpp +1 -1
- ggml/src/ggml-vulkan/vulkan-shaders/scale.comp +1 -1
- ggml/src/ggml.c +23 -5

ggml/include/ggml.h
@@ -1294,6 +1294,19 @@ extern "C" {
             struct ggml_tensor  * a,
             float                 s);
 
+    // x = s * a + b
+    GGML_API struct ggml_tensor * ggml_scale_bias(
+            struct ggml_context * ctx,
+            struct ggml_tensor  * a,
+            float                 s,
+            float                 b);
+
+    GGML_API struct ggml_tensor * ggml_scale_bias_inplace(
+            struct ggml_context * ctx,
+            struct ggml_tensor  * a,
+            float                 s,
+            float                 b);
+
     // b -> view(a,offset,nb1,nb2,3), return modified a
     GGML_API struct ggml_tensor * ggml_set(
             struct ggml_context * ctx,
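
The semantics are exactly what the header comment states: ggml_scale_bias(ctx, a, s, b) records a single GGML_OP_SCALE node computing x = s*a + b. A minimal usage sketch follows, assuming the single-context CPU compute path; the ggml-cpu.h header, the scratch size, and the thread count are illustrative assumptions, not part of this commit:

#include <stdio.h>
#include "ggml.h"
#include "ggml-cpu.h" // assumption: CPU compute entry points live here in recent trees

int main(void) {
    struct ggml_init_params ip = {
        /*.mem_size   =*/ 16*1024*1024, // illustrative scratch size
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ false,
    };
    struct ggml_context * ctx = ggml_init(ip);

    struct ggml_tensor * a = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4);
    for (int i = 0; i < 4; ++i) {
        ((float *) a->data)[i] = (float) i; // a = [0, 1, 2, 3]
    }

    // x = 2*a + 1, fused into one op instead of a scale followed by an add
    struct ggml_tensor * x = ggml_scale_bias(ctx, a, 2.0f, 1.0f);

    struct ggml_cgraph * gf = ggml_new_graph(ctx);
    ggml_build_forward_expand(gf, x);
    ggml_graph_compute_with_ctx(ctx, gf, /*n_threads=*/1);

    for (int i = 0; i < 4; ++i) {
        printf("%g ", ((float *) x->data)[i]); // expect: 1 3 5 7
    }
    printf("\n");

    ggml_free(ctx);
    return 0;
}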

ggml/src/ggml-cann/ggml-cann.cpp
@@ -2188,7 +2188,6 @@ static bool ggml_backend_cann_supports_op(ggml_backend_dev_t dev,
         case GGML_OP_MUL:
         case GGML_OP_DIV:
         case GGML_OP_RMS_NORM:
-        case GGML_OP_SCALE:
         case GGML_OP_SQR:
         case GGML_OP_SQRT:
         case GGML_OP_CLAMP:
@@ -2210,6 +2209,10 @@ static bool ggml_backend_cann_supports_op(ggml_backend_dev_t dev,
         case GGML_OP_PAD_REFLECT_1D:
         case GGML_OP_COUNT_EQUAL:
             return true;
+        case GGML_OP_SCALE:
+            float bias;
+            memcpy(&bias, (float*)op->op_params + 1, sizeof(float));
+            return bias == 0.0f; // TODO: support bias != 0.0f
         case GGML_OP_SOFT_MAX:
             // TODO: support broadcast
             // ref: https://github.com/ggml-org/llama.cpp/pull/14435
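
This is the "cann (placeholder)" item from the commit list: GGML_OP_SCALE moves out of the unconditionally supported group, and the CANN backend now claims the op only when the stored bias is zero, so graphs that use a non-zero bias fall back to another backend until real support is added.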

ggml/src/ggml-cpu/ops.cpp
@@ -4643,9 +4643,11 @@ static void ggml_compute_forward_scale_f32(
     GGML_ASSERT(ggml_is_contiguous(dst));
     GGML_ASSERT(ggml_are_same_shape(src0, dst));
 
-    // scale factor
-    float v;
-    memcpy(&v, dst->op_params, sizeof(float));
+    float s; // scale factor
+    float b; // bias
+
+    memcpy(&s, (float *) dst->op_params + 0, sizeof(float));
+    memcpy(&b, (float *) dst->op_params + 1, sizeof(float));
 
     const int ith = params->ith;
     const int nth = params->nth;
@@ -4664,12 +4666,22 @@ static void ggml_compute_forward_scale_f32(
 
     const size_t nb1 = dst->nb[1];
 
-    for (int i1 = ir0; i1 < ir1; i1++) {
-        if (dst->data != src0->data) {
-            // src0 is same shape as dst => same indices
-            memcpy((char *)dst->data + i1*nb1, (char *)src0->data + i1*nb01, nc * sizeof(float));
+    if (b == 0.0f) {
+        for (int i1 = ir0; i1 < ir1; i1++) {
+            if (dst->data != src0->data) {
+                // src0 is same shape as dst => same indices
+                // TODO: add x parameter to ggml_vec_scale_f32 and remove this memcpy
+                memcpy((char *)dst->data + i1*nb1, (char *)src0->data + i1*nb01, nc * sizeof(float));
+            }
+            ggml_vec_scale_f32(nc, (float *) ((char *) dst->data + i1*nb1), s);
+        }
+    } else {
+        for (int i1 = ir0; i1 < ir1; i1++) {
+            ggml_vec_mad1_f32(nc,
+                    (float *) ((char *) dst->data + i1*nb1),
+                    (float *) ((char *) src0->data + i1*nb1),
+                    s, b);
         }
-        ggml_vec_scale_f32(nc, (float *) ((char *) dst->data + i1*nb1), v);
     }
 }
 
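
Both ends of this change rely on the same op_params convention: the graph builder packs { s, b } into the tensor's op_params blob (see the ggml.c hunk below), and every backend unpacks each float with memcpy rather than a pointer dereference, because op_params is int32_t-typed storage and type-punning through it would be undefined behavior. A self-contained sketch of that round trip; MAX_OP_PARAMS is an illustrative stand-in for ggml's own constant:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define MAX_OP_PARAMS 16 // illustrative stand-in

int main(void) {
    // stand-in for the tensor's int32_t-typed op_params storage
    int32_t op_params[MAX_OP_PARAMS] = {0};

    // write side (ggml_scale_impl): pack { scale, bias } bytewise
    const float params[2] = { 2.0f, 1.0f };
    memcpy(op_params, params, sizeof(params));

    // read side (CPU/CUDA/SYCL/... backends): unpack each float bytewise
    float s, b;
    memcpy(&s, (const float *) op_params + 0, sizeof(float));
    memcpy(&b, (const float *) op_params + 1, sizeof(float));

    printf("s = %g, b = %g\n", s, b); // prints: s = 2, b = 1
    return 0;
}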

ggml/src/ggml-cpu/vec.h
@@ -351,6 +351,45 @@ inline static void ggml_vec_mad_f32_unroll(const int n, const int xs, const int
 #endif
 }
 
+inline static void ggml_vec_mad1_f32(const int n, float * y, const float * x, const float s, const float b) {
+#if defined(GGML_USE_ACCELERATE)
+    vDSP_vsmsa(x, 1, &s, &b, y, 1, n);
+#elif defined(GGML_SIMD)
+    #if defined(__ARM_FEATURE_SVE)
+        // scalar ; TODO: Write SVE code
+        for (int i = 0; i < n; ++i) {
+            y[i] = x[i]*s + b;
+        }
+    #else
+        const int np = (n & ~(GGML_F32_STEP - 1));
+
+        GGML_F32_VEC vs = GGML_F32_VEC_SET1(s);
+        GGML_F32_VEC vb = GGML_F32_VEC_SET1(b);
+
+        GGML_F32_VEC ay[GGML_F32_ARR];
+
+        for (int i = 0; i < np; i += GGML_F32_STEP) {
+            for (int j = 0; j < GGML_F32_ARR; j++) {
+                ay[j] = GGML_F32_VEC_LOAD(x + i + j*GGML_F32_EPR);
+                ay[j] = GGML_F32_VEC_FMA(ay[j], vs, vb);
+
+                GGML_F32_VEC_STORE(y + i + j*GGML_F32_EPR, ay[j]);
+            }
+        }
+
+        // leftovers
+        for (int i = np; i < n; ++i) {
+            y[i] = x[i]*s + b;
+        }
+    #endif
+#else
+    // scalar
+    for (int i = 0; i < n; ++i) {
+        y[i] = x[i]*s + b;
+    }
+#endif
+}
+
 //inline static void ggml_vec_scale_f32(const int n, float * y, const float v) { for (int i = 0; i < n; ++i) y[i] *= v; }
 inline static void ggml_vec_scale_f32(const int n, float * y, const float v) {
 #if defined(GGML_USE_ACCELERATE)
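
The GGML_SIMD path uses the standard ggml blocking pattern: np = n & ~(GGML_F32_STEP - 1) rounds n down to a whole number of vector steps, the main loop covers [0, np) with vector loads, FMAs, and stores, and a scalar loop finishes the tail. A plain-C model of that split, with scalar arithmetic standing in for the intrinsics and illustrative values for the STEP/EPR macros (the real values come from ggml's SIMD macros and depend on the target ISA):

#include <assert.h>

#define EPR  4            // illustrative: elements per vector register
#define ARR  4            // illustrative: registers processed per step
#define STEP (ARR * EPR)  // elements consumed per outer iteration

static void vec_mad1_model(const int n, float * y, const float * x, const float s, const float b) {
    const int np = n & ~(STEP - 1); // largest multiple of STEP <= n

    // "vectorized" main loop: whole steps of STEP elements
    for (int i = 0; i < np; i += STEP) {
        for (int j = 0; j < STEP; ++j) {
            y[i + j] = x[i + j]*s + b;
        }
    }

    // scalar leftovers: the final n - np elements
    for (int i = np; i < n; ++i) {
        y[i] = x[i]*s + b;
    }
}

int main(void) {
    float x[37], y[37];
    for (int i = 0; i < 37; ++i) x[i] = (float) i;
    vec_mad1_model(37, y, x, 2.0f, 1.0f); // 32 elements in blocks, 5 leftovers
    for (int i = 0; i < 37; ++i) assert(y[i] == 2.0f*(float) i + 1.0f);
    return 0;
}

On Apple platforms the whole function collapses into a single vDSP_vsmsa call, whose contract (D[i] = A[i]*B + C, with scalar B and C) matches y[i] = x[i]*s + b exactly.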

ggml/src/ggml-cuda/scale.cu
@@ -1,18 +1,18 @@
 #include "scale.cuh"
 
-static __global__ void scale_f32(const float * x, float * dst, const float scale, const int k) {
+static __global__ void scale_f32(const float * x, float * dst, const float scale, const float bias, const int k) {
     const int i = blockDim.x*blockIdx.x + threadIdx.x;
 
     if (i >= k) {
         return;
     }
 
-    dst[i] = scale * x[i];
+    dst[i] = scale * x[i] + bias;
 }
 
-static void scale_f32_cuda(const float * x, float * dst, const float scale, const int k, cudaStream_t stream) {
+static void scale_f32_cuda(const float * x, float * dst, const float scale, const float bias, const int k, cudaStream_t stream) {
     const int num_blocks = (k + CUDA_SCALE_BLOCK_SIZE - 1) / CUDA_SCALE_BLOCK_SIZE;
-    scale_f32<<<num_blocks, CUDA_SCALE_BLOCK_SIZE, 0, stream>>>(x, dst, scale, k);
+    scale_f32<<<num_blocks, CUDA_SCALE_BLOCK_SIZE, 0, stream>>>(x, dst, scale, bias, k);
 }
 
 void ggml_cuda_op_scale(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
@@ -25,7 +25,9 @@ void ggml_cuda_op_scale(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     GGML_ASSERT( dst->type == GGML_TYPE_F32);
 
     float scale;
-    memcpy(&scale, dst->op_params, sizeof(float));
+    float bias;
+    memcpy(&scale, (float *) dst->op_params + 0, sizeof(float));
+    memcpy(&bias, (float *) dst->op_params + 1, sizeof(float));
 
-    scale_f32_cuda(src0_d, dst_d, scale, ggml_nelements(src0), stream);
+    scale_f32_cuda(src0_d, dst_d, scale, bias, ggml_nelements(src0), stream);
 }
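
The host wrapper keeps the usual ceil-division launch, (k + CUDA_SCALE_BLOCK_SIZE - 1) / CUDA_SCALE_BLOCK_SIZE blocks, with the early-return bounds check inside the kernel covering the final partial block; only the extra bias argument is threaded through.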

ggml/src/ggml-metal/ggml-metal.m
@@ -2256,7 +2256,9 @@ static bool ggml_metal_encode_node(
                 GGML_ASSERT(ggml_is_contiguous(src0));
 
                 float scale;
-                memcpy(&scale, dst->op_params, sizeof(scale));
+                float bias;
+                memcpy(&scale, ((const int32_t *) dst->op_params) + 0, sizeof(float));
+                memcpy(&bias, ((const int32_t *) dst->op_params) + 1, sizeof(float));
 
                 int64_t n = ggml_nelements(dst);
 
@@ -2273,6 +2275,7 @@ static bool ggml_metal_encode_node(
                 [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                 [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
                 [encoder setBytes:&scale length:sizeof(scale) atIndex:2];
+                [encoder setBytes:&bias length:sizeof(bias) atIndex:3];
 
                 [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
             } break;

ggml/src/ggml-metal/ggml-metal.metal
@@ -1014,16 +1014,18 @@ kernel void kernel_scale(
         device const float * src0,
         device       float * dst,
         constant     float & scale,
+        constant     float & bias,
         uint tpig[[thread_position_in_grid]]) {
-    dst[tpig] = src0[tpig] * scale;
+    dst[tpig] = src0[tpig] * scale + bias;
 }
 
 kernel void kernel_scale_4(
         device const float4 * src0,
         device       float4 * dst,
         constant     float  & scale,
+        constant     float  & bias,
         uint tpig[[thread_position_in_grid]]) {
-    dst[tpig] = src0[tpig] * scale;
+    dst[tpig] = src0[tpig] * scale + bias;
 }
 
 kernel void kernel_clamp(
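
Both Metal kernels receive the bias through a new argument slot (setBytes at index 3 on the host side); in kernel_scale_4 the scalar scale and bias broadcast across the four float4 lanes, so the expression is identical in both variants.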

ggml/src/ggml-opencl/ggml-opencl.cpp
@@ -5587,7 +5587,9 @@ static void ggml_cl_scale(ggml_backend_t backend, const ggml_tensor * src0, cons
     ggml_backend_opencl_context *backend_ctx = (ggml_backend_opencl_context *)backend->context;
 
     float scale;
-    memcpy(&scale, dst->op_params, sizeof(scale));
+    float bias;
+    memcpy(&scale, ((int32_t *) dst->op_params) + 0, sizeof(float));
+    memcpy(&bias, ((int32_t *) dst->op_params) + 1, sizeof(float));
 
     ggml_tensor_extra_cl * extra0 = (ggml_tensor_extra_cl *)src0->extra;
     ggml_tensor_extra_cl * extrad = (ggml_tensor_extra_cl *)dst->extra;
@@ -5602,6 +5604,7 @@ static void ggml_cl_scale(ggml_backend_t backend, const ggml_tensor * src0, cons
     CL_CHECK(clSetKernelArg(kernel, 2, sizeof(cl_mem), &extrad->data_device));
     CL_CHECK(clSetKernelArg(kernel, 3, sizeof(cl_ulong), &offsetd));
     CL_CHECK(clSetKernelArg(kernel, 4, sizeof(float), &scale));
+    CL_CHECK(clSetKernelArg(kernel, 5, sizeof(float), &bias));
 
     int n = ggml_nelements(dst)/4;
 

ggml/src/ggml-opencl/kernels/scale.cl
@@ -8,9 +8,10 @@ kernel void kernel_scale(
         ulong offset0,
         global float4 * dst,
         ulong offsetd,
-        float scale
+        float scale,
+        float bias
 ) {
     src0 = (global float4*)((global char*)src0 + offset0);
     dst = (global float4*)((global char*)dst + offsetd);
-    dst[get_global_id(0)] = src0[get_global_id(0)] * scale;
+    dst[get_global_id(0)] = src0[get_global_id(0)] * scale + bias;
 }

ggml/src/ggml-sycl/ggml-sycl.cpp
@@ -1695,7 +1695,7 @@ static void diag_mask_inf_f32(const float * x, float * dst, const int ncols, con
     dst[i] = x[i] - (col > n_past + row % rows_per_channel) * FLT_MAX;
 }
 
-static void scale_f32(const float * x, float * dst, const float scale, const int k,
+static void scale_f32(const float * x, float * dst, const float scale, const float bias, const int k,
                       const sycl::nd_item<3> &item_ct1) {
     const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
                   item_ct1.get_local_id(2);
@@ -1704,7 +1704,7 @@ static void scale_f32(const float * x, float * dst, const float scale, const int
         return;
     }
 
-    dst[i] = scale * x[i];
+    dst[i] = scale * x[i] + bias;
 }
 
 
@@ -1842,7 +1842,7 @@ static void ggml_mul_mat_vec_nc_f16_f32_sycl(
 
 
 
-static void scale_f32_sycl(const float *x, float *dst, const float scale,
+static void scale_f32_sycl(const float *x, float *dst, const float scale, const float bias,
                            const int k, queue_ptr stream) {
     const int num_blocks = (k + SYCL_SCALE_BLOCK_SIZE - 1) / SYCL_SCALE_BLOCK_SIZE;
     stream->parallel_for(
@@ -1850,7 +1850,7 @@ static void scale_f32_sycl(const float *x, float *dst, const float scale,
             sycl::range<3>(1, 1, SYCL_SCALE_BLOCK_SIZE),
             sycl::range<3>(1, 1, SYCL_SCALE_BLOCK_SIZE)),
         [=](sycl::nd_item<3> item_ct1) {
-            scale_f32(x, dst, scale, k, item_ct1);
+            scale_f32(x, dst, scale, bias, k, item_ct1);
         });
 }
 
@@ -2319,9 +2319,11 @@ inline void ggml_sycl_op_scale(ggml_backend_sycl_context & ctx, ggml_tensor * ds
     float * dst_dd = static_cast<float *>(dst->data);
 
     float scale;
-    memcpy(&scale, dst->op_params, sizeof(float));
+    float bias;
+    memcpy(&scale, (float *) dst->op_params + 0, sizeof(float));
+    memcpy(&bias, (float *) dst->op_params + 1, sizeof(float));
 
-    scale_f32_sycl(src0_dd, dst_dd, scale, ggml_nelements(dst->src[0]), main_stream);
+    scale_f32_sycl(src0_dd, dst_dd, scale, bias, ggml_nelements(dst->src[0]), main_stream);
     /*
     DPCT1010:87: SYCL uses exceptions to report errors and does not use the
     error codes. The call was replaced with 0. You need to rewrite this code.

ggml/src/ggml-vulkan/ggml-vulkan.cpp
@@ -7508,7 +7508,7 @@ static void ggml_vk_scale(ggml_backend_vk_context * ctx, vk_context& subctx, con
         (uint32_t)src0->ne[0], (uint32_t)src0->ne[1], (uint32_t)src0->ne[2], (uint32_t)src0->ne[3], (uint32_t)src0->nb[0] / src0_type_size, (uint32_t)src0->nb[1] / src0_type_size, (uint32_t)src0->nb[2] / src0_type_size, (uint32_t)src0->nb[3] / src0_type_size,
         (uint32_t) dst->ne[0], (uint32_t) dst->ne[1], (uint32_t) dst->ne[2], (uint32_t) dst->ne[3], (uint32_t) dst->nb[0] / dst_type_size, (uint32_t) dst->nb[1] / dst_type_size, (uint32_t) dst->nb[2] / dst_type_size, (uint32_t) dst->nb[3] / dst_type_size,
         0,
-        op_params[0],
+        op_params[0], op_params[1],
         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
     }, dryrun);
 }
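
op_params[0] and op_params[1] travel as the generic param1/param2 push constants, which the shader below reads back as p.param1 (the scale) and p.param2 (the bias).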

ggml/src/ggml-vulkan/vulkan-shaders/scale.comp
@@ -18,7 +18,7 @@ void main() {
            continue;
        }
 
-        data_d[get_doffset() + idx] = D_TYPE(FLOAT_TYPE(data_a[get_aoffset() + idx]) * FLOAT_TYPE(p.param1));
+        data_d[get_doffset() + idx] = D_TYPE(FLOAT_TYPE(data_a[get_aoffset() + idx]) * FLOAT_TYPE(p.param1) + FLOAT_TYPE(p.param2));
         idx += num_threads;
     }
 }

ggml/src/ggml.c
@@ -3061,12 +3061,14 @@ static struct ggml_tensor * ggml_scale_impl(
         struct ggml_context * ctx,
         struct ggml_tensor * a,
         float s,
+        float b,
         bool inplace) {
     GGML_ASSERT(ggml_is_padded_1d(a));
 
     struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
 
-    ggml_set_op_params(result, &s, sizeof(s));
+    float params[2] = { s, b };
+    ggml_set_op_params(result, &params, sizeof(params));
 
     result->op     = GGML_OP_SCALE;
     result->src[0] = a;
@@ -3078,14 +3080,30 @@ struct ggml_tensor * ggml_scale(
         struct ggml_context * ctx,
         struct ggml_tensor * a,
         float s) {
-    return ggml_scale_impl(ctx, a, s, false);
+    return ggml_scale_impl(ctx, a, s, 0.0, false);
 }
 
 struct ggml_tensor * ggml_scale_inplace(
         struct ggml_context * ctx,
         struct ggml_tensor * a,
         float s) {
-    return ggml_scale_impl(ctx, a, s, true);
+    return ggml_scale_impl(ctx, a, s, 0.0, true);
+}
+
+struct ggml_tensor * ggml_scale_bias(
+        struct ggml_context * ctx,
+        struct ggml_tensor * a,
+        float s,
+        float b) {
+    return ggml_scale_impl(ctx, a, s, b, false);
+}
+
+struct ggml_tensor * ggml_scale_bias_inplace(
+        struct ggml_context * ctx,
+        struct ggml_tensor * a,
+        float s,
+        float b) {
+    return ggml_scale_impl(ctx, a, s, b, true);
 }
 
 // ggml_set
@@ -5769,7 +5787,7 @@ static void ggml_compute_backward(
         } break;
         case GGML_OP_MEAN: {
             if (src0_needs_grads) {
-                ggml_add1_or_set(ctx, cgraph, isrc0, ggml_scale_impl(ctx, grad, 1.0f/src0->ne[0], false));
+                ggml_add1_or_set(ctx, cgraph, isrc0, ggml_scale_impl(ctx, grad, 1.0f/src0->ne[0], 0.0, false));
             }
         } break;
        case GGML_OP_REPEAT: {
@@ -5846,7 +5864,7 @@ static void ggml_compute_backward(
             if (src0_needs_grads) {
                 float s;
                 memcpy(&s, tensor->op_params, sizeof(float));
-                ggml_add_or_set(ctx, cgraph, isrc0, ggml_scale_impl(ctx, grad, s, false));
+                ggml_add_or_set(ctx, cgraph, isrc0, ggml_scale_impl(ctx, grad, s, 0.0, false));
             }
         } break;
         case GGML_OP_SET: {