Skip to content

Commit

Permalink
q8 mat*vec
Browse files Browse the repository at this point in the history
  • Loading branch information
apage43 authored and cebtenzzre committed Oct 5, 2023
1 parent 4905cd0 commit f9d41c7
Show file tree
Hide file tree
Showing 3 changed files with 107 additions and 0 deletions.
2 changes: 2 additions & 0 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -487,6 +487,7 @@ if (LLAMA_KOMPUTE)
kompute/op_rmsnorm.comp
kompute/op_diagmask.comp
kompute/op_mul_mat_f16.comp
kompute/op_mul_mat_q8_0.comp
kompute/op_mul_mat_q4_0.comp
kompute/op_mul_mat_q4_1.comp
kompute/op_mul_mat_q6_k.comp
Expand Down Expand Up @@ -516,6 +517,7 @@ if (LLAMA_KOMPUTE)
shaderop_rmsnorm.h
shaderop_diagmask.h
shaderop_mul_mat_f16.h
shaderop_mul_mat_q8_0.h
shaderop_mul_mat_q4_0.h
shaderop_mul_mat_q4_1.h
shaderop_mul_mat_q6_k.h
Expand Down
41 changes: 41 additions & 0 deletions ggml-vulkan.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@
#include "shaderop_rmsnorm.h"
#include "shaderop_diagmask.h"
#include "shaderop_mul_mat_f16.h"
#include "shaderop_mul_mat_q8_0.h"
#include "shaderop_mul_mat_q4_0.h"
#include "shaderop_mul_mat_q4_1.h"
#include "shaderop_mul_mat_q6_k.h"
Expand Down Expand Up @@ -918,6 +919,43 @@ void ggml_vk_mul_mat_f16(kp::Sequence& seq,
seq.record<kp::OpAlgoDispatch>(s_algo);
}

/**
 * Record a Q8_0 mat*vec dispatch: out[r1,r0] = dot(row r0 of quantized inA, row r1 of float inB).
 *
 * One workgroup is launched per (ne01 x ne11 x ne12) output element; the shader
 * reduces across the subgroup, so local_size_x is set to the device subgroup size.
 *
 * inAOff/inBOff/outOff are byte offsets into the backing tensors.
 * ne* are ggml dimension counts, nb* are ggml byte strides.
 */
void ggml_vk_mul_mat_q8_0(kp::Sequence& seq,
                          const std::shared_ptr<kp::Tensor>& inA,
                          const std::shared_ptr<kp::Tensor>& inB,
                          const std::shared_ptr<kp::Tensor>& out,
                          uint32_t inAOff, uint32_t inBOff, uint32_t outOff,
                          int32_t ne00, int32_t ne01,
                          uint32_t nb01, uint32_t nb02,
                          int32_t ne11, int32_t ne12,
                          uint32_t nb11, uint32_t nb12,
                          int32_t ne0, int32_t ne1) {
    const static auto spirv = getSpirvShader(kp::shader_data::op_mul_mat_q8_0_comp_spv,
                                             kp::shader_data::op_mul_mat_q8_0_comp_spv_len);

    // This struct must match the push_constant block declared in
    // kompute/op_mul_mat_q8_0.comp field-for-field and in order:
    //   uint inAOff, inBOff, outOff; int ne00, ne10, ne0, ne1, ne01, gqa;
    // (The previous layout pushed the f16 path's byte strides nb01/nb02/nb11/nb12
    // here, which the shader would have misread as ne10/ne0/ne1/ne01.)
    struct PushConstants {
        uint32_t inAOff, inBOff, outOff;
        int32_t ne00;
        int32_t ne10;
        int32_t ne0, ne1;
        int32_t ne01;
        int32_t gqa;
    } pushConsts {
        // The shader binds inA as a uint8_t[] buffer, so its offset stays in
        // bytes; inB and out are float buffers, so those offsets are in
        // 4-byte elements (the f16 path's divide-by-2 does not apply here).
        inAOff, safe_divide(inBOff, 4), safe_divide(outOff, 4),
        // For ggml mul_mat the shared dimension means ne10 == ne00; the shader
        // uses ne10 as the row stride (in floats) of the contiguous inB.
        ne00, ne00, ne0, ne1,
        // ne01 and gqa are declared but not read by the current shader; they
        // are supplied only to keep the push-constant block size in sync.
        ne01, 1,
    };
    // Byte strides are not consumed by the current shader (it assumes densely
    // packed Q8_0 rows); parameters are kept for interface stability.
    (void)nb01; (void)nb02; (void)nb11; (void)nb12;

    std::shared_ptr<kp::Algorithm> s_algo = nullptr;
    if (!komputeManager()->hasAlgorithm(__func__)) {
        // One subgroup per workgroup: each lane handles a strided slice of the
        // ne00 columns and the shader reduces with subgroupAdd.
        const uint32_t local_x = ggml_vk_current_device().subgroupSize;
        s_algo = komputeManager()->algorithm<uint32_t, PushConstants>(__func__, s_kompute_context->pool.get(), {inA, inB, out}, spirv, {unsigned(ne01), unsigned(ne11), unsigned(ne12)}, {local_x}, {pushConsts});
    } else {
        // Reuse the cached pipeline; refresh tensors, grid, and constants.
        s_algo = komputeManager()->getAlgorithm(__func__);
        s_algo->setTensors({inA, inB, out});
        s_algo->setWorkgroup({unsigned(ne01), unsigned(ne11), unsigned(ne12)});
        s_algo->setPushConstants<PushConstants>({pushConsts});
        s_algo->updateDescriptors(s_kompute_context->pool.get());
    }
    seq.record<kp::OpAlgoDispatch>(s_algo);
}

void ggml_vk_mul_mat_q4_x(const std::vector<uint32_t>& spirv, uint32_t block_size, kp::Sequence& seq,
const std::shared_ptr<kp::Tensor>& inA,
const std::shared_ptr<kp::Tensor>& inB,
Expand Down Expand Up @@ -1335,6 +1373,9 @@ void ggml_vk_graph_compute(struct ggml_kompute_context * ctx, struct ggml_cgraph
case GGML_TYPE_F32:
ggml_vk_mul_mat_f16(seq, id_src0, id_src1, id_dst, off_src0, off_src1, off_dst, ne00, ne01, ne02, nb01, nb02, ne11, ne12, nb11, nb12, ne0, ne1);
break;
case GGML_TYPE_Q8_0:
ggml_vk_mul_mat_q8_0(seq, id_src0, id_src1, id_dst, off_src0, off_src1, off_dst, ne00, ne01, nb01, nb02, ne11, ne12, nb11, nb12, ne0, ne1);
break;
case GGML_TYPE_Q4_0:
ggml_vk_mul_mat_q4_0(seq, id_src0, id_src1, id_dst, off_src0, off_src1, off_dst, ne00, ne10, ne0, ne1, ne01, ne11, ne12, ne02);
break;
Expand Down
64 changes: 64 additions & 0 deletions kompute/op_mul_mat_q8_0.comp
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
/**
 * Copyright (c) 2023 Nomic, Inc. All rights reserved.
 *
 * This software is licensed under the terms of the Software for Open Models License (SOM),
 * version 1.0, as detailed in the LICENSE_SOM.txt file. A copy of this license should accompany
 * this software. Except as expressly granted in the SOM license, all rights are reserved by Nomic, Inc.
 */

// Q8_0 matrix * vector product.
// Each workgroup (r0, r1, im) computes one output element: the dot product of
// row r0 of the quantized matrix inA with row r1 of the float matrix inB.
// Each subgroup lane accumulates a strided subset of the ne00 columns and the
// partial sums are combined with subgroupAdd.

#version 450

#include "common.comp"

#define BLOCKS_IN_QUANT QK8_0
#define SIZE_OF_BLOCK sizeof_block_q8_0
#define N_ROWS 4

// local_size_x is a specialization constant: the host sets it to the device
// subgroup size so one subgroup spans the whole workgroup.
layout(local_size_x_id = 0) in;
layout(local_size_y = 1) in;
layout(local_size_z = 1) in;

// inA is raw bytes (Q8_0 blocks); inB and out_ are plain float buffers.
layout (binding = 0) readonly buffer tensorInA { uint8_t inA[]; };
layout (binding = 1) readonly buffer tensorInB { float inB[]; };
layout (binding = 2) writeonly buffer tensorOut { float out_[]; };

// NOTE(review): verify the host-side PushConstants struct matches this block
// field-for-field and in order — any mismatch silently misindexes the buffers.
// ne01 and gqa are declared here but never read below.
layout (push_constant) uniform parameter {
    uint inAOff;
    uint inBOff;
    uint outOff;
    int ne00;
    int ne10;
    int ne0;
    int ne1;
    int ne01;
    int gqa;
} pcs;

// Q8_0 block layout as walked below: a 2-byte scale d followed by 32 int8
// quants, i.e. 34 bytes per 32 elements.
#define ELS_PER_BLOCK 32
#define SIZE_OF_D 2
#define BLOCK_SIZE (ELS_PER_BLOCK + SIZE_OF_D)

void main() {
    const uint r0 = gl_WorkGroupID.x;   // row of inA (output column)
    const uint r1 = gl_WorkGroupID.y;   // row of inB (output row)
    const uint im = gl_WorkGroupID.z;   // batch index, used only for the output

    // Start of row r0 in inA (bytes): ne00/32 blocks of 34 bytes each.
    // NOTE(review): neither x nor y depends on im, so inputs are re-read from
    // batch 0 for every z — confirm callers only dispatch with ne12 == 1.
    const uint x = r0 * (pcs.ne00/ELS_PER_BLOCK) * BLOCK_SIZE + pcs.inAOff; // Based from inA
    const uint y = r1 * pcs.ne10 + pcs.inBOff; // based from inB

    float sumf = 0.0f;
    // Lanes stride across the row; each element is dequantized as d * q.
    for (uint i = gl_SubgroupInvocationID.x; i < pcs.ne00; i += gl_SubgroupSize) {
        const uint block_number = i / ELS_PER_BLOCK;
        const uint block_offset = block_number * BLOCK_SIZE;
        // fp16 scale stored in the first 2 bytes of the block
        const float d = u8BufToFloat16(inA, x + block_offset);
        const uint position_in_block = i % ELS_PER_BLOCK;
        // signed 8-bit quant following the scale
        const int q = int8_t(inA[x+block_offset+SIZE_OF_D+position_in_block]);
        const float dq = d * q;
        sumf += dq * float(inB[y+i]);
    }

    // Reduce lane partials; a single elected lane writes the result.
    const float all_sum = subgroupAdd(sumf);
    if (subgroupElect()) {
        out_[im*pcs.ne1*pcs.ne0 + r1*pcs.ne0 + r0 + pcs.outOff] = all_sum;
    }
}

0 comments on commit f9d41c7

Please sign in to comment.