matmul_naive.cu

#include "utils.cpp"
dim3 threadsPerBlock(16, 16);
template <typename T>
__global__ void matmul_naive(T* a, T* b, T* c, int M, int K, int N) {
    /* A naive implementation of matrix multiplication.
     * a: MxK
     * b: KxN
     * c: MxN
     *
     * Average Time: 1000x1000x1000, 4.85s
     * Average Time: 1024x1024x1024, 1.53s
     */
    // If the grid does not have enough threads to cover every output
    // element, the outer grid-stride loops are required.
    // Here only the case where the matrix size is a multiple of the
    // block size is considered.
    int x = threadIdx.x + blockIdx.x*blockDim.x;
    int y = threadIdx.y + blockIdx.y*blockDim.y;
    for (int i = x; i < M; i += gridDim.x*blockDim.x) {
        for (int j = y; j < N; j += gridDim.y*blockDim.y) {
            // The innermost loop computes one value of the output
            // matrix: c[i][j] = sum over k of a[i][k]*b[k][j].
            T sum = 0;
            for (int k = 0; k < K; ++k) {
                sum += a[i*K+k]*b[k*N+j];
            }
            c[i*N+j] = sum;
        }
    }
}
int main(int argc, char *argv[]) {
    if (argc < 4) {
        std::cerr << "Usage: " << argv[0] << " M K N" << std::endl;
        return 1;
    }
    int M = std::atoi(argv[1]), K = std::atoi(argv[2]), N = std::atoi(argv[3]);
    dim3 blocksPerGrid;
    blocksPerGrid.x = M / threadsPerBlock.x;
    blocksPerGrid.y = N / threadsPerBlock.y;
    blocksPerGrid.z = 1;
    double* a = random_matrix_gpu<double>(M, K);
    double* b = random_matrix_gpu<double>(K, N);
    double* c = new double[M*N];
    double *dev_a, *dev_b, *dev_c;
    cudaMalloc((void**)&dev_a, M*K*sizeof(double));
    cudaMalloc((void**)&dev_b, K*N*sizeof(double));
    cudaMalloc((void**)&dev_c, M*N*sizeof(double));
    cudaMemcpy(dev_a, a, M*K*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, K*N*sizeof(double), cudaMemcpyHostToDevice);
    matmul_naive<double><<<blocksPerGrid, threadsPerBlock>>>(dev_a, dev_b, dev_c, M, K, N);
    cudaMemcpy(c, dev_c, M*N*sizeof(double), cudaMemcpyDeviceToHost);
    std::cout << (check_mul<double>(a, b, c, M, K, N) ? "Correct!!" : "Wrong Answer!") << std::endl;
    // Release device buffers and the host result; a and b are
    // allocated by random_matrix_gpu in utils.cpp.
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    delete[] c;
    return 0;
}
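
utils.cpp is not shown on this page. Based only on how random_matrix_gpu and check_mul are called above, a minimal sketch of what it might contain is given below; the signatures are inferred from usage and the bodies are assumptions, not the repository's actual utils.cpp.

// Hypothetical sketch of the helpers used above (not the actual utils.cpp).
#include <cmath>
#include <cstdlib>

// Assumed: returns a rows x cols matrix in row-major order, filled with
// random values and allocated with new[], so the caller owns the memory.
template <typename T>
T* random_matrix_gpu(int rows, int cols) {
    T* m = new T[rows * cols];
    for (int i = 0; i < rows * cols; ++i) {
        m[i] = static_cast<T>(std::rand()) / RAND_MAX;
    }
    return m;
}

// Assumed: recomputes a*b on the CPU and compares it with c element-wise,
// returning true when every entry matches within a small tolerance.
template <typename T>
bool check_mul(T* a, T* b, T* c, int M, int K, int N) {
    for (int i = 0; i < M; ++i) {
        for (int j = 0; j < N; ++j) {
            T sum = 0;
            for (int k = 0; k < K; ++k) {
                sum += a[i*K + k] * b[k*N + j];
            }
            if (std::fabs(static_cast<double>(sum - c[i*N + j])) > 1e-6) {
                return false;
            }
        }
    }
    return true;
}

With helpers along these lines in place, the file should build with something like nvcc matmul_naive.cu -o matmul_naive and run as ./matmul_naive 1024 1024 1024.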