前面我們實現了向量的加法,今天我們實現復雜一些的運算,矩陣的加法,即將矩陣對應位置上的元素進行相加,相當於向量加法的升級版本。不過需要注意的是,這裡並不是直接 malloc 一個真正的二維數組,而是先分配一塊一維的數據緩衝區,再分配一個行指針數組,讓每個指針指向對應行的起始位置,這樣才能用 A[i][j] 的方式訪問元素;
CPU實現
CPP實現起來的注意點在於二維數組的開辟,通過給二維數組的每一個指針賦值實現二維數據的訪問,具體算法兩層循環即可。
#include <stdlib.h>
#include <iostream>
#include <sys/time.h>
#include <math.h>
const int ROWS=1024;
const int COLS=1024;
using namespace std;
int main()
{
struct timeval start, end;
gettimeofday( &start, NULL );
int *A, **A_ptr, *B, **B_ptr, *C, **C_ptr;
int total_size = ROWS*COLS*sizeof(int);
A = (int*)malloc(total_size);
B = (int*)malloc(total_size);
C = (int*)malloc(total_size);
A_ptr = (int**)malloc(ROWS*sizeof(int*));
B_ptr = (int**)malloc(ROWS*sizeof(int*));
C_ptr = (int**)malloc(ROWS*sizeof(int*));
//CPU一維數組初始化
for(int i=0;i<ROWS*COLS;i++)
{
A[i] = 80;
B[i] = 20;
}
for(int i=0;i<ROWS;i++)
{
A_ptr[i] = A + COLS*i;
B_ptr[i] = B + COLS*i;
C_ptr[i] = C + COLS*i;
}
for(int i=0;i<ROWS;i++)
for(int j=0;j<COLS;j++)
{
C_ptr[i][j] = A_ptr[i][j] + B_ptr[i][j];
}
//檢查結果
int max_error = 0;
for(int i=0;i<ROWS*COLS;i++)
{
//cout << C[i] << endl;
max_error += abs(100-C[i]);
}
cout << "max_error is " << max_error <<endl;
gettimeofday( &end, NULL );
int timeuse = 1000000 * ( end.tv_sec - start.tv_sec ) + end.tv_usec - start.tv_usec;
cout << "total time is " << timeuse/1000 << "ms" <<endl;
return 0;
}
運行結果
運行時間為19ms,對於1024*1024的矩陣,這已經足夠快。
CUDA版本
CUDA 版本的整體流程與 CPU 版本類似,核函數的寫法也和向量加法相近,只是索引從一維變成了二維。
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#define Row 1024
#define Col 1024
// Element-wise matrix add: C[row][col] = A[row][col] + B[row][col].
// Expects a 2-D launch with one thread per element; A/B/C are device
// row-pointer tables indexing flat device data arrays.
__global__
void addKernel(int **C, int **A, int ** B)
{
    // x covers columns, y covers rows.
    int col = blockDim.x * blockIdx.x + threadIdx.x;
    int row = blockDim.y * blockIdx.y + threadIdx.y;
    // Guard: the grid may overhang the matrix when Row/Col are not
    // multiples of the block dimensions.
    if (col >= Col || row >= Row)
        return;
    C[row][col] = A[row][col] + B[row][col];
}
// Abort with a readable message when a CUDA runtime call fails.
// Fix: the original checked no CUDA API return codes at all, so any
// failure silently produced garbage results.
static void checkCuda(cudaError_t err, const char *what)
{
    if (err != cudaSuccess) {
        fprintf(stderr, "CUDA error (%s): %s\n", what, cudaGetErrorString(err));
        exit(1);
    }
}

int main()
{
    // Wall-clock timing of the whole run (allocs, transfers, kernel).
    struct timeval start, end;
    gettimeofday( &start, NULL );

    // Host staging: flat data arrays plus per-row pointer tables. Note the
    // row-pointer tables will be filled with *device* addresses below and
    // then copied to the device, so the kernel can index A[row][col].
    int **A = (int **)malloc(sizeof(int*) * Row);
    int **B = (int **)malloc(sizeof(int*) * Row);
    int **C = (int **)malloc(sizeof(int*) * Row);
    int *dataA = (int *)malloc(sizeof(int) * Row * Col);
    int *dataB = (int *)malloc(sizeof(int) * Row * Col);
    int *dataC = (int *)malloc(sizeof(int) * Row * Col);
    if (!A || !B || !C || !dataA || !dataB || !dataC) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }

    int **d_A;
    int **d_B;
    int **d_C;
    int *d_dataA;
    int *d_dataB;
    int *d_dataC;
    // Allocate device memory: row-pointer tables and flat data arrays.
    // Fix: sizeof(int **) -> sizeof(int*); the tables store int* elements
    // (same size on all supported platforms, but the original type was wrong).
    checkCuda(cudaMalloc((void**)&d_A, sizeof(int*) * Row), "cudaMalloc d_A");
    checkCuda(cudaMalloc((void**)&d_B, sizeof(int*) * Row), "cudaMalloc d_B");
    checkCuda(cudaMalloc((void**)&d_C, sizeof(int*) * Row), "cudaMalloc d_C");
    checkCuda(cudaMalloc((void**)&d_dataA, sizeof(int) * Row * Col), "cudaMalloc d_dataA");
    checkCuda(cudaMalloc((void**)&d_dataB, sizeof(int) * Row * Col), "cudaMalloc d_dataB");
    checkCuda(cudaMalloc((void**)&d_dataC, sizeof(int) * Row * Col), "cudaMalloc d_dataC");

    // Fill the inputs so every output element should equal 100.
    for (int i = 0; i < Row*Col; i++) {
        dataA[i] = 90;
        dataB[i] = 10;
    }

    // Build the row-pointer tables on the host using *device* base
    // addresses, then copy the tables to the device. This is what links
    // the device double pointers (d_A...) to the device data (d_dataA...).
    for (int i = 0; i < Row; i++) {
        A[i] = d_dataA + Col * i;
        B[i] = d_dataB + Col * i;
        C[i] = d_dataC + Col * i;
    }
    checkCuda(cudaMemcpy(d_A, A, sizeof(int*) * Row, cudaMemcpyHostToDevice), "copy A");
    checkCuda(cudaMemcpy(d_B, B, sizeof(int*) * Row, cudaMemcpyHostToDevice), "copy B");
    checkCuda(cudaMemcpy(d_C, C, sizeof(int*) * Row, cudaMemcpyHostToDevice), "copy C");
    checkCuda(cudaMemcpy(d_dataA, dataA, sizeof(int) * Row * Col, cudaMemcpyHostToDevice), "copy dataA");
    checkCuda(cudaMemcpy(d_dataB, dataB, sizeof(int) * Row * Col, cudaMemcpyHostToDevice), "copy dataB");

    // 2-D launch, one thread per element; ceil-divide so the grid covers
    // the whole matrix even if Row/Col are not multiples of 16.
    dim3 threadPerBlock(16, 16);
    dim3 blockNumber( (Col + threadPerBlock.x - 1)/ threadPerBlock.x, (Row + threadPerBlock.y - 1) / threadPerBlock.y );
    printf("Block(%d,%d) Grid(%d,%d).\n", threadPerBlock.x, threadPerBlock.y, blockNumber.x, blockNumber.y);
    addKernel << <blockNumber, threadPerBlock >> > (d_C, d_A, d_B);
    // Fix: kernel launches are asynchronous and report nothing by
    // themselves — catch launch-configuration errors immediately, then
    // synchronize to surface execution errors before reading results.
    checkCuda(cudaGetLastError(), "addKernel launch");
    checkCuda(cudaDeviceSynchronize(), "addKernel execution");

    // Copy the flat result array back to the host.
    checkCuda(cudaMemcpy(dataC, d_dataC, sizeof(int) * Row * Col, cudaMemcpyDeviceToHost), "copy dataC");

    // Sum of absolute deviations from the expected value 100; 0 means the
    // whole matrix is correct. (Despite the name, this is a sum, not a max.)
    int max_error = 0;
    for(int i=0;i<Row*Col;i++)
    {
        max_error += abs(100-dataC[i]);
    }

    // Release host and device memory.
    free(A);
    free(B);
    free(C);
    free(dataA);
    free(dataB);
    free(dataC);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    cudaFree(d_dataA);
    cudaFree(d_dataB);
    cudaFree(d_dataC);

    printf("max_error is %d\n", max_error);
    gettimeofday( &end, NULL );
    int timeuse = 1000000 * ( end.tv_sec - start.tv_sec ) + end.tv_usec - start.tv_usec;
    printf("total time is %d ms\n", timeuse/1000);
    return 0;
}
這里需要注意的是,dim3 threadPerBlock(16, 16)這里采用了二維的線程,那么對應的threadIdx也為二維的。
dim3 是 CUDA 內置的三維索引類型,用來描述 grid 和 block 的維度,即 CUDA 認為 grid 和 block 都可以是三維的,未指定的維度默認為 1。
運行結果
運行結果依然比CPU版本慢,原因還是核函數過於簡單,以至於線程調度占據了更多的時間。