CUDA之单thread单block&多thread单block&多thread多block

用简单的立方和归约来举例:

//单thread单block
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#define DATA_SIZE 1048576
int data[DATA_SIZE];
//Fills 'number' with 'size' pseudo-random digits drawn uniformly from 0-9.
void GenerateNumbers(int *number, int size)
{
    for (int idx = 0; idx != size; ++idx)
        number[idx] = rand() % 10;
}
//CUDA 初始化
//CUDA initialization: select the first device with compute capability >= 1.0.
//Returns true on success; on failure prints a message to stderr and returns false.
bool InitCUDA()
{
    int count = 0;
    //Query the number of CUDA-capable devices; treat an API error the same as "no device".
    if (cudaGetDeviceCount(&count) != cudaSuccess || count == 0) {
        fprintf(stderr, "There is no device.\n");
        return false;
    }
    int i;
    for (i = 0; i < count; i++) {
        cudaDeviceProp prop;
        if (cudaGetDeviceProperties(&prop, i) == cudaSuccess) {
            if (prop.major >= 1) {
                break;
            }
        }
    }
    if (i == count) {
        fprintf(stderr, "There is no device supporting CUDA 1.x.\n");
        return false;
    }
    //Make the chosen device current; report failure instead of silently continuing.
    if (cudaSetDevice(i) != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed.\n");
        return false;
    }
    return true;
}
// __global__ kernel (runs on the GPU): serially accumulates the cubes of all
// DATA_SIZE inputs using a single thread and writes the total to *result.
__global__ static void sumOfcubes(int *num, int* result)
{
    //BUG FIX: the original read "intsum = 0;" and "inti;" (missing spaces),
    //which does not compile.
    int sum = 0;
    int i;
    for (i = 0; i < DATA_SIZE; i++) {
        sum += num[i] * num[i] * num[i];
    }
    *result = sum;
}
int main()
{
    //CUDA initialization
    if (!InitCUDA()) {
        return 0;
    }
    //Fill the host buffer with random digits 0-9.
    GenerateNumbers(data, DATA_SIZE);
    int* gpudata, *result;
    cudaMalloc((void**)&gpudata, sizeof(int) * DATA_SIZE);
    cudaMalloc((void**)&result, sizeof(int));
    //Copy the generated numbers into device memory.
    cudaMemcpy(gpudata, data, sizeof(int) * DATA_SIZE, cudaMemcpyHostToDevice);
    //Launch one block containing a single thread (no dynamic shared memory).
    sumOfcubes<<<1, 1, 0>>>(gpudata, result);
    //BUG FIX: 'sum' was used before it was declared, and the blocking copy
    //needs the address of the host destination (&sum), not its value.
    int sum = 0;
    cudaMemcpy(&sum, result, sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(gpudata);
    cudaFree(result);
    printf("GPUsum: %d \n", sum);
    //CPU reference computation for verification.
    sum = 0;
    for (int i = 0; i < DATA_SIZE; i++) {
        sum += data[i] * data[i] * data[i];
    }
    printf("CPUsum: %d \n", sum);
    getchar();
    return 0;
}
//单block多thread

#include <stdio.h>
#include <stdlib.h>
#include <time.h>
//CUDA RunTime API
#include <cuda_runtime.h>
#include "device_launch_parameters.h"

#define DATA_SIZE 1048576
#define THREAD_NUM 1024 //256--->1024
int data[DATA_SIZE];

//Populate the first 'size' entries of 'number' with random digits in [0, 9].
void GenerateNumbers(int *number, int size)
{
	int *end = number + size;
	for (int *p = number; p < end; ++p) {
		*p = rand() % 10;
	}
}
// __global__ kernel (runs on the GPU): each of THREAD_NUM threads accumulates
// the cubes of an interleaved slice of the input; the per-thread partial sum is
// written to result[tid]. Thread 0 also records the elapsed device clock ticks
// in *time. (Name kept for compatibility even though it sums cubes.)
__global__ static void sumOfSquares(int *num, int* result, clock_t* time)
{ 
	const int tid = threadIdx.x;
	//FIX: removed the unused local 'size' (only a commented-out chunked loop used it).
	int sum = 0;
	int i;
	//Start timestamp, recorded only by thread 0 (threadIdx.x == 0).
	clock_t start;
	if (tid == 0) start = clock();
	//Interleaved access (stride THREAD_NUM) keeps neighbouring threads on
	//neighbouring addresses, so global-memory loads coalesce.
	for (i = tid; i < DATA_SIZE; i += THREAD_NUM)
	{
		sum += num[i] * num[i] * num[i];
	}
	result[tid] = sum;
	//Elapsed time, recorded only by thread 0.
	if (tid == 0) 
		*time = clock() - start;
}
int main()
{
	//Fill the host buffer with random digits 0-9.
	GenerateNumbers(data, DATA_SIZE);
	/*Copy the data to device memory*/
	int* gpudata, *result;
	clock_t* time;
	//cudaMalloc device buffers: input data, one partial sum per thread, one timing slot.
	cudaMalloc((void**)&gpudata, sizeof(int) * DATA_SIZE); 
	cudaMalloc((void**)&result, sizeof(int) * THREAD_NUM);
	cudaMalloc((void**)&time, sizeof(clock_t));
	//Copy the generated numbers into device memory.
	cudaMemcpy(gpudata, data, sizeof(int) * DATA_SIZE, cudaMemcpyHostToDevice);
	//Launch one block of THREAD_NUM threads (no dynamic shared memory).
	sumOfSquares <<< 1, THREAD_NUM, 0 >>>(gpudata, result, time);
	int sum[THREAD_NUM];
	clock_t time_use;
	//Copy the per-thread partial sums and the timing value back to the host.
	cudaMemcpy(sum, result, sizeof(int) * THREAD_NUM, cudaMemcpyDeviceToHost);
	//BUG FIX: the destination must be the address of time_use, not its value.
	cudaMemcpy(&time_use, time, sizeof(clock_t), cudaMemcpyDeviceToHost);
	//Free
	cudaFree(gpudata);
	cudaFree(result);
	cudaFree(time);
	int final_sum = 0; /*reduce the per-thread partial sums on the host*/
	for (int i = 0; i < THREAD_NUM; i++)
	{
		final_sum += sum[i];
	}
	//clock_t is not guaranteed to be int, so cast before printing.
	printf("GPUsum: %d\n time:%ld\n", final_sum, (long)time_use);
	//CPU reference computation for verification.
	final_sum = 0;
	for (int i = 0; i < DATA_SIZE; i++) {
		final_sum += data[i] * data[i] * data[i];
	}
	printf("CPUsum: %d \n", final_sum);
	getchar();
	return 0;
}
//多block多thread

#include <stdio.h>
#include <stdlib.h>
#include <time.h>
//CUDA RunTime API
#include <cuda_runtime.h>
#include "device_launch_parameters.h"

#define DATA_SIZE 1048576
#define THREAD_NUM 256
#define BLOCK_NUM 32
int data[DATA_SIZE];

//Write 'size' random values, each reduced modulo 10, into 'number'.
void GenerateNumbers(int *number, int size)
{
	int n = 0;
	while (n < size) {
		number[n] = rand() % 10;
		n++;
	}
}
// __global__ kernel (runs on the GPU): BLOCK_NUM x THREAD_NUM threads each
// accumulate the cubes of a grid-strided slice of the input; the partial sum is
// written to result[bid * THREAD_NUM + tid]. Thread 0 of each block records
// start/end device clocks in time[bid] / time[bid + BLOCK_NUM].
__global__ static void sumOfSquares(int *num, int* result, clock_t* time)
{
	const int tid = threadIdx.x;
	const int bid = blockIdx.x;

	//FIX: removed the unused local 'clock_t start' (timestamps go straight into time[]).
	int sum = 0;
	int i;
	//Per-block start timestamp, written by thread 0 only.
	if (tid == 0)
		time[bid] = clock();
	//The global index combines tid and bid and advances by the whole grid size,
	//keeping neighbouring threads on neighbouring addresses (coalesced access).
	for (i = bid * THREAD_NUM + tid; i < DATA_SIZE; i += BLOCK_NUM * THREAD_NUM)
	{
		sum += num[i] * num[i] * num[i];
	}
	//One result slot per thread across the whole grid.
	result[bid * THREAD_NUM + tid] = sum;
	//Per-block end timestamp, written by thread 0 only.
	if (tid == 0)
		time[bid + BLOCK_NUM] = clock();
}
int main()
{
	//Fill the host buffer with random digits 0-9.
	GenerateNumbers(data, DATA_SIZE);
	int* gpudata, *result;
	clock_t* time;
	cudaMalloc((void**)&gpudata, sizeof(int) * DATA_SIZE);
	cudaMalloc((void**)&result, sizeof(int) * THREAD_NUM * BLOCK_NUM);
	cudaMalloc((void**)&time, sizeof(clock_t) * BLOCK_NUM * 2);
	cudaMemcpy(gpudata, data, sizeof(int) * DATA_SIZE, cudaMemcpyHostToDevice);
	//CUDA launch syntax: kernel<<<blocks, threads per block, shared memory bytes>>>(args...);
	sumOfSquares <<< BLOCK_NUM, THREAD_NUM, 0 >>> (gpudata, result, time);
	int sum[THREAD_NUM * BLOCK_NUM];
	clock_t time_use[BLOCK_NUM * 2];
	//Copy the partial sums and per-block timestamps back to the host.
	cudaMemcpy(sum, result, sizeof(int) * THREAD_NUM * BLOCK_NUM, cudaMemcpyDeviceToHost);
	cudaMemcpy(time_use, time, sizeof(clock_t) * BLOCK_NUM * 2, cudaMemcpyDeviceToHost);
	cudaFree(gpudata);
	cudaFree(result);
	cudaFree(time);
	//Host-side reduction of the per-thread partial sums.
	int final_sum = 0;
	for (int i = 0; i < THREAD_NUM * BLOCK_NUM; i++)
	{
		final_sum += sum[i];
	}
	//Total GPU time = latest end timestamp minus earliest start timestamp across blocks.
	clock_t min_start, max_end;
	min_start = time_use[0];
	max_end = time_use[BLOCK_NUM];
	for (int i = 1; i < BLOCK_NUM; i++)
	{
		if (min_start > time_use[i]) min_start = time_use[i];
		if (max_end < time_use[i + BLOCK_NUM])
			max_end = time_use[i + BLOCK_NUM];
	}
	//BUG FIX: clock_t is not guaranteed to be int; cast before printing.
	printf("GPUsum: %d gputime: %ld\n", final_sum, (long)(max_end - min_start));
	//CPU reference computation for verification.
	final_sum = 0;
	for (int i = 0; i < DATA_SIZE; i++)
	{
		final_sum += data[i] * data[i] * data[i];
	}
	printf("CPUsum: %d \n", final_sum);
	getchar();
	return 0;
}

Shared Memory(共享内存)
是一个block 中所有thread 都能使用的共享内存,存取的速度相当快,存取shared memory 的速度和存取寄存器相同,不需要担心latency 的问题。
可以直接利用__shared__声明一个shared memory变量
__shared__ float temp[THREAD_NUM * 3];
Shared memory 有时会出现存储体冲突(bank conflict)的问题:
例如:每个SM有16KB 的shared memory,分成16 个bank
•如果同时每个thread 是存取不同的bank,就不会有问题
•如果同时有两个(或更多)threads 存取同一个bank 的数据,就会发生bank conflict,这些threads 就必须照顺序去存取,而无法同时存取shared memory 了。

//多block多thread 使用sharememory

#include <stdio.h>
#include <stdlib.h>
#include <time.h>
//CUDA RunTime API
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
#include "device_functions.h"

#define DATA_SIZE 1048576
#define THREAD_NUM 256
#define BLOCK_NUM 32
int data[DATA_SIZE];
 
//Load number[0..size-1] with pseudo-random digits (0 through 9).
void GenerateNumbers(int *number, int size)
{
	for (int k = 0; k < size; ++k) {
		int value = rand();
		number[k] = value % 10;
	}
}
// __global__ kernel (runs on the GPU): like the multi-block version, but each
// block first accumulates per-thread sums in shared memory, then thread 0
// serially folds them so only one result per block goes to global memory.
// Launch must supply THREAD_NUM * sizeof(int) bytes of dynamic shared memory.
__global__ static void sumOfSquares(int *num, int* result, clock_t* time)
{
	extern __shared__ int shared[];
	const int tid = threadIdx.x;
	const int bid = blockIdx.x;
	
	shared[tid] = 0;
	int i;
	//FIX: removed the unused local 'clock_t start' (timestamps go straight into time[]).
	//Per-block start timestamp, written by thread 0 only.
	if (tid == 0) 
		time[bid] = clock();
	//Grid-strided accumulation into this thread's shared-memory slot
	//(index combines tid and bid to keep accesses coalesced).
	for (i = bid * THREAD_NUM + tid; i < DATA_SIZE; i += BLOCK_NUM * THREAD_NUM)
	{
		shared[tid] += num[i] * num[i] * num[i];
	}
	//Barrier: every thread must have finished writing shared[tid] before the reduction.
	__syncthreads();
	//Thread 0 serially folds all partial sums into shared[0].
	if (tid == 0)
	{
		for (i = 1; i < THREAD_NUM; i++) shared[0] += shared[i];
		result[bid] = shared[0];
	}
	//Per-block end timestamp, written by thread 0 only.
	if (tid == 0) 
		time[bid + BLOCK_NUM] = clock();
}
int main()
{
	//Fill the host buffer with random digits 0-9.
	GenerateNumbers(data, DATA_SIZE);
	int* gpudata, *result;
	clock_t* time;
	cudaMalloc((void**)&gpudata, sizeof(int) * DATA_SIZE);
	cudaMalloc((void**)&result, sizeof(int) * BLOCK_NUM);
	cudaMalloc((void**)&time, sizeof(clock_t) * BLOCK_NUM * 2);
	cudaMemcpy(gpudata, data, sizeof(int) * DATA_SIZE, cudaMemcpyHostToDevice);
	//CUDA launch syntax: kernel<<<blocks, threads per block, shared memory bytes>>>(args...);
	//The third argument reserves the dynamic shared memory the kernel reduces into.
	sumOfSquares <<< BLOCK_NUM, THREAD_NUM, THREAD_NUM * sizeof(int) >>>(gpudata, result, time);
	int sum[BLOCK_NUM];
	clock_t time_use[BLOCK_NUM * 2];
	//Copy the per-block sums and timestamps back to the host
	//(arrays decay to pointers; the extra '&' in the original was redundant).
	cudaMemcpy(sum, result, sizeof(int) * BLOCK_NUM, cudaMemcpyDeviceToHost);
	cudaMemcpy(time_use, time, sizeof(clock_t) * BLOCK_NUM * 2, cudaMemcpyDeviceToHost);
	cudaFree(gpudata);
	cudaFree(result);
	cudaFree(time);
	//Host-side reduction: one partial sum per block remains.
	int final_sum = 0;
	for (int i = 0; i < BLOCK_NUM; i++)
	{
		final_sum += sum[i];
	}
	//Total GPU time = latest end timestamp minus earliest start timestamp across blocks.
	clock_t min_start, max_end;
	min_start = time_use[0];
	max_end = time_use[BLOCK_NUM];
	for (int i = 1; i < BLOCK_NUM; i++)
	{
		if (min_start > time_use[i]) min_start = time_use[i];
		if (max_end < time_use[i + BLOCK_NUM])
			max_end = time_use[i + BLOCK_NUM];
	}
	//BUG FIX: clock_t is not guaranteed to be int; cast before printing.
	printf("GPUsum: %d gputime: %ld\n", final_sum, (long)(max_end - min_start));
	//CPU reference computation for verification.
	final_sum = 0;
	for (int i = 0; i < DATA_SIZE; i++)
	{
		final_sum += data[i] * data[i] * data[i];
	}
	printf("CPUsum: %d \n", final_sum);
	getchar();
	return 0;
}

Block内完成部分加和工作,所以gputime增加了

//多block多thread

#include <stdio.h>
#include <stdlib.h>
#include <time.h>
//CUDA RunTime API
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
#include "device_functions.h"

#define DATA_SIZE 1048576
#define THREAD_NUM 256
#define BLOCK_NUM 32
int data[DATA_SIZE];
 
//Generate 'size' random single-digit integers into 'number'.
void GenerateNumbers(int *number, int size)
{
	int pos = 0;
	for (; pos < size; ++pos)
		number[pos] = rand() % 10;
}
// __global__ kernel (runs on the GPU): computes per-block cube sums using a
// tree-shaped (binary) shared-memory reduction instead of a serial loop.
// Launch expects BLOCK_NUM blocks of THREAD_NUM threads and
// THREAD_NUM * sizeof(int) bytes of dynamic shared memory.
__global__ static void sumOfSquares(int *num, int* result, clock_t* time)
{
	extern __shared__ int shared[];
	const int tid = threadIdx.x;
	const int bid = blockIdx.x;
	
	shared[tid] = 0;
	int i;
	//Record the per-block start timestamp.
	//Only thread 0 (threadIdx.x == 0) writes it; each block stores its own start and end time.
	if (tid == 0) 
		time[bid] = clock();
	//The global index combines tid and bid and advances by the whole grid size,
	//keeping neighbouring threads on neighbouring addresses (coalesced access).
	for (i = bid * THREAD_NUM + tid; i < DATA_SIZE; i += BLOCK_NUM * THREAD_NUM)
	{
		shared[tid] += num[i] * num[i] * num[i];
	}
	//Barrier: every thread must have written its partial sum to shared[tid] first.
	__syncthreads();

	//Tree reduction: each pass, threads whose low bits (selected by 'mask') are
	//all zero add in the neighbour 'offset' slots away; offset doubles each pass
	//(1, 2, 4, ...) until the block's total has been folded into shared[0].
	int offset = 1, mask = 1;
	while (offset < THREAD_NUM)
	{
		if ((tid & mask) == 0)
		{
			shared[tid] += shared[tid + offset];
		}
		offset += offset;
		mask = offset + mask;
		//Every thread reaches this barrier on every pass (the loop condition is
		//uniform across the block), so it is safe despite the divergent 'if' above.
		__syncthreads();
	}
	//Thread 0 publishes the block total and the per-block end timestamp.
	if (tid == 0)
	{
		result[bid] = shared[0];
		time[bid + BLOCK_NUM] = clock();
	}	
}

int main()
{
	//Fill the host buffer with random digits 0-9.
	GenerateNumbers(data, DATA_SIZE);
	int* gpudata, *result;
	clock_t* time;
	cudaMalloc((void**)&gpudata, sizeof(int) * DATA_SIZE);
	cudaMalloc((void**)&result, sizeof(int) * BLOCK_NUM);
	cudaMalloc((void**)&time, sizeof(clock_t) * BLOCK_NUM * 2);
	cudaMemcpy(gpudata, data, sizeof(int) * DATA_SIZE, cudaMemcpyHostToDevice);
	//CUDA launch syntax: kernel<<<blocks, threads per block, shared memory bytes>>>(args...);
	//The third argument reserves the dynamic shared memory used by the tree reduction.
	sumOfSquares <<< BLOCK_NUM, THREAD_NUM, THREAD_NUM * sizeof(int) >>>(gpudata, result, time);
	int sum[BLOCK_NUM];
	clock_t time_use[BLOCK_NUM * 2];
	//Copy the per-block sums and timestamps back to the host
	//(arrays decay to pointers; the extra '&' in the original was redundant).
	cudaMemcpy(sum, result, sizeof(int) * BLOCK_NUM, cudaMemcpyDeviceToHost);
	cudaMemcpy(time_use, time, sizeof(clock_t) * BLOCK_NUM * 2, cudaMemcpyDeviceToHost);
	cudaFree(gpudata);
	cudaFree(result);
	cudaFree(time);
	//Host-side reduction: one partial sum per block remains.
	int final_sum = 0;
	for (int i = 0; i < BLOCK_NUM; i++)
	{
		final_sum += sum[i];
	}
	//Total GPU time = latest end timestamp minus earliest start timestamp across blocks.
	clock_t min_start, max_end;
	min_start = time_use[0];
	max_end = time_use[BLOCK_NUM];
	for (int i = 1; i < BLOCK_NUM; i++)
	{
		if (min_start > time_use[i]) min_start = time_use[i];
		if (max_end < time_use[i + BLOCK_NUM])
			max_end = time_use[i + BLOCK_NUM];
	}
	//BUG FIX: clock_t is not guaranteed to be int; cast before printing.
	printf("GPUsum: %d gputime: %ld\n", final_sum, (long)(max_end - min_start));
	//CPU reference computation for verification.
	final_sum = 0;
	for (int i = 0; i < DATA_SIZE; i++)
	{
		final_sum += data[i] * data[i] * data[i];
	}
	printf("CPUsum: %d \n", final_sum);
	getchar();
	return 0;
}

转载自 blog.csdn.net/qq_41598072/article/details/83959063