在CUDA中并行化for循环(1D原始卷积)
有人能帮我把嵌套的for循环转换成CUDA内核吗? 以下是我试图转换为CUDA内核的函数:在CUDA中并行化for循环(1D原始卷积),c,cuda,parallel-processing,convolution,C,Cuda,Parallel Processing,Convolution,有人能帮我把嵌套的for循环转换成CUDA内核吗? 以下是我试图转换为CUDA内核的函数: // Convolution on Host void conv(int* A, int* B, int* out) { for (int i = 0; i < N; ++i) for (int j = 0; j < N; ++j) out[i + j] += A[i] * B[j]; } //主机上的卷积 无效转换(int*A、int*B、
// Host-side full 1-D convolution: out[k] accumulates A[i]*B[j] over every
// pair with i + j == k. Assumes N is a compile-time constant defined
// elsewhere in the file, and that `out` provides at least 2*N-1 elements
// zero-initialized by the caller (results are accumulated with +=).
void conv(int* A, int* B, int* out) {
    for (int a = 0; a < N; ++a) {
        int lhs = A[a];
        for (int b = 0; b < N; ++b) {
            out[a + b] += lhs * B[b];
        }
    }
}
//主机上的卷积
无效转换(int*A、int*B、int*out){
对于(int i=0;i
我已经非常努力地将此代码并行化。以下是我的尝试:
// Corrected 1-D convolution kernel: each thread computes one output element
//   out[idx] = sum over valid i of A[i] * B[idx - i],  idx in [0, 2*N-1).
//
// The original version was broken in two ways: it indexed
// __shared__ int temp[N] at i + j (up to 2*N-2 — out of bounds for every
// block with blockIdx.x > 0), and its reduction loop summed ALL N entries of
// temp into EVERY output element instead of accumulating per output index.
// A gather per output element needs no shared memory and no __syncthreads().
//
// Works with any launch covering at least 2*N-1 distinct flat thread indices
// (the original <<<N, N>>> launch still works, just wastefully; a 1-D grid of
// ceil((2*N-1)/blockDim.x) blocks is sufficient). `out` need not be zeroed.
__global__ void conv_Kernel(int* A, int* B, int* out) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < 2 * N - 1) {
        // Valid i must satisfy 0 <= i < N and 0 <= idx - i < N,
        // i.e. the contiguous range [max(0, idx-N+1), min(idx, N-1)].
        int lo = (idx >= N) ? (idx - N + 1) : 0;
        int hi = (idx < N) ? idx : (N - 1);
        int sum = 0;
        for (int i = lo; i <= hi; i++)
            sum += A[i] * B[idx - i];
        out[idx] = sum;
    }
}
\uuuuu全局\uuuu无效转换内核(int*A、int*B、int*out){
int i=块idx.x;
int j=螺纹IDX.x;
__共享温度[N];
__同步线程();
温度[i+j]=A[i]*B[j];
__同步线程();
整数和=0;
对于(int k=0; k&lt;N; k++) 求和 += 温度[k];
您的 cpu conv 函数似乎正在执行此操作(例如,对于 N=4):
(请注意,即使只使用-O3参数,CPU代码的执行也会产生显著差异)
正如我提到的,我认为我的两个例子对于 GPU 代码来说也是相当幼稚的(例如,两者都没有使用共享内存),但它们可以给你一些入门的思路。
为了简洁起见,我放弃了CUDA错误检查。但是,我建议您在CUDA代码出现问题时执行以下操作。对于您的conv_内核
,我相信它会指示一些错误(如果您尝试运行它)。作为一种快速测试,您始终可以使用CUDA memcheck
运行任何CUDA代码,以查看是否发生任何API错误
EDIT:我尝试了一个简单的共享内存版本的conv_Kernel2
,但没有更快。我认为这是因为这些数据集(在N
=4096、A
和B
各为16Kbytes,out
约为32Kbytes)足够小,可以轻松地放入GPU二级缓存中,而不会发生抖动
但是,对于较新的体系结构(cc 3.5和更新版本),CUDA编译器有时可以对内核进行额外的优化。因此,如果我们将我的conv_Kernel2
定义更改为:
__global__ void conv_Kernel2(const int * __restrict__ A, const int * __restrict__ B, int *out){
在我的案例中,我见证了执行时间的略微改善:
$ ./t617
Finished. Results match. cpu time: 13792us, gpu time: 3209us
Finished. Results match. cpu time: 13792us, gpu2 time: 1626us
$
我创建了代码的修改版本,该版本执行以下操作:
N
在命令行上指定
仅包括cpuconv
和gpuconv\u内核2
将数据移动到GPU或从GPU移出的时间成本包含在GPU定时测量中
代码中提供了一个 typedef ... mytype,以便可以轻松地重新编译代码,测试各种数据类型下的行为
打印出一个“加速系数”,即cpu时间除以gpu时间
修改代码:
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
// RG*RG*MAXN must fit within mytype
#define MAXN 100000
#define RG 10
#define USECPSEC 1000000ULL
#define nTPB 256
typedef double mytype;
// CPU reference implementation of full 1-D convolution:
// out[i + j] += A[i] * B[j] for all 0 <= i, j < N.
// Caller must supply `out` with at least 2*N-1 zero-initialized elements.
void conv(const mytype *A, const mytype *B, mytype* out, int N) {
    for (int a = 0; a < N; ++a) {
        // Hoist the row factor; inner loop walks B and out contiguously.
        const mytype lhs = A[a];
        for (int b = 0; b < N; ++b) {
            out[a + b] += lhs * B[b];
        }
    }
}
// Wall-clock helper: returns the current time in microseconds minus `prev`.
// Usage: t = dtime_usec(0); ...work...; t = dtime_usec(t);  // elapsed usec
unsigned long long dtime_usec(unsigned long long prev){
    struct timeval now;
    gettimeofday(&now, 0);
    unsigned long long stamp = (now.tv_sec * USECPSEC) + now.tv_usec;
    return stamp - prev;
}
// One thread per output element: for idx in [0, 2*N-1),
//   out[idx] = sum over i of A[i] * B[idx - i]
// restricted to 0 <= i < N and 0 <= idx - i < N. __restrict__ on the
// read-only inputs lets the compiler route them through the read-only cache.
// Caller must launch at least 2*N-1 threads in total.
__global__ void conv_Kernel2(const mytype * __restrict__ A, const mytype * __restrict__ B, mytype *out, const int N){
    const int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx < 2 * N - 1) {
        // The valid i values form the contiguous ascending range
        // [max(0, idx-N+1), min(idx, N-1)] — same set and same summation
        // order as testing each i individually.
        const int first = (idx >= N) ? (idx - N + 1) : 0;
        const int last  = (idx <  N) ? idx : (N - 1);
        mytype acc = 0;
        for (int i = first; i <= last; i++)
            acc += A[i] * B[idx - i];
        out[idx] = acc;
    }
}
// Driver: reads N from argv[1], runs the CPU reference and the GPU
// conv_Kernel2 (H2D/D2H transfer time deliberately included in the GPU
// measurement), verifies the results match, and prints a speedup factor.
int main(int argc, char *argv[]){
    mytype *h_A, *d_A, *h_result, *d_result, *result, *h_B, *d_B, *A, *B;
    if (argc != 2) {printf("must specify N on the command line\n"); return 1;}
    int my_N = atoi(argv[1]);
    if ((my_N < 1) || (my_N > MAXN)) {printf("N out of range\n"); return 1;}
    // A/B feed the CPU reference; h_A/h_B hold the copies staged for the GPU.
    B = (mytype *)malloc(my_N*sizeof(mytype));
    A = (mytype *)malloc(my_N*sizeof(mytype));
    h_A = (mytype *)malloc(my_N*sizeof(mytype));
    h_B = (mytype *)malloc(my_N*sizeof(mytype));
    h_result = (mytype *)malloc(2*my_N*sizeof(mytype));
    result = (mytype *)malloc(2*my_N*sizeof(mytype));
    cudaMalloc(&d_B, my_N*sizeof(mytype));
    cudaMalloc(&d_A, my_N*sizeof(mytype));
    cudaMalloc(&d_result, 2*my_N*sizeof(mytype));
    // Small random integers (see RG) so sums stay exactly representable.
    for (int i=0; i < my_N; i++){
        A[i] = rand()%RG;
        B[i] = rand()%RG;
        h_A[i] = A[i];
        h_B[i] = B[i];}
    for (int i=0; i < 2*my_N; i++){
        result[i] = 0;
        h_result[i] = 0;}
    unsigned long long cpu_time = dtime_usec(0);
    conv(A, B, result, my_N);
    cpu_time = dtime_usec(cpu_time);
    cudaMemset(d_result, 0, 2*my_N*sizeof(mytype));
    unsigned long long gpu_time = dtime_usec(0);
    cudaMemcpy(d_A, h_A, my_N*sizeof(mytype), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, my_N*sizeof(mytype), cudaMemcpyHostToDevice);
    // BUG FIX: the kernel produces 2*my_N-1 outputs. The previous grid size
    // ((2*(my_N-1))+nTPB-1)/nTPB launched only ceil((2*my_N-2)/nTPB) blocks,
    // which falls one thread short whenever 2*my_N-2 is a multiple of nTPB
    // (e.g. my_N=129 with nTPB=256), leaving the last output uncomputed.
    conv_Kernel2<<<(2*my_N-1+nTPB-1)/nTPB,nTPB>>>(d_A, d_B, d_result, my_N);
    cudaDeviceSynchronize();
    cudaMemcpy(h_result, d_result, 2*my_N*sizeof(mytype), cudaMemcpyDeviceToHost);
    gpu_time = dtime_usec(gpu_time);
    // BUG FIX: mytype is floating point here; the old "%d" conversion for
    // result[i]/h_result[i] was undefined behavior. Cast to double for "%f".
    for (int i = 0; i < 2*my_N; i++)
        if (result[i] != h_result[i]) {printf("mismatch2 at %d, cpu: %f, gpu %f\n", i, (double)result[i], (double)h_result[i]); return 1;}
    // BUG FIX: "%llu" matches unsigned long long (old "%ld" was mismatched).
    printf("Finished. Results match. cpu time: %lluus, gpu time: %lluus\n", cpu_time, gpu_time);
    printf("cpu/gpu = %f\n", cpu_time/(float)gpu_time);
    free(A); free(B); free(h_A); free(h_B); free(result); free(h_result);
    cudaFree(d_A); cudaFree(d_B); cudaFree(d_result);
    return 0;
}
#包括
#包括
#包括
#包括
//RG*RG*MAXN必须适合mytype
#定义最大值100000
#定义RG 10
#定义 USECPSEC 1000000ULL
#定义nTPB 256
typedef双mytype;
void conv(常量mytype*A、常量mytype*B、mytype*out、int N){
对于(int i=0;i(idx-N)))我的和+=A[i]*B[idx-i];
out[idx]=我的金额;
}
}
int main(int argc,char*argv[]){
mytype*h_A、*d_A、*h_result、*d_result、*result、*h_B、*d_B、*A、*B;
如果(argc!=2){printf(“必须在命令行上指定N”);返回1;}
int my_N=atoi(argv[1]);
如果((my_N<1)|(my_N>MAXN)){printf(“N超出范围\N”);返回1;}
B=(mytype*)malloc(myn*sizeof(mytype));
A=(mytype*)malloc(myn*sizeof(mytype));
h_A=(mytype*)malloc(my_N*sizeof(mytype));
h_B=(mytype*)malloc(my_N*sizeof(mytype));
h_结果=(mytype*)malloc(2*my_N*sizeof(mytype));
结果=(mytype*)malloc(2*my_N*sizeof(mytype));
cudaMalloc(&d_B,my_N*sizeof(mytype));
cudaMalloc(&d_A,my_N*sizeof(mytype));
cudaMalloc(&d_结果,2*my_N*sizeof(mytype));
for(int i=0;i
$ ./t617
Finished. Results match. cpu time: 13792us, gpu time: 3209us
Finished. Results match. cpu time: 13792us, gpu2 time: 1626us
$
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
// RG*RG*MAXN must fit within mytype
#define MAXN 100000
#define RG 10
#define USECPSEC 1000000ULL
#define nTPB 256
typedef double mytype;
// CPU reference: full 1-D convolution, out[i + j] += A[i] * B[j] for all
// 0 <= i, j < N. `out` must provide at least 2*N-1 elements and be
// zero-initialized by the caller, since results are accumulated with +=.
void conv(const mytype *A, const mytype *B, mytype* out, int N) {
for (int i = 0; i < N; ++i)
for (int j = 0; j < N; ++j)
out[i + j] += A[i] * B[j];
}
// Returns current wall-clock time in microseconds minus `prev`.
// Usage: t = dtime_usec(0); ...work...; t = dtime_usec(t);  // elapsed usec
unsigned long long dtime_usec(unsigned long long prev){
timeval tv1;
gettimeofday(&tv1,0);
return ((tv1.tv_sec * USECPSEC)+tv1.tv_usec) - prev;
}
// One thread per output element idx in [0, 2*N-1):
//   out[idx] = sum over i of A[i] * B[idx - i]
// restricted to 0 <= i < N and 0 <= idx - i < N. __restrict__ on the
// read-only inputs lets the compiler use the read-only data cache.
// Caller must launch at least 2*N-1 threads in total.
__global__ void conv_Kernel2(const mytype * __restrict__ A, const mytype * __restrict__ B, mytype *out, const int N){
int idx = threadIdx.x+blockDim.x*blockIdx.x;
if (idx < (2*N)-1){
mytype my_sum = 0;
for (int i = 0; i < N; i++)
// Guard selects exactly the i values for which idx - i is a valid B index.
if (((idx < N) && (i <= idx)) || ((idx >= N) && (i > (idx-N)))) my_sum += A[i]*B[idx-i];
out[idx] = my_sum;
}
}
// Driver: reads N from argv[1], runs the CPU reference and the GPU
// conv_Kernel2 (H2D/D2H transfer time deliberately included in the GPU
// measurement), verifies the results match, and prints a speedup factor.
int main(int argc, char *argv[]){
    mytype *h_A, *d_A, *h_result, *d_result, *result, *h_B, *d_B, *A, *B;
    if (argc != 2) {printf("must specify N on the command line\n"); return 1;}
    int my_N = atoi(argv[1]);
    if ((my_N < 1) || (my_N > MAXN)) {printf("N out of range\n"); return 1;}
    // A/B feed the CPU reference; h_A/h_B hold the copies staged for the GPU.
    B = (mytype *)malloc(my_N*sizeof(mytype));
    A = (mytype *)malloc(my_N*sizeof(mytype));
    h_A = (mytype *)malloc(my_N*sizeof(mytype));
    h_B = (mytype *)malloc(my_N*sizeof(mytype));
    h_result = (mytype *)malloc(2*my_N*sizeof(mytype));
    result = (mytype *)malloc(2*my_N*sizeof(mytype));
    cudaMalloc(&d_B, my_N*sizeof(mytype));
    cudaMalloc(&d_A, my_N*sizeof(mytype));
    cudaMalloc(&d_result, 2*my_N*sizeof(mytype));
    // Small random integers (see RG) so sums stay exactly representable.
    for (int i=0; i < my_N; i++){
        A[i] = rand()%RG;
        B[i] = rand()%RG;
        h_A[i] = A[i];
        h_B[i] = B[i];}
    for (int i=0; i < 2*my_N; i++){
        result[i] = 0;
        h_result[i] = 0;}
    unsigned long long cpu_time = dtime_usec(0);
    conv(A, B, result, my_N);
    cpu_time = dtime_usec(cpu_time);
    cudaMemset(d_result, 0, 2*my_N*sizeof(mytype));
    unsigned long long gpu_time = dtime_usec(0);
    cudaMemcpy(d_A, h_A, my_N*sizeof(mytype), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, my_N*sizeof(mytype), cudaMemcpyHostToDevice);
    // BUG FIX: the kernel produces 2*my_N-1 outputs. The previous grid size
    // ((2*(my_N-1))+nTPB-1)/nTPB launched only ceil((2*my_N-2)/nTPB) blocks,
    // which falls one thread short whenever 2*my_N-2 is a multiple of nTPB
    // (e.g. my_N=129 with nTPB=256), leaving the last output uncomputed.
    conv_Kernel2<<<(2*my_N-1+nTPB-1)/nTPB,nTPB>>>(d_A, d_B, d_result, my_N);
    cudaDeviceSynchronize();
    cudaMemcpy(h_result, d_result, 2*my_N*sizeof(mytype), cudaMemcpyDeviceToHost);
    gpu_time = dtime_usec(gpu_time);
    // BUG FIX: mytype is floating point here; the old "%d" conversion for
    // result[i]/h_result[i] was undefined behavior. Cast to double for "%f".
    for (int i = 0; i < 2*my_N; i++)
        if (result[i] != h_result[i]) {printf("mismatch2 at %d, cpu: %f, gpu %f\n", i, (double)result[i], (double)h_result[i]); return 1;}
    // BUG FIX: "%llu" matches unsigned long long (old "%ld" was mismatched).
    printf("Finished. Results match. cpu time: %lluus, gpu time: %lluus\n", cpu_time, gpu_time);
    printf("cpu/gpu = %f\n", cpu_time/(float)gpu_time);
    free(A); free(B); free(h_A); free(h_B); free(result); free(h_result);
    cudaFree(d_A); cudaFree(d_B); cudaFree(d_result);
    return 0;
}