用于内环的CUDA线程
我有下面这个内核,希望用 CUDA 线程来并行化它的内环(标签:cuda, gpu, gpgpu, nvidia):
// One thread per row i (0 <= i < keep); each thread scans its `include`
// entries of d_Xblas sequentially. d_Xnum[i] serves as a per-row write
// cursor into d_Xco. No cross-thread races: thread i only touches row i
// of every array, so this kernel expects a 1D launch over `keep` threads.
__global__ void kernel1(int keep, int include, int width, int* d_Xco,
                        int* d_Xnum, bool* d_Xvalid, float* d_Xblas)
{
    const int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i >= keep)
        return;                              // guard the grid tail

    for (int k = 0; k < include; ++k) {
        const int idx = i * include + k;
        // NOTE: 1e5 is a double literal, so the comparison promotes the
        // float to double (kept as in the original for identical behavior).
        const bool hit = d_Xblas[idx] >= 1e5;
        const int cursor = d_Xnum[i];
        d_Xblas[idx] *= hit ? 0.0f : 1.0f;   // zero out flagged entries
        d_Xco[i * width + cursor] = k;       // written every iteration; only
                                             // survives when `hit` advances
                                             // the cursor below
        d_Xnum[i] += hit ? 1 : 0;
        d_Xvalid[idx] = !hit;
    }
}
使用二维网格(2D grid)启动:
// Host-side launch configuration for the 2D-grid variant.
int keep = 9000;
int include = 23000;
// NOTE(review): 0.2*include is computed in double and truncated on
// assignment to int (4600 here).
int width = 0.2*include;
int th = 32;
// 32x32 = 1024 threads per block -- the hardware maximum on most GPUs;
// any larger th would make the launch fail, and no error check follows.
dim3 threads(th,th);
// Ceil-division in both dimensions so the grid covers keep x include.
dim3 blocks ((keep+threads.x-1)/threads.x, (include+threads.y-1)/threads.y);
// NOTE(review): kernel2 is referenced but not defined in this excerpt;
// no cudaGetLastError()/cudaDeviceSynchronize() after the launch.
kernel2 <<< blocks,threads >>>( keep, include, width, d_Xco, d_Xnum,
d_Xvalid, d_Xblas );
在原始代码中,您有
for(k = 0; k < include ; k++){
...
int aux = d_Xnum[i];
...
d_Xco[i*width + aux] = k;
...
}
一旦您这样做,在并行化 k 循环时就不会再有当前存在的竞争条件——目前许多 k 线程会同时把不同的值写入 d_Xco 中的同一位置,而且写入顺序没有保证。网格应创建为 dim3 blocks((keep+threads.x-1)/threads.x, (include+threads.y-1)/threads.y);——你忘了括号。@sgar91 对不起,这是个打字错误。为什么 kernel2 不能工作?它给出了错误的结果?还是根本不执行?@pQB 是的,是错误的结果。它确实加快了速度,但……执行本身很好、快速且平稳。您的数据是如何布局的?按照 kernel2 的写法,您有 keep 列和 include 行。@harrism:我确实需要 d_Xco 中的全部数据,而不仅仅是最后一个元素。@Manolete,在您的顺序版本中,也只有最后一个元素会保留下来!请在调试器中逐步执行代码,自己验证一下。
// Same 2D-grid launch configuration, quoted again in the discussion.
int keep = 9000;
int include = 23000;
// NOTE(review): double arithmetic truncated to int (width == 4600 here).
int width = 0.2*include;
int th = 32;
// 1024 threads per block (32x32) -- at the per-block hardware limit on
// most GPUs.
dim3 threads(th,th);
// 2D ceil-division grid over keep x include elements.
dim3 blocks ((keep+threads.x-1)/threads.x, (include+threads.y-1)/threads.y);
// NOTE(review): kernel2 is not shown in this excerpt, and the launch is
// not followed by any CUDA error check.
kernel2 <<< blocks,threads >>>( keep, include, width, d_Xco, d_Xnum,
d_Xvalid, d_Xblas );
d_Xco
-------------------------------
| 2|3 |15 |4 |5 |5 | | | | | | .......
-------------------------------
for(k = 0; k < include ; k++){
...
int aux = d_Xnum[i];
...
d_Xco[i*width + aux] = k;
...
}
d_Xco[i*width + d_Xnum[i]] = include - 1;