C++ CUDA tex1Dfetch()错误行为
我对CUDA编程非常陌生,我正面临一个让我发疯的问题。发生什么事: 我有一个非常简单的程序(只是为了学习),其中创建了一个输入图像和一个输出图像16x16。输入图像初始化为0..255之间的值,然后绑定到纹理。CUDA内核只是将输入映像复制到输出映像。输入图像值是通过调用tex1Dfetch()获得的,该函数在某些情况下返回非常奇怪的值。请参阅下面的代码、内核中的注释和程序的输出。代码完整且可编译,因此您可以在VC中创建CUDA项目,并将代码粘贴到主“.cu”文件中 请帮帮我!我做错了什么 我正在使用VS 2013社区和CUDA SDK 6.5+CUDA集成来支持VS 2013C++ CUDA tex1Dfetch()错误行为,c++,c,cuda,textures,fetch,C++,C,Cuda,Textures,Fetch,我对CUDA编程非常陌生,我正面临一个让我发疯的问题。发生什么事: 我有一个非常简单的程序(只是为了学习),其中创建了一个输入图像和一个输出图像16x16。输入图像初始化为0..255之间的值,然后绑定到纹理。CUDA内核只是将输入映像复制到输出映像。输入图像值是通过调用tex1Dfetch()获得的,该函数在某些情况下返回非常奇怪的值。请参阅下面的代码、内核中的注释和程序的输出。代码完整且可编译,因此您可以在VC中创建CUDA项目,并将代码粘贴到主“.cu”文件中 请帮帮我!我做错了什么 我正
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
texture<unsigned char> tex;
cudaError_t testMyKernel(unsigned char * inputImg, unsigned char * outputImg, int width, int height);
__global__ void myKernel(unsigned char *outImg, int width)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int idx = row*width + col;
__shared__ unsigned char input;
__shared__ unsigned char input2;
unsigned char *outPix = outImg + idx;
//It fetches strange value, for example, when the idx==0 then the input is 51.
//But I expect that input==idx (according to the input image initialization).
input = tex1Dfetch(tex, idx);
printf("Fetched for idx=%d: %d\n", idx, input);
*outPix = input;
//Very strange is that when I test the following code then the tex1Dfetch() returns correct values.
if (idx == 0)
{
printf("\nKernel test print:\n");
for (int i = 0; i < 256; i++)
{
input2 = tex1Dfetch(tex, i);
printf("%d,", input2);
}
}
}
int main()
{
const int width = 16;
const int height = 16;
const int count = width * height;
unsigned char imgIn[count];
unsigned char imgOut[count];
for (int i = 0; i < count; i++)
{
imgIn[i] = i;
}
cudaError_t cudaStatus = testMyKernel(imgIn, imgOut, width, height);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "testMyKernel failed!");
return 1;
}
printf("\n\nOutput values:\n");
for (int i = 0; i < height; i++)
{
for (int j = 0; j < width; j++)
{
printf("%d,", imgOut[i * width + j]);
}
}
printf("\n");
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
getchar();
return 0;
}
cudaError_t testMyKernel(unsigned char * inputImg, unsigned char * outputImg, int width, int height)
{
unsigned char * dev_in;
unsigned char * dev_out;
size_t size = width * height * sizeof(unsigned char);
cudaError_t cudaStatus;
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// input data
cudaStatus = cudaMalloc((void**)&dev_in, size);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_in, inputImg, size, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaBindTexture(NULL, tex, dev_in, size);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaBindTexture failed!");
goto Error;
}
// output data
cudaStatus = cudaMalloc((void**)&dev_out, size);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
dim3 threadsPerBlock(4, 4);
int blk_x = width / threadsPerBlock.x;
int blk_y = height / threadsPerBlock.y;
dim3 numBlocks(blk_x, blk_y);
// Launch a kernel on the GPU with one thread for each element.
myKernel<<<numBlocks, threadsPerBlock>>>(dev_out, width);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "myKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching myKernel!\n", cudaStatus);
goto Error;
}
//copy output image to host
cudaStatus = cudaMemcpy(outputImg, dev_out, size, cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaUnbindTexture(tex);
cudaFree(dev_in);
cudaFree(dev_out);
return cudaStatus;
}
这是内核的一行
input = tex1Dfetch(tex, idx);
正在导致块的线程之间出现争用状况。块中的所有线程都试图将纹理中的值提取到\uuuuuuuuuuuuuuu
变量输入
中,同时导致未定义的行为。您应该以\uuuu shared\uuuu
数组的形式为块的每个线程分配单独的共享内存空间
对于你目前的情况,它可能是这样的
__shared__ unsigned char input[16]; //4 x 4 block size
内核的其余部分应类似于:
int idx_local = threadIdx.y * blockDim.x + threadIdx.x; //local id of thread in a block
input[idx_local] = tex1Dfetch(tex, idx);
printf("Fetched for idx=%d: %d\n", idx, input[idx_local]);
*outPix = input[idx_local];
内核末尾的条件中的代码工作正常,因为由于指定的条件
if(idx==0)
,只有第一个块的第一个线程将串行执行所有处理,而所有其他线程将保持空闲,因此问题将因缺少竞争条件而消失。谢谢!现在可以了。我必须将我的思维转换为并行模式:)
int idx_local = threadIdx.y * blockDim.x + threadIdx.x; //local id of thread in a block
input[idx_local] = tex1Dfetch(tex, idx);
printf("Fetched for idx=%d: %d\n", idx, input[idx_local]);
*outPix = input[idx_local];