使用CUDA C添加两个向量,启动内核

使用CUDA C添加两个向量并启动内核。这是一个关于异构并行编程的作业问题：代码由教学人员提供，学生只需填充标有 //@@ 的区域。程序执行时没有错误，但反馈说代码的输出与预期结果不匹配。

我有一个关于异构并行编程的作业要做。代码是由教学人员编写的,我们的职责是只填充标有
//@@
的区域。代码应该使用CUDA C添加两个向量。我尝试了下面的解决方案,虽然程序执行时没有错误,但反馈说代码的输出与预期结果不匹配。下面是我添加了我认为需要的代码:

// MP 1
#include    <wb.h>

// Element-wise vector addition kernel: each thread computes one output element.
// Expects a 1-D grid of 1-D blocks large enough to cover len elements.
__global__ void vecAdd(float* in1, float* in2, float* out, int len) {
    //@@ Insert code to implement vector addition here
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // Bounds guard: the last block may extend past the end of the vectors.
    if (idx < len) {
        out[idx] = in1[idx] + in2[idx];
    }
}



// Host driver: imports two input vectors, adds them on the GPU via vecAdd,
// copies the result back, and hands it to the grader (wbSolution).
int main(int argc, char ** argv) {
    wbArg_t args;
    int inputLength;
    float * hostInput1;
    float * hostInput2;
    float * hostOutput;
    float * deviceInput1;
    float * deviceInput2;
    float * deviceOutput;

    args = wbArg_read(argc, argv);

    wbTime_start(Generic, "Importing data and creating memory on host");
    hostInput1 = (float *) wbImport(wbArg_getInputFile(args, 0), &inputLength);
    hostInput2 = (float *) wbImport(wbArg_getInputFile(args, 1), &inputLength);
    hostOutput = (float *) malloc(inputLength * sizeof(float));
    // BUG FIX: 'size' must be computed AFTER wbImport has filled inputLength.
    // The original computed it at the top of main from an uninitialized
    // inputLength, so every cudaMalloc/cudaMemcpy used a garbage byte count.
    int size = inputLength * sizeof(float);
    wbTime_stop(Generic, "Importing data and creating memory on host");

    wbLog(TRACE, "The input length is ", inputLength);

    wbTime_start(GPU, "Allocating GPU memory.");
    //@@ Allocate GPU memory here
    cudaMalloc((void**)&deviceInput1, size);
    cudaMalloc((void**)&deviceInput2, size);
    cudaMalloc((void**)&deviceOutput, size);
    wbTime_stop(GPU, "Allocating GPU memory.");

    wbTime_start(GPU, "Copying input memory to the GPU.");
    //@@ Copy memory to the GPU here
    cudaMemcpy(deviceInput1, hostInput1, size, cudaMemcpyHostToDevice);
    cudaMemcpy(deviceInput2, hostInput2, size, cudaMemcpyHostToDevice);
    wbTime_stop(GPU, "Copying input memory to the GPU.");

    //@@ Initialize the grid and block dimensions here
    // Ceil-divide so a partial final block covers the tail of the vector.
    dim3 DimGrid((inputLength - 1) / 256 + 1, 1, 1);
    dim3 DimBlock(256, 1, 1);

    wbTime_start(Compute, "Performing CUDA computation");
    //@@ Launch the GPU Kernel here
    vecAdd<<<DimGrid, DimBlock>>>(deviceInput1, deviceInput2, deviceOutput, inputLength);
    // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
    // current API and blocks until the kernel has finished.
    cudaDeviceSynchronize();
    wbTime_stop(Compute, "Performing CUDA computation");

    wbTime_start(Copy, "Copying output memory to the CPU");
    //@@ Copy the GPU memory back to the CPU here
    cudaMemcpy(hostOutput, deviceOutput, size, cudaMemcpyDeviceToHost);
    wbTime_stop(Copy, "Copying output memory to the CPU");

    wbTime_start(GPU, "Freeing GPU Memory");
    //@@ Free the GPU memory here
    // BUG FIX: memory from cudaMalloc must be released with cudaFree().
    // Passing device pointers to the host allocator's free() is undefined
    // behavior (the original did exactly that).
    cudaFree(deviceInput1);
    cudaFree(deviceInput2);
    cudaFree(deviceOutput);
    wbTime_stop(GPU, "Freeing GPU Memory");

    wbSolution(args, hostOutput, inputLength);

    // Host-side buffers were obtained via malloc/wbImport; free() is correct here.
    free(hostInput1);
    free(hostInput2);
    free(hostOutput);

    return 0;
}
// MP 1
#include <wb.h>

__global__ void vecAdd(float* in1, float* in2, float* out, int len) {
//@@ Insert code to implement vector addition here
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < len) out[i] = in1[i] + in2[i];
}
将代码下移到
inputLength
变量具有正确值的位置。更改此项:

//@@ i added ######
int size = inputLength*sizeof(float);
//@@ ########
args = wbArg_read(argc, argv);

wbTime_start(Generic, "Importing data and creating memory on host");
hostInput1 = (float *) wbImport(wbArg_getInputFile(args, 0), &inputLength);
hostInput2 = (float *) wbImport(wbArg_getInputFile(args, 1), &inputLength);
hostOutput = (float *) malloc(inputLength * sizeof(float));
为此:

args = wbArg_read(argc, argv);

wbTime_start(Generic, "Importing data and creating memory on host");
hostInput1 = (float *) wbImport(wbArg_getInputFile(args, 0), &inputLength);
hostInput2 = (float *) wbImport(wbArg_getInputFile(args, 1), &inputLength);
hostOutput = (float *) malloc(inputLength * sizeof(float));

//@@ i added ######
int size = inputLength*sizeof(float);
//@@ ########

此外，按照talonmies在评论中的建议去做。

将代码下移到
inputLength
变量具有正确值的位置。更改此项:

//@@ i added ######
int size = inputLength*sizeof(float);
//@@ ########
args = wbArg_read(argc, argv);

wbTime_start(Generic, "Importing data and creating memory on host");
hostInput1 = (float *) wbImport(wbArg_getInputFile(args, 0), &inputLength);
hostInput2 = (float *) wbImport(wbArg_getInputFile(args, 1), &inputLength);
hostOutput = (float *) malloc(inputLength * sizeof(float));
为此:

args = wbArg_read(argc, argv);

wbTime_start(Generic, "Importing data and creating memory on host");
hostInput1 = (float *) wbImport(wbArg_getInputFile(args, 0), &inputLength);
hostInput2 = (float *) wbImport(wbArg_getInputFile(args, 1), &inputLength);
hostOutput = (float *) malloc(inputLength * sizeof(float));

//@@ i added ######
int size = inputLength*sizeof(float);
//@@ ########

此外,按照Talonmes在评论中的建议去做。

谢谢Talonmes和ahmad,他们都帮助我得到了正确的答案,这对我来说很有用,完整的答案(对感兴趣的人来说)如下:

// MP 1
#include    <wb.h>

// Vector addition kernel: one thread per element, 1-D launch configuration.
__global__ void vecAdd(float* in1, float* in2, float* out, int len) {
    // Flat global index of this thread within the whole grid.
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    // Skip threads past the end of the data (grid rarely divides len evenly).
    if (tid < len) {
        out[tid] = in1[tid] + in2[tid];
    }
}



// Host driver (corrected answer): imports two vectors, adds them on the GPU,
// copies the result back, and submits it for grading via wbSolution.
int main(int argc, char ** argv) {
    wbArg_t args;
    int inputLength;
    float * hostInput1;
    float * hostInput2;
    float * hostOutput;
    float * deviceInput1;
    float * deviceInput2;
    float * deviceOutput;

    args = wbArg_read(argc, argv);

    wbTime_start(Generic, "Importing data and creating memory on host");
    hostInput1 = (float *) wbImport(wbArg_getInputFile(args, 0), &inputLength);
    hostInput2 = (float *) wbImport(wbArg_getInputFile(args, 1), &inputLength);
    hostOutput = (float *) malloc(inputLength * sizeof(float));
    // Computed here, AFTER wbImport has set inputLength (the key fix).
    int size = inputLength * sizeof(float);
    wbTime_stop(Generic, "Importing data and creating memory on host");

    wbLog(TRACE, "The input length is ", inputLength);

    wbTime_start(GPU, "Allocating GPU memory.");
    cudaMalloc((void**)&deviceInput1, size);
    cudaMalloc((void**)&deviceInput2, size);
    cudaMalloc((void**)&deviceOutput, size);
    wbTime_stop(GPU, "Allocating GPU memory.");

    wbTime_start(GPU, "Copying input memory to the GPU.");
    cudaMemcpy(deviceInput1, hostInput1, size, cudaMemcpyHostToDevice);
    cudaMemcpy(deviceInput2, hostInput2, size, cudaMemcpyHostToDevice);
    wbTime_stop(GPU, "Copying input memory to the GPU.");

    // Ceil-divide: enough 256-thread blocks to cover every element.
    dim3 DimGrid((inputLength - 1) / 256 + 1, 1, 1);
    dim3 DimBlock(256, 1, 1);

    wbTime_start(Compute, "Performing CUDA computation");
    //@@ Launch the GPU Kernel
    vecAdd<<<DimGrid, DimBlock>>>(deviceInput1, deviceInput2, deviceOutput, inputLength);
    // FIX: cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize()
    // to block until the kernel completes before stopping the timer.
    cudaDeviceSynchronize();
    wbTime_stop(Compute, "Performing CUDA computation");

    wbTime_start(Copy, "Copying output memory to the CPU");
    //@@ Copy the GPU memory back to the CPU
    cudaMemcpy(hostOutput, deviceOutput, size, cudaMemcpyDeviceToHost);
    wbTime_stop(Copy, "Copying output memory to the CPU");

    wbTime_start(GPU, "Freeing GPU Memory");
    //@@ Free the GPU memory
    cudaFree(deviceInput1);
    cudaFree(deviceInput2);
    cudaFree(deviceOutput);
    wbTime_stop(GPU, "Freeing GPU Memory");

    wbSolution(args, hostOutput, inputLength);

    free(hostInput1);
    free(hostInput2);
    free(hostOutput);

    return 0;
}
// MP 1
#include <wb.h>

__global__ void vecAdd(float* in1, float* in2, float* out, int len) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < len) out[i] = in1[i] + in2[i];
}
谢谢Talonmes和ahmad,他们都帮助我找到了正确的答案,而完整的答案(对于感兴趣的人)如下所示:

// MP 1
#include    <wb.h>

// Adds two float vectors element-wise on the device; 1-D grid, 1-D blocks.
__global__ void vecAdd(float* in1, float* in2, float* out, int len) {
    // Global element index handled by this thread.
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    // Out-of-range threads (partial last block) do nothing.
    if (gid < len) {
        out[gid] = in1[gid] + in2[gid];
    }
}



// Host driver (final answer listing): reads two input vectors, performs the
// addition on the GPU, retrieves the result, and reports it via wbSolution.
int main(int argc, char ** argv) {
    wbArg_t args;
    int inputLength;
    float * hostInput1;
    float * hostInput2;
    float * hostOutput;
    float * deviceInput1;
    float * deviceInput2;
    float * deviceOutput;

    args = wbArg_read(argc, argv);

    wbTime_start(Generic, "Importing data and creating memory on host");
    hostInput1 = (float *) wbImport(wbArg_getInputFile(args, 0), &inputLength);
    hostInput2 = (float *) wbImport(wbArg_getInputFile(args, 1), &inputLength);
    hostOutput = (float *) malloc(inputLength * sizeof(float));
    // Byte count derived from the now-valid inputLength (the key fix).
    int size = inputLength * sizeof(float);
    wbTime_stop(Generic, "Importing data and creating memory on host");

    wbLog(TRACE, "The input length is ", inputLength);

    wbTime_start(GPU, "Allocating GPU memory.");
    cudaMalloc((void**)&deviceInput1, size);
    cudaMalloc((void**)&deviceInput2, size);
    cudaMalloc((void**)&deviceOutput, size);
    wbTime_stop(GPU, "Allocating GPU memory.");

    wbTime_start(GPU, "Copying input memory to the GPU.");
    cudaMemcpy(deviceInput1, hostInput1, size, cudaMemcpyHostToDevice);
    cudaMemcpy(deviceInput2, hostInput2, size, cudaMemcpyHostToDevice);
    wbTime_stop(GPU, "Copying input memory to the GPU.");

    // Ceil-divide: one 256-thread block per 256 elements, rounded up.
    dim3 DimGrid((inputLength - 1) / 256 + 1, 1, 1);
    dim3 DimBlock(256, 1, 1);

    wbTime_start(Compute, "Performing CUDA computation");
    //@@ Launch the GPU Kernel
    vecAdd<<<DimGrid, DimBlock>>>(deviceInput1, deviceInput2, deviceOutput, inputLength);
    // FIX: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
    // is the supported replacement with identical blocking semantics.
    cudaDeviceSynchronize();
    wbTime_stop(Compute, "Performing CUDA computation");

    wbTime_start(Copy, "Copying output memory to the CPU");
    //@@ Copy the GPU memory back to the CPU
    cudaMemcpy(hostOutput, deviceOutput, size, cudaMemcpyDeviceToHost);
    wbTime_stop(Copy, "Copying output memory to the CPU");

    wbTime_start(GPU, "Freeing GPU Memory");
    //@@ Free the GPU memory
    cudaFree(deviceInput1);
    cudaFree(deviceInput2);
    cudaFree(deviceOutput);
    wbTime_stop(GPU, "Freeing GPU Memory");

    wbSolution(args, hostOutput, inputLength);

    free(hostInput1);
    free(hostInput2);
    free(hostOutput);

    return 0;
}
// MP 1
#include <wb.h>

__global__ void vecAdd(float* in1, float* in2, float* out, int len) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < len) out[i] = in1[i] + in2[i];
}