Matrix 简单cuda矩阵乘法中的nan错误 我最近开始编写一些简单的cuda代码。请看下面我的代码。 #包括“矩阵乘法.h” void MatMul(常数矩阵A、常数矩阵B、矩阵C) { 矩阵d_A; d_A.宽度=A.宽度; d_A.高度=A.高度; 尺寸=A.宽度*A.高度*尺寸(浮动); cudaError\u t err=cudamaloc(&d_A.元素,大小); printf(“CUDA malloc A:%s\n”,cudaGetErrorString(err)); err=cudaMemcpy(d_A.元素,A.元素,大小,cudamemcpyhostodevice); printf(“复制到设备:%s\n”,cudaGetErrorString(err)); 矩阵d_B; d_B.宽度=B.宽度; d_B.高度=B.高度; 尺寸=B.宽度*B.高度*sizeof(浮动); err=cudaMalloc(&d_B.元素、大小); printf(“CUDA malloc B:%s\n”,cudaGetErrorString(err)); err=cudaMemcpy(d_B.元素,B.元素,大小,cudamemcpyhostodevice); printf(“将B复制到设备:%s\n”,cudaGetErrorString(err)); 矩阵d_C; d_C.宽度=C.宽度; d_C.高度=C.高度; 尺寸=C.宽度*C.高度*尺寸(浮动); err=cudaMalloc(&d_C.元素、大小); printf(“CUDA malloc C:%s\n”,cudaGetErrorString(err)); dim3 dimBlock(块大小,块大小); dim3 dimGrid((B.宽度+dimBlock.x-1)/dimBlock.x,(A.高度)+ dimBlock.y-1)/dimBlock.y); MatMulKernel(d_A,d_B,d_C); err=cudaThreadSynchronize(); printf(“运行内核:%s\n”,cudaGetErrorString(err)); 错误= cudaMemcpy(C.元素、d_C.元素、大小、cudaMemcpyDeviceToHost); printf(“从设备%s\n复制C”,cudaGetErrorString(err)); cudaFree(d_A.元素); cudaFree(d_B.元素); cudaFree(d_C.元素); } __全局无效MatMulKernel(矩阵A、矩阵B、矩阵C) { 浮动Cvalue=0.0; int row=blockIdx.y*blockDim.y+threadIdx.y; int col=blockIdx.x*blockDim.x+threadIdx.x; 如果(行>A.高度|列>B.宽度)返回; 对于(int e=0;e

Matrix 简单cuda矩阵乘法中的nan错误 我最近开始编写一些简单的cuda代码。请看下面我的代码。 #包括“矩阵乘法.h” void MatMul(常数矩阵A、常数矩阵B、矩阵C) { 矩阵d_A; d_A.宽度=A.宽度; d_A.高度=A.高度; 尺寸=A.宽度*A.高度*尺寸(浮动); cudaError\u t err=cudamaloc(&d_A.元素,大小); printf(“CUDA malloc A:%s\n”,cudaGetErrorString(err)); err=cudaMemcpy(d_A.元素,A.元素,大小,cudamemcpyhostodevice); printf(“复制到设备:%s\n”,cudaGetErrorString(err)); 矩阵d_B; d_B.宽度=B.宽度; d_B.高度=B.高度; 尺寸=B.宽度*B.高度*sizeof(浮动); err=cudaMalloc(&d_B.元素、大小); printf(“CUDA malloc B:%s\n”,cudaGetErrorString(err)); err=cudaMemcpy(d_B.元素,B.元素,大小,cudamemcpyhostodevice); printf(“将B复制到设备:%s\n”,cudaGetErrorString(err)); 矩阵d_C; d_C.宽度=C.宽度; d_C.高度=C.高度; 尺寸=C.宽度*C.高度*尺寸(浮动); err=cudaMalloc(&d_C.元素、大小); printf(“CUDA malloc C:%s\n”,cudaGetErrorString(err)); dim3 dimBlock(块大小,块大小); dim3 dimGrid((B.宽度+dimBlock.x-1)/dimBlock.x,(A.高度)+ dimBlock.y-1)/dimBlock.y); MatMulKernel(d_A,d_B,d_C); err=cudaThreadSynchronize(); printf(“运行内核:%s\n”,cudaGetErrorString(err)); 错误= cudaMemcpy(C.元素、d_C.元素、大小、cudaMemcpyDeviceToHost); printf(“从设备%s\n复制C”,cudaGetErrorString(err)); cudaFree(d_A.元素); cudaFree(d_B.元素); cudaFree(d_C.元素); } __全局无效MatMulKernel(矩阵A、矩阵B、矩阵C) { 浮动Cvalue=0.0; int row=blockIdx.y*blockDim.y+threadIdx.y; int col=blockIdx.x*blockDim.x+threadIdx.x; 如果(行>A.高度|列>B.宽度)返回; 对于(int e=0;e,matrix,cuda,nan,multiplication,Matrix,Cuda,Nan,Multiplication,结果如下所示(5乘5矩阵),它有时在某些元素中包含nan或无意义值 211200 南1 2 1 10211 南0 2 1 0 311421 我想不出原因。请帮帮我。非常感谢 内核内部的绑定检查应该是这样的: I started to write some simple cuda code recently. Please see my code below. #include "matrix_multiplication.h" void MatMul(const Matrix A, con

结果如下所示(5乘5矩阵),它有时在某些元素中包含nan或无意义值

211200

南1 2 1

10211

南0 2 1 0

311421


我想不出原因。请帮帮我。非常感谢

内核内部的绑定检查应该是这样的:

I started to write some simple cuda code recently. Please see my code below. 

#include "matrix_multiplication.h"

void MatMul(const Matrix A, const Matrix B, Matrix C)
{
  Matrix d_A;
  d_A.width = A.width;
  d_A.height = A.height;
  size_t size = A.width*A.height*sizeof(float);
  cudaError_t err = cudaMalloc(&d_A.elements,size);
  printf("CUDA malloc A: %s\n",cudaGetErrorString(err));
  err = cudaMemcpy(d_A.elements,A.elements,size,cudaMemcpyHostToDevice);
  printf("Copy A to device: %s\n",cudaGetErrorString(err));

  Matrix d_B;
  d_B.width = B.width;
  d_B.height = B.height;
  size = B.width*B.height*sizeof(float);
  err = cudaMalloc(&d_B.elements,size);
  printf("CUDA malloc B: %s\n",cudaGetErrorString(err));
  err = cudaMemcpy(d_B.elements,B.elements,size,cudaMemcpyHostToDevice);
  printf("Copy B to device: %s\n",cudaGetErrorString(err));

  Matrix d_C;
  d_C.width = C.width;
  d_C.height = C.height;
  size = C.width*C.height*sizeof(float);
  err = cudaMalloc(&d_C.elements,size);
  printf("CUDA malloc C: %s\n",cudaGetErrorString(err));

  dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
  dim3 dimGrid((B.width + dimBlock.x -1)/dimBlock.x,(A.height +
        dimBlock.y -1)/dimBlock.y);
  MatMulKernel<<<dimGrid,dimBlock>>>(d_A,d_B,d_C);
  err = cudaThreadSynchronize();
  printf("Run kernel: %s\n",cudaGetErrorString(err));

  err =
    cudaMemcpy(C.elements,d_C.elements,size,cudaMemcpyDeviceToHost);
  printf("Copy C off of device: %s\n",cudaGetErrorString(err));

  cudaFree(d_A.elements);
  cudaFree(d_B.elements);
  cudaFree(d_C.elements);
}

__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
  float Cvalue = 0.0;
  int row = blockIdx.y * blockDim.y + threadIdx.y;
  int col = blockIdx.x * blockDim.x + threadIdx.x;

  if(row > A.height || col > B.width) return;

  for(int e = 0; e < A.width; ++e)
  {
    Cvalue +=(A.elements[row*A.width + e]) * (B.elements[e*B.width +
        col]);
  }
  C.elements[row*C.width + col] = Cvalue;
}
如果没有=符号,您将在计算中包含(宽度+高度-1)个额外线程,这将执行越界内存访问并导致未定义的行为

if(row >= A.height || col >= B.width) return;