What is the difference between float and double types in CUDA programming?

I am new to CUDA programming. In my program (matrix multiplication using shared memory) I defined BLOCK_SIZE = 20, and with a 1200*1200 matrix the program works with double elements but not with float elements (with float elements it only works up to an 840*840 matrix). My question is why this happens, even though we know that the float type is smaller than double.

// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row * M.stride + col)
#include <stdio.h>
// Thread block size
#define BLOCK_SIZE 20
typedef struct {
int width;
int height;
int stride; 
float* elements;
} Matrix;
// Get a matrix element
__device__ float GetElement(const Matrix A, int row, int col)
{
return A.elements[row * A.stride + col];
}
// Set a matrix element
__device__ void SetElement(Matrix A, int row, int col,
float value)
{
A.elements[row * A.stride + col] = value;
}
// Get the BLOCK_SIZExBLOCK_SIZE sub-matrix Asub of A that is
// located col sub-matrices to the right and row sub-matrices down
// from the upper-left corner of A
__device__ Matrix GetSubMatrix(Matrix A, int row, int col)
{
Matrix Asub;

Asub.width = BLOCK_SIZE;
Asub.height = BLOCK_SIZE;
Asub.stride = A.stride;
Asub.elements = &A.elements[A.stride * BLOCK_SIZE * row+ BLOCK_SIZE * col];
return Asub;
}
// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
void MatMul(const Matrix A, const Matrix B, Matrix C)
{

// Load A and B to device memory
Matrix d_A;
d_A.width = d_A.stride = A.width; d_A.height = A.height;
size_t size = A.width * A.height * sizeof(float);
cudaMalloc((void **)&d_A.elements, size);
cudaMemcpy(d_A.elements, A.elements, size,
cudaMemcpyHostToDevice);
Matrix d_B; 
d_B.width = d_B.stride = B.width; d_B.height = B.height;
size = B.width * B.height * sizeof(float);
cudaMalloc((void **)&d_B.elements, size);
cudaMemcpy(d_B.elements, B.elements, size,
cudaMemcpyHostToDevice);
// Allocate C in device memory
Matrix d_C;
d_C.width = d_C.stride = C.width; d_C.height = C.height;
size = C.width * C.height * sizeof(float);
cudaMalloc((void **)&d_C.elements, size);
// Invoke kernel
dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE);
//dim3 dimBlock(C.height, C.width);
//dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
dim3 dimGrid((B.width+dimBlock.x-1) / dimBlock.x, (A.height+dimBlock.y-1) /dimBlock.y);
MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
// Read C from device memory
cudaMemcpy(C.elements, d_C.elements, size,
cudaMemcpyDeviceToHost);
// Free device memory
cudaFree(d_A.elements);
cudaFree(d_B.elements);
cudaFree(d_C.elements);
}
// Matrix multiplication kernel called by MatMul()
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
// Block row and column
int blockRow = blockIdx.y;
int blockCol = blockIdx.x;
// Each thread block computes one sub-matrix Csub of C
Matrix Csub = GetSubMatrix(C, blockRow, blockCol);
// Each thread computes one element of Csub
// by accumulating results into Cvalue
float Cvalue = 0;
// Thread row and column within Csub
int row = threadIdx.y;
int col = threadIdx.x;
// Loop over all the sub-matrices of A and B that are
// required to compute Csub
// Multiply each pair of sub-matrices together
// and accumulate the results
for (int m = 0; m < (A.width / BLOCK_SIZE); ++m) {
// Get sub-matrix Asub of A
Matrix Asub = GetSubMatrix(A, blockRow, m);
// Get sub-matrix Bsub of B
Matrix Bsub = GetSubMatrix(B, m, blockCol);
// Shared memory used to store Asub and Bsub respectively
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load Asub and Bsub from device memory to shared memory
// Each thread loads one element of each sub-matrix
As[row][col] = GetElement(Asub, row, col);
Bs[row][col] = GetElement(Bsub, row, col);
// Synchronize to make sure the sub-matrices are loaded
// before starting the computation
 __syncthreads();
// Multiply Asub and Bsub together
for (int e = 0; e < BLOCK_SIZE; ++e)
Cvalue += As[row][e] * Bs[e][col];
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write Csub to device memory
// Each thread writes one element
SetElement(Csub, row, col, Cvalue);
}
//////////////////////////////////////////////////////////
/// print_matrix function ///////////////////////////
////////////////////////////////////////////////////////
void print_matrix(float *c,int row,int col){
for (int i = 0; i < row; ++i){
for (int j = 0; j < col; ++j)
printf("%f ",c[col*i +j]);
printf("\n\n");
}
}
//////////////////////////////////////////////////////////
/// random_init function ///////////////////////////
////////////////////////////////////////////////////////
void random_init(float *a,int size){
for(int i=0;i<size;i++)
a[i]=rand()%10;
}
////////////////////////////////////////////////////////
int main(void){

///////////////////////////////////////////////////////
cudaEvent_t start,stop;
///////////////////////////////////////////////////////

Matrix A,B,C;
A.width=1200;
A.height=1200;
B.width=1200;
B.height=1200;
C.width=B.width;
C.height=A.height;

size_t size = A.width * A.height * sizeof(float);
A.elements = (float *)malloc(size);
//random_init(A.elements,A.width * A.height );
size = B.width * B.height * sizeof(float);
B.elements= (float *)malloc(size);
//random_init(B.elements,B.width * B.height);
size = C.width * C.height * sizeof(float);
C.elements= (float *)malloc(size);
for(int i=0;i<A.width*A.height;i++)
A.elements[i]=1;
for(int i=0;i<B.width*B.height;i++)
B.elements[i]=1;
printf("matrix A(%d,%d) & matrix B(%d,%d) & matrix   C(%d,%d)\n",A.width,A.height,B.width,
B.height,C.width,C.height);
///////////////////////////////////////////////////////
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
///////////////////////////////////////////////////////

MatMul(A,B,C);
///////////////////////////////////////////////////////
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,start,stop);
printf("Time to generate : %3.5f ms\n",elapsedTime);
///////////////////////////////////////////////////////
printf("\nC\n");
//print_matrix(C.elements,C.height,C.width);


printf("C[%d]=%f\n",0,C.elements[0]);
printf("C[%d]=%f\n",C.width -1,C.elements[C.width-1]);
printf("C[%d]=%f\n",(C.width * C.height)-1,C.elements[(C.width * C.height)-1]);

getchar();
return(0);
}
"Display driver stopped responding and has recovered"

This message indicates that you have run into a known problem:

Under Windows, kernels that take too long to execute cause the Windows display watchdog timer to reset the display device, which terminates execution of the CUDA code. Kernels that execute for longer than about 2 seconds can run into this.


If you search for "windows TDR" you will find other descriptions of this issue and possible ways to address it. You may also want to investigate why your code's execution time becomes so long after the change.
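One way to confirm that the watchdog is what kills the run is to check the CUDA error state around the kernel launch; a kernel aborted by TDR is typically reported as a launch timeout. Below is a minimal error-checking sketch (an illustration added here, not part of the original program), assuming it replaces the bare launch inside MatMul():

MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
// Catch configuration errors raised at launch time
// (e.g., an invalid block or grid size).
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("launch error: %s\n", cudaGetErrorString(err));
// Wait for the kernel to finish; a watchdog abort shows up here,
// typically as "the launch timed out and was terminated"
// (cudaErrorLaunchTimeout).
err = cudaDeviceSynchronize();
if (err != cudaSuccess)
printf("kernel error: %s\n", cudaGetErrorString(err));

If the timeout is indeed the cause, common workarounds described in the "windows TDR" material include running on a GPU that is not driving a display or increasing the watchdog delay.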

可能存在“sizeof(float)vs sizeof(cl_float)”的问题一个可能是64位,而cl一个的长度必须是32位。特别是如果您的cpu和操作系统以及编译器是64位的。我的cpu和操作系统都是64位的,使用nvidia geforce gt555M和cuda4.2,我的编译器是visual studio 2010,但它不识别cl_浮点。我应该包括任何特殊的库吗?我编译并运行了您的coda,我在
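As a side note on the type-size question: on every platform CUDA supports, float is 4 bytes and double is 8 bytes, on both the host and the device, so a host/device size mismatch is not the issue here. A trivial host-side check (a sketch added for illustration):

#include <stdio.h>
int main(void){
// float is 32-bit and double is 64-bit regardless of whether
// the CPU, OS and compiler are 64-bit.
printf("sizeof(float) = %u\n", (unsigned)sizeof(float));
printf("sizeof(double) = %u\n", (unsigned)sizeof(double));
return(0);
}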