Memory 需要CUDA设备内存事务

标题:需要理解 CUDA 设备内存事务(标签:memory、cuda、gpu、shared-memory)

我编写了小型cuda代码来理解全局内存到共享内存的传输事务。代码如下:

#include <iostream>
using namespace std;

// Microbenchmark kernel: stage one uchar4 per thread through dynamically
// sized shared memory (size is supplied as the third launch parameter and
// must be at least blockDim.x * sizeof(uchar4)).
// Each thread writes and then reads back only its own slot, so no
// __syncthreads() is needed between the store and the load.
__global__ void readUChar4(uchar4* c, uchar4* o){
  extern __shared__ uchar4 staging[];
  const int idx = threadIdx.x;
  staging[idx] = c[idx];
  o[idx] = staging[idx];
}

// Host driver: fills a 512-byte buffer with 'a', round-trips it through the
// kernel (global -> shared -> global), and prints the numeric byte values.
// Returns 0 on success, 1 on any CUDA/allocation failure.
int main(){
  // One uchar4 element per thread of the single 128-thread block below.
  const int n = 128;
  const size_t bytes = n * sizeof(uchar4);

  // Build the host source buffer deterministically instead of relying on a
  // hand-typed literal whose length may not match `bytes` (copying more
  // bytes than the string holds would read past the end of c_str()).
  string a(bytes, 'a');

  uchar4* c = NULL;
  cudaError_t e1 = cudaMalloc((void**)&c, bytes);
  if(e1 != cudaSuccess){
    cout << "Failed to allocate memory" << endl;
    return 1;
  }

  uchar4* o = NULL;
  cudaError_t e11 = cudaMalloc((void**)&o, bytes);
  if(e11 != cudaSuccess){
    cout << "Failed to allocate output memory" << endl;
    cudaFree(c);
    return 1;
  }

  cudaError_t e2 = cudaMemcpy(c, a.c_str(), bytes, cudaMemcpyHostToDevice);
  if(e2 != cudaSuccess){
    cout << "Failed to copy" << endl;
    cudaFree(o);
    cudaFree(c);
    return 1;
  }

  // Third launch parameter = dynamic shared-memory size used by the kernel.
  readUChar4<<<1, n, bytes>>>(c, o);

  // Kernel launches are asynchronous and never return an error directly;
  // configuration errors only surface via cudaGetLastError().
  cudaError_t eLaunch = cudaGetLastError();
  if(eLaunch != cudaSuccess){
    cout << "Kernel launch failed: " << cudaGetErrorString(eLaunch) << endl;
    cudaFree(o);
    cudaFree(c);
    return 1;
  }

  uchar4* oFromGPU = (uchar4*)malloc(bytes);
  if(oFromGPU == NULL){
    cout << "Failed to allocate host memory" << endl;
    cudaFree(o);
    cudaFree(c);
    return 1;
  }

  // This blocking cudaMemcpy synchronizes with the preceding kernel, so no
  // explicit cudaDeviceSynchronize() is required before reading the result.
  cudaError_t e22 = cudaMemcpy(oFromGPU, o, bytes, cudaMemcpyDeviceToHost);
  int rc = 0;
  if(e22 == cudaSuccess){
    for(int i = 0; i < n; i++){
      // uchar4 members are unsigned char: cast to int so cout prints the
      // numeric value (97 for 'a') rather than the character glyph.
      cout << (int)oFromGPU[i].x << " "
           << (int)oFromGPU[i].y << " "
           << (int)oFromGPU[i].z << " "
           << (int)oFromGPU[i].w << " " << endl;
    }
  }
  else{
    cout << "Failed to copy from GPU" << endl;
    rc = 1;
  }

  // Release all resources on every exit path (the original leaked them).
  free(oFromGPU);
  cudaFree(o);
  cudaFree(c);
  return rc;
}
#包括
使用名称空间std;
__全局无效readUChar4(uchar4*c,uchar4*o){
外部资源共享资源4 gc[];
int tid=threadIdx.x;
gc[tid]=c[tid];
o[tid]=gc[tid];
}
int main(){
字符串a=“aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa;
uchar4*c;
cudaError_t e1=cudamaloc((void**)和c,128*sizeof(uchar4));
如果(e1==cudaSuccess){
uchar4*o;
cudaError_t e11=cudamaloc((无效**)和o,128*sizeof(uchar4));
如果(e11==cudaSuccess){
cudaError_t e2=cudaMemcpy(c,a.c_str(),128*sizeof(uchar4),cudaMemcpyHostToDevice);
如果(e2==cudaSuccess){
readUChar4(c,o);
uchar4*oFromGPU=(uchar4*)malloc(128*sizeof(uchar4));
cudaError_t e22=cudaMemcpy(对于romgpu,o,128*sizeof(uchar4),cudamemcpydevicetoost);
如果(e22==cudaSuccess){
对于(int i=0;i<128;i++){
在您提供的代码中(此处重复),编译器将完全删除共享内存的存储和加载,因为它们对代码没有任何必要或有益的作用:

 __global__ void readUChar4(uchar4* c, uchar4* o){
  extern __shared__ uchar4 gc[];
  int tid = threadIdx.x;
  gc[tid] = c[tid];
  o[tid] = gc[tid];
}
假设您使用共享内存做了一些事情,因此它没有被消除,那么:

  • 在这段代码中,从全局内存的加载和向全局内存的存储,每个 warp(线程束)只需要一次事务(假设是 Fermi 或更新的 GPU),因为每个线程访问的只是 32 位数据(uchar4 = 4×8 位),即每个 warp 总共 128 字节。cudaMalloc 连续分配内存。
  • 第 1 条的答案同样适用于存储(store)操作,是的。
  • 此代码中没有 bank conflict(存储体冲突)。warp 中的线程始终是连续的,且第一个线程的索引是 warp 大小的倍数,因此线程 32 和 64 永远不会处于同一个 warp 中。而且,由于加载和存储的是 32 位数据类型,且 bank 宽度为 32 位,因此不存在冲突。

  • 编写这样的微基准测试时要小心:编译器可能会优化掉对共享内存的写入。——谢谢你的回答。关于问题 1,是每个线程一次事务,还是每个 warp 一次事务?因为每个线程访问 4 个字节,一个 warp 将访问 128 个字节。按照我对《CUDA C 编程指南》的理解,每个 warp 的一次事务最大可以是 128 字节。由于 cudaMalloc 连续分配内存,整个 warp 应该能够在一次 128 字节的事务中读取设备内存。请指出我的思路是否正确,感谢指正。另外,如果 cudaMalloc 连续分配内存,那么在内存碎片化的情况下会发生什么(即,如果不存在连续的物理内存块怎么办)?