Are CUDA kernel launch parameters explained correctly?


Here I try to explain the CUDA launch parameter model (or execution configuration model) to myself with some pseudo code, but I don't know whether there are any big mistakes in it, so I hope someone can help check it and give me some advice. Thanks.

Here it is:

/*
  normally, we write a kernel function like this.
  note, __global__ means this function will be called from host code
  and executed on the device, and a __global__ function can only return void.
  if any parameter is passed into a __global__ function, it is stored
  in shared memory on the device. so, a kernel function is quite different from a *normal*
  C/C++ function. if I were the CUDA author, I would make the kernel function look even
  more different from a normal C function.
*/

__global__ void
kernel(float *arr_on_device, int n) {
        int idx = blockIdx.x * blockDim.x + threadIdx.x;
        if (idx < n) {
                arr_on_device[idx] = arr_on_device[idx] * arr_on_device[idx];
        }
}
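
For concreteness (this host code is not part of the original question; the array size, block size, and missing error checking are arbitrary choices), this is roughly how the kernel above is launched with the runtime API:

#include <cuda_runtime.h>
#include <stdio.h>

int main(void) {
        const int n = 1024;
        float host_arr[1024];
        for (int i = 0; i < n; ++i) host_arr[i] = (float)i;

        float *arr_on_device = NULL;
        cudaMalloc(&arr_on_device, n * sizeof(float));
        cudaMemcpy(arr_on_device, host_arr, n * sizeof(float), cudaMemcpyHostToDevice);

        /* 256 threads per block; enough blocks to cover all n elements */
        const int threads = 256;
        const int blocks  = (n + threads - 1) / threads;
        kernel<<<blocks, threads>>>(arr_on_device, n);

        cudaMemcpy(host_arr, arr_on_device, n * sizeof(float), cudaMemcpyDeviceToHost);
        cudaFree(arr_on_device);
        printf("host_arr[2] = %f\n", host_arr[2]);   /* expect 4.0 */
        return 0;
}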

/*
  after this definition, we can call this kernel function from our normal C/C++ code !!
  do you feel something weird ? inconsistent ?
  normally, when I write C code, I think a lot about how the execution runs down to
  the metal, and this one... it feels like fragile code that breaks the sequential
  thinking process in my mind.
  in order to make things feel normal again, I found a way to explain it: I expand the
  *__global__* function into some pseudo code:
*/

#define __foreach(var, start, end) for (var = start; var < end; ++var)

__device__ int
__indexing() {
        /* flatten the 3D block index first, then the 3D thread index within the block */
        const int blockId = blockIdx.x +
                            blockIdx.y * gridDim.x +
                            blockIdx.z * gridDim.x * gridDim.y;

        return
                blockId * (blockDim.x * blockDim.y * blockDim.z) +
                threadIdx.z * (blockDim.x * blockDim.y) +
                threadIdx.y * blockDim.x +
                threadIdx.x;
}
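
One way to check that flattening (a sketch added here, not part of the original question) is a tiny kernel that records the index each thread computes, so the host can verify that every value in [0, total) appears exactly once:

__global__ void
record_index(int *out) {
        /* same linearization as __indexing(), written inline */
        const int blockId = blockIdx.x +
                            blockIdx.y * gridDim.x +
                            blockIdx.z * gridDim.x * gridDim.y;
        const int idx = blockId * (blockDim.x * blockDim.y * blockDim.z) +
                        threadIdx.z * (blockDim.x * blockDim.y) +
                        threadIdx.y * blockDim.x +
                        threadIdx.x;
        out[idx] = idx;   /* if the formula were wrong, some slots would stay untouched */
}

Launching it as record_index<<<dim3(2,3,4), dim3(8,4,2)>>>(d_out) with d_out holding 2*3*4*8*4*2 = 1536 ints, and then checking that d_out[i] == i for every i, exercises all block and thread dimensions at once.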

global_config =:
        {
                /*
                  global configuration.
                  note the default values are all 1, so in the kernel codes,
                  we could just ignore those dimensions.
                 */ 
                gridDim.x = gridDim.y = gridDim.z = 1;
                blockDim.x = blockDim.y = blockDim.z = 1;
        };

kernel =:
        {
                /*
                  I think CUDA does some bad evil-detail-covering things here.
                  it's said that CUDA C is an extension of C, but in my mind,
                  CUDA C is more like C++, and the *<<<>>>* part is too tricky.
                  for example:
                  kernel<<<10, 32>>>(); means kernel will execute in 10 blocks, each having 32 threads.

                  dim3 dimG(10, 1, 1);
                  dim3 dimB(32, 1, 1);
                  kernel<<<dimG, dimB>>>(); this is exactly the same thing as above.

                  it's neither C style nor C++ style? at first, I thought this could be done with
                  C++'s constructor stuff, but I checked the structure *dim3*, and there's no proper
                  constructor for this. this just broke the semantics of both C and C++. I think
                  forcing the user to write *kernel<<<dim3, dim3>>>* would be better. So I'd like to keep
                  this rule in my future code.
                */

                gridDim  = dimG;
                blockDim = dimB;

                __foreach(blockIdx.z,  0, gridDim.z)
                __foreach(blockIdx.y,  0, gridDim.y)
                __foreach(blockIdx.x,  0, gridDim.x)
                __foreach(threadIdx.z, 0, blockDim.z)
                __foreach(threadIdx.y, 0, blockDim.y)
                __foreach(threadIdx.x, 0, blockDim.x)
                {
                        const int idx = __indexing();        
                        if (idx < n) {
                                arr_on_device[idx] = arr_on_device[idx] * arr_on_device[idx];
                        }
                }
        };

/*
  so, for me, gridDim & blockDim are like boundaries.
  e.g. gridDim.x is the upper bound of blockIdx.x; this was not that obvious to someone like me.
 */
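
The expansion above can in fact be compiled and run on the host as an ordinary C function. Here is a minimal sketch of that mental model for the 1D case (added for illustration; it runs serially, one thread at a time, which is of course not how the GPU actually schedules work):

/* host-side, serial emulation of kernel<<<gridDimX, blockDimX>>>(arr, n) for a 1D launch */
void emulate_kernel(float *arr, int n, int gridDimX, int blockDimX)
{
        for (int blockIdxX = 0; blockIdxX < gridDimX; ++blockIdxX) {
                for (int threadIdxX = 0; threadIdxX < blockDimX; ++threadIdxX) {
                        const int idx = blockIdxX * blockDimX + threadIdxX;
                        if (idx < n) {
                                arr[idx] = arr[idx] * arr[idx];
                        }
                }
        }
}

It produces the same result as the device code because each (blockIdx, threadIdx) pair still maps to exactly one element; the GPU just runs those iterations concurrently across its SMs instead of in a loop.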

/* the declaration of dim3 from vector_types.h of CUDA/include */
struct __device_builtin__ dim3
{
        unsigned int x, y, z;
#if defined(__cplusplus)
        __host__ __device__ dim3(unsigned int vx = 1, unsigned int vy = 1, unsigned int vz = 1) : x(vx), y(vy), z(vz) {}
        __host__ __device__ dim3(uint3 v) : x(v.x), y(v.y), z(v.z) {}
        __host__ __device__ operator uint3(void) { uint3 t; t.x = x; t.y = y; t.z = z; return t; }
#endif /* __cplusplus */
};

typedef __device_builtin__ struct dim3 dim3;
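
Given that declaration, a short host-side sketch (added for illustration) of why kernel<<<10, 32>>>() compiles at all: the constructor's defaulted vy and vz let a plain integer convert implicitly to a dim3:

#include <cuda_runtime.h>
#include <stdio.h>

int main(void) {
        dim3 dimG = 10;              /* becomes dim3(10, 1, 1) via the defaulted constructor */
        dim3 dimB(32);               /* same thing, spelled explicitly */
        printf("grid  = (%u, %u, %u)\n", dimG.x, dimG.y, dimG.z);
        printf("block = (%u, %u, %u)\n", dimB.x, dimB.y, dimB.z);
        return 0;
}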

By using <<<...>>>, you launch a number of threads on the GPU. Those threads are grouped into blocks and form one big grid. All of the threads execute the invoked kernel function code.

In the kernel function, built-in variables such as threadIdx and blockIdx let the code know which thread it is, so it can do its scheduled part of the work.

EDIT: Basically, <<<...>>> simplifies the configuration procedure of launching a kernel. Without it, a single kernel launch may require calling 4~5 APIs, as in the OpenCL way, which uses only C99 syntax.

In fact, you can check the CUDA driver API. It provides all of those APIs, so you don't have to use <<<...>>> at all.

Basically, a GPU is divided into separate "device" GPUs (e.g. the GeForce 690 has 2) -> multiple SMs (streaming multiprocessors) -> multiple CUDA cores. As far as I know, the dimensionality of a block or grid is just a logical assignment with no hardware meaning, but the total size of a block (x*y*z) matters a great deal.

Threads in a block have to be on the same SM in order to use its shared memory and synchronization facilities. So you cannot have more threads contained in an SM than its number of CUDA cores.

If we take a simple scenario with 16 SMs of 32 CUDA cores each, and launch with a 31x1x1 block size and a 20x1x1 grid size, we give away at least 1/32 of the machine's processing power.
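
A quick back-of-the-envelope check of that 1/32 figure (the warp size of 32 is a hardware fact; the helper function below is purely illustrative and not from the original answer):

#include <stdio.h>

/* fraction of warp lanes left idle when a block has `threads` threads */
double wasted_fraction(int threads)
{
    const int warp = 32;
    const int warps_per_block = (threads + warp - 1) / warp;  /* warps are allocated whole */
    return 1.0 - (double)threads / (double)(warps_per_block * warp);
}

int main(void)
{
    printf("%.5f\n", wasted_fraction(31));  /* 1/32 = 0.03125 of each warp sits idle */
    printf("%.5f\n", wasted_fraction(32));  /* 0.0 : a full warp */
    return 0;
}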

With the driver API, configuring and launching a kernel involves several calls, e.g.

cuFuncSetCacheConfig
cuFuncSetSharedMemConfig
cuLaunchKernel

or, through the older, now-deprecated launch path,

cuFuncSetBlockShape()
cuFuncSetSharedSize()
cuParamSet{Size,i,fv}()
cuLaunch
cuLaunchGrid

cuLaunchKernel itself takes the whole execution configuration as parameters:
CUresult CUDAAPI cuLaunchKernel(CUfunction f,
    unsigned int gridDimX,
    unsigned int gridDimY,
    unsigned int gridDimZ,
    unsigned int blockDimX,
    unsigned int blockDimY,
    unsigned int blockDimZ,
    unsigned int sharedMemBytes,
    CUstream hStream,
    void **kernelParams,
    void **extra);
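
As an illustration only (the module file name "square.ptx" and the exported kernel name are assumptions, a CUDA context is assumed to already exist, and error checking is omitted), a launch through the driver API looks roughly like:

#include <cuda.h>

void launch_with_driver_api(CUdeviceptr d_arr, int n)
{
    CUmodule   mod;
    CUfunction fun;
    cuModuleLoad(&mod, "square.ptx");      /* assumed: PTX/cubin built from the kernel source */
    cuModuleGetFunction(&fun, mod, "kernel");

    void *args[] = { &d_arr, &n };         /* pointers to each kernel argument, in order */
    int threads = 256;
    int blocks  = (n + threads - 1) / threads;

    cuLaunchKernel(fun,
                   (unsigned)blocks, 1, 1,     /* grid  dimensions */
                   (unsigned)threads, 1, 1,    /* block dimensions */
                   0,                          /* dynamic shared memory bytes */
                   NULL,                       /* default stream */
                   args,                       /* kernelParams */
                   NULL);                      /* extra */
    cuModuleUnload(mod);
}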

On the runtime API side, the <<<>>> syntax does not survive compilation: nvcc expands it into ordinary host code. Given a kernel and a launch such as

__global__ void kernel(float* buf, int j)
{
    // ...
}

kernel<<<blocks,threads,0,myStream>>>(d_buf,j);

the compiler generates a stub that sets up each argument and then launches the kernel:

void __device_stub__Z6kernelPfi(float *__par0, int __par1)
{
    __cudaSetupArgSimple(__par0, 0U);
    __cudaSetupArgSimple(__par1, 4U);
    __cudaLaunch(((char *)((void ( *)(float *, int))kernel)));
}
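
Newer CUDA runtimes also expose this step directly as cudaLaunchKernel; as a rough sketch (not part of the original answer), the launch above could equivalently be written as:

#include <cuda_runtime.h>

/* same effect as kernel<<<blocks,threads,0,myStream>>>(d_buf, j), spelled out */
void launch_explicitly(float *d_buf, int j, dim3 blocks, dim3 threads, cudaStream_t myStream)
{
    void *args[] = { &d_buf, &j };   /* one pointer per kernel argument, in order */
    cudaLaunchKernel((const void *)kernel, blocks, threads, args,
                     0 /* shared mem bytes */, myStream);
}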