cuda上的向量步进加法 我尝试在CUDA C++代码中运行向量步进加法函数,但是对于大小为5000000的大型浮点数组,它运行速度比CPU版本慢。下面是相关的CUDA和CPU代码: #define THREADS_PER_BLOCK 1024 typedef float real; __global__ void vectorStepAddKernel2(real*x, real*y, real*z, real alpha, real beta, int size, int xstep, int ystep, int zstep) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < size) { x[i*xstep] = alpha* y[i*ystep] + beta*z[i*zstep]; } } cudaError_t vectorStepAdd2(real *x, real*y, real* z, real alpha, real beta, int size, int xstep, int ystep, int zstep) { cudaError_t cudaStatus; int threadsPerBlock = THREADS_PER_BLOCK; int blocksPerGrid = (size + threadsPerBlock -1)/threadsPerBlock; vectorStepAddKernel2<<<blocksPerGrid, threadsPerBlock>>>(x, y, z, alpha, beta, size, xstep, ystep, zstep); // cudaDeviceSynchronize waits for the kernel to finish, and returns any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching vectorStepAddKernel!\n", cudaStatus); exit(1); } return cudaStatus; } //CPU function: void vectorStepAdd3(real *x, real*y, real* z, real alpha, real beta, int size, int xstep, int ystep, int zstep) { for(int i=0;i<size;i++) { x[i*xstep] = alpha* y[i*ystep] + beta*z[i*zstep]; } }

标签: c++, cuda, parallel-processing, gpu, gpgpu

回答您的问题“我能做些什么来加速GPU代码?”

首先,让我先说一句,建议的操作
X=alpha*Y+beta*Z
每字节数据传输所需的计算强度不高。因此,我无法在这段特定代码上节省CPU时间。不过,介绍两种加速这段代码的方法可能是有益的:

  • 使用锁页(pinned)内存进行数据传输操作。这使GPU版本的数据传输时间减少了约2倍,而数据传输时间在GPU版本的总执行时间中占主导地位。

  • 使用@njuffa建议的跨步复制(cudaMemcpy2D)技术。其效果是双重的:我们可以把数据传输量减少到计算真正需要的量;并且可以按照评论中的建议(同样来自njuffa)重写内核,使其对数据进行连续(合并)访问。这使数据传输时间进一步减少约3倍,内核计算时间减少约10倍。

  • 此代码提供了这些操作的示例:

    #include <stdio.h>
    #include <stdlib.h>
    #include <math.h>   /* fabsf used in host-side result validation */
    
    
    #define THREADS_PER_BLOCK 1024
    #define DSIZE 5000000
    #define WSIZE 50000
    #define XSTEP 47
    #define YSTEP 43
    #define ZSTEP 41
    #define TOL 0.00001f
    
    
    // Error-checking helper macro: reads (and clears) the last CUDA runtime
    // error via cudaGetLastError() and aborts with a file/line diagnostic if
    // one occurred. Because kernel launches do not return a status directly,
    // use this after a synchronizing call (e.g. cudaDeviceSynchronize()) to
    // surface both launch-configuration and asynchronous execution errors.
    #define cudaCheckErrors(msg) \
        do { \
            cudaError_t __err = cudaGetLastError(); \
            if (__err != cudaSuccess) { \
                fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
                    msg, cudaGetErrorString(__err), \
                    __FILE__, __LINE__); \
                fprintf(stderr, "*** FAILED - ABORTING\n"); \
                exit(1); \
            } \
        } while (0)
    
    typedef float real;
    
    // Strided AXPBY: x[i*xstep] = alpha*y[i*ystep] + beta*z[i*zstep] for
    // i in [0, size). One thread per element, 1D grid/block layout.
    // The strided indexing is uncoalesced, which is why this kernel is the
    // slow baseline in this comparison.
    // Improvement: y and z are read-only, so they are declared
    // const __restrict__ (and x __restrict__) — this lets the compiler route
    // the loads through the read-only data cache (LDG on sm_35+), as
    // suggested in the discussion below. Callers are unaffected.
    __global__ void vectorStepAddKernel2(real * __restrict__ x, const real * __restrict__ y, const real * __restrict__ z, real alpha, real beta, int size, int xstep, int ystep, int zstep)
    {
        int i = blockDim.x * blockIdx.x + threadIdx.x;
        if (i < size)   // guard: the grid may be larger than size
        {
            x[i*xstep] = alpha* y[i*ystep] + beta*z[i*zstep];
        }
    }
    
    // Contiguous AXPBY: x[i] = alpha*y[i] + beta*z[i] for i in [0, size).
    // Operates on pre-gathered compact vectors, so adjacent threads access
    // adjacent elements (fully coalesced) — the fast variant of the kernel
    // above. One thread per element, 1D grid/block layout.
    // Improvement: const __restrict__ on the read-only inputs enables the
    // read-only data cache (LDG on sm_35+). Callers are unaffected.
    __global__ void vectorStepAddKernel2i(real * __restrict__ x, const real * __restrict__ y, const real * __restrict__ z, real alpha, real beta, int size)
    {
        int i = blockDim.x * blockIdx.x + threadIdx.x;
        if (i < size)   // guard: the grid may be larger than size
        {
            x[i] = alpha* y[i] + beta*z[i];
        }
    }
    
    // Host wrapper for the strided kernel: computes a ceil-div launch
    // configuration, launches, then synchronizes and checks for both
    // launch-configuration and asynchronous execution errors.
    // Fix: guard against size <= 0 — the ceil-div would otherwise produce
    // blocksPerGrid == 0, an invalid launch configuration.
    void vectorStepAdd2(real *x, real *y, real *z, real alpha, real beta, int size, int xstep, int ystep, int zstep)
    {
        if (size <= 0) return;  // nothing to do; avoid a zero-block launch
        int threadsPerBlock = THREADS_PER_BLOCK;
        int blocksPerGrid = (size + threadsPerBlock -1)/threadsPerBlock;  // ceil-div
        vectorStepAddKernel2<<<blocksPerGrid, threadsPerBlock>>>(x, y, z, alpha, beta, size, xstep, ystep, zstep);
        cudaDeviceSynchronize();
        cudaCheckErrors("kernel2 fail");
    }
    
    
    // Host wrapper for the contiguous (improved) kernel: ceil-div launch
    // configuration, launch, synchronize, then check for launch and
    // execution errors.
    // Fix: guard against size <= 0 — the ceil-div would otherwise produce
    // blocksPerGrid == 0, an invalid launch configuration.
    void vectorStepAdd2i(real *x, real *y, real *z, real alpha, real beta, int size)
    {
        if (size <= 0) return;  // nothing to do; avoid a zero-block launch
        int threadsPerBlock = THREADS_PER_BLOCK;
        int blocksPerGrid = (size + threadsPerBlock -1)/threadsPerBlock;  // ceil-div
        vectorStepAddKernel2i<<<blocksPerGrid, threadsPerBlock>>>(x, y, z, alpha, beta, size);
        cudaDeviceSynchronize();
        cudaCheckErrors("kernel3 fail");
    }
    
    //CPU function:
    
    // Serial reference implementation of the strided AXPBY used to validate
    // the GPU results: x[i*xstep] = alpha*y[i*ystep] + beta*z[i*zstep]
    // for i in [0, size). Maintains running offsets instead of multiplying
    // the loop index by each stride every iteration.
    void vectorStepAdd3(real *x, real*y, real* z, real alpha, real beta, int size, int xstep, int ystep, int zstep)
    {
        int xi = 0;
        int yi = 0;
        int zi = 0;
        for (int i = 0; i < size; ++i) {
            x[xi] = alpha * y[yi] + beta * z[zi];
            xi += xstep;
            yi += ystep;
            zi += zstep;
        }
    }
    
    int main() {
    
      real *h_x, *h_y, *h_z, *c_x, *h_x1;           // pinned host buffers
      real *d_x, *d_y, *d_z, *d_x1, *d_y1, *d_z1;   // device buffers
    
      int dsize = DSIZE;   // total vector length (full, padded storage)
      int wsize = WSIZE;   // number of strided elements actually computed
      int xstep = XSTEP;
      int ystep = YSTEP;
      int zstep = ZSTEP;
      real alpha = 0.5f;
      real beta = 0.5f;
      float et;            // elapsed-time scratch, milliseconds
    
      // Pinned (page-locked) host memory: faster host<->device transfers
      // than pageable malloc'd memory (~2x bandwidth on typical systems).
      cudaHostAlloc((void **)&h_x, dsize*sizeof(real), cudaHostAllocDefault);
      cudaCheckErrors("cuda Host Alloc 1 fail");
      cudaHostAlloc((void **)&h_y, dsize*sizeof(real), cudaHostAllocDefault);
      cudaCheckErrors("cuda Host Alloc 2 fail");
      cudaHostAlloc((void **)&h_z, dsize*sizeof(real), cudaHostAllocDefault);
      cudaCheckErrors("cuda Host Alloc 3 fail");
      cudaHostAlloc((void **)&c_x, dsize*sizeof(real), cudaHostAllocDefault);
      cudaCheckErrors("cuda Host Alloc 4 fail");
      cudaHostAlloc((void **)&h_x1, dsize*sizeof(real), cudaHostAllocDefault);
      cudaCheckErrors("cuda Host Alloc 5 fail");
    
      // Full-size device buffers for the original (strided) version ...
      cudaMalloc((void **)&d_x, dsize*sizeof(real));
      cudaCheckErrors("cuda malloc1 fail");
      cudaMalloc((void **)&d_y, dsize*sizeof(real));
      cudaCheckErrors("cuda malloc2 fail");
      cudaMalloc((void **)&d_z, dsize*sizeof(real));
      cudaCheckErrors("cuda malloc3 fail");
      // ... and compact wsize-element buffers for the improved version.
      cudaMalloc((void **)&d_x1, wsize*sizeof(real));
      cudaCheckErrors("cuda malloc4 fail");
      cudaMalloc((void **)&d_y1, wsize*sizeof(real));
      cudaCheckErrors("cuda malloc5 fail");
      cudaMalloc((void **)&d_z1, wsize*sizeof(real));
      cudaCheckErrors("cuda malloc6 fail");
    
      // Outputs zeroed; inputs random in [0, 1).
      for (int i=0; i< dsize; i++){
        h_x[i] = 0.0f;
        h_x1[i] = 0.0f;
        c_x[i] = 0.0f;
        h_y[i] = (real)(rand()/(real)RAND_MAX);
        h_z[i] = (real)(rand()/(real)RAND_MAX);
        }
    
      // t_* events bracket the whole transfer+compute pipeline;
      // k_* events bracket the kernel only.
      cudaEvent_t t_start, t_stop, k_start, k_stop;
      cudaEventCreate(&t_start);
      cudaEventCreate(&t_stop);
      cudaEventCreate(&k_start);
      cudaEventCreate(&k_stop);
      cudaCheckErrors("event fail");
    
      // --- Test 1: original GPU version (full copies, strided kernel) ---
    
      cudaEventRecord(t_start);
      cudaMemcpy(d_x, h_x, dsize * sizeof(real), cudaMemcpyHostToDevice);
      cudaCheckErrors("cuda memcpy 1 fail");
      cudaMemcpy(d_y, h_y, dsize * sizeof(real), cudaMemcpyHostToDevice);
      cudaCheckErrors("cuda memcpy 2 fail");
      cudaMemcpy(d_z, h_z, dsize * sizeof(real), cudaMemcpyHostToDevice);
      cudaCheckErrors("cuda memcpy 3 fail");
    
      cudaEventRecord(k_start);
      vectorStepAdd2(d_x, d_y, d_z, alpha, beta, wsize, xstep, ystep, zstep);
      cudaEventRecord(k_stop);
    
      cudaMemcpy(h_x, d_x, dsize * sizeof(real), cudaMemcpyDeviceToHost);
      cudaCheckErrors("cuda memcpy 4 fail");
      cudaEventRecord(t_stop);
      cudaEventSynchronize(t_stop);
      cudaEventElapsedTime(&et, t_start, t_stop);
      printf("GPU original version total elapsed time is: %f ms.\n", et);
      cudaEventElapsedTime(&et, k_start, k_stop);
      printf("GPU original kernel elapsed time is: %f ms.\n", et);
    
      // --- Test 2: CPU reference version ---
    
      cudaEventRecord(t_start);
      vectorStepAdd3(c_x, h_y, h_z, alpha, beta, wsize, xstep, ystep, zstep);
      cudaEventRecord(t_stop);
      cudaEventSynchronize(t_stop);
      cudaEventElapsedTime(&et, t_start, t_stop);
      printf("CPU version total elapsed time is: %f ms.\n", et);
      // Validate GPU against CPU. NOTE: the early exits below skip the
      // cleanup at the end of main; the OS reclaims on process exit.
      for (int i = 0; i< dsize; i++)
        if (fabsf((float)(h_x[i]-c_x[i])) > TOL) {
          printf("cpu/gpu results mismatch at i = %d, cpu = %f, gpu = %f\n", i, c_x[i], h_x[i]);
          return 1;
          }
    
    
      // --- Test 3: improved GPU version ---
      // cudaMemcpy2D gathers only the wsize strided elements into compact,
      // contiguous device vectors, so the kernel accesses them coalesced.
    
      cudaEventRecord(t_start);
      // No upload of x needed: the kernel overwrites every element of d_x1.
      cudaMemcpy2D(d_y1, sizeof(real),  h_y, ystep * sizeof(real), sizeof(real), wsize, cudaMemcpyHostToDevice);
      cudaCheckErrors("cuda memcpy 6 fail");
      cudaMemcpy2D(d_z1, sizeof(real),  h_z, zstep * sizeof(real), sizeof(real), wsize, cudaMemcpyHostToDevice);
      cudaCheckErrors("cuda memcpy 7 fail");
    
      cudaEventRecord(k_start);
      vectorStepAdd2i(d_x1, d_y1, d_z1, alpha, beta, wsize);
      cudaEventRecord(k_stop);
    
      // Scatter the compact result back into the strided host layout.
      cudaMemcpy2D(h_x1, xstep*sizeof(real), d_x1, sizeof(real), sizeof(real), wsize, cudaMemcpyDeviceToHost);
      cudaCheckErrors("cuda memcpy 8 fail");
      cudaEventRecord(t_stop);
      cudaEventSynchronize(t_stop);
      cudaEventElapsedTime(&et, t_start, t_stop);
      printf("GPU improved version total elapsed time is: %f ms.\n", et);
      cudaEventElapsedTime(&et, k_start, k_stop);
      printf("GPU improved kernel elapsed time is: %f ms.\n", et);
    
      // Validate the improved GPU result against the original GPU result.
      for (int i = 0; i< dsize; i++)
        if (fabsf((float)(h_x[i]-h_x1[i])) > TOL) {
          printf("gpu/gpu improved results mismatch at i = %d, gpu = %f, gpu imp = %f\n", i, h_x[i], h_x1[i]);
          return 1;
          }
    
      printf("Results:i   CPU     GPU     GPUi \n");
      // BUG FIX: the original format string had six conversions ("%d" plus
      // five "%f") but only four arguments -- undefined behavior per the C
      // standard. Four conversions now match the four arguments (and the
      // four columns of the header printed above).
      for (int i = 0; i< 20*xstep; i+=xstep)
        printf("    %d         %f      %f     %f\n", i, c_x[i], h_x[i], h_x1[i]);
    
      // Release timing events and all device/host allocations (previously
      // leaked; harmless for a one-shot benchmark but good hygiene).
      cudaEventDestroy(t_start);
      cudaEventDestroy(t_stop);
      cudaEventDestroy(k_start);
      cudaEventDestroy(k_stop);
      cudaFree(d_x);
      cudaFree(d_y);
      cudaFree(d_z);
      cudaFree(d_x1);
      cudaFree(d_y1);
      cudaFree(d_z1);
      cudaFreeHost(h_x);
      cudaFreeHost(h_y);
      cudaFreeHost(h_z);
      cudaFreeHost(c_x);
      cudaFreeHost(h_x1);
    
      return 0;
    }
    
    我们可以看到,改进后的GPU版本的总时间比原始GPU版本减少了约3倍,而这几乎全部来自数据复制时间的减少。数据复制时间之所以减少,是因为借助改进的2D memcpy,我们只需复制实际用到的数据。(如果不使用锁页内存,这些数据传输时间大约会是现在的两倍。)我们还可以看到,原始内核的计算时间比CPU计算快约10倍,改进后内核的计算时间比CPU计算快约100倍。然而,一旦把数据传输时间考虑在内,我们仍然无法超过CPU的速度。


    最后一点意见是:cudaMemcpy2D这类跨步复制操作的“成本”仍然很高。向量大小减少了100倍,我们却只看到复制时间减少约3倍。因此,跨步访问仍然是使用GPU时代价相对较高的一种访问方式。如果我们传输的只是50000个连续元素的向量,复制时间预计会随向量大小接近线性地减少100倍(相对于5000000个元素的原始复制)。那样复制时间将少于1ms,我们的GPU版本就会比CPU快——至少比这段简单的单线程CPU代码快。

    跨步访问并不适合GPU的内存子系统,后者更喜欢连续访问。如果跨步很小(例如<10个元素)而且向量很长,通过纹理访问只读数组可能会有所帮助,值得一试。如果您正在为sm_35平台构建,只需对函数原型进行简单更改,就可以让您的代码通过LDG指令自动使用纹理路径:
    vectorStepAddKernel2(real * __restrict__ x, const real * __restrict__ y, const real * __restrict__ z, ...)
    您正在使用的xstep、ystep和zstep的值是什么?@talonmies——我使用的xstep、ystep、zstep的值分别是4、5、7……但它们是作为参数动态传递给函数的(如您所见),可以是任意值。@njuffa——很抱歉,我没有完全理解您的意思。我没有太多针对特定体系结构构建代码的经验,但我认为我的Visual Studio是按x64体系结构配置的。我以为指令集是由处理器决定的,而不是由芯片组决定的?如果我理解有误请指正。——x64指主机系统的64位x86体系结构;sm_35指GPU计算能力3.5。编译时这样向nvcc传递该信息:nvcc -arch=sm_35 -o [obj file] [src file]。LDG指令是sm_35体系结构GPU(例如K20、K20x和GeForce Titan)上新增的。——谢谢!我在我的机器上观察到,您改进后的内核的运行时间仍比CPU函数调用时间慢。我知道这可能有多种原因……但正如您指出的,这似乎不是一个适合用GPU解决的问题(或者说,现代CPU确实相当快 :)
    GPU original version total elapsed time is: 13.352256 ms.
    GPU original kernel elapsed time is: 0.195808 ms.
    CPU version total elapsed time is: 2.599584 ms.
    GPU improved version total elapsed time is: 4.228288 ms.
    GPU improved kernel elapsed time is: 0.027392 ms.
    Results:i   CPU     GPU     GPUi
        0         0.617285      0.617285     0.617285
        47         0.554522      0.554522     0.554522
        94         0.104245      0.104245     0.104245
    ....