Optimization of a simple CUDA kernel

Tags: optimization, cuda, shared-memory

Is it possible to speed up this simple kernel? I thought about using shared memory, but N equals 507904, so it is far larger than any shared memory array could be.
My program creates blocks of 256 threads.

__global__ void compute(COMPLEX_TYPE *a, COMPLEX_TYPE *b,
              FLOAT_TYPE *F, FLOAT_TYPE f, int N) 
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N) 
    {
        F[i] = ( a[i].x*a[i].x + a[i].y*a[i].y + b[i].x*b[i].x + b[i].y*b[i].y) / (f);
    }
}
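
For reference, the launch described in the question (256-thread blocks, with enough blocks to cover all N elements) would presumably look something like the sketch below; the device pointer names are assumptions:

// Hypothetical host-side launch: 256 threads per block, grid rounded up
// so that every one of the N elements gets its own thread.
const int nTPB = 256;
compute<<<(N + nTPB - 1)/nTPB, nTPB>>>(d_a, d_b, d_F, f, N);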
The simplest general optimization would be something like this:

__global__ void compute(const COMPLEX_TYPE * __restrict__ a, 
                        const COMPLEX_TYPE * __restrict__ b,
                        FLOAT_TYPE *F, FLOAT_TYPE f, int N) 
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    #pragma unroll 8
    for(; i < N; i += blockDim.x * gridDim.x)
    {
        COMPLEX_TYPE aval = a[i], bval = b[i];
        FLOAT_TYPE Fval;
        Fval = ( aval.x*aval.x + aval.y*aval.y + bval.x*bval.x + bval.y*bval.y) / (f);
        F[i] = Fval;
    }
}
[disclaimer: written in browser, never tested, use at your own risk]

The idea here is to launch only as many threads as will execute concurrently on your target GPU, and then have each thread perform multiple operations rather than a single one. This helps amortize a lot of the fixed overhead at the block-scheduler and setup-code level, and improves overall efficiency. On most architectures this kernel will likely be memory-bandwidth limited, so memory coalescing and transaction optimization are about the most important performance optimizations you can make.
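
One way to size such a grid is to ask the occupancy API how many blocks can be resident at once; a minimal sketch (untested, assuming the grid-stride kernel above, 256-thread blocks, and no error checking):

// Sketch: launch roughly one full wave of resident blocks, then let the
// grid-stride loop inside the kernel cover all N elements.
int device = 0, blocksPerSM = 0;
cudaDeviceProp prop;
cudaGetDevice(&device);
cudaGetDeviceProperties(&prop, device);
cudaOccupancyMaxActiveBlocksPerMultiprocessor(&blocksPerSM, compute, 256, 0);
int gridSize = blocksPerSM * prop.multiProcessorCount;  // one resident wave
compute<<<gridSize, 256>>>(a, b, F, f, N);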

EDIT: As this answer has been marked CW, I'm electing to add my tests here rather than create my own answer. If anyone objects to this, please roll the edit back to the previously acceptable version. I'm not adding any new ideas, just testing those offered by @talonmies and @JanLucas.

In my test case, the suggestions offered by @talonmies (minus the unroll pragma) seem to give about a 10% performance improvement. The suggestion by @JanLucas to replace the floating-point division with a floating-point multiplication, if acceptable, appears to roughly double the performance. This will obviously vary depending on GPU and other specifics. Here's my test:

$ cat t891.cu
#include <cuComplex.h>
#include <stdio.h>
#include <stdlib.h>

#define DSIZE 507904
#define nTPB 256
#define nBLK 256

#define cudaCheckErrors(msg) \
    do { \
        cudaError_t __err = cudaGetLastError(); \
        if (__err != cudaSuccess) { \
            fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
                msg, cudaGetErrorString(__err), \
                __FILE__, __LINE__); \
            fprintf(stderr, "*** FAILED - ABORTING\n"); \
            exit(1); \
        } \
    } while (0)

#include <time.h>
#include <sys/time.h>
#define USECPSEC 1000000ULL

long long dtime_usec(unsigned long long start){

  timeval tv;
  gettimeofday(&tv, 0);
  return ((tv.tv_sec*USECPSEC)+tv.tv_usec)-start;
}

typedef cuFloatComplex COMPLEX_TYPE;
typedef float FLOAT_TYPE;

__global__ void compute(COMPLEX_TYPE *a, COMPLEX_TYPE *b,
              FLOAT_TYPE *F, FLOAT_TYPE f, int N)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N)
    {
        F[i] = ( a[i].x*a[i].x + a[i].y*a[i].y + b[i].x*b[i].x + b[i].y*b[i].y) / (f);
    }
}

__global__ void compute_imp(const COMPLEX_TYPE * __restrict__ a,
                        const COMPLEX_TYPE * __restrict__ b,
                        FLOAT_TYPE *F, FLOAT_TYPE f, int N)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
//    #pragma unroll 8
    for(; i < N; i += blockDim.x * gridDim.x)
    {
        COMPLEX_TYPE aval = a[i];
        COMPLEX_TYPE bval = b[i];
        FLOAT_TYPE Fval = ( aval.x*aval.x + aval.y*aval.y + bval.x*bval.x + bval.y*bval.y) / (f);
        F[i] = Fval;
    }
}

__global__ void compute_imp2(const COMPLEX_TYPE * __restrict__ a,
                        const COMPLEX_TYPE * __restrict__ b,
                        FLOAT_TYPE *F, FLOAT_TYPE f, int N)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
//    #pragma unroll 8
    for(; i < N; i += blockDim.x * gridDim.x)
    {
        COMPLEX_TYPE aval = a[i];
        COMPLEX_TYPE bval = b[i];
        FLOAT_TYPE Fval = ( aval.x*aval.x + aval.y*aval.y + bval.x*bval.x + bval.y*bval.y) * (f);
        F[i] = Fval;
    }
}

int main(){

  COMPLEX_TYPE *d_A, *d_B;
  FLOAT_TYPE *d_F, f = 4.0f;

  cudaMalloc(&d_A, DSIZE*sizeof(COMPLEX_TYPE));
  cudaMalloc(&d_B, DSIZE*sizeof(COMPLEX_TYPE));
  cudaMalloc(&d_F, DSIZE*sizeof(FLOAT_TYPE));

  //warm-up
  compute<<<(DSIZE+nTPB-1)/nTPB,nTPB>>>(d_A, d_B, d_F, f, DSIZE);
  cudaDeviceSynchronize();
  unsigned long long t1 = dtime_usec(0);
  compute<<<(DSIZE+nTPB-1)/nTPB,nTPB>>>(d_A, d_B, d_F, f, DSIZE);
  cudaDeviceSynchronize();
  t1 = dtime_usec(t1);

  //warm-up
  compute_imp<<<DSIZE/(8*nTPB),nTPB>>>(d_A, d_B, d_F, f, DSIZE);
  cudaDeviceSynchronize();
  unsigned long long t2 = dtime_usec(0);
  compute_imp<<<nBLK,nTPB>>>(d_A, d_B, d_F, f, DSIZE);
  cudaDeviceSynchronize();
  t2 = dtime_usec(t2);

  //warm-up
  compute_imp2<<<(DSIZE+nTPB-1)/nTPB,nTPB>>>(d_A, d_B, d_F, 1/f, DSIZE);
  cudaDeviceSynchronize();
  unsigned long long t3 = dtime_usec(0);
  compute_imp2<<<nBLK,nTPB>>>(d_A, d_B, d_F, 1/f, DSIZE);
  cudaDeviceSynchronize();
  t3 = dtime_usec(t3);
  cudaCheckErrors("some error");
  printf("t1: %fs, t2: %fs, t3: %fs\n", t1/(float)USECPSEC, t2/(float)(USECPSEC), t3/(float)USECPSEC);
}
$ nvcc -O3 -o t891 t891.cu
$ ./t891
t1: 0.000226s, t2: 0.000209s, t3: 0.000110s
$
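
As a rough sanity check on the bandwidth-limited claim above: each element requires two 8-byte complex loads and one 4-byte store, about 20 bytes of traffic, so the fastest kernel (t3) moves roughly 507904 × 20 B ≈ 10.2 MB in about 110 µs, on the order of 90 GB/s. (A back-of-the-envelope estimate from the timings above; the achievable figure depends entirely on the GPU used.)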
Notes:

  • The unroll pragma doesn't seem to help (it made things run slower for the couple of test cases I tried). The compiler will, in some cases, unroll loops without a specific hint, and loop unrolling is generally an optimization that requires tuning, perhaps careful tuning.
  • The modification @talonmies proposed, turning the kernel into a grid-striding loop, is one of the factors that would need to be taken into account when choosing a specific loop-unroll factor; see the sketch after this list.
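
To illustrate that interaction, here is a hypothetical manually-unrolled-by-2 variant of the grid-stride kernel (my own sketch, not from the original answers; untested). The grid must be sized so each thread has at least two elements to process, and it uses the multiplication form from compute_imp2 (pass 1/f as finv):

__global__ void compute_unroll2(const COMPLEX_TYPE * __restrict__ a,
                                const COMPLEX_TYPE * __restrict__ b,
                                FLOAT_TYPE *F, FLOAT_TYPE finv, int N)
{
    int stride = blockDim.x * gridDim.x;
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Each iteration processes two elements one stride apart, so full
    // coverage requires launching at most N/2 total threads.
    for (; i + stride < N; i += 2 * stride)
    {
        COMPLEX_TYPE a0 = a[i], b0 = b[i];
        COMPLEX_TYPE a1 = a[i + stride], b1 = b[i + stride];
        F[i]          = (a0.x*a0.x + a0.y*a0.y + b0.x*b0.x + b0.y*b0.y) * finv;
        F[i + stride] = (a1.x*a1.x + a1.y*a1.y + b1.x*b1.x + b1.y*b1.y) * finv;
    }
    if (i < N)  // tail element when N is not a multiple of 2*stride
    {
        COMPLEX_TYPE a0 = a[i], b0 = b[i];
        F[i] = (a0.x*a0.x + a0.y*a0.y + b0.x*b0.x + b0.y*b0.y) * finv;
    }
}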