Concurrency: CUDA streams and concurrent kernel execution

Tags: concurrency, cuda, cuda-streams

I want to use streams to parallelize the execution of kernels that work on separate device data arrays. The data is allocated on the device and filled by previous kernels.

I have written the program below, which shows that I cannot reach this goal so far: in practice, the kernels issued on the two non-default streams execute sequentially, each in its own stream, rather than concurrently.

The same behavior is observed on two Intel machines running recent Debian Linux releases: one with a Tesla C2075 and CUDA 4.2, the other with a GeForce 460GT and CUDA 5.0. The Visual Profiler shows sequential execution with both the 4.2 and 5.0 CUDA versions.
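As a quick sanity check, here is a minimal sketch (device 0 assumed to be the card under test) that verifies the device reports support for concurrent kernels; both cards are compute capability 2.x and should print 1:

#include <cstdio>

int main()
{
  cudaDeviceProp prop;
  cudaGetDeviceProperties( &prop, 0 );                           // query device 0
  printf( "concurrentKernels = %d\n", prop.concurrentKernels );  // 1 = supported
  return 0;
}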

Here is the full program:

#include <iostream>
#include <stdio.h>
#include <ctime>

#include <curand.h>

using namespace std;

// compile and run this way:
// nvcc cuStreamsBasics.cu  -arch=sm_20  -o testCuStream   -lcuda -lcufft -lcurand 
// testCuStream  1024 512 512


/* -------------------------------------------------------------------------- */
//  "useful" macros
/* -------------------------------------------------------------------------- */


#define MSG_ASSERT( CONDITION, MSG )                    \
  if (! (CONDITION))                            \
    {                                   \
    std::cerr << std::endl << "Dynamic assertion `" #CONDITION "` failed in " << __FILE__ \
          << " line " << __LINE__ << ": <" << MSG << ">" << std::endl;  \
    exit( 1 );                              \
    }



#define ASSERT( CONDITION ) \
  MSG_ASSERT( CONDITION, " " )



// allocate data on the GPU memory, unpinned
#define CUDALLOC_GPU( _TAB, _DIM, _DATATYPE ) \
  MSG_ASSERT( \
  cudaMalloc( (void**) &_TAB, _DIM * sizeof( _DATATYPE ) ) \
  == cudaSuccess, "failed CUDALLOC" );



/* -------------------------------------------------------------------------- */
// the CUDA kernels
/* -------------------------------------------------------------------------- */


// finds index in 1D array from sequential blocks
#define CUDAINDEX_1D                \
  blockIdx.y * ( gridDim.x * blockDim.x ) + \
  blockIdx.x * blockDim.x +         \
  threadIdx.x;   // note: the macro supplies the trailing ';'



__global__ void 
kernel_diva(float* data, float value, int array_size)
{
  int i = CUDAINDEX_1D
  if (i < array_size)
    data[i] /= value;
}


__global__ void 
kernel_jokea(float* data, float value, int array_size)
{
  int i = CUDAINDEX_1D
  if (i < array_size)
    data[i] *= value + sin( double(i) ) * 1 / cos( double(i) );
}


/* -------------------------------------------------------------------------- */
// usage
/* -------------------------------------------------------------------------- */


static void
usage(int argc, char **argv) 
{
  if ((argc -1) != 3)
    {

      printf("Usage: %s <dimx> <dimy> <dimz> \n", argv[0]);
      printf("do stuff\n");

      exit(1);
    }  
}


/* -------------------------------------------------------------------------- */
// main program, finally!
/* -------------------------------------------------------------------------- */


int 
main(int argc, char** argv)
{
  usage(argc, argv);
  size_t x_dim = atoi( argv[1] );
  size_t y_dim = atoi( argv[2] );
  size_t z_dim = atoi( argv[3] );



  cudaStream_t stream1, stream2;
  ASSERT( cudaStreamCreate( &stream1 ) == cudaSuccess ); 
  ASSERT( cudaStreamCreate( &stream2 ) == cudaSuccess ); 



  size_t size = x_dim * y_dim * z_dim;
  float *data1, *data2;
  CUDALLOC_GPU( data1, size, float);
  CUDALLOC_GPU( data2, size, float);


  curandGenerator_t gen;
  curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
  /* Set seed */
  curandSetPseudoRandomGeneratorSeed(gen, 1234ULL);
  /* Generate n floats on device */
  curandGenerateUniform(gen, data1, size);
  curandGenerateUniform(gen, data2, size);


  dim3 dimBlock( z_dim, 1, 1);
  dim3 dimGrid( x_dim, y_dim, 1);

  clock_t start;
  double diff;


  cudaDeviceSynchronize();
  start = clock(); 
  kernel_diva <<< dimGrid, dimBlock>>>( data1, 5.55f, size);   
  kernel_jokea<<< dimGrid, dimBlock>>>( data1, 5.55f, size);   
  kernel_diva <<< dimGrid, dimBlock>>>( data2, 5.55f, size); 
  kernel_jokea<<< dimGrid, dimBlock>>>( data2, 5.55f, size); 
  cudaDeviceSynchronize();
  diff = ( std::clock() - start ) / (double)CLOCKS_PER_SEC;

  cout << endl << "sequential: " << diff;


  cudaDeviceSynchronize();
  start = clock(); 
  kernel_diva <<< dimGrid, dimBlock, 0, stream1 >>>( data1, 5.55f, size); 
  kernel_diva <<< dimGrid, dimBlock, 0, stream2 >>>( data2, 5.55f, size); 
  kernel_jokea<<< dimGrid, dimBlock, 0, stream1 >>>( data1, 5.55f, size); 
  kernel_jokea<<< dimGrid, dimBlock, 0, stream2 >>>( data2, 5.55f, size); 
  cudaDeviceSynchronize();
  diff = ( std::clock() - start ) / (double)CLOCKS_PER_SEC;

  cout << endl << "parallel: " << diff;



  cudaStreamDestroy( stream1 ); 
  cudaStreamDestroy( stream2 ); 


  return 0;
}

I will try to explain why you do not see overlap between the executions of your two kernels. To this end, I constructed the code reported below, which uses your two kernels and monitors the Streaming Multiprocessor (SM) each block runs on. I am using CUDA 6.5 (release candidate) on a GT540M card, which has only 2 SMs, so it provides a simple playground. The blockSize choice is delegated to the new CUDA 6.5 cudaOccupancyMaxPotentialBlockSize facility.

The code:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

//#define DEBUG_MODE

/********************/
/* CUDA ERROR CHECK */
/********************/
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess) 
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

/**************************************************/
/* STREAMING MULTIPROCESSOR IDENTIFICATION NUMBER */
/**************************************************/
__device__ unsigned int get_smid(void) {
    unsigned int ret;
    asm("mov.u32 %0, %smid;" : "=r"(ret) );
    return ret;
}

/************/
/* KERNEL 1 */
/************/
__global__ void kernel_1(float * __restrict__ data, const float value, int *sm, int N)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;

    if (i < N) {
        data[i] = data[i] / value;
        if (threadIdx.x==0) sm[blockIdx.x]=get_smid();
    }

}

//__global__ void kernel_1(float* data, float value, int N)
//{
//    int start = blockIdx.x * blockDim.x + threadIdx.x;
//    for (int i = start; i < N; i += blockDim.x * gridDim.x)
//    {
//        data[i] = data[i] / value; 
//    }
//}

/************/
/* KERNEL 2 */
/************/
__global__ void  kernel_2(float * __restrict__ data, const float value, int *sm, int N)
{
    int i = threadIdx.x + blockIdx.x*blockDim.x;

    if (i < N) {
        data[i] = data[i] * (value + sin(double(i)) * 1./cos(double(i)));
        if (threadIdx.x==0) sm[blockIdx.x]=get_smid();
    }
}

//__global__ void  kernel_2(float* data, float value, int N)
//{
//    int start = blockIdx.x * blockDim.x + threadIdx.x;
//    for (int i = start; i < N; i += blockDim.x * gridDim.x)
//    {
//        data[i] = data[i] * (value + sin(double(i)) * 1./cos(double(i)));
//    }
//}

/********/
/* MAIN */
/********/
int main()
{
    const int N = 10000;

    const float value = 5.55f;

    const int rep_num = 20;

    // --- CPU memory allocations
    float *h_data1 = (float*) malloc(N*sizeof(float));
    float *h_data2 = (float*) malloc(N*sizeof(float));
    float *h_data1_ref = (float*) malloc(N*sizeof(float));
    float *h_data2_ref = (float*) malloc(N*sizeof(float));

    // --- CPU data initializations
    srand(time(NULL));
    for (int i=0; i<N; i++) {
        h_data1[i] = rand() / (float)RAND_MAX;  // cast needed: integer division would always yield 0
        h_data2[i] = rand() / (float)RAND_MAX;
    }

    // --- GPU memory allocations
    float *d_data1, *d_data2;
    gpuErrchk(cudaMalloc((void**)&d_data1, N*sizeof(float)));
    gpuErrchk(cudaMalloc((void**)&d_data2, N*sizeof(float)));

    // --- CPU -> GPU memory transfers
    gpuErrchk(cudaMemcpy(d_data1, h_data1, N*sizeof(float), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_data2, h_data2, N*sizeof(float), cudaMemcpyHostToDevice));

    // --- Reference results computation on the CPU
    for (int i=0; i<N; i++) {
        h_data1_ref[i] = h_data1[i] / value;
        h_data2_ref[i] = h_data2[i] * (value + sin(double(i)) * 1./cos(double(i)));
    }

    // --- Stream creations
    cudaStream_t stream1, stream2;
    gpuErrchk(cudaStreamCreate(&stream1)); 
    gpuErrchk(cudaStreamCreate(&stream2)); 

    // --- Launch parameters configuration
    int blockSize1, blockSize2, minGridSize1, minGridSize2, gridSize1, gridSize2;
    cudaOccupancyMaxPotentialBlockSize(&minGridSize1, &blockSize1, kernel_1, 0, N); 
    cudaOccupancyMaxPotentialBlockSize(&minGridSize2, &blockSize2, kernel_2, 0, N); 

    gridSize1 = (N + blockSize1 - 1) / blockSize1; 
    gridSize2 = (N + blockSize2 - 1) / blockSize2; 

    // --- Allocating space for SM IDs
    int *h_sm_11 = (int*) malloc(gridSize1*sizeof(int)); 
    int *h_sm_12 = (int*) malloc(gridSize1*sizeof(int));
    int *h_sm_21 = (int*) malloc(gridSize2*sizeof(int));
    int *h_sm_22 = (int*) malloc(gridSize2*sizeof(int));
    int *d_sm_11, *d_sm_12, *d_sm_21, *d_sm_22;
    gpuErrchk(cudaMalloc((void**)&d_sm_11, gridSize1*sizeof(int)));
    gpuErrchk(cudaMalloc((void**)&d_sm_12, gridSize1*sizeof(int)));
    gpuErrchk(cudaMalloc((void**)&d_sm_21, gridSize2*sizeof(int)));
    gpuErrchk(cudaMalloc((void**)&d_sm_22, gridSize2*sizeof(int)));

    // --- Timing individual kernels
    float time;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);

    for (int i=0; i<rep_num; i++) kernel_1<<<gridSize1, blockSize1>>>(d_data1, value, d_sm_11, N);   

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("Kernel 1 - elapsed time:  %3.3f ms \n", time/rep_num);

    cudaEventRecord(start, 0);

    for (int i=0; i<rep_num; i++) kernel_2<<<gridSize2, blockSize2>>>(d_data1, value, d_sm_21, N);   

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("Kernel 2 - elapsed time:  %3.3f ms \n", time/rep_num);

    // --- No stream case
    cudaEventRecord(start, 0);

    kernel_1<<<gridSize1, blockSize1>>>(d_data1, value, d_sm_11, N);   
#ifdef DEBUG_MODE
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize()); 
    gpuErrchk(cudaMemcpy(h_data1, d_data1, N*sizeof(float), cudaMemcpyDeviceToHost));
    // --- Results check
    for (int i=0; i<N; i++) {
        if (h_data1[i] != h_data1_ref[i]) {
            printf("Kernel1 - Error at i = %i; Host = %f; Device = %f\n", i, h_data1_ref[i], h_data1[i]);
            return 1;
        }
    }
#endif
    kernel_2<<<gridSize2, blockSize2>>>(d_data1, value, d_sm_21, N);   
#ifdef DEBUG_MODE
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize()); 
#endif
    kernel_1<<<gridSize1, blockSize1>>>(d_data2, value, d_sm_12, N); 
#ifdef DEBUG_MODE
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize()); 
    gpuErrchk(cudaMemcpy(d_data2, h_data2, N*sizeof(float), cudaMemcpyHostToDevice));
#endif
    kernel_2<<<gridSize2, blockSize2>>>(d_data2, value, d_sm_22, N); 
#ifdef DEBUG_MODE
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize()); 
    gpuErrchk(cudaMemcpy(h_data2, d_data2, N*sizeof(float), cudaMemcpyDeviceToHost));
    for (int i=0; i<N; i++) {
        if (h_data2[i] != h_data2_ref[i]) {
            printf("Kernel2 - Error at i = %i; Host = %f; Device = %f\n", i, h_data2_ref[i], h_data2[i]);
            return 1;
        }
    }
#endif

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("No stream - elapsed time:  %3.3f ms \n", time);

    // --- Stream case
    cudaEventRecord(start, 0);

    kernel_1<<<gridSize1, blockSize1, 0, stream1 >>>(d_data1, value, d_sm_11, N); 
#ifdef DEBUG_MODE
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize()); 
#endif
    kernel_1<<<gridSize1, blockSize1, 0, stream2 >>>(d_data2, value, d_sm_12, N); 
#ifdef DEBUG_MODE
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize()); 
#endif
    kernel_2<<<gridSize2, blockSize2, 0, stream1 >>>(d_data1, value, d_sm_21, N); 
#ifdef DEBUG_MODE
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize()); 
#endif
    kernel_2<<<gridSize2, blockSize2, 0, stream2 >>>(d_data2, value, d_sm_22, N); 
#ifdef DEBUG_MODE
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize()); 
#endif

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("Stream - elapsed time:  %3.3f ms \n", time);

    cudaStreamDestroy(stream1); 
    cudaStreamDestroy(stream2); 

    printf("Test passed!\n");

    gpuErrchk(cudaMemcpy(h_sm_11, d_sm_11, gridSize1*sizeof(int), cudaMemcpyDeviceToHost));
    gpuErrchk(cudaMemcpy(h_sm_12, d_sm_12, gridSize1*sizeof(int), cudaMemcpyDeviceToHost));
    gpuErrchk(cudaMemcpy(h_sm_21, d_sm_21, gridSize2*sizeof(int), cudaMemcpyDeviceToHost));
    gpuErrchk(cudaMemcpy(h_sm_22, d_sm_22, gridSize2*sizeof(int), cudaMemcpyDeviceToHost));

    printf("Kernel 1: gridSize = %i; blockSize = %i\n", gridSize1, blockSize1);
    printf("Kernel 2: gridSize = %i; blockSize = %i\n", gridSize2, blockSize2);
    for (int i=0; i<gridSize1; i++) {
        printf("Kernel 1 - Data 1: blockNumber = %i; SMID = %d\n", i, h_sm_11[i]);
        printf("Kernel 1 - Data 2: blockNumber = %i; SMID = %d\n", i, h_sm_12[i]);
    }
    for (int i=0; i<gridSize2; i++) {
        printf("Kernel 2 - Data 1: blockNumber = %i; SMID = %d\n", i, h_sm_21[i]);
        printf("Kernel 2 - Data 2: blockNumber = %i; SMID = %d\n", i, h_sm_22[i]);
    }
    cudaDeviceReset();

    return 0;
}
Accordingly, kernel_1 is computationally less expensive than kernel_2, as the timings below confirm.

Results for N = 100:

kernel_1    0.003ms
kernel_2    0.005ms
Kernel 1: gridSize = 1; blockSize = 100
Kernel 2: gridSize = 1; blockSize = 100
Kernel 1 - Data 1: blockNumber = 0; SMID = 0
Kernel 1 - Data 2: blockNumber = 0; SMID = 1
Kernel 2 - Data 1: blockNumber = 0; SMID = 0
Kernel 2 - Data 2: blockNumber = 0; SMID = 1
In this case, each kernel launches only a single block. This is the timeline:

As you can see, overlap occurs. Looking at the above results, the scheduler delivers the single blocks of the two calls to kernel_1 in parallel to the two available SMs, and then does the same for kernel_2. This seems to be the main reason why the overlap occurs.
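For reference, a similar timeline can be obtained from the command line with nvprof (available since CUDA 5.0); a sketch, assuming the executable name used in the question:

// nvprof --print-gpu-trace ./testCuStream 1024 512 512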

Results for N = 10000:

kernel_1    0.011ms
kernel_2    0.053ms
Kernel 1: gridSize = 14; blockSize = 768
Kernel 2: gridSize = 10; blockSize = 1024
Kernel 1 - Data 1: blockNumber = 0; SMID = 0
Kernel 1 - Data 2: blockNumber = 0; SMID = 1
Kernel 1 - Data 1: blockNumber = 1; SMID = 1
Kernel 1 - Data 2: blockNumber = 1; SMID = 0
Kernel 1 - Data 1: blockNumber = 2; SMID = 0
Kernel 1 - Data 2: blockNumber = 2; SMID = 1
Kernel 1 - Data 1: blockNumber = 3; SMID = 1
Kernel 1 - Data 2: blockNumber = 3; SMID = 0
Kernel 1 - Data 1: blockNumber = 4; SMID = 0
Kernel 1 - Data 2: blockNumber = 4; SMID = 1
Kernel 1 - Data 1: blockNumber = 5; SMID = 1
Kernel 1 - Data 2: blockNumber = 5; SMID = 0
Kernel 1 - Data 1: blockNumber = 6; SMID = 0
Kernel 1 - Data 2: blockNumber = 6; SMID = 0
Kernel 1 - Data 1: blockNumber = 7; SMID = 1
Kernel 1 - Data 2: blockNumber = 7; SMID = 1
Kernel 1 - Data 1: blockNumber = 8; SMID = 0
Kernel 1 - Data 2: blockNumber = 8; SMID = 1
Kernel 1 - Data 1: blockNumber = 9; SMID = 1
Kernel 1 - Data 2: blockNumber = 9; SMID = 0
Kernel 1 - Data 1: blockNumber = 10; SMID = 0
Kernel 1 - Data 2: blockNumber = 10; SMID = 0
Kernel 1 - Data 1: blockNumber = 11; SMID = 1
Kernel 1 - Data 2: blockNumber = 11; SMID = 1
Kernel 1 - Data 1: blockNumber = 12; SMID = 0
Kernel 1 - Data 2: blockNumber = 12; SMID = 1
Kernel 1 - Data 1: blockNumber = 13; SMID = 1
Kernel 1 - Data 2: blockNumber = 13; SMID = 0
Kernel 2 - Data 1: blockNumber = 0; SMID = 0
Kernel 2 - Data 2: blockNumber = 0; SMID = 0
Kernel 2 - Data 1: blockNumber = 1; SMID = 1
Kernel 2 - Data 2: blockNumber = 1; SMID = 1
Kernel 2 - Data 1: blockNumber = 2; SMID = 1
Kernel 2 - Data 2: blockNumber = 2; SMID = 0
Kernel 2 - Data 1: blockNumber = 3; SMID = 0
Kernel 2 - Data 2: blockNumber = 3; SMID = 1
Kernel 2 - Data 1: blockNumber = 4; SMID = 1
Kernel 2 - Data 2: blockNumber = 4; SMID = 0
Kernel 2 - Data 1: blockNumber = 5; SMID = 0
Kernel 2 - Data 2: blockNumber = 5; SMID = 1
Kernel 2 - Data 1: blockNumber = 6; SMID = 1
Kernel 2 - Data 2: blockNumber = 6; SMID = 0
Kernel 2 - Data 1: blockNumber = 7; SMID = 0
Kernel 2 - Data 2: blockNumber = 7; SMID = 1
Kernel 2 - Data 1: blockNumber = 8; SMID = 1
Kernel 2 - Data 2: blockNumber = 8; SMID = 0
Kernel 2 - Data 1: blockNumber = 9; SMID = 0
Kernel 2 - Data 2: blockNumber = 9; SMID = 1
This is the timeline:

In this case, no overlap occurs. According to the above results, this does not mean that the two SMs are not exploited simultaneously, but (I believe) that, due to the larger number of blocks to be launched, assigning blocks of two different kernels or two blocks of the same kernel makes little difference in performance, so the scheduler chooses the second option.


I have verified that the behavior stays the same when each thread is given more work to do.

The current code example launches more than 2^19 warps. kernel_diva and kernel_jokea perform very little processing, and devices of compute capability < 3.5 will dispatch all work from the first kernel before dispatching work from the second kernel. Given the short processing time, you may not see any overlap. If you reduce gridDim to (1,1,1) and increase the work per thread by 1000x (just execute a for loop), do you see overlap between the two kernels? Your kernels' performance could also improve considerably if each thread processes multiple data elements, reducing launch and index-calculation overhead.

Thanks for the comment. Until now I had assumed that one thread should process exactly one array slot; I recently found some notes breaking that assumption. I will take a close look and come back when I have significant results. Thanks again!

The wordpress article contains many inaccuracies. If you want a lower-level understanding of the GPU, I recommend watching the GTC 2013 talk "Performance Optimization: Programming Guidelines and GPU Architecture Reasons Behind Them".
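Following up on the first comment, here is a minimal sketch of the suggested experiment (hypothetical kernel name kernel_heavy; it reuses d_data1, d_data2, stream1 and stream2 from the answer's code). Each launch uses a single block, and every thread repeats the arithmetic 1000x, so the two launches run long enough for any overlap to show up on the timeline:

__global__ void kernel_heavy(float * __restrict__ data, const float value, int N)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < N)
        for (int r = 0; r < 1000; r++)   // inflate the per-thread work 1000x
            data[i] = data[i] * (value + sin(double(i)) * 1. / cos(double(i)));
}

// in main(), after the streams have been created: one block per launch, one launch per stream
kernel_heavy<<<1, 256, 0, stream1>>>(d_data1, 5.55f, 256);
kernel_heavy<<<1, 256, 0, stream2>>>(d_data2, 5.55f, 256);
gpuErrchk(cudaDeviceSynchronize());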