CUPTI blocks CUDA kernel launches in multi-threaded code


I am using CUDA 4.1 and CUPTI on a Tesla C2070.

The code has two threads. The first thread launches a long-running kernel and waits on cudaDeviceSynchronize(); the second thread then launches a small kernel.

I have subscribed to the CUPTI_RUNTIME_TRACE_CBID_cudaConfigureCall_v3020 and CUPTI_RUNTIME_TRACE_CBID_cudaLaunch_v3020 callbacks.

This causes the launch of the second kernel to be blocked until the first thread completes cudaDeviceSynchronize(): the second thread does not return from cudaConfigureCall() until the first thread's cudaDeviceSynchronize() finishes.

This does not happen if I do not subscribe with CUPTI. It looks like a nasty performance defect in CUPTI.

The call stacks below show the state of each thread; the full code is attached after them.

(gdb) info threads
  4 Thread 0x7f731467c710 (LWP 29708)  0x00000037f4ada083 in select () from /lib64/libc.so.6
  3 Thread 0x7f7312b50710 (LWP 29709)  0x00007f7314d7e3a6 in ?? () from /usr/lib64/libcuda.so.1
  2 Thread 0x7f731214f710 (LWP 29710)  0x00000037f4ac88d7 in sched_yield () from /lib64/libc.so.6
* 1 Thread 0x7f731477e720 (LWP 29707)  0x00000037f520803d in pthread_join () from /lib64/libpthread.so.0
(gdb) thread 2
[Switching to thread 2 (Thread 0x7f731214f710 (LWP 29710))]#0  0x00000037f4ac88d7 in sched_yield () from /lib64/libc.so.6
(gdb) bt
#0  0x00000037f4ac88d7 in sched_yield () from /lib64/libc.so.6
#1  0x00007f73149fb73c in ?? () from /usr/local/cuda/extras/CUPTI/lib64/libcupti.so.4
#2  0x00007f7314dabac3 in ?? () from /usr/lib64/libcuda.so.1
#3  0x00007f7314db1020 in ?? () from /usr/lib64/libcuda.so.1
#4  0x00007f73147bbee8 in cudaConfigureCall () from /usr/local/cuda/lib64/libcudart.so.4
#5  0x000000000040110f in Thread2 () at event_sampling.cu:121
#6  0x00000037f52077e1 in start_thread () from /lib64/libpthread.so.0
#7  0x00000037f4ae152d in clone () from /lib64/libc.so.6
(gdb) thread 3
[Switching to thread 3 (Thread 0x7f7312b50710 (LWP 29709))]#0  0x00007f7314d7e3a6 in ?? () from /usr/lib64/libcuda.so.1
(gdb) bt
#0  0x00007f7314d7e3a6 in ?? () from /usr/lib64/libcuda.so.1
#1  0x00007f7314d36b5a in ?? () from /usr/lib64/libcuda.so.1
#2  0x00007f7314d08976 in ?? () from /usr/lib64/libcuda.so.1
#3  0x00007f7314d396a3 in ?? () from /usr/lib64/libcuda.so.1
#4  0x00007f7314d39a06 in ?? () from /usr/lib64/libcuda.so.1
#5  0x00007f7314d08a29 in ?? () from /usr/lib64/libcuda.so.1
#6  0x00007f7314cfb830 in ?? () from /usr/lib64/libcuda.so.1
#7  0x00007f7314cdafa4 in ?? () from /usr/lib64/libcuda.so.1
#8  0x00007f731478ea13 in ?? () from /usr/local/cuda/lib64/libcudart.so.4
#9  0x00007f73147c3827 in cudaDeviceSynchronize () from /usr/local/cuda/lib64/libcudart.so.4
#10 0x0000000000400fe2 in Thread1 (ip=0x0) at event_sampling.cu:101
#11 0x00000037f52077e1 in start_thread () from /lib64/libpthread.so.0
#12 0x00000037f4ae152d in clone () from /lib64/libc.so.6
(gdb) thread 4
[Switching to thread 4 (Thread 0x7f731467c710 (LWP 29708))]#0  0x00000037f4ada083 in select () from /lib64/libc.so.6
(gdb) bt
#0  0x00000037f4ada083 in select () from /lib64/libc.so.6
#1  0x00007f731524147b in ?? () from /usr/lib64/libcuda.so.1
#2  0x00007f7314d45d9b in ?? () from /usr/lib64/libcuda.so.1
#3  0x00007f7315242819 in ?? () from /usr/lib64/libcuda.so.1
#4  0x00000037f52077e1 in start_thread () from /lib64/libpthread.so.0
#5  0x00000037f4ae152d in clone () from /lib64/libc.so.6
(gdb) 
Code

/*
 * Copyright 2011 NVIDIA Corporation. All rights reserved.
 *
 * Sample app to demonstrate use of CUPTI library to obtain profiler
 * event values by sampling.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda.h>
#include <cupti.h>
#include <unistd.h>
#include <pthread.h>

#define CHECK_CU_ERROR(err, cufunc)                                     \
  if (err != CUDA_SUCCESS)                                              \
    {                                                                   \
      printf ("Error %d for CUDA Driver API function '%s'.\n",          \
              err, cufunc);                                             \
      exit(-1);                                                         \
    }

#define N 100000

static CUcontext context;
static CUdevice device;
static char *eventName;

// Device code: long-running kernel launched by Thread1
__global__ void VecAdd(const int* A, const int* B, int* C, int size)
{
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  for (long long m = 0; m < 100; m++)
    for (long long n = 0; n < 100000; n++)
      if (i < size)
        C[i] = A[i] + B[i];
}

static void
initVec(int *vec, int n)
{
  for (int i = 0; i < n; i++)
    vec[i] = i;
}

// Device code: small kernel launched by Thread2
__global__ void VecSub(const int* A, const int* B, int* C, int size)
{
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  for (long long n = 0; n < 100000; n++)
    if (i < size)
      C[i] = A[i] - B[i];
}

int *d_A; int *d_B; int *d_C;

cudaStream_t stream[2];
pthread_t threads[2];

static void *
Thread1(void *ip)
{
  fprintf(stderr, "\n Thread1 started");
  size_t size = N * sizeof(int);
  int threadsPerBlock = 0;
  int blocksPerGrid = 0;
  int *h_A, *h_B, *h_C;

  // Allocate input vectors h_A and h_B in host memory
  h_A = (int*)malloc(size);
  h_B = (int*)malloc(size);
  h_C = (int*)malloc(size);

  // Initialize input vectors
  initVec(h_A, N);
  initVec(h_B, N);
  memset(h_C, 0, size);

  // Allocate vectors in device memory
  cudaMalloc((void**)&d_A, size);
  cudaMalloc((void**)&d_B, size);
  cudaMalloc((void**)&d_C, size);

  // Copy vectors from host memory to device memory
  cudaMemcpyAsync(d_A, h_A, size, cudaMemcpyHostToDevice, stream[0]);
  cudaMemcpyAsync(d_B, h_B, size, cudaMemcpyHostToDevice, stream[0]);

  threadsPerBlock = 256;
  blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;

  fprintf(stderr, "\n Kernel Launch Thread1"); fflush(stderr);
  VecAdd<<<blocksPerGrid, threadsPerBlock, 0, stream[0]>>>(d_A, d_B, d_C, N);
  fprintf(stderr, "\n Kernel Launched Thread1"); fflush(stderr);
  fprintf(stderr, "\n Start cudaDeviceSynchronize Thread1"); fflush(stderr);
  cudaDeviceSynchronize();
  fprintf(stderr, "\n End cudaDeviceSynchronize Thread1"); fflush(stderr);
  return 0;
}

static void *
Thread2(void *)
{
  sleep(5);

  fprintf(stderr, "\n Thread2 started");
  int threadsPerBlock = 256;
  int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;

  fprintf(stderr, "\n Kernel Launch Thread2"); fflush(stderr);
  VecSub<<<blocksPerGrid, threadsPerBlock, 0, stream[1]>>>(d_A, d_B, d_C, N);
  fprintf(stderr, "\n Kernel Launched Thread2"); fflush(stderr);
  fprintf(stderr, "\n Start cudaDeviceSynchronize Thread2"); fflush(stderr);
  cudaDeviceSynchronize();
  fprintf(stderr, "\n End cudaDeviceSynchronize Thread2"); fflush(stderr);
  return 0;
}

// Runtime API callback: on entry to cudaConfigureCall, report the stream id
void CUPTIAPI CallBack(void *userdata, CUpti_CallbackDomain domain, CUpti_CallbackId cbid, const void *cbData)
{
  uint32_t streamId = 0;
  const CUpti_CallbackData *cbInfo = (const CUpti_CallbackData *) cbData;
  if (cbid == CUPTI_RUNTIME_TRACE_CBID_cudaConfigureCall_v3020 && cbInfo->callbackSite == CUPTI_API_ENTER) {
    fprintf(stderr, "\n Event created");
    cudaConfigureCall_v3020_params *params = (cudaConfigureCall_v3020_params *) cbInfo->functionParams;
    cuptiGetStreamId(cbInfo->context, (CUstream) params->stream, &streamId);
    printf("\n stream %u", streamId);
  }
}

int
main(int argc, char *argv[])
{
  cudaStreamCreate(&stream[0]);
  cudaStreamCreate(&stream[1]);

#if 1
  // Subscribe to the two runtime API callbacks mentioned above
  CUpti_SubscriberHandle subscriber;
  cuptiSubscribe(&subscriber, (CUpti_CallbackFunc) CallBack, 0);
  cuptiEnableCallback(1, subscriber, CUPTI_CB_DOMAIN_RUNTIME_API, CUPTI_RUNTIME_TRACE_CBID_cudaLaunch_v3020);
  cuptiEnableCallback(1, subscriber, CUPTI_CB_DOMAIN_RUNTIME_API, CUPTI_RUNTIME_TRACE_CBID_cudaConfigureCall_v3020);
#endif

  cudaDeviceSynchronize();

  pthread_create(&threads[0], 0, Thread1, 0);
  pthread_create(&threads[1], 0, Thread2, 0);

  pthread_join(threads[0], 0);
  pthread_join(threads[1], 0);

  fprintf(stderr, "\n --------------over -----------");
  return 0;
}

This is probably due to the use of cudaDeviceSynchronize() in both threads.

cudaDeviceSynchronize() forces the entire device to complete all previously issued commands before any subsequent command executes. It is a heavy hammer; use it sparingly.

In this case, I recommend cudaStreamSynchronize() instead, which waits only for the commands queued in a single stream. If you need one stream to wait on another, use a cudaEvent together with cudaStreamWaitEvent(), as sketched below.
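
Applied to the code above, each thread would then block only on its own stream. A minimal sketch reusing stream[], the kernels, and the launch configuration from the attached code (the event name kernelDone is illustrative, not part of the original):

// Thread1: wait only for the work issued to stream[0]
VecAdd<<<blocksPerGrid, threadsPerBlock, 0, stream[0]>>>(d_A, d_B, d_C, N);
cudaStreamSynchronize(stream[0]);   // blocks this host thread, not the whole device

// Thread2: wait only for the work issued to stream[1]
VecSub<<<blocksPerGrid, threadsPerBlock, 0, stream[1]>>>(d_A, d_B, d_C, N);
cudaStreamSynchronize(stream[1]);

// If stream[1] must not start until stream[0]'s kernel has finished,
// order the streams on the device instead of blocking the host:
cudaEvent_t kernelDone;
cudaEventCreate(&kernelDone);
cudaEventRecord(kernelDone, stream[0]);         // completion marker in stream[0]
cudaStreamWaitEvent(stream[1], kernelDone, 0);  // GPU-side wait; host keeps running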

CUPTI only disables concurrent kernels when events are collected using CUPTI_EVENT_COLLECTION_MODE_KERNEL.
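
If you later collect events as well, the collection mode is set per context with cuptiSetEventCollectionMode(). A minimal sketch of choosing the continuous mode, which leaves concurrent kernels enabled (it assumes the runtime has already created the current context; the error handling is illustrative):

CUcontext ctx = NULL;
cuCtxGetCurrent(&ctx);  // context is created lazily by the CUDA runtime

// Continuous collection samples event values without serializing kernels;
// CUPTI_EVENT_COLLECTION_MODE_KERNEL is the mode that disables concurrency.
CUptiResult res = cuptiSetEventCollectionMode(ctx, CUPTI_EVENT_COLLECTION_MODE_CONTINUOUS);
if (res != CUPTI_SUCCESS)
  fprintf(stderr, "cuptiSetEventCollectionMode failed: %d\n", (int) res);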