Cuda 如何在多个内核启动之间同步全局内存?

CUDA 如何在多个内核启动之间同步全局内存?(标签: cuda)我想在 FOR 循环(伪代码)中多次启动以下内核(代码见下文)。

我想在FOR循环(伪)中多次启动以下内核:

__global__ void kernel(T * t_dev){   // t_dev 是全局内存(global mem)中的输入数组
   __shared__ PREC tt[BLOCK_DIM];
   if(thid < m){ ... }   // (此处原文被截断)
从概念上讲,我所做的是从t_dev读取值。修改它们,并再次向global mem写信!然后我再次启动同一个内核

为什么我显然需要 __threadfence() 或 __syncthreads()?否则结果会出错,因为当同一内核再次启动时,全局内存写入尚未完成。这里就是这种情况,我的 GTX580 启用了设备重叠(device overlap)。

但是为什么在下一个内核启动时全局mem写入没有完成。。。这是因为设备重叠还是因为它总是这样?我想,当我们一个接一个地启动内核时,mem的写入/读取在一个内核之后完成…:-)

谢谢你的回答

一些代码:

 // Host-side iteration: alternately launch kernel A (step A) and kernel B
 // (step B) `loops` times over successive diagonal blocks (kernelAIdx).
 // NOTE(review): no stream argument is visible here — presumably both
 // launches go to the same (default) stream, in which case the launches are
 // serialized by the stream and B cannot start before A's writes complete;
 // confirm against the wrapper implementations.
 for(int kernelAIdx = 0; kernelAIdx < loops; kernelAIdx++){

      // Step A: SOR/prox sweep for diagonal block `kernelAIdx`.
      // Per the surrounding text, t_dev and x_new_dev carry state from
      // step A into step B each iteration.
      proxGPU::sorProxContactOrdered_1threads_StepA_kernelWrap<PREC,SorProxSettings1>(
              mu_dev,x_new_dev,T_dev,x_old_dev,d_dev,
              t_dev,
              kernelAIdx,
              pConvergedFlag_dev,
              m_absTOL,m_relTOL);


      // Step B: consumes t_dev/x_new_dev produced by step A for the same
      // block index.
      proxGPU::sorProx_StepB_kernelWrap<PREC,SorProxSettings1>(
              t_dev,
              T_dev,
              x_new_dev,
              kernelAIdx
              );

        }
for(int kernelAIdx = 0; kernelAIdx < loops; kernelAIdx++){ ... }   // (上面循环的截断重复片段)
这是循环中的两个内核,t_dev和x_new_dev,从步骤A移动到步骤B

内核A如下所示:

 template<typename PREC, int THREADS_PER_BLOCK, int BLOCK_DIM, int PROX_PACKAGES, typename TConvexSet>
 __global__ void sorProxContactOrdered_1threads_StepA_kernel( 
  utilCuda::Matrix<PREC> mu_dev,
  utilCuda::Matrix<PREC> y_dev,
  utilCuda::Matrix<PREC> T_dev,  
  utilCuda::Matrix<PREC> x_old_dev,
  utilCuda::Matrix<PREC> d_dev, 
  utilCuda::Matrix<PREC> t_dev, 
  int kernelAIdx,
  int maxNContacts, 
  bool * convergedFlag_dev, 
  PREC _absTOL, PREC _relTOL){

 //__threadfence() HERE OR AT THE END; THEN IT WORKS???? WHY

 // Assumend 1 Block, with THREADS_PER_BLOCK Threads and Column Major Matrix T_dev 

     int thid = threadIdx.x;
     int m = min(maxNContacts*PROX_PACKAGE_SIZE, BLOCK_DIM); // this is the actual size of the diagonal block!
     int i = kernelAIdx * BLOCK_DIM;
     int ii = i + thid;

     //First copy x_old_dev in shared
     __shared__ PREC xx[BLOCK_DIM]; // each thread writes one element, if its in the limit!!
     __shared__ PREC tt[BLOCK_DIM];

     if(thid < m){
        xx[thid] = x_old_dev.data[ii];
        tt[thid] = t_dev.data[ii];
     }
     __syncthreads();


     PREC absTOL = _absTOL;
     PREC relTOL = _relTOL;

     int jj;
     //PREC T_iijj;
     //Offset the T_dev_ptr to the start of the Block
     PREC * T_dev_ptr  = PtrElem_ColM(T_dev,i,i);
     PREC * mu_dev_ptr = &mu_dev.data[PROX_PACKAGES*kernelAIdx];
     __syncthreads();
     for(int j_t = 0; j_t < m ; j_t+=PROX_PACKAGE_SIZE){

        //Select the number of threads we need!

        // Here we process one [m x PROX_PACKAGE_SIZE] Block

        // First  Normal Direction ==========================================================
        jj =  i  +  j_t;
        __syncthreads();

        if( ii == jj ){ // select thread on the diagonal ...

           PREC x_new_n = (d_dev.data[ii] + tt[thid]);

           //Prox Normal! 
           if(x_new_n <= 0.0){
              x_new_n = 0.0;
           }
          /* if( !checkConverged(x_new,xx[thid],absTOL,relTOL)){
              *convergedFlag_dev = 0;
           }*/

           xx[thid] = x_new_n;
           tt[thid] = 0.0;
        }
        // all threads not on the diagonal fall into this sync!
        __syncthreads();


        // Select only m threads!
        if(thid < m){
           tt[thid] += T_dev_ptr[thid] * xx[j_t];
        }
        // ====================================================================================


        // wee need to syncronize here because one threads finished lambda_t2 with shared mem tt, which is updated from another thread!
        __syncthreads();



         // Second  Tangential Direction ==========================================================
        jj++;
        __syncthreads();
        if( ii == jj ){ // select thread on diagonal, one thread finishs T1 and T2 directions.

           // Prox tangential
           PREC lambda_T1 =  (d_dev.data[ii] + tt[thid]);
           PREC lambda_T2 =  (d_dev.data[ii+1] + tt[thid+1]);
           PREC radius = (*mu_dev_ptr) * xx[thid-1];
           PREC absvalue = sqrt(lambda_T1*lambda_T1 + lambda_T2*lambda_T2);

           if(absvalue > radius){
              lambda_T1   =  (lambda_T1  * radius ) / absvalue;
              lambda_T2   =  (lambda_T2  * radius ) / absvalue;
           }


           /*if( !checkConverged(lambda_T1,xx[thid],absTOL,relTOL)){
              *convergedFlag_dev = 0;
           }

           if( !checkConverged(lambda_T2,xx[thid+1],absTOL,relTOL)){
              *convergedFlag_dev = 0;
           }*/

           //Write the two values back!
           xx[thid] = lambda_T1;
           tt[thid] = 0.0;
           xx[thid+1] = lambda_T2;
           tt[thid+1] = 0.0;
        }

        // all threads not on the diagonal fall into this sync!
        __syncthreads();


        T_dev_ptr = PtrColOffset_ColM(T_dev_ptr,1,T_dev.outerStrideBytes);
        __syncthreads();
        if(thid < m){
           tt[thid] += T_dev_ptr[thid] * xx[j_t+1];
        }
        __syncthreads();
        T_dev_ptr = PtrColOffset_ColM(T_dev_ptr,1,T_dev.outerStrideBytes);
        __syncthreads();
        if(thid < m){
           tt[thid] += T_dev_ptr[thid] * xx[j_t+2];
        }
        // ====================================================================================


        __syncthreads();
        // move T_dev_ptr 1 column
        T_dev_ptr = PtrColOffset_ColM(T_dev_ptr,1,T_dev.outerStrideBytes);
        // move mu_ptr to nex contact
        __syncthreads();
        mu_dev_ptr = &mu_dev_ptr[1];
        __syncthreads();

     }
     __syncthreads();
     // Write back the results, dont need to syncronize because 
     // do it anyway to be safe for testing first!

     if(thid < m){
        y_dev.data[ii] = xx[thid]; // THIS IS UPDATED IN KERNEL B
        t_dev.data[ii] = tt[thid]; // THIS IS UPDATED IN KERNEL B
     }


     //__threadfence(); /// THIS STUPID THREADFENCE MAKES IT WORKING!
 // (以下是上方内核头部的机器翻译乱码重复,按上方完整英文版本重建:)
 template<typename PREC, int THREADS_PER_BLOCK, int BLOCK_DIM, int PROX_PACKAGES, typename TConvexSet>
 __global__ void sorProxContactOrdered_1threads_StepA_kernel( 
  utilCuda::Matrix<PREC> mu_dev,
  utilCuda::Matrix<PREC> y_dev,
  utilCuda::Matrix<PREC> T_dev,  
  utilCuda::Matrix<PREC> x_old_dev,
  utilCuda::Matrix<PREC> d_dev, 
  utilCuda::Matrix<PREC> t_dev, 
  int kernelAIdx,
  int maxNContacts, 
  bool * convergedFlag_dev, 
  PREC _absTOL, PREC _relTOL){

 //__threadfence() 放在这里或末尾;然后它就可以工作了????为什么

 // 假设 1 个块,有 THREADS_PER_BLOCK 个线程,列主序矩阵 T_dev

     int thid = threadIdx.x;
     int m = min(maxNContacts*PROX_PACKAGE_SIZE, BLOCK_DIM); // 这是对角块的实际大小!
     int i = kernelAIdx * BLOCK_DIM;
     int ii = i + thid;

     // 先把 x_old_dev 拷贝到共享内存
     __shared__ PREC xx[BLOCK_DIM]; // 如果在限制范围内,每个线程写入一个元素!!
     __shared__ PREC tt[BLOCK_DIM];

     if(thid < m){ ... }   // (此重复片段在原文中到此截断)
我将最终解与 CPU 解进行比较。这里我把 __syncthreads() 放在了所有能放的地方,只是为了先求安全!(这段代码做的是 Gauss-Seidel 迭代。)但是,如果开头或结尾没有 __threadfence(),它根本不起作用,这说不通。

很抱歉有这么多的代码,但也许你可以猜到问题出在哪里,因为我已经有点力不从心了,请解释为什么会发生这种情况? 我们多次检查算法,没有内存错误(Nsight报告)或错误
其他东西,一切正常…内核A只启动一个块!

如果启动连续的i
 template<typename PREC, int THREADS_PER_BLOCK, int BLOCK_DIM, int PROX_PACKAGES, typename TConvexSet>
 __global__ void sorProxContactOrdered_1threads_StepA_kernel( 
  utilCuda::Matrix<PREC> mu_dev,
  utilCuda::Matrix<PREC> y_dev,
  utilCuda::Matrix<PREC> T_dev,  
  utilCuda::Matrix<PREC> x_old_dev,
  utilCuda::Matrix<PREC> d_dev, 
  utilCuda::Matrix<PREC> t_dev, 
  int kernelAIdx,
  int maxNContacts, 
  bool * convergedFlag_dev, 
  PREC _absTOL, PREC _relTOL){

 //__threadfence() HERE OR AT THE END; THEN IT WORKS???? WHY

 // Assumend 1 Block, with THREADS_PER_BLOCK Threads and Column Major Matrix T_dev 

     int thid = threadIdx.x;
     int m = min(maxNContacts*PROX_PACKAGE_SIZE, BLOCK_DIM); // this is the actual size of the diagonal block!
     int i = kernelAIdx * BLOCK_DIM;
     int ii = i + thid;

     //First copy x_old_dev in shared
     __shared__ PREC xx[BLOCK_DIM]; // each thread writes one element, if its in the limit!!
     __shared__ PREC tt[BLOCK_DIM];

     if(thid < m){
        xx[thid] = x_old_dev.data[ii];
        tt[thid] = t_dev.data[ii];
     }
     __syncthreads();


     PREC absTOL = _absTOL;
     PREC relTOL = _relTOL;

     int jj;
     //PREC T_iijj;
     //Offset the T_dev_ptr to the start of the Block
     PREC * T_dev_ptr  = PtrElem_ColM(T_dev,i,i);
     PREC * mu_dev_ptr = &mu_dev.data[PROX_PACKAGES*kernelAIdx];
     __syncthreads();
     for(int j_t = 0; j_t < m ; j_t+=PROX_PACKAGE_SIZE){

        //Select the number of threads we need!

        // Here we process one [m x PROX_PACKAGE_SIZE] Block

        // First  Normal Direction ==========================================================
        jj =  i  +  j_t;
        __syncthreads();

        if( ii == jj ){ // select thread on the diagonal ...

           PREC x_new_n = (d_dev.data[ii] + tt[thid]);

           //Prox Normal! 
           if(x_new_n <= 0.0){
              x_new_n = 0.0;
           }
          /* if( !checkConverged(x_new,xx[thid],absTOL,relTOL)){
              *convergedFlag_dev = 0;
           }*/

           xx[thid] = x_new_n;
           tt[thid] = 0.0;
        }
        // all threads not on the diagonal fall into this sync!
        __syncthreads();


        // Select only m threads!
        if(thid < m){
           tt[thid] += T_dev_ptr[thid] * xx[j_t];
        }
        // ====================================================================================


        // wee need to syncronize here because one threads finished lambda_t2 with shared mem tt, which is updated from another thread!
        __syncthreads();



         // Second  Tangential Direction ==========================================================
        jj++;
        __syncthreads();
        if( ii == jj ){ // select thread on diagonal, one thread finishs T1 and T2 directions.

           // Prox tangential
           PREC lambda_T1 =  (d_dev.data[ii] + tt[thid]);
           PREC lambda_T2 =  (d_dev.data[ii+1] + tt[thid+1]);
           PREC radius = (*mu_dev_ptr) * xx[thid-1];
           PREC absvalue = sqrt(lambda_T1*lambda_T1 + lambda_T2*lambda_T2);

           if(absvalue > radius){
              lambda_T1   =  (lambda_T1  * radius ) / absvalue;
              lambda_T2   =  (lambda_T2  * radius ) / absvalue;
           }


           /*if( !checkConverged(lambda_T1,xx[thid],absTOL,relTOL)){
              *convergedFlag_dev = 0;
           }

           if( !checkConverged(lambda_T2,xx[thid+1],absTOL,relTOL)){
              *convergedFlag_dev = 0;
           }*/

           //Write the two values back!
           xx[thid] = lambda_T1;
           tt[thid] = 0.0;
           xx[thid+1] = lambda_T2;
           tt[thid+1] = 0.0;
        }

        // all threads not on the diagonal fall into this sync!
        __syncthreads();


        T_dev_ptr = PtrColOffset_ColM(T_dev_ptr,1,T_dev.outerStrideBytes);
        __syncthreads();
        if(thid < m){
           tt[thid] += T_dev_ptr[thid] * xx[j_t+1];
        }
        __syncthreads();
        T_dev_ptr = PtrColOffset_ColM(T_dev_ptr,1,T_dev.outerStrideBytes);
        __syncthreads();
        if(thid < m){
           tt[thid] += T_dev_ptr[thid] * xx[j_t+2];
        }
        // ====================================================================================


        __syncthreads();
        // move T_dev_ptr 1 column
        T_dev_ptr = PtrColOffset_ColM(T_dev_ptr,1,T_dev.outerStrideBytes);
        // move mu_ptr to nex contact
        __syncthreads();
        mu_dev_ptr = &mu_dev_ptr[1];
        __syncthreads();

     }
     __syncthreads();
     // Write back the results, dont need to syncronize because 
     // do it anyway to be safe for testing first!

     if(thid < m){
        y_dev.data[ii] = xx[thid]; // THIS IS UPDATED IN KERNEL B
        t_dev.data[ii] = tt[thid]; // THIS IS UPDATED IN KERNEL B
     }


     //__threadfence(); /// THIS STUPID THREADFENCE MAKES IT WORKING!