Linux CUDA - 关于 "branch" 和 "divergent branch" 的问题(2)

Linux CUDA - 关于 "branch" 和 "divergent branch" 的问题(2),linux,cuda,profiler,Linux,Cuda,Profiler,我使用 NVIDIA Visual Profiler 分析我的代码。测试内核是: //////////////////////////////////////////////////////////////// Group 1 static __global__ void gpu_test_divergency_0(float *a, float *b) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid < 0

我使用NVIDIA Visual Profiler分析我的代码。测试内核是:

//////////////////////////////////////////////////////////////// Group 1
// Group 1 test kernel: both paths store the raw tid.
// "tid < 0" is uniformly false for every launched thread (tid >= 0),
// so no warp is expected to diverge; probes the profiler's branch counter.
static __global__ void gpu_test_divergency_0(float *a, float *b)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid < 0)   // never taken: tid is always non-negative
    {
         a[tid] = tid;
    }
    else
    {
         b[tid] = tid;
    }
}
// Group 1 test kernel: "tid == 0" is true for exactly one thread in the
// whole grid, so only the first warp of block 0 can diverge here.
static __global__ void gpu_test_divergency_1(float *a, float *b)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid == 0)   // true for a single thread only
    {
         a[tid] = tid;
    }
    else
    {
         b[tid] = tid;
    }
}
// Group 1 test kernel: "tid >= 0" is uniformly true for every thread,
// so the else path is dead and no warp should diverge.
static __global__ void gpu_test_divergency_2(float *a, float *b)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid >= 0)   // always taken
    {
         a[tid] = tid;
    }
    else
    {
         b[tid] = tid;   // unreachable for valid launches
    }
}
// Group 1 test kernel: "tid > 0" is false only for tid 0, so only the
// first warp of block 0 is a candidate for divergence.
static __global__ void gpu_test_divergency_3(float *a, float *b)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid > 0)   // false for exactly one thread (tid == 0)
    {
         a[tid] = tid;
    }
    else
    {
         b[tid] = tid;
    }
}
//////////////////////////////////////////////////////////////// Group 2
// Group 2 test kernel: same (always-false) condition as _0, but the two
// paths store tid+1 / tid+2 (integer adds), so each side does different
// work — this is what varies between Group 1 and Group 2.
static __global__ void gpu_test_divergency_4(float *a, float *b)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid < 0)   // never taken: tid is always non-negative
    {
         a[tid] = tid + 1;
    }
    else
    {
         b[tid] = tid + 2;
    }
}
// Group 2 test kernel: "tid == 0" holds for one thread only, with
// distinct integer adds on the two paths.
static __global__ void gpu_test_divergency_5(float *a, float *b)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid == 0)   // true for a single thread only
    {
         a[tid] = tid + 1;
    }
    else
    {
         b[tid] = tid + 2;
    }
}
// Group 2 test kernel: "tid >= 0" is uniformly true; else path is dead.
static __global__ void gpu_test_divergency_6(float *a, float *b)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid >= 0)   // always taken
    {
         a[tid] = tid + 1;
    }
    else
    {
         b[tid] = tid + 2;   // unreachable for valid launches
    }
}
// Group 2 test kernel: "tid > 0" is false only for tid 0, with distinct
// integer adds on the two paths.
static __global__ void gpu_test_divergency_7(float *a, float *b)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid > 0)   // false for exactly one thread (tid == 0)
    {
         a[tid] = tid + 1;
    }
    else
    {
         b[tid] = tid + 2;
    }
}
//////////////////////////////////////////////////////////////// Group 3
// Group 3 test kernel: like _4 but with DOUBLE literals (1.0 / 2.0, not
// 1.0f) — the arithmetic is promoted to double and converted back to
// float. This is intentional: the double math is the only difference
// from Group 2 and is what this group is measuring.
static __global__ void gpu_test_divergency_8(float *a, float *b)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid < 0)   // never taken: tid is always non-negative
    {
         a[tid] = tid + 1.0;
    }
    else
    {
         b[tid] = tid + 2.0;
    }
}
// Group 3 test kernel: "tid == 0" (one thread) plus intentional double
// literals (1.0 / 2.0) — see _8; do not change these to 1.0f.
static __global__ void gpu_test_divergency_9(float *a, float *b)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid == 0)   // true for a single thread only
    {
         a[tid] = tid + 1.0;
    }
    else
    {
         b[tid] = tid + 2.0;
    }
}
// Group 3 test kernel: "tid >= 0" always true, with intentional double
// literals (see _8).
static __global__ void gpu_test_divergency_10(float *a, float *b)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid >= 0)   // always taken
    {
         a[tid] = tid + 1.0;
    }
    else
    {
         b[tid] = tid + 2.0;   // unreachable for valid launches
    }
}
// Group 3 test kernel: "tid > 0" false only for tid 0, with intentional
// double literals (see _8).
static __global__ void gpu_test_divergency_11(float *a, float *b)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid > 0)   // false for exactly one thread (tid == 0)
    {
         a[tid] = tid + 1.0;
    }
    else
    {
         b[tid] = tid + 2.0;
    }
}
当我启动测试内核时(原文中的 <<<...>>> 启动配置在转换中丢失),我从 profiler 获得如下结果:

gpu_test_divergency_0 :  Branch Efficiency = 100% branch = 1 divergent branch = 0
gpu_test_divergency_1 :  Branch Efficiency = 100% branch = 1 divergent branch = 0
gpu_test_divergency_2 :  Branch Efficiency = 100% branch = 1 divergent branch = 0
gpu_test_divergency_3 :  Branch Efficiency = 100% branch = 1 divergent branch = 0

gpu_test_divergency_4 :  Branch Efficiency = 100% branch = 3 divergent branch = 0
gpu_test_divergency_5 :  Branch Efficiency = 100% branch = 3 divergent branch = 0
gpu_test_divergency_6 :  Branch Efficiency = 100% branch = 2 divergent branch = 0
gpu_test_divergency_7 :  Branch Efficiency = 100% branch = 3 divergent branch = 0

gpu_test_divergency_8 :  Branch Efficiency = 100% branch = 3 divergent branch = 0
gpu_test_divergency_9 :  Branch Efficiency = 75%  branch = 4 divergent branch = 1
gpu_test_divergency_10 : Branch Efficiency = 100% branch = 2 divergent branch = 0
gpu_test_divergency_11 : Branch Efficiency = 75%  branch = 4 divergent branch = 1
gpu_test_divergency_0 :  Branch Efficiency = 100% branch = 2 divergent branch = 0
gpu_test_divergency_1 :  Branch Efficiency = 100% branch = 2 divergent branch = 0
gpu_test_divergency_2 :  Branch Efficiency = 100% branch = 2 divergent branch = 0
gpu_test_divergency_3 :  Branch Efficiency = 100% branch = 2 divergent branch = 0

gpu_test_divergency_4 :  Branch Efficiency = 100% branch = 6 divergent branch = 0
gpu_test_divergency_5 :  Branch Efficiency = 100% branch = 6 divergent branch = 0
gpu_test_divergency_6 :  Branch Efficiency = 100% branch = 4 divergent branch = 0
gpu_test_divergency_7 :  Branch Efficiency = 100% branch = 5 divergent branch = 0

gpu_test_divergency_8 :  Branch Efficiency = 100%  branch = 6 divergent branch = 0
gpu_test_divergency_9 :  Branch Efficiency = 85.7% branch = 7 divergent branch = 1
gpu_test_divergency_10 : Branch Efficiency = 100%  branch = 4 divergent branch = 0
gpu_test_divergency_11 : Branch Efficiency = 83.3% branch = 6 divergent branch = 1
我在Linux上使用“GeForce GTX 570”和CUDA功能2.0以及NVIDIA Visual Profiler v4.2。根据文件:

"branch"(分支)——"执行内核的线程所执行的分支数。如果 warp 中至少有一个线程执行了该分支,此计数器加 1。"

"divergent branch"(发散分支)——"warp 内发散分支的数目。如果 warp 中至少有一个线程通过依赖数据的条件分支发生发散(即走了不同的执行路径),此计数器加 1。"

但我真的对结果感到困惑。为什么每个测试组的 "branch" 数量不同?为什么只有第三个测试组看起来有正确的 "divergent branch" 数目?

@JackOLantern:我在发布(release)模式下编译。我按你的方式做了反汇编。"gpu_test_divergency_4" 的结果与您的完全相同,但 "gpu_test_divergency_0" 的结果不同:

    Function : _Z21gpu_test_divergency_0PfS_
/*0000*/     /*0x00005de428004404*/     MOV R1, c [0x1] [0x100];
/*0008*/     /*0x94001c042c000000*/     S2R R0, SR_CTAid_X;
/*0010*/     /*0x84009c042c000000*/     S2R R2, SR_Tid_X;
/*0018*/     /*0x20009ca320044000*/     IMAD R2, R0, c [0x0] [0x8], R2;
/*0020*/     /*0xfc21dc23188e0000*/     ISETP.LT.AND P0, pt, R2, RZ, pt;
/*0028*/     /*0x0920de0418000000*/     I2F.F32.S32 R3, R2;
/*0030*/     /*0x9020204340004000*/     @!P0 ISCADD R0, R2, c [0x0] [0x24], 0x2;
/*0038*/     /*0x8020804340004000*/     @P0 ISCADD R2, R2, c [0x0] [0x20], 0x2;
/*0040*/     /*0x0000e08590000000*/     @!P0 ST [R0], R3;
/*0048*/     /*0x0020c08590000000*/     @P0 ST [R2], R3;
/*0050*/     /*0x00001de780000000*/     EXIT;
我想,正如您所说,转换指令(本例中为I2F)不会添加额外的分支


但我看不出这些反汇编代码与 profiler 结果之间的关系。我从另一篇文章(原文链接在转换中丢失)中了解到,发散分支是根据 SM 上实际运行的线程(warp)情况统计的。所以我想,我们不能仅凭这些反汇编代码推断每次实际运行的分支发散情况。我说得对吗?

跟进——使用表决(vote)内建函数检查线程发散

我认为检查 warp 内线程发散的最佳方法是使用表决(vote)内建函数,特别是 __ballot 和 __popc。Shane Cook 所著《CUDA Programming》(Morgan Kaufmann)一书中提供了关于 __ballot 和 __popc 的详细说明。

__ballot 的原型如下:

unsigned int __ballot(int predicate);
gpu_test_divergency_0

/*0000*/        MOV R1, c[0x1][0x100];                 /* 0x2800440400005de4 */
/*0008*/        S2R R0, SR_CTAID.X;                    /* 0x2c00000094001c04 */
/*0010*/        S2R R2, SR_TID.X;                      /* 0x2c00000084009c04 */
/*0018*/        IMAD R2, R0, c[0x0][0x8], R2;          /* 0x2004400020009ca3 */
/*0020*/        ISETP.LT.AND P0, PT, R2, RZ, PT;       /* 0x188e0000fc21dc23 */
/*0028*/        I2F.F32.S32 R0, R2;                    /* 0x1800000009201e04 */
/*0030*/   @!P0 ISCADD R3, R2, c[0x0][0x24], 0x2;      /* 0x400040009020e043 */
/*0038*/    @P0 ISCADD R2, R2, c[0x0][0x20], 0x2;      /* 0x4000400080208043 */
/*0040*/   @!P0 ST [R3], R0;                           /* 0x9000000000302085 */
/*0048*/    @P0 ST [R2], R0;                           /* 0x9000000000200085 */
/*0050*/        EXIT ;                                 /* 0x8000000000001de7 */
如果谓词非零,__ballot 返回一个第 N 位被置位的值,其中 N 是 threadIdx.x。

另一方面,__popc 返回其 32 位参数中被置位的位数。

因此,联合使用 __ballot、__popc 和 atomicAdd,就可以检查一个 warp 是否发散。

为此,我设置了以下代码

#include <cuda.h>
#include <stdio.h>
#include <iostream>

#include <cuda.h>
#include <cuda_runtime.h>

// Emulates the __ballot() warp vote intrinsic without using the intrinsic:
// each thread returns only its own lane bit when its predicate is true.
// The per-thread results are combined by the caller (here popcounted and
// summed via atomicAdd) to obtain the warp-wide count.
__device__ unsigned int __ballot_non_atom(int predicate)
{
    // Use an unsigned literal: for lane 31, "1 << 31" overflows a signed
    // int (undefined behavior); "1u << 31" is well defined.
    if (predicate != 0) return (1u << (threadIdx.x % 32));
    else return 0;
}

// For each warp, counts how many threads satisfy the predicate (tid > 2):
// every lane popcounts its emulated ballot contribution and accumulates
// it into the warp's slot of d_ballot with atomicAdd.
__global__ void gpu_test_divergency_0(unsigned int* d_ballot, int Num_Warps_per_Block)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;

    // Warp index inside the block (32 threads per warp).
    const unsigned int warpInBlock = threadIdx.x / 32;

    // One counter slot per warp in the grid.
    const unsigned int slot = warpInBlock + blockIdx.x * Num_Warps_per_Block;
    atomicAdd(&d_ballot[slot], __popc(__ballot_non_atom(tid > 2)));
}

// NOTE: the original used <conio.h>/getch() to pause the console, which
// are Windows-only and do not exist on Linux (the question's platform);
// the pause has been removed and basic CUDA error checking added.

int main(int argc, char *argv[])
{
    // Launch configuration: one block of 64 threads = 2 warps.
    unsigned int Num_Threads_per_Block      = 64;
    unsigned int Num_Blocks_per_Grid        = 1;
    unsigned int Num_Warps_per_Block        = Num_Threads_per_Block/32;
    unsigned int Num_Warps_per_Grid         = (Num_Threads_per_Block*Num_Blocks_per_Grid)/32;

    // One counter per warp in the grid, zero-initialized on the host.
    unsigned int* h_ballot = (unsigned int*)malloc(Num_Warps_per_Grid*sizeof(unsigned int));
    unsigned int* d_ballot = NULL;
    if (cudaMalloc((void**)&d_ballot, Num_Warps_per_Grid*sizeof(unsigned int)) != cudaSuccess) {
        std::cerr << "cudaMalloc failed\n";
        free(h_ballot);
        return EXIT_FAILURE;
    }

    for (unsigned int i = 0; i < Num_Warps_per_Grid; i++) h_ballot[i] = 0;

    cudaMemcpy(d_ballot, h_ballot, Num_Warps_per_Grid*sizeof(unsigned int), cudaMemcpyHostToDevice);

    gpu_test_divergency_0<<<Num_Blocks_per_Grid,Num_Threads_per_Block>>>(d_ballot,Num_Warps_per_Block);

    // Catch launch-configuration errors before reading results back.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        std::cerr << "kernel launch failed: " << cudaGetErrorString(err) << "\n";
        cudaFree(d_ballot);
        free(h_ballot);
        return EXIT_FAILURE;
    }

    // Blocking copy on the default stream also synchronizes with the kernel.
    cudaMemcpy(h_ballot, d_ballot, Num_Warps_per_Grid*sizeof(unsigned int), cudaMemcpyDeviceToHost);

    // A warp is non-divergent iff the predicate held for 0 or all 32 lanes.
    for (unsigned int i = 0; i < Num_Warps_per_Grid; i++) {
        if ((h_ballot[i] == 0)||(h_ballot[i] == 32)) std::cout << "Warp " << i << " IS NOT divergent- Predicate true for " << h_ballot[i] << " threads\n";
            else std::cout << "Warp " << i << " IS divergent - Predicate true for " << h_ballot[i] << " threads\n";
    }

    cudaFree(d_ballot);
    free(h_ballot);
    return EXIT_SUCCESS;
}

根据以上内容,我预期两种方式(模拟版与内建 __ballot 版)的分支发散检测结果应当一致。


您是在调试模式下编译还是在发布模式下编译?

后续——使用表决(vote)内建函数检查线程发散

我认为检查 warp 内线程发散的最佳方法是使用表决(vote)内建函数,特别是 __ballot 和 __popc。Shane Cook 所著《CUDA Programming》(Morgan Kaufmann)一书中提供了关于 __ballot 和 __popc 的详细说明。

__ballot 的原型如下:

unsigned int __ballot(int predicate);
gpu_test_divergency_0

/*0000*/        MOV R1, c[0x1][0x100];                 /* 0x2800440400005de4 */
/*0008*/        S2R R0, SR_CTAID.X;                    /* 0x2c00000094001c04 */
/*0010*/        S2R R2, SR_TID.X;                      /* 0x2c00000084009c04 */
/*0018*/        IMAD R2, R0, c[0x0][0x8], R2;          /* 0x2004400020009ca3 */
/*0020*/        ISETP.LT.AND P0, PT, R2, RZ, PT;       /* 0x188e0000fc21dc23 */
/*0028*/        I2F.F32.S32 R0, R2;                    /* 0x1800000009201e04 */
/*0030*/   @!P0 ISCADD R3, R2, c[0x0][0x24], 0x2;      /* 0x400040009020e043 */
/*0038*/    @P0 ISCADD R2, R2, c[0x0][0x20], 0x2;      /* 0x4000400080208043 */
/*0040*/   @!P0 ST [R3], R0;                           /* 0x9000000000302085 */
/*0048*/    @P0 ST [R2], R0;                           /* 0x9000000000200085 */
/*0050*/        EXIT ;                                 /* 0x8000000000001de7 */
如果谓词非零,__ballot 返回一个第 N 位被置位的值,其中 N 是 threadIdx.x。

另一方面,__popc 返回其 32 位参数中被置位的位数。

因此,联合使用 __ballot、__popc 和 atomicAdd,就可以检查一个 warp 是否发散。

为此,我设置了以下代码

#include <cuda.h>
#include <stdio.h>
#include <iostream>

#include <cuda.h>
#include <cuda_runtime.h>

// Emulates the __ballot() warp vote intrinsic without using the intrinsic:
// each thread returns only its own lane bit when its predicate is true.
// The per-thread results are combined by the caller (here popcounted and
// summed via atomicAdd) to obtain the warp-wide count.
__device__ unsigned int __ballot_non_atom(int predicate)
{
    // Use an unsigned literal: for lane 31, "1 << 31" overflows a signed
    // int (undefined behavior); "1u << 31" is well defined.
    if (predicate != 0) return (1u << (threadIdx.x % 32));
    else return 0;
}

// For each warp, counts how many threads satisfy the predicate (tid > 2):
// every lane popcounts its emulated ballot contribution and accumulates
// it into the warp's slot of d_ballot with atomicAdd.
__global__ void gpu_test_divergency_0(unsigned int* d_ballot, int Num_Warps_per_Block)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;

    // Warp index inside the block (32 threads per warp).
    const unsigned int warpInBlock = threadIdx.x / 32;

    // One counter slot per warp in the grid.
    const unsigned int slot = warpInBlock + blockIdx.x * Num_Warps_per_Block;
    atomicAdd(&d_ballot[slot], __popc(__ballot_non_atom(tid > 2)));
}

// NOTE: the original used <conio.h>/getch() to pause the console, which
// are Windows-only and do not exist on Linux (the question's platform);
// the pause has been removed and basic CUDA error checking added.

int main(int argc, char *argv[])
{
    // Launch configuration: one block of 64 threads = 2 warps.
    unsigned int Num_Threads_per_Block      = 64;
    unsigned int Num_Blocks_per_Grid        = 1;
    unsigned int Num_Warps_per_Block        = Num_Threads_per_Block/32;
    unsigned int Num_Warps_per_Grid         = (Num_Threads_per_Block*Num_Blocks_per_Grid)/32;

    // One counter per warp in the grid, zero-initialized on the host.
    unsigned int* h_ballot = (unsigned int*)malloc(Num_Warps_per_Grid*sizeof(unsigned int));
    unsigned int* d_ballot = NULL;
    if (cudaMalloc((void**)&d_ballot, Num_Warps_per_Grid*sizeof(unsigned int)) != cudaSuccess) {
        std::cerr << "cudaMalloc failed\n";
        free(h_ballot);
        return EXIT_FAILURE;
    }

    for (unsigned int i = 0; i < Num_Warps_per_Grid; i++) h_ballot[i] = 0;

    cudaMemcpy(d_ballot, h_ballot, Num_Warps_per_Grid*sizeof(unsigned int), cudaMemcpyHostToDevice);

    gpu_test_divergency_0<<<Num_Blocks_per_Grid,Num_Threads_per_Block>>>(d_ballot,Num_Warps_per_Block);

    // Catch launch-configuration errors before reading results back.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        std::cerr << "kernel launch failed: " << cudaGetErrorString(err) << "\n";
        cudaFree(d_ballot);
        free(h_ballot);
        return EXIT_FAILURE;
    }

    // Blocking copy on the default stream also synchronizes with the kernel.
    cudaMemcpy(h_ballot, d_ballot, Num_Warps_per_Grid*sizeof(unsigned int), cudaMemcpyDeviceToHost);

    // A warp is non-divergent iff the predicate held for 0 or all 32 lanes.
    for (unsigned int i = 0; i < Num_Warps_per_Grid; i++) {
        if ((h_ballot[i] == 0)||(h_ballot[i] == 32)) std::cout << "Warp " << i << " IS NOT divergent- Predicate true for " << h_ballot[i] << " threads\n";
            else std::cout << "Warp " << i << " IS divergent - Predicate true for " << h_ballot[i] << " threads\n";
    }

    cudaFree(d_ballot);
    free(h_ballot);
    return EXIT_SUCCESS;
}

根据以上内容,我预期两种方式(模拟版与内建 __ballot 版)的分支发散检测结果应当一致。


您是在调试模式还是发布模式下编译?

类型转换可能会引入额外的分支。@Eric 类型转换似乎没有添加任何额外的分支,请参阅下面我回答中的反汇编。@Eric:我同意 JackOLantern。类型转换可能会引入额外的分支。@Eric 类型转换似乎没有添加任何额外的分支,请看下面我回答中的反汇编。@Eric:我同意 JackOLantern。@Ben 请看我修改过的答案。关于 "gpu_test_divergency_0" 案例的反汇编代码,您是对的,它与 "gpu_test_divergency_4" 案例的不同。我犯了一个错误,已经编辑了我的答案。不过,请看一下第一部分:我提供了一段基于表决内建函数的代码,通过它您可以在通用代码中检查线程发散;表决内建指令可以添加到任何内核中,用来评估线程发散。我不清楚 Visual Profiler 使用的统计标准,也没有用该工具复现您的结果。@Ben,请查看我修改的答案。关于 "gpu_test_divergency_0" 案例的反汇编代码,您是对的,它与 "gpu_test_divergency_4" 案例的不同。我犯了一个错误,已经编辑了我的答案。不过,请看一下第一部分:我提供了一段基于表决内建函数的代码,通过它您可以在通用代码中检查线程发散;表决内建指令可以添加到任何内核中,用来评估线程发散。我不清楚 Visual Profiler 使用的统计标准,也没有用该工具复现您的结果。