Opencl 如何对3x3 2D卷积进行矢量化?
我正在尝试为1280x720图像编写一个优化的3x3 2D图像卷积。为简单起见,通过将输入填充到1284x724来处理边缘条件。以下是我的内核代码:
__kernel
__attribute__((vec_type_hint(float4)))
void conv2d3x3(
__global const float* restrict input,
__global float* restrict output,
__constant const float4* restrict hL,
/* 3x3 kernel, padded with 3 zeros on the right, used to calculate
   "left" output samples */
__constant const float4* restrict hR
/* same 3x3 kernel, padded with 3 samples on the left */)
{
    /* Each work-item produces one 2x2 tile of output from a 4x4 input block. */
    const int col = get_global_id(0) * 2; /* output column, even, [0,1278] */
    const int row = get_global_id(1) * 2; /* output row,    even, [0,718]  */

    /* Load the 4x4 block; input rows are 1284 floats wide (1280 + 4 pad). */
    const __global float* src = input + 1284 * row + col;
    const float4 r0 = vload4(0, src);
    const float4 r1 = vload4(0, src + 1284);
    const float4 r2 = vload4(0, src + 2 * 1284);
    const float4 r3 = vload4(0, src + 3 * 1284);

    /* Four 3x4 sums of products: {top,bottom} rows x {left,right} taps. */
    const float tl = dot(r0, hL[0]) + dot(r1, hL[1]) + dot(r2, hL[2]); /* sum(data[0:2,0:2].*h) */
    const float tr = dot(r0, hR[0]) + dot(r1, hR[1]) + dot(r2, hR[2]); /* sum(data[0:2,1:3].*h) */
    const float bl = dot(r1, hL[0]) + dot(r2, hL[1]) + dot(r3, hL[2]); /* sum(data[1:3,0:2].*h) */
    const float br = dot(r1, hR[0]) + dot(r2, hR[1]) + dot(r3, hR[2]); /* sum(data[1:3,1:3].*h) */

    /* Store the 2x2 tile into the unpadded 1280-wide output image. */
    __global float* dst = output + 1280 * row + col;
    dst[0] = tl;
    dst[1] = tr;
    dst[1280] = bl;
    dst[1281] = br;
}
这种设计的合理之处在于,加载一个4x4数据块,进行四次3x4卷积,并生成四个输出样本
此代码有几个明显的问题:
1) 矢量载荷未与矢量边界对齐
2) 输出的存储不是矢量化的
3) 性能很差:使用P4600的Intel XEON 1245v3(使用Beignet OpenCL实现)上3毫秒,使用GC2000的飞思卡尔IMX6Q(使用飞思卡尔OpenCL libOpenCL)上27毫秒
问题:
1) 我做错了什么,为什么这么慢
2) 就原始失败的百分比而言,我应该期望什么样的性能?(p4600能够在350MHz和1.2GHz之间运行20EU*2PFU/EU*SIMD8=320次/周期,而GC2000能够至少运行14GFLOPS)
3) 一般来说,如何在不产生过多内存流量和缓存冲突的情况下,对固定大小的不可分离二维卷积进行矢量化?

回答:首先给出我的未优化结果。AMD FX 8150 @ 3.3 GHz(32个fp单元 => 1个add + 1个mul = 64次/周期):包括独立OpenCL缓冲区与C#数组之间复制时间为3.71ms;不包括数组拷贝为2.05ms。使用1-D NDRange(640x360个工作项)内核执行,而不是2-D:
__kernel
__attribute__((vec_type_hint(float4)))
void bench(
__global const float* restrict input,
__global float* restrict output,
__constant const float4* restrict hL,
__constant const float4* restrict hR
)
{
    /* 1-D NDRange of 640*360 work-items; recover the 2x2 output tile origin. */
    int gli = get_global_id(0);
    int j = (gli % 640) * 2; /* output column, even, [0,1278] */
    int i = (gli / 640) * 2; /* output row,    even, [0,718]  */

    /* Load a 4x4 block from a 1284-wide padded input (1280 + 4 pad per row).
     * BUG FIX: the original indexed the input with a 1280 stride, so at the
     * last tile column (j == 1278) each vload4 straddled the row boundary and
     * the bottom tiles (i >= 716) read past the end of a 1280x720 buffer.
     * Using the padded layout from the question's kernel keeps every 4x4
     * block in-bounds and matches the hL/hR tap layout. */
    float4 data0 = vload4(0, input + 1284 * (i + 0) + j);
    float4 data1 = vload4(0, input + 1284 * (i + 1) + j);
    float4 data2 = vload4(0, input + 1284 * (i + 2) + j);
    float4 data3 = vload4(0, input + 1284 * (i + 3) + j);

    /* Four 3x4 sums of products: {top,bottom} rows x {left,right} taps. */
    float prodTL = dot(data0, hL[0]) + dot(data1, hL[1]) + dot(data2, hL[2]);
    float prodTR = dot(data0, hR[0]) + dot(data1, hR[1]) + dot(data2, hR[2]);
    float prodBL = dot(data1, hL[0]) + dot(data2, hL[1]) + dot(data3, hL[2]);
    float prodBR = dot(data1, hR[0]) + dot(data2, hR[1]) + dot(data3, hR[2]);

    /* Output remains the unpadded 1280-wide image. */
    output[1280 * (i + 0) + j] = prodTL;
    output[1280 * (i + 0) + j + 1] = prodTR;
    output[1280 * (i + 1) + j] = prodBL;
    output[1280 * (i + 1) + j + 1] = prodBR;
}
主机端(C#阵列):
通过预取到专用寄存器(我只希望驱动程序使用cpu寄存器):
2ms
优化部分:
/* Prefetch the six filter-tap vectors from __constant memory into private
   variables once, instead of re-reading hL[]/hR[] inside every dot() below
   (the intent is that the compiler keeps these in registers). */
float4 hl2=hL[2];
float4 hl1=hL[1];
float4 hl0=hL[0];
float4 hr2=hR[2];
float4 hr1=hR[1];
float4 hr0=hR[0];
/* Same four 3x4 sums of products as the original kernel, now using the
   cached taps. data0..data3 are the 4x4 input block loaded by the enclosing
   kernel (not shown in this fragment). */
float prodTL = dot(data0, hl0) + dot(data1, hl1) + dot(data2, hl2);
float prodTR = dot(data0, hr0) + dot(data1, hr1) + dot(data2, hr2);
float prodBL = dot(data1, hl0) + dot(data2, hl1) + dot(data3, hl2);
float prodBR = dot(data1, hr0) + dot(data2, hr1) + dot(data3, hr2);
现在,随着dot产品的并行性增加:
将三个4元素点积之和合并为一个更大的点积:
/* Concatenate the three 4-wide tap vectors (and the three 4-wide data rows)
   into float16s so each output sample becomes a single 16-wide dot product;
   the last 4 lanes are zero padding. */
float16 prodhl = (float16)(hl0, hl1, hl2, (float4)(0.0f, 0.0f, 0.0f, 0.0f));
/* BUG FIX: the original declared this as a second "prodhl", leaving the
   "prodhr" used below undefined. */
float16 prodhr = (float16)(hr0, hr1, hr2, (float4)(0.0f, 0.0f, 0.0f, 0.0f));
float16 prodTdata = (float16)(data0, data1, data2, (float4)(0.0f, 0.0f, 0.0f, 0.0f));
float16 prodBdata = (float16)(data1, data2, data3, (float4)(0.0f, 0.0f, 0.0f, 0.0f));
/* NOTE(review): the OpenCL built-in dot() is only specified for vector widths
   up to 4; a 16-wide dot() relies on vendor behavior — confirm it compiles on
   the target implementation. */
float prodTL = dot(prodTdata, prodhl);
float prodTR = dot(prodTdata, prodhr);
float prodBL = dot(prodBdata, prodhl);
float prodBR = dot(prodBdata, prodhr);
不使用任何阵列副本执行:
0.5412毫秒
也许这只是cpu的AVX功能。如果不是,那么应该有一些指令级并行发生
这部分(float16 中最后的 float4 零填充部分)浪费了1/4的计算能力,因此应该有办法达到0.4毫秒
注意:线程组大小为256。我没有尝试增加到1024,因为它不适合所有设备,如amd gpu
您可以尝试任务进程级并行,以提高吞吐量并击败单一opencl上下文(如果您已经这样做了)。这是一种可分离卷积(例如高斯卷积)吗?除了比3x3大得多的内核外,这个操作将受到内存带宽的限制。@Zboson否。我试图解决的问题是如何处理不可分离的情况。我想优化的关键是使用本地内存将内存带宽从
O(mn)
(输出大小*内核大小)减少到O(n)
。
/* Concatenate the three 4-wide tap vectors (and the three 4-wide data rows)
   into float16s so each output sample becomes a single 16-wide dot product;
   the last 4 lanes are zero padding. */
float16 prodhl = (float16)(hl0, hl1, hl2, (float4)(0.0f, 0.0f, 0.0f, 0.0f));
/* BUG FIX: the original declared this as a second "prodhl", leaving the
   "prodhr" used below undefined. */
float16 prodhr = (float16)(hr0, hr1, hr2, (float4)(0.0f, 0.0f, 0.0f, 0.0f));
float16 prodTdata = (float16)(data0, data1, data2, (float4)(0.0f, 0.0f, 0.0f, 0.0f));
float16 prodBdata = (float16)(data1, data2, data3, (float4)(0.0f, 0.0f, 0.0f, 0.0f));
/* NOTE(review): the OpenCL built-in dot() is only specified for vector widths
   up to 4; a 16-wide dot() relies on vendor behavior — confirm it compiles on
   the target implementation. */
float prodTL = dot(prodTdata, prodhl);
float prodTR = dot(prodTdata, prodhr);
float prodBL = dot(prodBdata, prodhl);
float prodBR = dot(prodBdata, prodhr);