Memory management:写入全局或本地内存会使内核执行时间增加 10000%

我有以下 OpenCL 内核:
kernel void ndft(
global float *re, global float *im, int num_values,
global float *spectrum_re, global float *spectrum_im,
global float *spectrum_abs,
global float *sin_array, global float *cos_array,
float sqrt_num_values_reciprocal)
{
// MATH MAGIC - DISREGARD FROM HERE -----------
float x;
float y;
float sum_re = 0;
float sum_im = 0;
size_t thread_id = get_global_id(0);
//size_t local_id = get_local_id(0);
// num_values = 24 (live environment), 48 (test)
for (int i = 0; i < num_values; i++)
{
x = cos_array[thread_id * num_values + i] * sqrt_num_values_reciprocal;
y = sin_array[thread_id * num_values + i] * sqrt_num_values_reciprocal;
sum_re = sum_re + re[i] * x + im[i] * y;
sum_im = sum_im - re[i] * y + x * im[i];
}
// MATH MAGIC DONE ----------------------------
//spectrum_re[thread_id] = sum_re;
//spectrum_im[thread_id] = sum_im;
//spectrum_abs[thread_id] = hypot(sum_re, sum_im);
float asdf = hypot(sum_re, sum_im); // this is just a dummy calculation
}
与预期一样,执行时间约为1.8毫秒。
为我的系统生成的汇编代码是:
//
// Generated by NVIDIA NVVM Compiler
// Compiler built on Tue Apr 03 12:42:39 2012 (1333449759)
// Driver
//
.version 3.0
.target sm_21, texmode_independent
.address_size 32
.entry ndft(
.param .u32 .ptr .global .align 4 ndft_param_0,
.param .u32 .ptr .global .align 4 ndft_param_1,
.param .u32 ndft_param_2,
.param .u32 .ptr .global .align 4 ndft_param_3,
.param .u32 .ptr .global .align 4 ndft_param_4,
.param .u32 .ptr .global .align 4 ndft_param_5,
.param .u32 .ptr .global .align 4 ndft_param_6,
.param .u32 .ptr .global .align 4 ndft_param_7,
.param .f32 ndft_param_8
)
{
.reg .f32 %f;
.reg .pred %p;
.reg .s32 %r;
ld.param.u32 %r3, [ndft_param_2];
// inline asm
mov.u32 %r18, %envreg3;
// inline asm
// inline asm
mov.u32 %r19, %ntid.x;
// inline asm
// inline asm
mov.u32 %r20, %ctaid.x;
// inline asm
// inline asm
mov.u32 %r21, %tid.x;
// inline asm
add.s32 %r22, %r21, %r18;
mad.lo.s32 %r11, %r20, %r19, %r22;
setp.gt.s32 %p1, %r3, 0;
@%p1 bra BB0_2;
mov.f32 %f46, 0f00000000;
mov.f32 %f45, %f46;
bra.uni BB0_4;
BB0_2:
ld.param.u32 %r38, [ndft_param_2];
mul.lo.s32 %r27, %r38, %r11;
shl.b32 %r28, %r27, 2;
ld.param.u32 %r40, [ndft_param_6];
add.s32 %r12, %r40, %r28;
ld.param.u32 %r41, [ndft_param_7];
add.s32 %r13, %r41, %r28;
mov.f32 %f46, 0f00000000;
mov.f32 %f45, %f46;
mov.u32 %r43, 0;
mov.u32 %r42, %r43;
BB0_3:
add.s32 %r29, %r13, %r42;
ld.global.f32 %f18, [%r29];
ld.param.f32 %f44, [ndft_param_8];
mul.f32 %f19, %f18, %f44;
add.s32 %r30, %r12, %r42;
ld.global.f32 %f20, [%r30];
mul.f32 %f21, %f20, %f44;
ld.param.u32 %r35, [ndft_param_0];
add.s32 %r31, %r35, %r42;
ld.global.f32 %f22, [%r31];
fma.rn.f32 %f23, %f22, %f19, %f46;
ld.param.u32 %r36, [ndft_param_1];
add.s32 %r32, %r36, %r42;
ld.global.f32 %f24, [%r32];
fma.rn.f32 %f46, %f24, %f21, %f23;
neg.f32 %f25, %f22;
fma.rn.f32 %f26, %f25, %f21, %f45;
fma.rn.f32 %f45, %f24, %f19, %f26;
add.s32 %r42, %r42, 4;
add.s32 %r43, %r43, 1;
ld.param.u32 %r37, [ndft_param_2];
setp.lt.s32 %p2, %r43, %r37;
@%p2 bra BB0_3;
BB0_4:
// inline asm
abs.f32 %f27, %f46;
// inline asm
// inline asm
abs.f32 %f29, %f45;
// inline asm
setp.gt.f32 %p3, %f27, %f29;
selp.f32 %f8, %f29, %f27, %p3;
selp.f32 %f32, %f27, %f29, %p3;
// inline asm
abs.f32 %f31, %f32;
// inline asm
setp.gt.f32 %p4, %f31, 0f7E800000;
mov.f32 %f47, %f32;
@%p4 bra BB0_6;
mov.f32 %f48, %f8;
bra.uni BB0_7;
BB0_6:
mov.f32 %f33, 0f3E800000;
mul.rn.f32 %f10, %f8, %f33;
mul.rn.f32 %f47, %f32, %f33;
mov.f32 %f48, %f10;
BB0_7:
mov.f32 %f13, %f48;
// inline asm
div.approx.f32 %f34, %f13, %f47;
// inline asm
mul.rn.f32 %f39, %f34, %f34;
add.f32 %f38, %f39, 0f3F800000;
// inline asm
sqrt.approx.f32 %f37, %f38; // <-- this is part of hypot()
// inline asm
mul.rn.f32 %f40, %f32, %f37;
add.f32 %f41, %f32, %f8;
setp.eq.f32 %p5, %f32, 0f00000000;
selp.f32 %f42, %f41, %f40, %p5;
setp.eq.f32 %p6, %f32, 0f7F800000;
setp.eq.f32 %p7, %f8, 0f7F800000;
or.pred %p8, %p6, %p7;
selp.f32 %f43, 0f7F800000, %f42, %p8;
shl.b32 %r33, %r11, 2;
ld.param.u32 %r39, [ndft_param_5];
add.s32 %r34, %r39, %r33;
st.global.f32 [%r34], %f43; // <-- stores the hypot's result in spectrum_abs
ret;
}
//
//由NVIDIA NVVM编译器生成
//编译器构建于2012年4月3日星期二12:42:39(1333449759)
// Driver（驱动程序）
//
.version 3.0
.target sm_21, texmode_independent
.address_size 32
.入口ndft(
.param.u32.ptr.global.align 4 ndft_param_0,
.param.u32.ptr.global.align 4 ndft_param_1,
.param.u32 ndft_param_2,
.param.u32.ptr.global.align 4 ndft_param_3,
.param.u32.ptr.global.align 4 ndft_param_4,
.param.u32.ptr.global.align 4 ndft_param_5,
.param.u32.ptr.global.align 4 ndft_param_6,
.param.u32.ptr.global.align 4 ndft_param_7,
.param.f32 ndft_param_8
)
{
.reg.f32%f;
.reg.pred%p;
.注册号s32%r;
ld.param.u32%r3[ndft_param_2];
//内联asm
mov.u32%r18%envreg3;
//内联asm
//内联asm
mov.u32%r19,%ntid.x;
//内联asm
//内联asm
mov.u32%r20%ctaid.x;
//内联asm
//内联asm
mov.u32%r21,%tid.x;
//内联asm
添加.s32%r22、%r21、%r18;
mad.lo.s32%r11、%r20、%r19、%r22;
setp.gt.s32%p1,%r3,0;
@%p1 bra BB0_2;
mov.f32%f46,0f0000000;
mov.f32%f45%f46;
bra.uni BB0_4;
BB0_2:
ld.param.u32%r38[ndft_param_2];
mul.lo.s32%r27、%r38、%r11;
shl.b32%r28%r27,2;
ld.param.u32%r40,[ndft_param_6];
添加.s32%r12、%r40、%r28;
ld.param.u32%r41,[ndft_param_7];
添加32%r13、%r41、%r28;
mov.f32%f46,0f0000000;
mov.f32%f45%f46;
mov.u32%r43,0;
mov.u32%r42%r43;
BB0_3:
添加.s32%r29、%r13、%r42;
ld.global.f32%f18,[%r29];
ld.param.f32%f44,[ndft_param_8];
mul.f32%f19、%f18、%f44;
添加.s32%r30、%r12、%r42;
ld.global.f32%f20,[%r30];
mul.f32%f21、%f20、%f44;
ld.param.u32%r35[ndft_param_0];
添加.s32%r31、%r35、%r42;
ld.global.f32%f22,[%r31];
fma.rn.f32%f23、%f22、%f19、%f46;
ld.param.u32%r36[ndft_param_1];
添加.s32%r32、%r36、%r42;
ld.global.f32%f24,[%r32];
fma.rn.f32%f46、%f24、%f21、%f23;
负f32%f25%f22;
fma.rn.f32%f26、%f25、%f21、%f45;
fma.rn.f32%f45、%f24、%f19、%f26;
添加32%r42,%r42,4;
添加.s32%r43,%r43,1;
ld.param.u32%r37[ndft_param_2];
setp.lt.s32%p2、%r43、%r37;
@%p2 bra BB0_3;
BB0_4:
//内联asm
abs.f32%f27%f46;
//内联asm
//内联asm
abs.f32%f29%f45;
//内联asm
setp.gt.f32%p3,%f27,%f29;
selp.f32%f8、%f29、%f27、%p3;
selp.f32%f32、%f27、%f29、%p3;
//内联asm
abs.f32%f31%f32;
//内联asm
setp.gt.f32%p4,%f31,0f7E800000;
mov.f32%f47%f32;
@%p4 bra BB0_6;
mov.f32%f48%f8;
bra.uni BB0_7;
BB0_6:
动产f32%f33,0f3E800000;
mul.rn.f32%f10%f8%f33;
多个rn.f32%f47、%f32、%f33;
mov.f32%f48%f10;
BB0_7:
mov.f32%f13%f48;
//内联asm
分区约为f32%f34%f13%f47;
//内联asm
mul.rn.f32%f39%f34%f34;
添加0.f32%f38%f39%f300000;
//内联asm
sqrt.approx.f32 %f37, %f38; // <-- this is part of hypot()

我猜您看到了编译器优化的效果。
NVIDIA编译器在消除不直接参与写入全局内存的"死代码"方面非常积极。因此,在内核中,如果您不写入 sum_re
或 sum_im
,编译器将优化掉整个计算循环(可能还有其他所有内容),让您的内核空着,只包含一个 no-op。您看到的15微秒执行时间主要是内核启动开销,而不是其他。当您取消对全局内存写入的注释时,编译器会将所有计算代码保留在原位,您会看到代码的真实执行时间。
因此,您可能应该问的真正问题是,如何优化内核,将其执行时间从当前的1.5毫秒缩短到30微秒的目标(非常雄心勃勃)
尽管对原始答案表示怀疑,但这里有一个完整的repo案例,支持这是一个编译器相关效应的断言:
#include <iostream>
#include <OpenCL/opencl.h>
size_t source_size;
const char * source_str =
"kernel void ndft( \n" \
" global float *re, global float *im, int num_values, \n" \
" global float *spectrum_re, global float *spectrum_im, \n" \
" global float *spectrum_abs, \n" \
" global float *sin_array, global float *cos_array, \n" \
" float sqrt_num_values_reciprocal) \n" \
"{ \n" \
" // MATH MAGIC - DISREGARD FROM HERE ----------- \n" \
" \n" \
" float x; \n" \
" float y; \n" \
" float sum_re = 0; \n" \
" float sum_im = 0; \n" \
" \n" \
" size_t thread_id = get_global_id(0); \n" \
" \n" \
" for (int i = 0; i < num_values; i++) \n" \
" { \n" \
" x = cos_array[thread_id * num_values + i] * sqrt_num_values_reciprocal; \n" \
" y = sin_array[thread_id * num_values + i] * sqrt_num_values_reciprocal; \n" \
" sum_re += re[i] * x + im[i] * y; \n" \
" sum_im -= re[i] * y + x * im[i]; \n" \
" } \n" \
" \n" \
" // MATH MAGIC DONE ---------------------------- \n" \
" \n" \
" //spectrum_re[thread_id] = sum_re; \n" \
" //spectrum_im[thread_id] = sum_im; \n" \
" //spectrum_abs[thread_id] = hypot(sum_re, sum_im); \n" \
"} \n";
int main(void)
{
int err;
cl_device_id device_id;
clGetDeviceIDs(NULL, CL_DEVICE_TYPE_GPU, 1, &device_id, NULL);
cl_context context = clCreateContext(0, 1, &device_id, NULL, NULL, &err);
cl_program program = clCreateProgramWithSource(context, 1, (const char **)&source_str, (const size_t *)&source_size, &err);
err = clBuildProgram(program, 1, &device_id, NULL, NULL, NULL);
cl_uint program_num_devices;
clGetProgramInfo(program, CL_PROGRAM_NUM_DEVICES, sizeof(cl_uint), &program_num_devices, NULL);
size_t * binaries_sizes = new size_t[program_num_devices];
clGetProgramInfo( program, CL_PROGRAM_BINARY_SIZES, program_num_devices*sizeof(size_t), binaries_sizes, NULL);
char **binaries = new char*[program_num_devices];
for (size_t i = 0; i < program_num_devices; i++)
binaries[i] = new char[binaries_sizes[i]+1];
clGetProgramInfo(program, CL_PROGRAM_BINARIES, program_num_devices*sizeof(size_t), binaries, NULL);
for (size_t i = 0; i < program_num_devices; i++)
{
binaries[i][binaries_sizes[i]] = '\0';
std::cout << "Program " << i << ":" << std::endl;
std::cout << binaries[i];
}
return 0;
}
根据您代码中的注释num_value
为24或48。这意味着内存读取不可能合并,费米GPU上的一级缓存也不会有多大帮助。这将对内存带宽利用率产生巨大的负面影响,并使代码速度非常慢。如果您坚持输入数据顺序,则ng,那么一个更快的解决方案是使用一个扭曲来计算一个输出(因此将扭曲范围缩减为最终和)。这将使读取跨距从24或48减小到1,并合并来自这两个大型输入阵列的全局内存读取
在循环内部,对于re
和im
的24或48个元素,也会重复提取全局内存:
sum_re += re[i] * x + im[i] * y;
sum_im -= re[i] * y + x * im[i];
这是不必要的,并且浪费了大量全局内存带宽或缓存效率(GPU没有足够的寄存器让编译器在寄存器中保存每个数组的全部内容)。最好让每个工作组将这两个数组读入 __local
内存数组一次,并在计算循环中使用本地内存副本。如果让每个工作组计算多次,而不是只计算一次,则可以潜在地节省大量全局内存带宽,并摊销初始读取,直到它完成为止它几乎是免费的。你在什么硬件和OpenCL风格上运行它?@talonmies NVIDIA GeForce 555M GT,最新的CUDA工具包。你是累积的吗
Program 0:
bplist00?^clBinaryDriver\clBinaryData_clBinaryVersionWCLH 1.0O!.version 1.5
.target sm_12
.target texmode_independent
.reg .b32 r<126>; /* define r0..125 */
.reg .b64 x<126>; /* define r0..125 */
.reg .b32 f<128>; /* define f0..127 */
.reg .pred p<32>; /* define p0..31 */
.reg .u32 sp;
.reg .b8 wb0,wb1,wb2,wb3; /* 8-bit write buffer */
.reg .b16 ws0,ws1,ws2,ws3; /* 16-bit write buffer */
.reg .b32 tb0,tb1,tb2,tb3; /* read tex buffer */
.reg .b64 vl0,vl1; /* 64-bit vector buffer */
.reg .b16 cvt16_0,cvt16_1; /* tmps for conversions */
.const .align 1 .b8 ndft_gid_base[52];
.local .align 16 .b8 ndft_stack[8];
.entry ndft(
.param.b32 ndft_0 /* re */,
.param.b32 ndft_1 /* im */,
.param.b32 ndft_2 /* num_values */,
.param.b32 ndft_3 /* spectrum_re */,
.param.b32 ndft_4 /* spectrum_im */,
.param.b32 ndft_5 /* spectrum_abs */,
.param.b32 ndft_6 /* sin_array */,
.param.b32 ndft_7 /* cos_array */,
.param.f32 ndft_8 /* sqrt_num_values_reciprocal */
) {
mov.u32 sp, ndft_stack;
mov.u32 r0, 4294967295;
ld.param.u32 r1, [ndft_2 + 0];
LBB1_1:
add.u32 r0, r0, 1;
setp.lt.s32 p0, r0, r1;
@p0 bra LBB1_1;
LBB1_2:
ret;
}
Program 0:
S.version 1.5inaryDriver\clBinaryData_clBinaryVersionWCLH 1.0O
.target sm_12
.target texmode_independent
.reg .b32 r<126>; /* define r0..125 */
.reg .b64 x<126>; /* define r0..125 */
.reg .b32 f<128>; /* define f0..127 */
.reg .pred p<32>; /* define p0..31 */
.reg .u32 sp;
.reg .b8 wb0,wb1,wb2,wb3; /* 8-bit write buffer */
.reg .b16 ws0,ws1,ws2,ws3; /* 16-bit write buffer */
.reg .b32 tb0,tb1,tb2,tb3; /* read tex buffer */
.reg .b64 vl0,vl1; /* 64-bit vector buffer */
.reg .b16 cvt16_0,cvt16_1; /* tmps for conversions */
.const .align 1 .b8 ndft_gid_base[52];
.local .align 16 .b8 ndft_stack[8];
.entry ndft(
.param.b32 ndft_0 /* re */,
.param.b32 ndft_1 /* im */,
.param.b32 ndft_2 /* num_values */,
.param.b32 ndft_3 /* spectrum_re */,
.param.b32 ndft_4 /* spectrum_im */,
.param.b32 ndft_5 /* spectrum_abs */,
.param.b32 ndft_6 /* sin_array */,
.param.b32 ndft_7 /* cos_array */,
.param.f32 ndft_8 /* sqrt_num_values_reciprocal */
) {
mov.u32 sp, ndft_stack;
cvt.u32.u16 r0, %tid.x;
cvt.u32.u16 r1, %ntid.x;
cvt.u32.u16 r2, %ctaid.x;
mad24.lo.u32 r0, r2, r1, r0;
mov.u32 r1, 0;
shl.b32 r2, r1, 2;
mov.u32 r3, ndft_gid_base;
add.u32 r2, r2, r3;
ld.const.u32 r2, [r2 + 40];
add.u32 r0, r0, r2;
ld.param.u32 r2, [ndft_2 + 0];
mul.lo.u32 r3, r0, r2;
shl.b32 r3, r3, 2;
mov.f32 f0, 0f00000000 /* 0.000000e+00 */;
ld.param.f32 f1, [ndft_8 + 0];
ld.param.u32 r4, [ndft_7 + 0];
ld.param.u32 r5, [ndft_6 + 0];
ld.param.u32 r6, [ndft_5 + 0];
ld.param.u32 r7, [ndft_4 + 0];
ld.param.u32 r8, [ndft_3 + 0];
ld.param.u32 r9, [ndft_1 + 0];
ld.param.u32 r10, [ndft_0 + 0];
mov.u32 r11, r1;
mov.f32 f2, f0;
LBB1_1:
setp.ge.s32 p0, r11, r2;
@!p0 bra LBB1_7;
LBB1_2:
shl.b32 r1, r0, 2;
add.u32 r2, r8, r1;
st.global.f32 [r2+0], f0;
add.u32 r1, r7, r1;
st.global.f32 [r1+0], f2;
abs.f32 f1, f2;
abs.f32 f0, f0;
setp.gt.f32 p0, f0, f1;
selp.f32 f2, f0, f1, p0;
abs.f32 f3, f2;
mov.f32 f4, 0f7E800000 /* 8.507059e+37 */;
setp.gt.f32 p1, f3, f4;
selp.f32 f0, f1, f0, p0;
shl.b32 r0, r0, 2;
add.u32 r0, r6, r0;
@!p1 bra LBB1_8;
LBB1_3:
mul.rn.f32 f3, f2, 0f3E800000 /* 2.500000e-01 */;
mul.rn.f32 f1, f0, 0f3E800000 /* 2.500000e-01 */;
LBB1_4:
mov.f32 f4, 0f00000000 /* 0.000000e+00 */;
setp.eq.f32 p0, f2, f4;
@!p0 bra LBB1_9;
LBB1_5:
add.f32 f1, f2, f0;
LBB1_6:
mov.f32 f3, 0f7F800000 /* inf */;
setp.eq.f32 p0, f0, f3;
setp.eq.f32 p1, f2, f3;
or.pred p0, p1, p0;
selp.f32 f0, f3, f1, p0;
st.global.f32 [r0+0], f0;
ret;
LBB1_7:
add.u32 r12, r3, r1;
add.u32 r13, r4, r12;
ld.global.f32 f3, [r13+0];
mul.rn.f32 f3, f3, f1;
add.u32 r13, r9, r1;
ld.global.f32 f4, [r13+0];
mul.rn.f32 f5, f3, f4;
add.u32 r12, r5, r12;
ld.global.f32 f6, [r12+0];
mul.rn.f32 f6, f6, f1;
add.u32 r12, r10, r1;
ld.global.f32 f7, [r12+0];
mul.rn.f32 f8, f7, f6;
add.f32 f5, f8, f5;
sub.f32 f2, f2, f5;
mul.rn.f32 f4, f4, f6;
mul.rn.f32 f3, f7, f3;
add.f32 f3, f3, f4;
add.f32 f0, f0, f3;
add.u32 r11, r11, 1;
add.u32 r1, r1, 4;
bra LBB1_1;
LBB1_8:
mov.f32 f1, f0;
mov.f32 f3, f2;
bra LBB1_4;
LBB1_9:
div.approx.f32 f1, f1, f3;
mul.rn.f32 f1, f1, f1;
add.f32 f1, f1, 0f3F800000 /* 1.000000e+00 */;
sqrt.approx.ftz.f32 f1, f1;
mul.rn.f32 f1, f2, f1;
bra LBB1_6;
}
x = cos_array[thread_id * num_values + i] * sqrt_num_values_reciprocal;
sum_re += re[i] * x + im[i] * y;
sum_im -= re[i] * y + x * im[i];