Have you checked the assembly the compiler actually generates when building with optimizations enabled? Do you really need to hand-optimize this? Is it time critical? Is it called often? Why is it so important? If it is not hugely important and a bottleneck, then just write readable code instead of obfuscating it with micro-optimizations.

@JesperJuhl It is time critical. It is used to compute sumsets for the subset sum problem. The background is … The time spent in that code is probably orders of magnitude more than in all other lines (such as the std::cout) combined.

@JesperJuhl It seems you don't understand the abstract problem. It is best expressed as a function that takes two uint32_t args and returns a uint64_t, rather than as a program that prints a compile-time-constant result. Then you can look at the asm the compiler generates with optimization enabled (at least for the case where it doesn't inline into a caller with one of the operands constant).
@Orient: On Skylake, bsf has 3 cycle latency and runs only on port 1, i.e. the same performance as imul. So BMI1 blsi feeding imul should perform about the same as bsf feeding shlx, and better than bsf feeding shl, because a variable-count shift without BMI2 is 3 uops on Intel CPUs.
@Orient: Your problem is very close to a carry-less multiply, except that you want or rather than xor. (Carry-less multiplication replaces the add in standard shift-and-add with xor; xor is add-without-carry.) IDK if pclmulqdq could be used as part of this; probably not :( I was thinking of the a + b = (a ^ b) + 2*(a & b) = (a | b) + (a & b) identity, trying to figure out if I could somehow combine pclmulqdq and a regular multiply to get an or of the partial products, but I didn't get anywhere.
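To make that connection concrete, here is a minimal scalar sketch (hypothetical helper names, not from the question) of the three flavors of shift-and-accumulate: add gives the ordinary product, xor gives the carry-less product that pclmulqdq computes per 64-bit lane, and or gives exactly the operation asked about.

#include <stdint.h>

/* Hypothetical illustration: the same shift-and-accumulate loop with three
   combining operators. a & -a isolates the lowest set bit (a power of two),
   so b * (a & -a) is one shifted partial product. */
static uint64_t mul_add(uint32_t a, uint32_t b) {   /* ordinary multiply   */
    uint64_t c = 0;
    for (; a; a &= a - 1) c += (uint64_t)b * (a & -a);
    return c;
}
static uint64_t mul_xor(uint32_t a, uint32_t b) {   /* carry-less multiply */
    uint64_t c = 0;
    for (; a; a &= a - 1) c ^= (uint64_t)b * (a & -a);
    return c;
}
static uint64_t mul_or(uint32_t a, uint32_t b) {    /* the multishift op   */
    uint64_t c = 0;
    for (; a; a &= a - 1) c |= (uint64_t)b * (a & -a);
    return c;
}

All three agree whenever no two partial products overlap: with a = 0b1001 and b = 0b0101 the partial products 0b101 and 0b101000 share no bits, so each function returns 0b101101.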
Why not just _mm256_set1_epi32? Does explicitly writing _mm256_broadcastq_epi64(_mm_cvtsi32_si128(a_32)) compile better in some way? It should work, though, and with -march=skylake-avx512 the compiler might still be able to optimize it to vpbroadcastd ymm0, eax instead of a separate movd + broadcast. Or, if the value starts in memory (after inlining), hopefully the compiler can optimize away the mov and broadcast straight from memory (as cheap as a movd load). BTW, vpunpckhqdq is the best choice for that step of the horizontal OR. It saves 1 byte of code size by omitting vpsrldq's imm8 shift count. Alternative: movq/pextrq and an integer or, but that is more total uops (pextrq is 2 uops).

@harold I indeed did not try unrolling the loop. That is solved now. Unrolling helps a lot!
@PeterCordes What I actually want is not _mm256_set1_epi32 but _mm256_set_epi32(0, a_32, 0, a_32, 0, a_32, 0, a_32). Another possibility is _mm256_set1_epi64x((uint64_t)a_32), but _mm256_broadcastq_epi64(_mm_cvtsi32_si128(a_32)) gave the best results. Although after inlining there is probably no advantage anyway, I admit.

@wim: Oh, that makes sense. I missed the 64-bit vs. 32-bit issue, which should have been obvious with a widening multiply. Wow, all 3 compilers make really braindead code (integer load / vmovq / vpbroadcastq) with a memory operand for the "more natural" way of writing it. I also tried adding a_32++ to simulate the case where zero-extension has already happened; unfortunately the explicit _mm256_broadcastq_epi64(_mm_cvtsi32_si128(a_32)) still beats gcc/clang with -march=skylake-avx512: unlike ICC, they fail to use vpbroadcastq ymm0, rdi.
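For reference, a sketch of the broadcast alternatives discussed above. The key point is that the widening vpmuludq needs the 32-bit value zero-extended into 64-bit lanes, which _mm256_set1_epi32 does not provide:

#include <stdint.h>
#include <immintrin.h>

/* Alternatives for splatting a 32-bit value into four zero-extended
   64-bit lanes of a __m256i. Which one compiles best varies with the
   compiler and -march, as discussed in the comments above. */
__m256i bcast_set_epi32(uint32_t a) {     /* explicit zero upper halves */
    return _mm256_set_epi32(0, a, 0, a, 0, a, 0, a);
}
__m256i bcast_set1_epi64x(uint32_t a) {   /* zero-extend, then 64-bit splat */
    return _mm256_set1_epi64x((uint64_t)a);
}
__m256i bcast_broadcastq(uint32_t a) {    /* movd + vpbroadcastq, as used below */
    return _mm256_broadcastq_epi64(_mm_cvtsi32_si128((int)a));
}
/* _mm256_set1_epi32(a) would replicate `a` into all eight 32-bit lanes,
   leaving the high half of each 64-bit element nonzero: wrong here. */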
#include <bitset>
#include <algorithm>
#include <iostream>
#include <cstdint>
#include <cassert>
#include <x86intrin.h>
std::uint64_t multishift(std::uint32_t a, std::uint32_t b)
{
    std::uint64_t c = 0;
    if (_popcnt32(b) < _popcnt32(a)) {
        std::swap(a, b); // loop over whichever operand has fewer set bits; the result is symmetric in a and b
    }
    assert(a != 0);
    do {
        c |= std::uint64_t{b} << (_bit_scan_forward(a) + 1);
    } while ((a &= (a - 1)) != 0); // clear lowest set bit
    return c;
}
int main()
{
    std::cout << std::bitset< 64 >(multishift(0b1001, 0b0101)) << std::endl; // ...0001011010
}
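The inner shift can be replaced by a multiplication with the isolated lowest set bit, a & -a, which is always a power of two; with BMI1 this compiles to blsi feeding imul, as discussed in the comments. This loop is the basis of the mult_shft_Harold variants benchmarked below: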
while (a) {
    c |= b * (a & -a); // a & -a is a power of two, so the multiply acts as a shift
    a &= a - 1;        // clear lowest set bit
}
Time in sec. (10^8 calls)

                             3.5 set bits   16 set bits   (average popcount of a)
mult_shft_Orient()               0.81           1.51
mult_shft_Harold()               0.84           1.51
mult_shft_Harold_unroll2()       0.64           1.58
mult_shft_Harold_unroll4()       0.48           1.34
mult_shft_AVX2()                 0.44           0.40
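The full benchmark follows. mult_shft_AVX2() isolates every bit of b in its own 64-bit lane and uses the widening 32x32 -> 64 bit multiply vpmuludq (_mm256_mul_epu32) to shift a left by that bit's position, then ORs the 32 partial products together. The driver generates inputs with _mm_crc32_u32 as a cheap random generator and sums the results so the calls cannot be optimized away.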
/* gcc -Wall -m64 -O3 -march=broadwell mult_shft.c */
#include <stdint.h>
#include <stdio.h>
#include <x86intrin.h>
uint64_t mult_shft_AVX2(uint32_t a_32, uint32_t b_32) {
    __m256i a = _mm256_broadcastq_epi64(_mm_cvtsi32_si128(a_32));
    __m256i b = _mm256_broadcastq_epi64(_mm_cvtsi32_si128(b_32));
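    /* a and b now hold the zero-extended 32-bit inputs in each of the four
       64-bit lanes, as the widening _mm256_mul_epu32 below requires.      */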
    // 0xFEDCBA9876543210 0xFEDCBA9876543210 0xFEDCBA9876543210 0xFEDCBA9876543210
    __m256i b_0 = _mm256_and_si256(b, _mm256_set_epi64x(0x0000000000000008, 0x0000000000000004, 0x0000000000000002, 0x0000000000000001));
    __m256i b_1 = _mm256_and_si256(b, _mm256_set_epi64x(0x0000000000000080, 0x0000000000000040, 0x0000000000000020, 0x0000000000000010));
    __m256i b_2 = _mm256_and_si256(b, _mm256_set_epi64x(0x0000000000000800, 0x0000000000000400, 0x0000000000000200, 0x0000000000000100));
    __m256i b_3 = _mm256_and_si256(b, _mm256_set_epi64x(0x0000000000008000, 0x0000000000004000, 0x0000000000002000, 0x0000000000001000));
    __m256i b_4 = _mm256_and_si256(b, _mm256_set_epi64x(0x0000000000080000, 0x0000000000040000, 0x0000000000020000, 0x0000000000010000));
    __m256i b_5 = _mm256_and_si256(b, _mm256_set_epi64x(0x0000000000800000, 0x0000000000400000, 0x0000000000200000, 0x0000000000100000));
    __m256i b_6 = _mm256_and_si256(b, _mm256_set_epi64x(0x0000000008000000, 0x0000000004000000, 0x0000000002000000, 0x0000000001000000));
    __m256i b_7 = _mm256_and_si256(b, _mm256_set_epi64x(0x0000000080000000, 0x0000000040000000, 0x0000000020000000, 0x0000000010000000));
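    /* Each b_i isolates one bit of b per 64-bit lane (4 bits per vector,
       8 vectors = all 32 bits). A lane is either 0 or a power of two 2^k,
       so the widening multiply a * b_i computes a << k: `a` shifted by
       the position of one set bit of b.                                  */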
    __m256i m_0 = _mm256_mul_epu32(a, b_0);
    __m256i m_1 = _mm256_mul_epu32(a, b_1);
    __m256i m_2 = _mm256_mul_epu32(a, b_2);
    __m256i m_3 = _mm256_mul_epu32(a, b_3);
    __m256i m_4 = _mm256_mul_epu32(a, b_4);
    __m256i m_5 = _mm256_mul_epu32(a, b_5);
    __m256i m_6 = _mm256_mul_epu32(a, b_6);
    __m256i m_7 = _mm256_mul_epu32(a, b_7);
    m_0 = _mm256_or_si256(m_0, m_1);
    m_2 = _mm256_or_si256(m_2, m_3);
    m_4 = _mm256_or_si256(m_4, m_5);
    m_6 = _mm256_or_si256(m_6, m_7);
    m_0 = _mm256_or_si256(m_0, m_2);
    m_4 = _mm256_or_si256(m_4, m_6);
    m_0 = _mm256_or_si256(m_0, m_4);
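    /* All 32 partial products are now OR-ed into the four 64-bit lanes of
       m_0. Reduce them horizontally: 256 -> 128 with vextracti128, then
       128 -> 64 with vpunpckhqdq (preferred over vpsrldq, see comments). */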
    __m128i m_0_lo = _mm256_castsi256_si128(m_0);
    __m128i m_0_hi = _mm256_extracti128_si256(m_0, 1);
    __m128i e      = _mm_or_si128(m_0_lo, m_0_hi);
    __m128i e_hi   = _mm_unpackhi_epi64(e, e);
    e = _mm_or_si128(e, e_hi);
    uint64_t c = _mm_cvtsi128_si64x(e);
    return c;
}
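The eight multiplies are independent, so they can overlap in the pipeline; the OR tree then combines the eight vectors in three levels, keeping the dependency chain short.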
uint64_t mult_shft_Orient(uint32_t a, uint32_t b) {
    uint64_t c = 0;
    do {                                            /* requires a != 0 */
        c |= ((uint64_t)b) << _bit_scan_forward(a); /* shift b by the position of the lowest set bit of a */
    } while ((a = a & (a - 1)) != 0);               /* clear the lowest set bit */
    return c;
}
uint64_t mult_shft_Harold(uint32_t a_32, uint32_t b_32) {
    uint64_t c = 0;
    uint64_t a = a_32;
    uint64_t b = b_32;
    while (a) {
        c |= b * (a & -a);
        a &= a - 1;
    }
    return c;
}
uint64_t mult_shft_Harold_unroll2(uint32_t a_32, uint32_t b_32) {
    uint64_t c = 0;
    uint64_t a = a_32;
    uint64_t b = b_32;
    while (a) {  /* odd popcounts are safe: once a == 0, a & -a == 0 and the extra steps are no-ops */
        c |= b * (a & -a);
        a &= a - 1;
        c |= b * (a & -a);
        a &= a - 1;
    }
    return c;
}
uint64_t mult_shft_Harold_unroll4(uint32_t a_32, uint32_t b_32) {
    uint64_t c = 0;
    uint64_t a = a_32;
    uint64_t b = b_32;
    while (a) {
        c |= b * (a & -a);
        a &= a - 1;
        c |= b * (a & -a);
        a &= a - 1;
        c |= b * (a & -a);
        a &= a - 1;
        c |= b * (a & -a);
        a &= a - 1;
    }
    return c;
}
int main(){
    uint32_t a, b;
    /*
    uint64_t c0, c1, c2, c3, c4;
    a = 0x10036011;
    b = 0x31000107;
    //a = 0x80000001;
    //b = 0x80000001;
    //a = 0xFFFFFFFF;
    //b = 0xFFFFFFFF;
    //a = 0x00000001;
    //b = 0x00000001;
    //a = 0b1001;
    //b = 0b0101;
    c0 = mult_shft_Orient(a, b);
    c1 = mult_shft_Harold(a, b);
    c2 = mult_shft_Harold_unroll2(a, b);
    c3 = mult_shft_Harold_unroll4(a, b);
    c4 = mult_shft_AVX2(a, b);
    printf("%016lX \n%016lX \n%016lX \n%016lX \n%016lX \n\n", c0, c1, c2, c3, c4);
    */
    uint32_t rnd = 0xA0036011;
    uint32_t rnd_old;
    uint64_t c;
    uint64_t sum = 0;
    double popcntsum = 0.0;
    int i;
    for (i = 0; i < 100000000; i++){
        rnd_old = rnd;
        rnd = _mm_crc32_u32(rnd, i);  /* simple random generator */
        b = rnd;                      /* the actual value of b has no influence on the performance */
        a = rnd;                      /* `a` has about 50% nonzero bits */
#if 1 == 1                            /* reduce the number of set bits in `a` from about 16 to 3.5 */
        a = rnd & rnd_old;            /* now `a` has about 25% nonzero bits */
        /*      0bFEDCBA9876543210FEDCBA9876543210 */
        a = (a & 0b00110000101001000011100010010000) | 1; /* about 3.5 nonzero bits on average */
#endif
        /* printf("a = %08X \n", a); */
        // popcntsum = popcntsum + _mm_popcnt_u32(a);  /* uncomment to check the average bit density */
        /*                                       3.5 set bits   16 set bits   (time in sec.) */
        // c = mult_shft_Orient(a, b );          /*   0.81           1.51    */
        // c = mult_shft_Harold(a, b );          /*   0.84           1.51    */
        // c = mult_shft_Harold_unroll2(a, b );  /*   0.64           1.58    */
        // c = mult_shft_Harold_unroll4(a, b );  /*   0.48           1.34    */
        c = mult_shft_AVX2(a, b );               /*   0.44           0.40    */
        sum = sum + c;
    }
    printf("sum = %016lX \n\n", sum);
    printf("average density = %f bits per uint32_t\n\n", popcntsum/100000000); /* prints 0 unless the popcnt line above is uncommented */
    return 0;
}