
C++ floating-point multiplication: AVX losing speed to SSE?

Tags: c++, performance, sse, avx

I have two pieces of code that do the same thing, but the AVX version is considerably slower than the SSE version. Can someone explain why?

What I have already tried: I profiled the code with Very Sleepy, but that did not give me any usable results; it only confirmed that the AVX version is slower.

I have also looked the instructions up in the SSE/AVX guides. On my CPU (Haswell) they have the same latency/throughput; only the horizontal add needs an extra instruction in the AVX version (the extra step is shown in the helper sketch after the table):

**Latency and throughput**

_mm_mul_ps            -> L 5, T 0.5
_mm256_mul_ps         -> L 5, T 0.5
_mm_hadd_ps           -> L 5, T 2
_mm256_hadd_ps        -> L 5, T ?
_mm256_extractf128_ps -> L 1, T 1
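
For reference, the usual reduction patterns look like this; hsum128_ps and hsum256_ps are helper names introduced here for illustration, not code from the question. The AVX version needs the extra extract/add step because _mm256_hadd_ps only adds pairs within each 128-bit lane:

#include <immintrin.h>

// Horizontal sum of a __m128: two hadds collapse 4 lanes into 1.
static inline float hsum128_ps( __m128 v )
{
    v = _mm_hadd_ps( v, v );   // { a+b, c+d, a+b, c+d }
    v = _mm_hadd_ps( v, v );   // { a+b+c+d, ... }
    return _mm_cvtss_f32( v );
}

// Horizontal sum of a __m256: hadd works only within each 128-bit
// lane, so one extra extract + add is needed to combine the halves.
static inline float hsum256_ps( __m256 v )
{
    __m128 lo = _mm256_castps256_ps128( v );
    __m128 hi = _mm256_extractf128_ps( v, 1 );
    return hsum128_ps( _mm_add_ps( lo, hi ) );
}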
Summary of what the code computes:

Final1 = sum( m_Array1 * m_Array1 * m_Array3 * m_Array3 )

Final2 = sum( m_Array2 * m_Array2 * m_Array3 * m_Array3 )

Final3 = sum( m_Array1 * m_Array2 * m_Array3 * m_Array3 )
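
For clarity, here is a plain scalar version of those three sums. This is a sketch of my own, not code from the question, and its summation order differs from the SIMD versions, so results may differ in the last bits:

// Scalar reference for Final1/Final2/Final3 (illustrative only).
static void scalar_ref( const float *a1, const float *a2, const float *a3,
                        int n, float *f1, float *f2, float *f3 )
{
    *f1 = *f2 = *f3 = 0.0f;
    for ( int i = 0; i < n; ++i )
    {
        float g13 = a1[i] * a3[i];
        float g23 = a2[i] * a3[i];
        *f1 += g13 * g13;   // sum( a1*a1*a3*a3 )
        *f2 += g23 * g23;   // sum( a2*a2*a3*a3 )
        *f3 += g13 * g23;   // sum( a1*a2*a3*a3 )
    }
}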

Init:

float Final1 = 0.0f;
float Final2 = 0.0f;
float Final3 = 0.0f;

// 32-byte aligned allocations, so both _mm_load_ps and _mm256_load_ps are safe
float* m_Array1 = (float*)_mm_malloc( 32 * sizeof( float ), 32 );
float* m_Array2 = (float*)_mm_malloc( 32 * sizeof( float ), 32 );
float* m_Array3 = (float*)_mm_malloc( 32 * sizeof( float ), 32 );
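
The question does not show how the arrays are filled; any values will do for timing purposes. A hypothetical fill, just to make the snippet self-contained:

// Hypothetical initialisation (not from the question).
for ( int i = 0; i < 32; ++i )
{
    m_Array1[i] = (float)( i + 1 );
    m_Array2[i] = (float)( 2 * i + 1 );
    m_Array3[i] = 1.0f / (float)( i + 1 );
}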
SSE:

for ( int k = 0; k < 32; k += 4 )
{

    __m128 g1 = _mm_load_ps( m_Array1 + k );
    __m128 g2 = _mm_load_ps( m_Array2 + k );
    __m128 g3 = _mm_load_ps( m_Array3 + k );

    __m128 g1g3 = _mm_mul_ps( g1, g3 );
    __m128 g2g3 = _mm_mul_ps( g2, g3 );

    __m128 a1 = _mm_mul_ps( g1g3, g1g3 );
    __m128 a2 = _mm_mul_ps( g2g3, g2g3 );
    __m128 a3 = _mm_mul_ps( g1g3, g2g3 );

    // horizontal add
    {
        a1 = _mm_hadd_ps( a1, a1 );
        a1 = _mm_hadd_ps( a1, a1 );
        Final1 += _mm_cvtss_f32( a1 );

        a2 = _mm_hadd_ps( a2, a2 );
        a2 = _mm_hadd_ps( a2, a2 );
        Final2 += _mm_cvtss_f32( a2 );

        a3 = _mm_hadd_ps( a3, a3 );
        a3 = _mm_hadd_ps( a3, a3 );
        Final3 += _mm_cvtss_f32( a3 );

    }

}
AVX:

for ( int k = 0; k < 32; k += 8 )
{
    __m256 g1 = _mm256_load_ps( m_Array1 + k );
    __m256 g2 = _mm256_load_ps( m_Array2 + k );
    __m256 g3 = _mm256_load_ps( m_Array3 + k );

    __m256 g1g3 = _mm256_mul_ps( g1, g3 );
    __m256 g2g3 = _mm256_mul_ps( g2, g3 );

    __m256 a1 = _mm256_mul_ps( g1g3, g1g3 );
    __m256 a2 = _mm256_mul_ps( g2g3, g2g3 );
    __m256 a3 = _mm256_mul_ps( g1g3, g2g3 );

    // horizontal add1
    {
        __m256 t1 = _mm256_hadd_ps( a1, a1 );
        __m256 t2 = _mm256_hadd_ps( t1, t1 );
        __m128 t3 = _mm256_extractf128_ps( t2, 1 );
        __m128 t4 = _mm_add_ss( _mm256_castps256_ps128( t2 ), t3 );
        Final1 += _mm_cvtss_f32( t4 );
    }
    // horizontal add2
    {
        __m256 t1 = _mm256_hadd_ps( a2, a2 );
        __m256 t2 = _mm256_hadd_ps( t1, t1 );
        __m128 t3 = _mm256_extractf128_ps( t2, 1 );
        __m128 t4 = _mm_add_ss( _mm256_castps256_ps128( t2 ), t3 );
        Final2 += _mm_cvtss_f32( t4 );
    }
    // horizontal add3
    {
        __m256 t1 = _mm256_hadd_ps( a3, a3 );
        __m256 t2 = _mm256_hadd_ps( t1, t1 );
        __m128 t3 = _mm256_extractf128_ps( t2, 1 );
        __m128 t4 = _mm_add_ss( _mm256_castps256_ps128( t2 ), t3 );
        Final3 += _mm_cvtss_f32( t4 );
    }

}

I put your code into a test harness, compiled it with clang -O3, and timed it. I also implemented faster versions of both routines, with the horizontal add moved out of the loop:

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>   // gettimeofday
#include <immintrin.h>

static void sse(const float *m_Array1, const float *m_Array2, const float *m_Array3, size_t n, float *Final1, float *Final2, float *Final3)
{
    *Final1 = *Final2 = *Final3 = 0.0f;
    for (int k = 0; k < n; k += 4)
    {
        __m128 g1 = _mm_load_ps( m_Array1 + k );
        __m128 g2 = _mm_load_ps( m_Array2 + k );
        __m128 g3 = _mm_load_ps( m_Array3 + k );

        __m128 g1g3 = _mm_mul_ps( g1, g3 );
        __m128 g2g3 = _mm_mul_ps( g2, g3 );

        __m128 a1 = _mm_mul_ps( g1g3, g1g3 );
        __m128 a2 = _mm_mul_ps( g2g3, g2g3 );
        __m128 a3 = _mm_mul_ps( g1g3, g2g3 );

        // horizontal add
        {
            a1 = _mm_hadd_ps( a1, a1 );
            a1 = _mm_hadd_ps( a1, a1 );
            *Final1 += _mm_cvtss_f32( a1 );

            a2 = _mm_hadd_ps( a2, a2 );
            a2 = _mm_hadd_ps( a2, a2 );
            *Final2 += _mm_cvtss_f32( a2 );

            a3 = _mm_hadd_ps( a3, a3 );
            a3 = _mm_hadd_ps( a3, a3 );
            *Final3 += _mm_cvtss_f32( a3 );
        }
    }
}

static void sse_fast(const float *m_Array1, const float *m_Array2, const float *m_Array3, size_t n, float *Final1, float *Final2, float *Final3)
{
    *Final1 = *Final2 = *Final3 = 0.0f;
    __m128 a1 = _mm_setzero_ps();
    __m128 a2 = _mm_setzero_ps();
    __m128 a3 = _mm_setzero_ps();
    for (int k = 0; k < n; k += 4)
    {
        __m128 g1 = _mm_load_ps( m_Array1 + k );
        __m128 g2 = _mm_load_ps( m_Array2 + k );
        __m128 g3 = _mm_load_ps( m_Array3 + k );

        __m128 g1g3 = _mm_mul_ps( g1, g3 );
        __m128 g2g3 = _mm_mul_ps( g2, g3 );

        a1 = _mm_add_ps(a1, _mm_mul_ps( g1g3, g1g3 ));
        a2 = _mm_add_ps(a2, _mm_mul_ps( g2g3, g2g3 ));
        a3 = _mm_add_ps(a3, _mm_mul_ps( g1g3, g2g3 ));
    }
    // horizontal add
    a1 = _mm_hadd_ps( a1, a1 );
    a1 = _mm_hadd_ps( a1, a1 );
    *Final1 += _mm_cvtss_f32( a1 );

    a2 = _mm_hadd_ps( a2, a2 );
    a2 = _mm_hadd_ps( a2, a2 );
    *Final2 += _mm_cvtss_f32( a2 );

    a3 = _mm_hadd_ps( a3, a3 );
    a3 = _mm_hadd_ps( a3, a3 );
    *Final3 += _mm_cvtss_f32( a3 );
}

static void avx(const float *m_Array1, const float *m_Array2, const float *m_Array3, size_t n, float *Final1, float *Final2, float *Final3)
{
    *Final1 = *Final2 = *Final3 = 0.0f;
    for (int k = 0; k < n; k += 8 )
    {
        __m256 g1 = _mm256_load_ps( m_Array1 + k );
        __m256 g2 = _mm256_load_ps( m_Array2 + k );
        __m256 g3 = _mm256_load_ps( m_Array3 + k );

        __m256 g1g3 = _mm256_mul_ps( g1, g3 );
        __m256 g2g3 = _mm256_mul_ps( g2, g3 );

        __m256 a1 = _mm256_mul_ps( g1g3, g1g3 );
        __m256 a2 = _mm256_mul_ps( g2g3, g2g3 );
        __m256 a3 = _mm256_mul_ps( g1g3, g2g3 );

        // horizontal add1
        {
            __m256 t1 = _mm256_hadd_ps( a1, a1 );
            __m256 t2 = _mm256_hadd_ps( t1, t1 );
            __m128 t3 = _mm256_extractf128_ps( t2, 1 );
            __m128 t4 = _mm_add_ss( _mm256_castps256_ps128( t2 ), t3 );
            *Final1 += _mm_cvtss_f32( t4 );
        }
        // horizontal add2
        {
            __m256 t1 = _mm256_hadd_ps( a2, a2 );
            __m256 t2 = _mm256_hadd_ps( t1, t1 );
            __m128 t3 = _mm256_extractf128_ps( t2, 1 );
            __m128 t4 = _mm_add_ss( _mm256_castps256_ps128( t2 ), t3 );
            *Final2 += _mm_cvtss_f32( t4 );
        }
        // horizontal add3
        {
            __m256 t1 = _mm256_hadd_ps( a3, a3 );
            __m256 t2 = _mm256_hadd_ps( t1, t1 );
            __m128 t3 = _mm256_extractf128_ps( t2, 1 );
            __m128 t4 = _mm_add_ss( _mm256_castps256_ps128( t2 ), t3 );
            *Final3 += _mm_cvtss_f32( t4 );
        }
    }
}

static void avx_fast(const float *m_Array1, const float *m_Array2, const float *m_Array3, size_t n, float *Final1, float *Final2, float *Final3)
{
    *Final1 = *Final2 = *Final3 = 0.0f;
    __m256 a1 = _mm256_setzero_ps();
    __m256 a2 = _mm256_setzero_ps();
    __m256 a3 = _mm256_setzero_ps();
    for (int k = 0; k < n; k += 8 )
    {
        __m256 g1 = _mm256_load_ps( m_Array1 + k );
        __m256 g2 = _mm256_load_ps( m_Array2 + k );
        __m256 g3 = _mm256_load_ps( m_Array3 + k );

        __m256 g1g3 = _mm256_mul_ps( g1, g3 );
        __m256 g2g3 = _mm256_mul_ps( g2, g3 );

        a1 = _mm256_add_ps(a1, _mm256_mul_ps( g1g3, g1g3 ));
        a2 = _mm256_add_ps(a2, _mm256_mul_ps( g2g3, g2g3 ));
        a3 = _mm256_add_ps(a3, _mm256_mul_ps( g1g3, g2g3 ));
    }

    // horizontal add1

    {
        __m256 t1 = _mm256_hadd_ps( a1, a1 );
        __m256 t2 = _mm256_hadd_ps( t1, t1 );
        __m128 t3 = _mm256_extractf128_ps( t2, 1 );
        __m128 t4 = _mm_add_ss( _mm256_castps256_ps128( t2 ), t3 );
        *Final1 += _mm_cvtss_f32( t4 );
    }

    // horizontal add2

    {
        __m256 t1 = _mm256_hadd_ps( a2, a2 );
        __m256 t2 = _mm256_hadd_ps( t1, t1 );
        __m128 t3 = _mm256_extractf128_ps( t2, 1 );
        __m128 t4 = _mm_add_ss( _mm256_castps256_ps128( t2 ), t3 );
        *Final2 += _mm_cvtss_f32( t4 );
    }

    // horizontal add3

    {
        __m256 t1 = _mm256_hadd_ps( a3, a3 );
        __m256 t2 = _mm256_hadd_ps( t1, t1 );
        __m128 t3 = _mm256_extractf128_ps( t2, 1 );
        __m128 t4 = _mm_add_ss( _mm256_castps256_ps128( t2 ), t3 );
        *Final3 += _mm_cvtss_f32( t4 );
    }

}

int main(int argc, char *argv[])
{
    size_t n = 4096;

    if (argc > 1) n = atoi(argv[1]);

    float *in_1 = valloc(n * sizeof(in_1[0]));
    float *in_2 = valloc(n * sizeof(in_2[0]));
    float *in_3 = valloc(n * sizeof(in_3[0]));
    float out_1, out_2, out_3;

    struct timeval t0, t1;
    double t_ms;

    for (int i = 0; i < n; ++i)
    {
        in_1[i] = (float)rand() / (float)(RAND_MAX / 2);
        in_2[i] = (float)rand() / (float)(RAND_MAX / 2);
        in_3[i] = (float)rand() / (float)(RAND_MAX / 2);
    }

    sse(in_1, in_2, in_3, n, &out_1, &out_2, &out_3);
    printf("sse     : %g, %g, %g\n", out_1, out_2, out_3);
    sse_fast(in_1, in_2, in_3, n, &out_1, &out_2, &out_3);
    printf("sse_fast: %g, %g, %g\n", out_1, out_2, out_3);
    avx(in_1, in_2, in_3, n, &out_1, &out_2, &out_3);
    printf("avx     : %g, %g, %g\n", out_1, out_2, out_3);
    avx_fast(in_1, in_2, in_3, n, &out_1, &out_2, &out_3);
    printf("avx_fast: %g, %g, %g\n", out_1, out_2, out_3);

    gettimeofday(&t0, NULL);
    for (int k = 0; k < 100; ++k) sse(in_1, in_2, in_3, n, &out_1, &out_2, &out_3);
    gettimeofday(&t1, NULL);
    t_ms = ((double)(t1.tv_sec - t0.tv_sec) + (double)(t1.tv_usec - t0.tv_usec) * 1.0e-6) * 1.0e3;
    printf("sse     : %g, %g, %g, %g ms\n", out_1, out_2, out_3, t_ms);

    gettimeofday(&t0, NULL);
    for (int k = 0; k < 100; ++k) sse_fast(in_1, in_2, in_3, n, &out_1, &out_2, &out_3);
    gettimeofday(&t1, NULL);
    t_ms = ((double)(t1.tv_sec - t0.tv_sec) + (double)(t1.tv_usec - t0.tv_usec) * 1.0e-6) * 1.0e3;
    printf("sse_fast: %g, %g, %g, %g ms\n", out_1, out_2, out_3, t_ms);

    gettimeofday(&t0, NULL);
    for (int k = 0; k < 100; ++k) avx(in_1, in_2, in_3, n, &out_1, &out_2, &out_3);
    gettimeofday(&t1, NULL);
    t_ms = ((double)(t1.tv_sec - t0.tv_sec) + (double)(t1.tv_usec - t0.tv_usec) * 1.0e-6) * 1.0e3;
    printf("avx     : %g, %g, %g, %g ms\n", out_1, out_2, out_3, t_ms);

    gettimeofday(&t0, NULL);
    for (int k = 0; k < 100; ++k) avx_fast(in_1, in_2, in_3, n, &out_1, &out_2, &out_3);
    gettimeofday(&t1, NULL);
    t_ms = ((double)(t1.tv_sec - t0.tv_sec) + (double)(t1.tv_usec - t0.tv_usec) * 1.0e-6) * 1.0e3;
    printf("avx_fast: %g, %g, %g, %g ms\n", out_1, out_2, out_3, t_ms);

    return 0;
}
So the AVX version does indeed appear to be faster than the SSE version, for both the original and the optimised implementations. The optimised implementations are in turn faster than the original ones, by an even larger margin.


I can only guess that either your compiler is not generating very good code for AVX (or perhaps you forgot to enable compiler optimisations?), or that there is something wrong with your benchmarking methodology.

Comments:

- You need a much larger loop to reliably benchmark these two code fragments. Also, in both cases the horizontal add and the final scalar extraction should be outside the loop.
- This is just a simplified example; replace the 32 with "some integer multiple of 8" and the problem remains the same.
- @S.H: Sure, but in both cases your code is very inefficient. If you move the horizontal adds and the scalar extraction out of the loop and use only ordinary (vertical) adds inside the loop, you will get much more efficient code in both cases, and the relative performance may well change because of the different instruction mix. You can easily make it almost entirely vertical, summing horizontally just once after the end of the test loop.
- @S.H: Since you are simply summing all the products, you can do vertical adds inside the loop, giving four partial sums, and then do a single horizontal add after the loop to combine those partial sums. That will be far more efficient and avoids the high-latency instructions mentioned earlier.
- That is really strange; I will check and post my results. Now I understand what you mean by "moving the hadd out of the loop" - I thought you wanted to do only the multiplications first (without the final add) and then somehow sum afterwards. Thanks.
- Yes, as a rule of thumb horizontal operations should generally be avoided in SIMD code, and where they cannot be avoided they are best kept outside any performance-critical loop.
- I am compiling with VS2012, "-O3" and "/arch:AVX". Could that be the problem?
- I would have thought /O2 or /Ox would be fine with Visual Studio, but I don't do much on Windows and I'm not sure how good Visual Studio is at AVX code generation. GCC, clang, and Intel's ICC all generate reasonably good AVX code, so you might want to try a different compiler. You might also want to review your benchmarking methodology, as there are plenty of pitfalls for the unwary. If you can compile the test harness above with Visual Studio, it would be very interesting to see the timing numbers you get.
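
To make the advice in the comments concrete: below is a minimal sketch (mine, not from the thread) of the question's 32-element AVX loop rewritten with only vertical adds inside the loop and a single horizontal reduction per sum at the end, reusing the hsum256_ps helper sketched earlier. It is the same idea as avx_fast above, expressed with the question's variable names:

__m256 acc1 = _mm256_setzero_ps();
__m256 acc2 = _mm256_setzero_ps();
__m256 acc3 = _mm256_setzero_ps();
for ( int k = 0; k < 32; k += 8 )
{
    __m256 g1   = _mm256_load_ps( m_Array1 + k );
    __m256 g2   = _mm256_load_ps( m_Array2 + k );
    __m256 g3   = _mm256_load_ps( m_Array3 + k );

    __m256 g1g3 = _mm256_mul_ps( g1, g3 );
    __m256 g2g3 = _mm256_mul_ps( g2, g3 );

    // vertical accumulation only - no hadd inside the loop
    acc1 = _mm256_add_ps( acc1, _mm256_mul_ps( g1g3, g1g3 ) );
    acc2 = _mm256_add_ps( acc2, _mm256_mul_ps( g2g3, g2g3 ) );
    acc3 = _mm256_add_ps( acc3, _mm256_mul_ps( g1g3, g2g3 ) );
}
// one horizontal reduction per sum, outside the loop
Final1 += hsum256_ps( acc1 );
Final2 += hsum256_ps( acc2 );
Final3 += hsum256_ps( acc3 );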