C++ Eigen & OpenMP: No parallelization due to false sharing and thread overhead

Tags: c++, parallel-processing, openmp, eigen, false-sharing

System specifications:

  • Intel Xeon E7-v3 processor (4 sockets, 16 cores/socket, 2 threads/core)
  • Eigen library, used from C++

I am computing rows of a matrix whose entries are given by get_Matrix_Entry (defined with the serial implementation further down), and none of my attempts to parallelize this with OpenMP gives a speedup. First attempt, a straightforward parallel for:

    Eigen::VectorXd get_Row(const int j, const int nColStart, const int nCols) {
        Eigen::VectorXd row(nCols);

        #pragma omp parallel for schedule(static,8)
        for (int k = 0; k < nCols; ++k) {
            row(k) = get_Matrix_Entry(j, k + nColStart);
        }

        return row;
    }
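
(With schedule(static,8), OpenMP hands each thread round-robin chunks of eight iterations; eight doubles span 64 bytes, one full cache line, so different threads should mostly touch different lines.)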

Second attempt, filling the row in chunks of vec_len = 8 entries (64 bytes, one cache line per chunk):

    Eigen::VectorXd get_Row(const int j, const int nColStart, const int nCols) {
        int vec_len = 8;
        Eigen::VectorXd row(nCols);
        int cols = nCols;
        int rem = cols % vec_len;
        if (rem != 0)
            cols -= rem;              // leftover entries are handled serially below

        #pragma omp parallel for
        for (int ii = 0; ii < cols; ii += vec_len) {
            for (int i = ii; i < ii + vec_len; i++) {
                row(i) = get_Matrix_Entry(j, i + nColStart);
            }
        }

        for (int jj = cols; jj < nCols; jj++)
            row(jj) = get_Matrix_Entry(j, jj + nColStart);

        return row;
    }

Third attempt, padding: write each entry into column 0 of an nCols x 8 scratch matrix so that, in intent, every entry gets its own cache line, then copy that column out:

    Eigen::VectorXd get_Row(const int j, const int nColStart, const int nCols) {
        int cache_line_size = 8;       // 8 doubles = 64 bytes
        Eigen::MatrixXd row_m(nCols, cache_line_size);

        #pragma omp parallel for schedule(static,1)
        for (int k = 0; k < nCols; ++k)
            row_m(k,0) = get_Matrix_Entry(j, k + nColStart);

        Eigen::VectorXd row(nCols);
        row = row_m.block(0, 0, nCols, 1);

        return row;
    }
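
One caveat worth noting about this attempt: Eigen::MatrixXd is column-major by default, so the entries row_m(k,0) are in fact contiguous in memory and the intended cache-line padding never happens. A minimal sketch of the padded layout using a row-major matrix (the name get_Row_padded and this variant are my addition, not part of the original post):

    #include <Eigen/Core>

    // Row-major storage places row_m(k,0) and row_m(k+1,0) a full cache
    // line apart (8 doubles = 64 bytes), so no two threads write to the
    // same cache line.
    using RowMajorMatrix =
        Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;

    Eigen::VectorXd get_Row_padded(const int j, const int nColStart, const int nCols) {
        RowMajorMatrix row_m(nCols, 8);

        #pragma omp parallel for schedule(static,1)
        for (int k = 0; k < nCols; ++k)
            row_m(k, 0) = get_Matrix_Entry(j, k + nColStart);

        return row_m.col(0);   // gather the padded entries into a dense vector
    }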

For reference, here is the serial implementation of the code snippet, together with the get_Matrix_Entry it calls:

    Eigen::VectorXd get_Row(const int j, const int nColStart, const int nCols) {

        Eigen::VectorXd row(nCols);
        for (int k = 0; k < nCols; ++k) {
            row(k) = get_Matrix_Entry(j, k + nColStart);
        }

        return row;
    }

    double get_Matrix_Entry(int x, int y) {
        return exp(-(x - y)*(x - y));
    }
    
Answer:

Try rewriting your function as a single expression and let Eigen vectorize itself, i.e. (this is the get_Row from the full listing at the end):
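
    Eigen::VectorXd get_Row(const int j, const int nColStart, const int nCols) {

        Eigen::VectorXd row(nCols);

        row = (-( Eigen::VectorXd::LinSpaced(nCols, nColStart, nColStart + nCols - 1).array() - double(j)).square()).exp().matrix();

        return row;
    }

Written as a single expression, the whole computation stays inside Eigen, which evaluates it in one pass using its SIMD-vectorized array kernels (including exp()), which is why the instruction-set flags below matter.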

Make sure to compile with -mavx and -mfma (or -march=native). That gives me a 4x speedup on an i7 (I know you are talking about trying to use 64/128 threads, but this is with a single thread).
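
For example, a typical GCC build line for this would be along the lines of g++ -O3 -march=native -fopenmp test.cpp (file name assumed); on hardware that supports them, -march=native already implies -mavx and -mfma.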

You can enable OpenMP for some further speedup by dividing the computation into segments:

    Eigen::VectorXd get_Row_omp(const int j, const int nColStart, const int nCols) {

        Eigen::VectorXd row(nCols);

    #pragma omp parallel
        {
            // Give each thread one contiguous segment of the row.
            int num_threads = omp_get_num_threads();
            int tid = omp_get_thread_num();
            int n_per_thread = nCols / num_threads;
            if ((n_per_thread * num_threads < nCols)) n_per_thread++;
            int start = tid * n_per_thread;
            int len = n_per_thread;
            if (tid + 1 == num_threads) len = nCols - start;   // last thread takes what is left

            // Disjoint segments: no sharing between threads, no synchronization needed.
            if(start < nCols)
                row.segment(start, len) = (-(Eigen::VectorXd::LinSpaced(len,
                                   nColStart + start, nColStart + start + len - 1)
                                .array() - double(j)).square()).exp().matrix();

        }
        return row;

    }
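
The same one-contiguous-segment-per-thread split can also be written as an ordinary parallel for over chunk indices; a minimal sketch (the name get_Row_chunked and the chunk arithmetic are mine, not from the original answer):

    #include <Eigen/Core>
    #include <omp.h>

    // Same idea as get_Row_omp above: each chunk is a disjoint segment
    // of row, so threads never write to overlapping memory.
    Eigen::VectorXd get_Row_chunked(const int j, const int nColStart, const int nCols) {
        Eigen::VectorXd row(nCols);
        const int nChunks = omp_get_max_threads();
        const int chunk   = (nCols + nChunks - 1) / nChunks;   // ceiling division

        #pragma omp parallel for schedule(static,1)
        for (int c = 0; c < nChunks; ++c) {
            const int start = c * chunk;
            if (start >= nCols) continue;                      // no work left for this chunk
            int len = chunk;
            if (start + len > nCols) len = nCols - start;      // last chunk takes the remainder
            row.segment(start, len) = (-(Eigen::VectorXd::LinSpaced(len,
                               nColStart + start, nColStart + start + len - 1)
                            .array() - double(j)).square()).exp().matrix();
        }
        return row;
    }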
    

Comments:

  • How did you measure the time? Did you profile your code to determine that this particular part is a hot spot? If so, how much consecutive wall time does it take? What makes you believe false sharing has anything to do with the current issue?
  • On the E7, affinity settings will be particularly important so as to pin threads to distinct localities and avoid jumping between CPUs. I trust you are not insisting that the compiler libraries lack this feature.
  • @tim18 I have kept the environment variable proc_affinity set to true, but it is still not helping.
  • @Gilles For the time measurement I used omp_get_wtime(); the profiling was done with VTune. For N = 6553600 the serial code runs for 20 seconds, and the parallel code sometimes runs for 18 or 22 seconds. This part of the code is called many times (around 30), so this function needs to be optimized, but as you can see it is not.
  • @user7440094 How are you compiling? Using a single thread on an i7 with -march=native -O3 and N = 6553600, each function call takes about 0.15 seconds. Removing the mentioned flags, each call takes about 0.5 seconds. Using my answer below with OMP, each call takes about 0.015 seconds.
  • I tried it on the Xeon processor, but for N = 10^6 elements it still only has a speedup of 1.3x; I think the overheads play a major role here.
  • @user7440094 Agreed. But that really doesn't make much sense; I am getting the expected speedup, so unless something else is limiting your system, I would suggest using the full speedup for comparison.
  • You were right, the speedup is 4x. I forgot to remove the -pg flag, thanks! Also, the compile flag used was -march=native; -mavx and -mfma could not be used, they gave an error related to the Eigen library.
  • Make sure you are using Eigen 3.3 or later.
  • The Eigen library used is 3.3.1.
For completeness, here is the full program used for the timing comparison:

    #include <Eigen/Core>
    #include <cmath>    // exp
    #include <iostream>
    #include <omp.h>
    
    
    double get_Matrix_Entry(int x, int y) {
            return exp(-(x - y)*(x - y));
    }
    
    Eigen::VectorXd get_RowOld(const int j, const int nColStart, const int nCols) {
    
            Eigen::VectorXd row(nCols);
            for (int k = 0; k<nCols; ++k) {
                    row(k) = get_Matrix_Entry(j, k + nColStart);
            }
            return row;
    }
    
    
    Eigen::VectorXd get_Row(const int j, const int nColStart, const int nCols) {
    
            Eigen::VectorXd row(nCols);
    
            row = (-( Eigen::VectorXd::LinSpaced(nCols, nColStart, nColStart + nCols - 1).array() - double(j)).square()).exp().matrix();
    
            return row;
    }
    
    Eigen::VectorXd get_Row_omp(const int j, const int nColStart, const int nCols) {
    
            Eigen::VectorXd row(nCols);
    
    #pragma omp parallel
            {
                    int num_threads = omp_get_num_threads();
                    int tid = omp_get_thread_num();
                    int n_per_thread = nCols / num_threads;
                    if ((n_per_thread * num_threads < nCols)) n_per_thread++;
                    int start = tid * n_per_thread;
                    int len = n_per_thread;
                    if (tid + 1 == num_threads) len = nCols - start;
    
    
    // Debug output: print each thread's segment boundaries.
    #pragma omp critical
    {
            std::cout << tid << "/" << num_threads << "\t" << n_per_thread << "\t" << start <<
                                                             "\t" << len << "\t" << start+len << "\n\n";
    }
    
                    if(start < nCols)
                            row.segment(start, len) = (-(Eigen::VectorXd::LinSpaced(len, nColStart + start, nColStart + start + len - 1).array() - double(j)).square()).exp().matrix();
    
            }
            return row;
    }
    
    int main()
    {
            std::cout << EIGEN_WORLD_VERSION << '.' << EIGEN_MAJOR_VERSION << '.' << EIGEN_MINOR_VERSION << '\n';
            volatile int b = 3;
            int sz = 6553600;
            sz = 16;            // overrides the large test size with a small one
            b = 6553500;
            b = 3;              // overrides the large column offset
            {
                    auto beg = omp_get_wtime();
                    auto r = get_RowOld(5, b, sz);
                    auto end = omp_get_wtime();
                    auto diff = end - beg;
                    std::cout << r.rows() << "\t" << r.cols() << "\n";
    //              std::cout << r.transpose() << "\n";
                    std::cout << "Old: " << r.mean() << "\n" << diff << "\n\n";
    
                    beg = omp_get_wtime();
                    auto r2 = get_Row(5, b, sz);
                    end = omp_get_wtime();
                    diff = end - beg;
                    std::cout << r2.rows() << "\t" << r2.cols() << "\n";
    //              std::cout << r2.transpose() << "\n";
                    std::cout << "Eigen:         " << (r2-r).cwiseAbs().sum() << "\t" << (r-r2).cwiseAbs().mean() << "\n" << diff << "\n\n";
    
                    auto omp_beg = omp_get_wtime();
                    auto r3 = get_Row_omp(5, b, sz);
                    auto omp_end = omp_get_wtime();
                    auto omp_diff = omp_end - omp_beg;
                    std::cout << r3.rows() << "\t" << r3.cols() << "\n";
    //              std::cout << r3.transpose() << "\n";
                    std::cout << "OMP and Eigen: " << (r3-r).cwiseAbs().sum() << "\t" << (r - r3).cwiseAbs().mean() << "\n" << omp_diff << "\n";
            }
    
            return 0;
    
    }
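
When running the test, the thread count can be set with the standard OMP_NUM_THREADS environment variable (e.g. OMP_NUM_THREADS=64 ./a.out, binary name assumed); on a multi-socket Xeon, pinning threads via OMP_PROC_BIND / OMP_PLACES, as discussed in the comments above, matters as well.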