C++ OpenMP scaling problem

I have written a code for a special type of 3D CFD simulation, the Lattice Boltzmann method (very similar to the code provided in the book "The Lattice Boltzmann Method" by Timm Krüger et al.). When multithreading the program with OpenMP I ran into issues I cannot fully understand: the results turn out to depend strongly on the size of the overall domain.

The basic principle is that every cell of the 3D domain is assigned specific values for the 19 distribution functions (0-18) along discrete directions. They are placed in two linear arrays allocated on the heap (one population is kept in a separate array): the 18 populations of a given cell are contiguous in memory, the values for consecutive x lie next to each other, and so on (row-major order: population -> x -> y -> z). These distribution functions are redistributed according to certain values inside the cell and then streamed to the neighbouring cells. For this reason I have two sets of populations, f1 and f2: the algorithm takes the values from f1, redistributes them and writes them into f2; then the pointers are swapped and the algorithm runs again. The code works perfectly fine on a single core, but when I try to parallelise it across multiple cores the performance depends on the overall size of the domain: for very small domains (10^3 cells) the algorithm is comparatively slow at 15 million cells per second, for medium-sized domains (30^3 cells) it is comparatively fast at over 60 million cells per second, and for anything larger the performance drops again to about 30 million cells per second. Executing the code on a single core only yields the same performance of roughly 15 million cells per second. These results of course vary between processors, but qualitatively the same problem remains.
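For reference, a minimal sketch of index functions consistent with this layout (the actual D3Q19_ScalarIndex/D3Q19_FieldIndex definitions are not shown in the post, so this is only an illustrative assumption, with NX, NY, NZ as compile-time constants):

#include <cstddef>

constexpr unsigned int NX = 32, NY = 32, NZ = 32;              // illustrative domain size

// f0 (the rest population) lives in its own array, one value per cell
inline std::size_t D3Q19_ScalarIndex(unsigned int x, unsigned int y, unsigned int z)
{
    return (static_cast<std::size_t>(z)*NY + y)*NX + x;
}

// populations 1..18 of one cell are contiguous, then consecutive x, then y, then z
inline std::size_t D3Q19_FieldIndex(unsigned int x, unsigned int y, unsigned int z, unsigned int q)
{
    return ((static_cast<std::size_t>(z)*NY + y)*NX + x)*18u + (q - 1u);
}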

The core of the code boils down to a parallel loop that is executed over and over again, with the pointers to f1 and f2 being swapped in between:

#pragma omp parallel for default(none) shared(f0,f1,f2) schedule(static)
    for(unsigned int z = 0; z < NZ; ++z)
    {
        for(unsigned int y = 0; y < NY; ++y)
        {
            for(unsigned int x = 0; x < NX; ++x)
            {
                /// temporary populations
                double ft0  = f0[D3Q19_ScalarIndex(x,y,z)];
                double ft1  = f1[D3Q19_FieldIndex(x,y,z,1)];
                double ft2  = f1[D3Q19_FieldIndex(x,y,z,2)];
                double ft3  = f1[D3Q19_FieldIndex(x,y,z,3)];
                double ft4  = f1[D3Q19_FieldIndex(x,y,z,4)];
                double ft5  = f1[D3Q19_FieldIndex(x,y,z,5)];
                double ft6  = f1[D3Q19_FieldIndex(x,y,z,6)];
                double ft7  = f1[D3Q19_FieldIndex(x,y,z,7)];
                double ft8  = f1[D3Q19_FieldIndex(x,y,z,8)];
                double ft9  = f1[D3Q19_FieldIndex(x,y,z,9)];
                double ft10 = f1[D3Q19_FieldIndex(x,y,z,10)];
                double ft11 = f1[D3Q19_FieldIndex(x,y,z,11)];
                double ft12 = f1[D3Q19_FieldIndex(x,y,z,12)];
                double ft13 = f1[D3Q19_FieldIndex(x,y,z,13)];
                double ft14 = f1[D3Q19_FieldIndex(x,y,z,14)];
                double ft15 = f1[D3Q19_FieldIndex(x,y,z,15)];
                double ft16 = f1[D3Q19_FieldIndex(x,y,z,16)];
                double ft17 = f1[D3Q19_FieldIndex(x,y,z,17)];
                double ft18 = f1[D3Q19_FieldIndex(x,y,z,18)];

                /// microscopic to macroscopic
                double r    = ft0 + ft1 + ft2 + ft3 + ft4 + ft5 + ft6 + ft7 + ft8 + ft9 + ft10 + ft11 + ft12 + ft13 + ft14 + ft15 + ft16 + ft17 + ft18;
                double rinv = 1.0/r;
                double u    = rinv*(ft1 - ft2 + ft7 + ft8  + ft9   + ft10 - ft11 - ft12 - ft13 - ft14);
                double v    = rinv*(ft3 - ft4 + ft7 - ft8  + ft11  - ft12 + ft15 + ft16 - ft17 - ft18);
                double w    = rinv*(ft5 - ft6 + ft9 - ft10 + ft13 -  ft14 + ft15 - ft16 + ft17 - ft18);

                /// collision & streaming
                double trw0 = omega*r*w0;                   //temporary variables
                double trwc = omega*r*wc;
                double trwd = omega*r*wd;
                double uu   = 1.0 - 1.5*(u*u+v*v+w*w);

                double bu = 3.0*u;
                double bv = 3.0*v;
                double bw = 3.0*w;

                unsigned int xp = (x + 1) % NX;             //calculate x,y,z coordinates of neighbouring cells
                unsigned int yp = (y + 1) % NY;
                unsigned int zp = (z + 1) % NZ;
                unsigned int xm = (NX + x - 1) % NX;
                unsigned int ym = (NY + y - 1) % NY;
                unsigned int zm = (NZ + z - 1) % NZ;

                f0[D3Q19_ScalarIndex(x,y,z)]      = bomega*ft0  + trw0*(uu);                        //redistribute distribution functions and stream to neighbouring cells
                double cu = bu;
                f2[D3Q19_FieldIndex(xp,y, z,  1)] = bomega*ft1  + trwc*(uu + cu*(1.0 + 0.5*cu));
                cu = -bu;
                f2[D3Q19_FieldIndex(xm,y, z,  2)] = bomega*ft2  + trwc*(uu + cu*(1.0 + 0.5*cu));
                cu = bv;
                f2[D3Q19_FieldIndex(x, yp,z,  3)] = bomega*ft3  + trwc*(uu + cu*(1.0 + 0.5*cu));
                cu = -bv;
                f2[D3Q19_FieldIndex(x, ym,z,  4)] = bomega*ft4  + trwc*(uu + cu*(1.0 + 0.5*cu));
                cu = bw;
                f2[D3Q19_FieldIndex(x, y, zp, 5)] = bomega*ft5  + trwc*(uu + cu*(1.0 + 0.5*cu));
                cu = -bw;
                f2[D3Q19_FieldIndex(x, y, zm, 6)] = bomega*ft6  + trwc*(uu + cu*(1.0 + 0.5*cu));
                cu = bu+bv;
                f2[D3Q19_FieldIndex(xp,yp,z,  7)] = bomega*ft7  + trwd*(uu + cu*(1.0 + 0.5*cu));
                cu = bu-bv;
                f2[D3Q19_FieldIndex(xp,ym,z,  8)] = bomega*ft8  + trwd*(uu + cu*(1.0 + 0.5*cu));
                cu = bu+bw;
                f2[D3Q19_FieldIndex(xp,y, zp, 9)] = bomega*ft9  + trwd*(uu + cu*(1.0 + 0.5*cu));
                cu = bu-bw;
                f2[D3Q19_FieldIndex(xp,y, zm,10)] = bomega*ft10 + trwd*(uu + cu*(1.0 + 0.5*cu));
                cu = -bu+bv;
                f2[D3Q19_FieldIndex(xm,yp,z, 11)] = bomega*ft11 + trwd*(uu + cu*(1.0 + 0.5*cu));
                cu = -bu-bv;
                f2[D3Q19_FieldIndex(xm,ym,z, 12)] = bomega*ft12 + trwd*(uu + cu*(1.0 + 0.5*cu));
                cu = -bu+bw;
                f2[D3Q19_FieldIndex(xm,y, zp,13)] = bomega*ft13 + trwd*(uu + cu*(1.0 + 0.5*cu));
                cu = -bu-bw;
                f2[D3Q19_FieldIndex(xm,y, zm,14)] = bomega*ft14 + trwd*(uu + cu*(1.0 + 0.5*cu));
                cu = bv+bw;
                f2[D3Q19_FieldIndex(x, yp,zp,15)] = bomega*ft15 + trwd*(uu + cu*(1.0 + 0.5*cu));
                cu = bv-bw;
                f2[D3Q19_FieldIndex(x, yp,zm,16)] = bomega*ft16 + trwd*(uu + cu*(1.0 + 0.5*cu));
                cu = -bv+bw;
                f2[D3Q19_FieldIndex(x, ym,zp,17)] = bomega*ft17 + trwd*(uu + cu*(1.0 + 0.5*cu));
                cu = -bv-bw;
                f2[D3Q19_FieldIndex(x, ym,zm,18)] = bomega*ft18 + trwd*(uu + cu*(1.0 + 0.5*cu));
            }
        }
    }
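The outer time loop is not shown above; "swapping the pointers" amounts to something like the following sketch (assuming f0, f1 and f2 are plain double* buffers and the parallel loop above is wrapped in a function, here called collide_and_stream as a placeholder name):

#include <utility>   // std::swap

void collide_and_stream(double* f0, double* f1, double* f2);   // wraps the parallel loop above

void run(double* f0, double* f1, double* f2, unsigned int numSteps)
{
    for (unsigned int step = 0; step < numSteps; ++step)
    {
        collide_and_stream(f0, f1, f2);   // read from f1, write streamed values into f2
        std::swap(f1, f2);                // next iteration reads what was just written
    }
}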
The x/y/z loop nest can also be blocked (tiled) so that each thread works on smaller, cache-friendlier chunks of the domain:

const unsigned int numBlocksZ = std::ceil(static_cast<double>(NZ) / BLOCK_SIZE);
const unsigned int numBlocksY = std::ceil(static_cast<double>(NY) / BLOCK_SIZE);
const unsigned int numBlocksX = std::ceil(static_cast<double>(NX) / BLOCK_SIZE);
const unsigned int numBlocks  = numBlocksX*numBlocksY*numBlocksZ;   // total number of tiles

#pragma omp parallel for default(none) shared(f0,f1,f2) schedule(static,1)
for(unsigned int block = 0; block < numBlocks; ++block)
{
  unsigned int startZ = BLOCK_SIZE* (block / (numBlocksX*numBlocksY));
  unsigned int endZ = std::min(startZ + BLOCK_SIZE, NZ);
  for(unsigned int z = startZ; z < endZ; ++z) {
    unsigned int startY = BLOCK_SIZE*((block % (numBlocksX*numBlocksY)) / numBlocksX);
    unsigned int endY = std::min(startY + BLOCK_SIZE, NY);
    for(unsigned int y = startY; y < endY; ++y)
    {
      unsigned int startX = BLOCK_SIZE*(block % numBlocksX);
      unsigned int endX = std::min(startX + BLOCK_SIZE, NX);
      for(unsigned int x = startX; x < endX; ++x)
      {
        ...
      }
    }  
  }
}
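How much this helps depends on the choice of BLOCK_SIZE. As a rough back-of-the-envelope estimate, one tile touches about BLOCK_SIZE^3 * 19 populations * 8 bytes in each of f1 and f2: BLOCK_SIZE = 8 corresponds to roughly 2 * 8^3 * 19 * 8 B ≈ 150 KiB, which fits comfortably in a typical per-core L2 cache, while BLOCK_SIZE = 16 is already around 1.2 MiB and may not.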