ScaLAPACK returns the wrong answer


I am trying to write some C code that finds all the eigenvalues of a large matrix using the pzheevd routine from ScaLAPACK. I have the simple example below, which hard-codes a 4x4 matrix. With a single process, 2 processes, or 4 processes I get the correct eigenvalues (-2.0396, -2, 2, 2.0396). However, with a non-commensurate number of processes such as 3, the returned eigenvalues are incorrect, even though all the matrix elements appear to be distributed correctly.

To build the code, use:

mpicc -g test.c -llapack -o test -lblacs-openmpi -lblacsCinit-openmpi  -L/usr/local/lib -lscalapack -lgfortran -lm -llapack -lblas
An example that works:

$ mpirun -n 1 ./test
Info: 0
Eigenvalues: -2.039608 -2.000000 2.000000 2.039608
And one that does not:

$ mpirun -n 3 ./test
Info: 0
Eigenvalues: -2.223729 -1.805190 2.003994 2.024926 
And the code:

#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include "mpi.h"

typedef struct complex16{double dr,di;} complex16;

extern void Cblacs_get(int context, int what, int *val);
extern void Cblacs_gridinit(int* context, char* order,
                            int nproc_rows, int nproc_cols);
extern void Cblacs_pcoord(int context, int p,
                          int* my_proc_row, int* my_proc_col);
extern void Cblacs_exit(int doneflag);
extern void descinit_(int* descrip, int* m, int* n,
                      int* row_block_size, int* col_block_size,
                      int* first_proc_row, int* first_proc_col,
                      int* blacs_grid, int* leading_dim,
                      int* error_info);
extern int numroc_(int* order, int* block_size, 
                   int* my_process_row_or_col, int* first_process_row_or_col,
                   int* nproc_rows_or_cols);
extern void pzheevd_(char *jobz, char *uplo, int *n, complex16 *a, int *ia, int *ja, int *desca, double *w, complex16 *z, int *iz, int *jz, int *descz, complex16 *work, int *lwork, double *rwork, int *lrwork, int *iwork, int *liwork, int *info);

int main(int argc, char** argv) {
  int   my_rank, size, m, n;
  int   row_block_size=1, col_block_size=1;
  int nproc_rows, nproc_cols;
  int my_process_row, my_process_col;
  int   blacs_grid;
  int first_proc_row = 0, first_proc_col = 0;
  int descrip[9], info, nlocal_rows, nlocal_cols;
  int i,j;
  int leading_dim;
  m=4; n=4;

  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &size);
  MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);

  nproc_rows = sqrt(size);
  nproc_cols = size/nproc_rows;

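  /* Create the BLACS process grid with row-major process ordering; for
     size = 3 this gives nproc_rows = 1, nproc_cols = 3, i.e. a 1x3 grid. */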
  Cblacs_get(0, 0, &blacs_grid);
  Cblacs_gridinit(&blacs_grid, "R", nproc_rows, nproc_cols);
  Cblacs_pcoord(blacs_grid, my_rank, &my_process_row,&my_process_col);

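  /* numroc_ gives the number of rows/cols of the global matrix held
     locally under the 1x1 block-cyclic distribution; descinit_ fills in
     the array descriptor that pzheevd_ expects. */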
  nlocal_rows = numroc_(&m, &row_block_size, &my_process_row, &first_proc_row, &nproc_rows);
  nlocal_cols = numroc_(&n, &col_block_size, &my_process_col, &first_proc_col, &nproc_cols);
  leading_dim = numroc_(&m, &row_block_size, &my_process_row, &first_proc_row, &nproc_rows);
  descinit_(descrip, &m, &n, &row_block_size, &col_block_size, &first_proc_row, &first_proc_col, &blacs_grid, &leading_dim, &info);

  complex16 *a, *z;
  double *w;
  a = (complex16*)malloc(nlocal_rows * nlocal_cols * sizeof(complex16));
  z = (complex16*)malloc(nlocal_rows * nlocal_cols * sizeof(complex16));
  w = (double*)malloc(n * sizeof(double));

  double *mat_els;
  mat_els = (double *)malloc(n*m * sizeof(double));
  mat_els[0] = -2.0;mat_els[1]=-0.2; mat_els[2] = -0.2; mat_els[3] = 0.0;
  mat_els[4] = -0.2;mat_els[5]=2.0; mat_els[6] = 0.0; mat_els[7] = -0.2;
  mat_els[8] = -0.2;mat_els[9]=0.0; mat_els[10] = 2.0; mat_els[11] = -0.2;
  mat_els[12] = 0.0;mat_els[13]=-0.2; mat_els[14] = -0.2; mat_els[15] = -2.0;

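  /* Distribute the global matrix: map each local (i, j) back to its global
     (full_row, full_col) under the 1x1 block-cyclic layout and copy the
     element into the local array. */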
  int full_row, full_col;
  for(i = 0; i < nlocal_rows; i++)
    {
      for(j = 0; j < nlocal_cols; j++)
        {
          full_row = i * nproc_rows + my_process_row;
          full_col = j * nproc_cols + my_process_col;
          a[(i*nlocal_cols + j)].dr = mat_els[full_row * m + full_col];
          a[(i*nlocal_cols + j)].di = 0.0;
        }
    }
  char jobz = 'V'; // N not implemented yet.
  char uplo = 'U';
  int ai = 1, aj = 1, zi = 1, zj = 1;

  double *rwork;
  complex16 *work;
  int *iwork;
  int lwork, lrwork, liwork;

  rwork = (double*)malloc(2 * sizeof(double));
  work = (complex16*)malloc(2 * sizeof(complex16));
  iwork = (int*)malloc(2 * sizeof(int));

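  /* Workspace query: with lwork = lrwork = liwork = -1, pzheevd_ only
     computes the required workspace sizes and returns them in work[0],
     rwork[0], and iwork[0]. */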
  lwork = -1; lrwork = -1; liwork = -1;
  pzheevd_(&jobz, &uplo, &n, a, &ai, &aj, descrip, w, z, &zi, &zj, descrip, work, &lwork, rwork, &lrwork, iwork, &liwork, &info);
  lwork = work[0].dr; lrwork = rwork[0]; liwork = iwork[0];
  free(work); free(rwork); free(iwork);

  rwork = (double*)malloc(lrwork * sizeof(double));
  work = (complex16*)malloc(lwork * sizeof(complex16));
  iwork = (int*)malloc(liwork * sizeof(int));
  pzheevd_(&jobz, &uplo, &n, a, &ai, &aj, descrip, w, z, &zi, &zj, descrip, work, &lwork, rwork, &lrwork, iwork, &liwork, &info);

  if ( my_rank == 0)
    {
      printf("Info: %d\n", info);
      printf("Eigenvalues: ");
      for(i = 0; i < n;i++)
          {
          printf("%lf ", w[i]);
          }
      printf("\n");
    }
  free(w);free(z);free(a);
  free(work);free(iwork);free(rwork);
  Cblacs_exit(1);
  MPI_Finalize();
}
I found the solution, which I should have guessed earlier: the matrix elements must be supplied in Fortran column-major order rather than C-style row-major order. Changing the element assignment in the for loop to the following fixes the problem, and every number of processes that can form a valid grid now finds the same eigenvalues:

a[(j*nlocal_rows + i)].dr = mat_els[full_row * m + full_col];
a[(j*nlocal_rows + i)].di = 0.0;
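For completeness, a sketch of the full corrected loop with that change applied (same variable names as the code above; the local array a is now indexed column-major, while mat_els stays row-major):

/* Column-major local storage: local element (i, j) lives at offset
   j*nlocal_rows + i, as ScaLAPACK (Fortran) expects. */
for(i = 0; i < nlocal_rows; i++)
  {
    for(j = 0; j < nlocal_cols; j++)
      {
        full_row = i * nproc_rows + my_process_row;
        full_col = j * nproc_cols + my_process_col;
        a[(j*nlocal_rows + i)].dr = mat_els[full_row * m + full_col];
        a[(j*nlocal_rows + i)].di = 0.0;
      }
  }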