C++ MPI_Allreduce with doubles

I am trying to find the global minimum and maximum across all processors for the sort I am working on. I am trying to use MPI_Allreduce:

int rank, nproc;
MPI_Comm_size(MPI_COMM_WORLD,&nproc);
MPI_Comm_rank(MPI_COMM_WORLD,&rank);

vector< vector<double> > buckets(nproc);
double local_min = *std::min_element(values_to_sort.begin(), values_to_sort.end());
double local_max = *std::max_element(values_to_sort.begin(), values_to_sort.end());

int min = 0;
int max = 0;

double global_min;
double global_max;

// Bug: MPI_2DOUBLE_PRECISION describes a pair of doubles (value, rank),
// but local_min/global_min here are single doubles.
MPI_Allreduce(&local_min, &global_min, 1, MPI_2DOUBLE_PRECISION, MPI_MINLOC, MPI_COMM_WORLD);
MPI_Allreduce(&local_max, &global_max, 1, MPI_2DOUBLE_PRECISION, MPI_MAXLOC, MPI_COMM_WORLD);

cout << "local_min " << local_min << " local_max " << local_max << endl;
cout << "global_min " << global_min << " global_max " << global_max << endl;

I did some reading: the MINLOC and MAXLOC operations require the value and the rank to be stored together as a pair, i.e. an array of size two.

My updated code:

int rank, nproc;
MPI_Comm_size(MPI_COMM_WORLD,&nproc);
MPI_Comm_rank(MPI_COMM_WORLD,&rank);

vector< vector<double> > buckets(nproc);
double local_min[2];
local_min[1] = rank;
local_min[0] = *std::min_element(values_to_sort.begin(), values_to_sort.end());

double local_max[2];
local_max[1] = rank;
local_max[0] = *std::max_element(values_to_sort.begin(), values_to_sort.end());

double global_min[2];
double global_max[2];

MPI_Allreduce(&local_min, &global_min, 1, MPI_2DOUBLE_PRECISION, MPI_MINLOC, MPI_COMM_WORLD);
MPI_Allreduce(&local_max, &global_max, 1, MPI_2DOUBLE_PRECISION, MPI_MAXLOC, MPI_COMM_WORLD);

cout << "local_min " << local_min[0] << " local_max " << local_max[0] << endl;
cout << "global_min " << global_min[0] << " global_max " << global_max[0] << endl;

Do you really need MPI_MINLOC and MPI_MAXLOC here? Given the way you state the problem and the way you use the result, plain MPI_MIN and MPI_MAX would do just fine:

double local_min = *std::min_element(values_to_sort.begin(), values_to_sort.end());
double local_max = *std::max_element(values_to_sort.begin(), values_to_sort.end());

double global_min;
double global_max;

MPI_Allreduce(&local_min, &global_min, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
MPI_Allreduce(&local_max, &global_max, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);

cout << "local_min " << local_min << " local_max " << local_max << endl;
cout << "global_min " << global_min << " global_max " << global_max << endl;

If you do want the owning rank as well, you need a different pair type, e.g. double + int. MPI_2DOUBLE_PRECISION is a Fortran MPI datatype and should not be used in C++ code; the corresponding C type is MPI_DOUBLE_INT, which matches a struct of a double followed by an int:

struct double_int {
    double val;   // the extremum itself
    int rank;     // rank that owns it; layout matches MPI_DOUBLE_INT
} local_min, local_max, global_min, global_max;

local_min.val = *std::min_element(values_to_sort.begin(), values_to_sort.end());
local_max.val = *std::max_element(values_to_sort.begin(), values_to_sort.end());
local_min.rank = local_max.rank = rank;

MPI_Allreduce(&local_min, &global_min, 1, MPI_DOUBLE_INT, MPI_MINLOC, MPI_COMM_WORLD);
MPI_Allreduce(&local_max, &global_max, 1, MPI_DOUBLE_INT, MPI_MAXLOC, MPI_COMM_WORLD);

cout << "on process " << rank << " local_min " << local_min.val
     << " local_max " << local_max.val << endl;
cout << "global_min " << global_min.val << " owned by process " << global_min.rank << endl;
cout << "global_max " << global_max.val << " owned by process " << global_max.rank << endl;