
Java: Why does a program using MPJ Express throw an exception?

Tags: java, mpi, multicore, mpj-express

Here is a program that multiplies a matrix by a vector using MPJ Express. The matrix is split up by rows. But an exception occurs while it runs. Am I doing something wrong?

import java.util.Random;

import mpi.Comm;
import mpi.MPI;

public class Main {
    private static final int rootProcessorRank = 0;
    private static Comm comunicator;
    private static int processorsNumber;
    private static int currentProcessorRank;


    public static void main(String[] args) {
        MPI.Init(args);
        comunicator = MPI.COMM_WORLD;
        currentProcessorRank = comunicator.Rank();
        processorsNumber = comunicator.Size();

        if (currentProcessorRank == rootProcessorRank) {
            rootProcessorAction();
        } else {
            notRootProcessorAction();
        }

        MPI.Finalize();

    }

    public static void rootProcessorAction() {
        int[] matrixVectorSize = new int[] {5};
        int[][] matrix = createAndInitMatrix(matrixVectorSize[0]);
        int[] vector = createAndInitVector(matrixVectorSize[0]);

        for (int i = 1; i < processorsNumber; i++) {
            comunicator.Isend(matrixVectorSize, 0, 1, MPI.INT, i, MPI.ANY_TAG);
            System.out.println("Proc: " + currentProcessorRank + ", send matrixVectorSize");

            comunicator.Isend(vector, 0, vector.length, MPI.INT, i, MPI.ANY_TAG);
            System.out.println("Proc: " + currentProcessorRank + ", send vector");
        }

        int averageRowsPerProcessor = matrix.length / (processorsNumber - 1);
        int[] rowsPerProcessor = new int[processorsNumber];
        int notDistributedRowsNumber = matrix.length;
        for (int i = 1; i < rowsPerProcessor.length; i++) {
            if (i == rowsPerProcessor.length - 1) {
                rowsPerProcessor[i] = notDistributedRowsNumber;
            } else {
                rowsPerProcessor[i] = averageRowsPerProcessor;
                notDistributedRowsNumber -= averageRowsPerProcessor;
            }
        }

        int offset = 0;
        // rowsPerProcessor[0] stays 0: the root keeps no rows for itself
        for (int i = 1; i < rowsPerProcessor.length; i++) {
            int[] processorRows = new int[1];
            processorRows[0] = rowsPerProcessor[i];
            comunicator.Isend(processorRows, 0, 1, MPI.INT, i, MPI.ANY_TAG);
            comunicator.Isend(matrix, offset, processorRows[0], MPI.OBJECT, i, MPI.ANY_TAG);
            offset += rowsPerProcessor[i];
        }

        // here there will be code that receives the sub-results from all processes
    }

    public static void notRootProcessorAction() {
        int[] matrixVectorSize = new int[1];
        int[] rowsNumber = new int[1];
        int[] vector = null;
        int[][] subMatrix = null;

        comunicator.Probe(rootProcessorRank, MPI.ANY_SOURCE);
        comunicator.Recv(matrixVectorSize, 0, 1, MPI.INT, rootProcessorRank, MPI.ANY_TAG);
        System.out.println("Proc: " + currentProcessorRank + ", receive matrixVectorSize");

        vector = new int[matrixVectorSize[0]];
        comunicator.Probe(rootProcessorRank, MPI.ANY_SOURCE);
        comunicator.Recv(vector, 0, vector.length, MPI.INT, rootProcessorRank, MPI.ANY_TAG);
        System.out.println("Proc: " + currentProcessorRank + ", receive vector");

        comunicator.Probe(rootProcessorRank, MPI.ANY_SOURCE);
        comunicator.Recv(rowsNumber, 0, 1, MPI.INT, rootProcessorRank, MPI.ANY_TAG);
        System.out.println("Proc: " + currentProcessorRank + ", receive rowsNumber");
        subMatrix = new int[rowsNumber[0]][rowsNumber[0]];

        comunicator.Probe(rootProcessorRank, MPI.ANY_SOURCE);
        comunicator.Recv(subMatrix, 0, subMatrix.length, MPI.OBJECT, rootProcessorRank, MPI.ANY_TAG);
        System.out.println("Proc: " + currentProcessorRank + ", receive subMatrix");

        int[] result = new int[rowsNumber[0]];
        multiplyMatrixVector(subMatrix, vector, result);

        comunicator.Send(result, 0, result.length, MPI.INT, rootProcessorRank, MPI.ANY_TAG);
    }

    private static void multiplyMatrixVector(int[][] matrix, int[] vector, int[] result) {
        for (int i = 0; i < matrix.length; i++) {
            int summ = 0;
            for (int j = 0; j < matrix[i].length; j++) {
                summ += matrix[i][j] * vector[j];
            }
            result[i] = summ;
        }
    }

    private static int[][] createAndInitMatrix(int size) {
        int[][] matrix = new int[size][size];
        Random random = new Random();
        for (int i = 0; i < matrix.length; i++) {
            for (int j = 0; j < matrix[i].length; j++) {
                matrix[i][j] = random.nextInt(100);
            }
        }
        return matrix;
    }

    private static int[] createAndInitVector(int size) {
        int[] vector = new int[size];
        Random random = new Random();
        for (int i = 0; i < vector.length; i++) {
            vector[i] = random.nextInt(100);
        }
        return vector;
    }
}
Here is the exception:

MPJ Express (0.44) is started in the multicore configuration
java.lang.reflect.InvocationTargetException
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:606)
    at runtime.starter.MulticoreStarter$1.run(MulticoreStarter.java:281)
    at java.lang.Thread.run(Thread.java:745)
Caused by: mpi.MPIException: xdev.XDevException: java.lang.NullPointerException
    at mpi.Comm.isend(Comm.java:944)
    at Main.rootProcessorAction(Main.java:35)
    at Main.main(Main.java:20)
    ... 6 more
Caused by: xdev.XDevException: java.lang.NullPointerException
    at xdev.smpdev.SMPDevice.isend(SMPDevice.java:104)
    at mpjdev.j
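
The trace points into mpi.Comm.isend, called from rootProcessorAction. For reference, the values of the relevant mpi.MPI constants are listed below; note that MPI.ANY_TAG and MPI.ANY_SOURCE are both -2: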
public static final int mpi.MPI.NUM_OF_PROCESSORS = 4
public static int mpi.MPI.UNDEFINED = -1
public static int mpi.MPI.THREAD_SINGLE = 1
public static int mpi.MPI.THREAD_FUNNELED = 2
public static int mpi.MPI.THREAD_SERIALIZED = 3
public static int mpi.MPI.THREAD_MULTIPLE = 4
public static int mpi.MPI.ANY_SOURCE = -2
public static int mpi.MPI.ANY_TAG = -2
public static int mpi.MPI.PROC_NULL = -3
public static int mpi.MPI.BSEND_OVERHEAD = 0
public static int mpi.MPI.SEND_OVERHEAD = 0
public static int mpi.MPI.RECV_OVERHEAD = 0
public static final int mpi.MPI.IDENT = 0
public static final int mpi.MPI.CONGRUENT = 3
public static final int mpi.MPI.SIMILAR = 1
public static final int mpi.MPI.UNEQUAL = 2
public static int mpi.MPI.GRAPH = 1
public static int mpi.MPI.CART = 2
public static int mpi.MPI.TAG_UB = 0
public static int mpi.MPI.HOST = 0
public static int mpi.MPI.IO = 0
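
One likely cause, offered as an assumption rather than a confirmed diagnosis: every Isend in rootProcessorAction (and the final Send in notRootProcessorAction) passes MPI.ANY_TAG as the message tag. In MPI, ANY_TAG is a wildcard that is only meaningful on the receiving side; a send needs a concrete, non-negative tag, and handing it -2 is a plausible source of the NullPointerException inside SMPDevice.isend. The same caveat applies to Probe, whose second parameter is a tag, so passing MPI.ANY_SOURCE there only works because it happens to have the same value as ANY_TAG. Below is a minimal sketch of the exchange with explicit tags; the tag names and values are made up for illustration, not taken from the original program.

// Hypothetical tag values; any non-negative ints work as long as sender and receiver agree.
final int SIZE_TAG = 0, VECTOR_TAG = 1, ROWS_TAG = 2, MATRIX_TAG = 3;

// Root side: blocking sends with explicit tags instead of MPI.ANY_TAG.
for (int i = 1; i < processorsNumber; i++) {
    comunicator.Send(matrixVectorSize, 0, 1, MPI.INT, i, SIZE_TAG);
    comunicator.Send(vector, 0, vector.length, MPI.INT, i, VECTOR_TAG);
}

// Worker side: wildcards belong here; a receive may name the concrete tag or use MPI.ANY_TAG.
comunicator.Recv(matrixVectorSize, 0, 1, MPI.INT, rootProcessorRank, SIZE_TAG);
int[] vector = new int[matrixVectorSize[0]];
comunicator.Recv(vector, 0, vector.length, MPI.INT, rootProcessorRank, MPI.ANY_TAG);

If the non-blocking Isend calls are kept instead, the mpi.Request objects they return should also be stored and waited on (Request.Wait()) before the send buffers are reused or go out of scope.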