Python: replacing multiprocessing pool.map with mpi4py


I am a beginner with MPI and I am still reading the documentation; however, there is very little material to go on for mpi4py. I have written a code that currently uses the multiprocessing module to run on many cores, but I need to replace this with mpi4py so that I can use more than one node to run my code. My code is below, both with the multiprocessing module and without it.

Using multiprocessing:

import numpy as np
import multiprocessing
import random  # needed by get_coor_ellip() and the random samples below
import time    # needed for the timing calls


start_time = time.time()

E = 0.1
M = 5
n = 1000
G = 1
c = 1
stretch = [10, 1]


#Point-Distribution Generator Function 
def CDF_inv(x, e, m):
    A = 1/(1 + np.log(m/e))
    if x == 1:
        return m
    elif 0 <= x <= A:
        return e * x / A
    elif A < x < 1:
        return e * np.exp((x / A) - 1)

#Elliptical point distribution Generator Function

def get_coor_ellip(dist=CDF_inv, params=[E, M], stretch=stretch):
    R = dist(random.random(), *params)
    theta = random.random() * 2 * np.pi
    return (R * np.cos(theta) * stretch[0], R * np.sin(theta) * stretch[1])


def get_dist_sq(x_array, y_array):
    return x_array**2 + y_array**2


#Function to obtain alpha

def get_alpha(args):
    zeta_list_part, M_list_part, X, Y = args
    alpha_x = 0
    alpha_y = 0
    for key in range(len(M_list_part)):
        z_m_z_x = X - zeta_list_part[key][0]
        z_m_z_y = Y - zeta_list_part[key][1]
        dist_z_m_z = get_dist_sq(z_m_z_x, z_m_z_y)
        alpha_x += M_list_part[key] * z_m_z_x / dist_z_m_z
        alpha_y += M_list_part[key] * z_m_z_y / dist_z_m_z
    return (alpha_x, alpha_y)

#The part of the process containing the loop that needs to be parallelised, where I use pool.map()

if __name__ == '__main__':
    # n processes, scale accordingly
    num_processes = 10
    pool = multiprocessing.Pool(processes=num_processes)
    random_sample = [CDF_inv(x, E, M)
                     for x in [random.random() for e in range(n)]]
    zeta_list = [get_coor_ellip() for e in range(n)]
    x1, y1 = zip(*zeta_list)
    zeta_list = np.column_stack((np.array(x1), np.array(y1)))
    x = np.linspace(-3, 3, 100)
    y = np.linspace(-3, 3, 100)
    X, Y = np.meshgrid(x, y)
    print(len(x)*len(y)*n, 'calculations to be carried out.')
    M_list = np.array([.001 for i in range(n)])
    # split zeta_list, M_list, X, and Y
    zeta_list_split = np.array_split(zeta_list, num_processes, axis=0)
    M_list_split = np.array_split(M_list, num_processes)
    X_list = [X for e in range(num_processes)]
    Y_list = [Y for e in range(num_processes)]

    alpha_list = pool.map(
            get_alpha, zip(zeta_list_split, M_list_split, X_list, Y_list))
    alpha_x = 0  
    alpha_y = 0
    for e in alpha_list:
        alpha_x += e[0] * 4 * G / (c**2)
        alpha_y += e[1] * 4 * G / (c**2)

print("%f seconds" % (time.time() - start_time))
You can use scatter / gather to convert the multiprocessing.map version to MPI. In your case it is useful to prepare one input list per rank. The main difference is that all of the code gets executed by every rank in the first place, so you have to make everything that should only be done by the master rank 0 conditional:

from mpi4py import MPI  # the snippet assumes the functions and constants defined above

if __name__ == '__main__':
    comm = MPI.COMM_WORLD
    if comm.rank == 0:
        random_sample = [CDF_inv(x, E, M)
                         for x in [random.random() for e in range(n)]]
        zeta_list = [get_coor_ellip() for e in range(n)]
        x1, y1 = zip(*zeta_list)
        zeta_list = np.column_stack((np.array(x1), np.array(y1)))
        x = np.linspace(-3, 3, 100)
        y = np.linspace(-3, 3, 100)
        X, Y = np.meshgrid(x, y)
        print(len(x)*len(y)*n, 'calculations to be carried out.')
        M_list = np.array([.001 for i in range(n)])
        # split zeta_list, M_list, X, and Y
        zeta_list_split = np.array_split(zeta_list, comm.size, axis=0)
        M_list_split = np.array_split(M_list, comm.size)
        X_list = [X for e in range(comm.size)]
        Y_list = [Y for e in range(comm.size)]
        work_list = list(zip(zeta_list_split, M_list_split, X_list, Y_list))
    else:
        work_list = None

    my_work = comm.scatter(work_list)
    my_alpha = get_alpha(my_work)

    alpha_list = comm.gather(my_alpha)
    if comm.rank == 0:
        alpha_x = 0  
        alpha_y = 0
        for e in alpha_list:
            alpha_x += e[0] * 4 * G / (c**2)
            alpha_y += e[1] * 4 * G / (c**2)
This works fine as long as each processor gets a similar amount of work. If communication becomes an issue, you might want to split up the data generation among the processors instead of doing everything on the master rank 0; a sketch of that variant follows.
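A minimal sketch of that variant, assuming the helper functions (get_coor_ellip, get_alpha) and the constants (n, G, c) from the question are in scope; the names n_local, zeta_local and M_local are introduced here purely for illustration:

from mpi4py import MPI
import numpy as np

if __name__ == '__main__':
    comm = MPI.COMM_WORLD

    # Each rank generates only its own share of the points; the last rank
    # picks up the remainder so that all n points are covered.
    n_local = n // comm.size + (n % comm.size if comm.rank == comm.size - 1 else 0)
    zeta_local = np.array([get_coor_ellip() for _ in range(n_local)])
    M_local = np.full(n_local, .001)

    # The evaluation grid is cheap to build, so every rank constructs it itself.
    x = np.linspace(-3, 3, 100)
    y = np.linspace(-3, 3, 100)
    X, Y = np.meshgrid(x, y)

    # Each rank computes the deflection contribution of its own points ...
    my_alpha = get_alpha((zeta_local, M_local, X, Y))

    # ... and rank 0 sums the per-rank contributions element-wise.
    alpha_list = comm.gather(my_alpha, root=0)
    if comm.rank == 0:
        alpha_x = sum(e[0] for e in alpha_list) * 4 * G / (c**2)
        alpha_y = sum(e[1] for e in alpha_list) * 4 * G / (c**2)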


Note: something about the code looks bogus, e.g. alpha_[xy] ends up being an np.ndarray, and the serial version runs into an error.

For anyone still interested in similar topics, I highly recommend having a look at the MPIPoolExecutor() class in the mpi4py documentation; a sketch of how it would replace pool.map follows.
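A minimal sketch of that route, assuming get_alpha and the work-list preparation (zeta_list_split, M_list_split, X_list, Y_list) from the question are available; such a script is typically launched with something like mpiexec -n 10 python -m mpi4py.futures script.py:

from mpi4py.futures import MPIPoolExecutor

if __name__ == '__main__':
    # The master process builds the work list exactly as in the pool.map
    # version; the executor hands one tuple of arguments to each worker.
    with MPIPoolExecutor() as executor:
        alpha_list = list(executor.map(
            get_alpha, zip(zeta_list_split, M_list_split, X_list, Y_list)))

    alpha_x = sum(e[0] for e in alpha_list) * 4 * G / (c**2)
    alpha_y = sum(e[1] for e in alpha_list) * 4 * G / (c**2)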

Wow! This works great, thank you! At the moment the data generation does not seem too time-consuming, but I will try distributing it across the processors. alpha_xy is not actually a valid variable; I use alpha_x and alpha_y to derive another quantity called the gradient. It seems to run fine when I run it...