How to create Bayesian data fusion in Python?

Tags: python, distribution, bayesian, pymc3, sensor-fusion

I have been looking into data-fusion methods, and what caught my eye is the idea of using a Kalman filter, which fuses the data by working with the means and variances of Gaussian distributions and performs prediction and correction steps going from a weaker sensor to a stronger/more accurate one. To learn more about fusion techniques, I looked at the following GitHub links:

  • In the first link I found that they talk about the discrete Bayes filter, but they do not mention a continuous Bayes filter. So I wanted to follow the same steps as in the Kalman-filter idea, but implement a continuous Bayesian filter with the help of the PyMC3 package. The relevant steps can be found in the second link, and the code is shown below:

    import numpy as np
    import matplotlib.pyplot as plt
    from scipy.stats import norm

    # mean0, var0, varMove, varSensor, distances, positions and x are defined in the
    # linked tutorial (initial state, motion noise, sensor noise, measurements, x-grid).
    mean = mean0
    var = var0

    def predict(var, mean, varMove, meanMove):
        new_var = var + varMove
        new_mean = mean + meanMove
        return new_var, new_mean

    def correct(var, mean, varSensor, meanSensor):
        new_mean = (varSensor * mean + var * meanSensor) / (var + varSensor)
        new_var = 1 / (1 / var + 1 / varSensor)
        return new_var, new_mean

    for m in range(len(distances)):
        # Predict
        var, mean = predict(var, mean, varMove, distances[m])
        # print('mean: %.2f\tvar: %.2f' % (mean, var))
        # norm.pdf expects a standard deviation, hence the square root of the variance
        plt.plot(x, norm.pdf(x, mean, np.sqrt(var)), label='%i. step (Prediction)' % (m + 1))

        # Correct
        var, mean = correct(var, mean, varSensor, positions[m])
        print('After correction:  mean= %.2f\tvar= %.2f' % (mean, var))
        plt.plot(x, norm.pdf(x, mean, np.sqrt(var)), label='%i. step (Correction)' % (m + 1))
    
So, based on the data I am using, I implemented the approach from the following link:

From the link above and its list of distributions, the Student's t distribution fits my data (a quick check of such a fit is sketched right below). The idea for implementing the data fusion is to take the initial data, fit a first model to it, feed the parameters of that first model into the prediction model, and then feed the parameters of the prediction model into the correction model. Below is my fusion approach (I apologize for the lengthy code).
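
As a quick check that a Student's t really describes the data, scipy can fit the three t parameters directly. This is only an illustration on placeholder data (my assumption, not the question's actual sensor readings):

    import numpy as np
    import scipy.stats as st

    # Placeholder data standing in for one sensor's readings.
    data = np.random.standard_t(df=5, size=1000) * 2.0 + 10.0

    # st.t.fit returns the maximum-likelihood (df, loc, scale) of a Student's t.
    df_fit, loc_fit, scale_fit = st.t.fit(data)
    print('df = %.2f, loc = %.2f, scale = %.2f' % (df_fit, loc_fit, scale_fit))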

However, I am not sure whether I am performing these steps correctly. Is my approach right? If not, how can I modify the code? Also, if possible, could you point me to a tutorial that explains the idea of multiplying two distributions and obtaining the loc and scale of the combined probability distribution?
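
Regarding the multiplication of two distributions: for Gaussians this is exactly what the correct() step in the first code block computes. The product of two Gaussian densities is, up to normalization, again a Gaussian whose precision (1/variance) is the sum of the two precisions and whose mean is the precision-weighted average of the two means. A minimal sketch of that calculation (my own illustration, not taken from the linked tutorials):

    def fuse_gaussians(mu1, var1, mu2, var2):
        # Product of N(mu1, var1) and N(mu2, var2) is proportional to a Gaussian
        # with summed precisions and a precision-weighted mean.
        fused_var = 1.0 / (1.0 / var1 + 1.0 / var2)
        fused_mu = (mu1 / var1 + mu2 / var2) * fused_var
        return fused_mu, fused_var

    # Example: a weak sensor (variance 4) fused with a precise sensor (variance 1).
    mu_f, var_f = fuse_gaussians(10.0, 4.0, 12.0, 1.0)
    print('fused loc = %.2f, fused scale = %.2f' % (mu_f, var_f ** 0.5))

For non-Gaussian densities such as the Student's t the product generally has no closed form, which is one reason to fall back on sampling with PyMC3 instead.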

Edit 1:

I talked to someone about this concept, and they mentioned doing a Bayesian update. How can I implement this kind of update in code using the PyMC3 package? (One possible updating scheme is sketched after the code listing below.)

    def S2_0_Bayesian_Interface(data):
    ########################################################################################################################
        with pm.Model() as model_0:
                # Prior Distributions for unknown model parameters:
                nu_0 = pm.HalfNormal('nu_0', sigma=10)
                sigma_0 = pm.HalfNormal('sigma_0', sigma=10)
                mu_0 = pm.Normal('mu_0', mu=0, sigma=10)
    
                # Observed data comes from the likelihood (sampling distribution) of the observations:
                observed_data_0 = pm.StudentT('observed_data_0', nu=nu_0, mu=mu_0, sigma=sigma_0, observed=data)
        
                # draw 5000 posterior samples
                trace_0 = pm.sample(draws=5000, tune=3000, chains=2, cores=1)
        
                # Obtaining Posterior Predictive Sampling:
                post_pred_0 = pm.sample_posterior_predictive(trace_0, samples=3000)
                print(post_pred_0['observed_data_0'].shape)
                print('\nSummary: ')
                print(pm.stats.summary(data=trace_0))
                print(pm.stats.summary(data=post_pred_0))
    ########################################################################################################################
        return trace_0, post_pred_0
    
    def S2_P_Bayesian_Interface(nu, mu, sigma, data):
    ########################################################################################################################
        with pm.Model() as model_P:
                # Prior Distributions for unknown model parameters:
                nu_P = pm.HalfNormal('nu_P', sigma=sigma)
                sigma_P = pm.HalfNormal('sigma_P', sigma=sigma)
                mu_P = pm.Normal('mu_P', mu=mu, sigma=sigma)
    
                # Observed data comes from the likelihood (sampling distribution) of the observations:
                observed_data_P = pm.StudentT('observed_data_P', nu=nu_P, mu=mu_P, sigma=sigma_P, observed=data)
        
                # draw 5000 posterior samples
                trace_P = pm.sample(draws=5000, tune=3000, chains=2, cores=1)
        
                # Obtaining Posterior Predictive Sampling:
                post_pred_P = pm.sample_posterior_predictive(trace_P, samples=3000)
                print(post_pred_P['observed_data_P'].shape)
                print('\nSummary: ')
                print(pm.stats.summary(data=trace_P))
                print(pm.stats.summary(data=post_pred_P))
    ########################################################################################################################
        return trace_P, post_pred_P
    
    def S1_C_Bayesian_Interface(nu, mu, sigma, data):
    ########################################################################################################################
        with pm.Model() as model_C:
                # Prior Distributions for unknown model parameters:
                nu_C = pm.HalfNormal('nu_C', sigma=sigma)
                sigma_C = pm.HalfNormal('sigma_C', sigma=sigma)
                mu_C = pm.Normal('mu_C', mu=mu, sigma=sigma)
    
                # Observed data comes from the likelihood (sampling distribution) of the observations:
                observed_data_C = pm.StudentT('observed_data_C', nu=nu_C, mu=mu_C, sigma=sigma_C, observed=data)
        
                # draw 5000 posterior samples
                trace_C = pm.sample(draws=5000, tune=3000, chains=2, cores=1)
        
                # Obtaining Posterior Predictive Sampling:
                post_pred_C = pm.sample_posterior_predictive(trace_C, samples=3000)
                print(post_pred_C['observed_data_C'].shape)
                print('\nSummary: ')
                print(pm.stats.summary(data=trace_C))
                print(pm.stats.summary(data=post_pred_C))
    ########################################################################################################################
        return trace_C, post_pred_C
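
    # NOTE: make_pdf() used in the loop below is not shown in the question. The helper
    # sketched here is only my guess at what it might look like, reconstructed from how
    # it is called (data, dist, params=[nu, mu, sigma], size) and from the three values
    # it returns (a PDF series plus the x and y axes of that PDF). It assumes the usual
    # imports from the rest of the script: numpy as np, pandas as pd, scipy.stats as st.
    def make_pdf(data, dist, params, size=10000):
        # The data argument is accepted only to mirror the call sites; this sketch does not use it.
        # params is assumed to be [df (nu), loc (mu), scale (sigma)] for scipy.stats.t.
        arg, loc, scale = params[:-2], params[-2], params[-1]
        # Span the x axis between the 1st and 99th percentile of the fitted distribution.
        start = dist.ppf(0.01, *arg, loc=loc, scale=scale)
        end = dist.ppf(0.99, *arg, loc=loc, scale=scale)
        x_axis = np.linspace(start, end, size)
        y_axis = dist.pdf(x_axis, *arg, loc=loc, scale=scale)
        pdf = pd.Series(y_axis, index=x_axis)
        return pdf, x_axis, y_axis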
    
    for m in range(len(S2_data_list)):
        print('\n')
        print('Accessing list number: ', m)
        # print(S2_data_list)
        # print(len(S2_data_list))
        # Initializing the PDF Distribution:
        print('#######################################################################################################')
        print('Designing the Bayesian PDF for initial Sensor 2:')
        trace_S2_0, post_pred_S2_0 = S2_0_Bayesian_Interface(data=S2_data_list[0])
        best_dist = getattr(st, 't')
        param_S2_0 = [np.mean(trace_S2_0['nu_0']), np.mean(trace_S2_0['mu_0']), np.mean(trace_S2_0['sigma_0'])]
        print('Parameters values = ', param_S2_0)
        pdf_S2_0, B_x_axis_pdf_S2_0, y_axis_pdf_S2_0 = make_pdf(data=S2_data_list[0], dist=best_dist, params=param_S2_0, size=len(trace_S2_0))
        print('#######################################################################################################')
        plt.plot(B_x_axis_pdf_S2_0, y_axis_pdf_S2_0, label='%i. step (Initial[S2])' % (m + 1))
    
        # Predict:
        print('#######################################################################################################')
        print('Designing the Bayesian PDF for predictive Sensor 2:')
        trace_S2_P, post_pred_S2_P = S2_P_Bayesian_Interface(nu=np.mean(trace_S2_0['nu_0']), mu=np.mean(trace_S2_0['mu_0']), sigma=np.mean(trace_S2_0['sigma_0']), data=S2_data_list[m])
        best_dist = getattr(st, 't')
        param_S2_P = [np.mean(trace_S2_P['nu_P']), np.mean(trace_S2_P['mu_P']), np.mean(trace_S2_P['sigma_P'])]
        print('Parameters values = ', param_S2_P)
        pdf_S2_P, B_x_axis_pdf_S2_P, y_axis_pdf_S2_P = make_pdf(data=S2_data_list[m], dist=best_dist, params=param_S2_P, size=len(trace_S2_P))
        print('#######################################################################################################')
        plt.plot(B_x_axis_pdf_S2_P, y_axis_pdf_S2_P, label='%i. step (Prediction[S2])' % (m + 1))
    
        # Correct:
        print('#######################################################################################################')
        print('Designing the Bayesian PDF for correction Sensor 1:')
        trace_S1_C, post_pred_S1_C = S1_C_Bayesian_Interface(nu=np.mean(trace_S2_P['nu_P']), mu=np.mean(trace_S2_P['mu_P']), sigma=np.mean(trace_S2_P['sigma_P']), data=S1_data_list[m])
        best_dist = getattr(st, 't')
        param_S1_C = [np.mean(trace_S1_C['nu_C']), np.mean(trace_S1_C['mu_C']), np.mean(trace_S1_C['sigma_C'])]
        print('Parameters values = ', param_S1_C)
        pdf_S1_C, B_x_axis_pdf_S1_C, y_axis_pdf_S1_C = make_pdf(data=S1_data_list[m], dist=best_dist, params=param_S1_C, size=len(trace_S1_C))
        print('#######################################################################################################')
        plt.plot(B_x_axis_pdf_S1_C, y_axis_pdf_S1_C, label='%i. step (Correction[S1])' % (m + 1))
        corr_pdf_S1 = y_axis_pdf_S1_C
        corr_pdf_S1_list.append(corr_pdf_S1)
        print('Corrected PDF: ', corr_pdf_S1)
        print('Length of Corrected PDF list = ', len(corr_pdf_S1_list))
        x_corr_pdf_list.append(B_x_axis_pdf_S1_C)
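
One common way the "Bayesian update" suggestion is implemented in PyMC3 is to turn the posterior samples of each parameter into an empirical prior for the next model, for example with pm.Interpolated as in the PyMC3 "Updating priors" example. The sketch below is only an illustration of that scheme: from_posterior and update_step are helper names introduced here (not the question's code and not part of PyMC3), and it assumes every step names its parameters 'nu', 'mu' and 'sigma':

    import numpy as np
    import pymc3 as pm
    from scipy import stats

    def from_posterior(name, samples, lower=None):
        # Empirical prior built from posterior samples via a kernel density estimate.
        # The support is widened a little so values outside the sampled range keep a
        # small but nonzero prior density; `lower` can clip it for positive parameters.
        smin, smax = np.min(samples), np.max(samples)
        width = smax - smin
        x = np.linspace(smin, smax, 100)
        y = stats.gaussian_kde(samples)(x)
        lo = x[0] - 3 * width if lower is None else max(x[0] - 3 * width, lower)
        x = np.concatenate([[lo], x, [x[-1] + 3 * width]])
        y = np.concatenate([[0], y, [0]])
        return pm.Interpolated(name, x, y)

    def update_step(trace_prev, data):
        # The posterior of the previous step becomes the prior of the current step,
        # i.e. one predict/correct-style Bayesian update per batch of observations.
        with pm.Model() as model:
            nu = from_posterior('nu', trace_prev['nu'], lower=1e-3)
            mu = from_posterior('mu', trace_prev['mu'])
            sigma = from_posterior('sigma', trace_prev['sigma'], lower=1e-3)
            pm.StudentT('obs', nu=nu, mu=mu, sigma=sigma, observed=data)
            trace = pm.sample(draws=2000, tune=1000, chains=2, cores=1)
        return trace

Each call to update_step would then play the role of one prediction/correction cycle: the trace from the previous batch (for example Sensor 2) becomes the prior, and the next batch of observations (for example Sensor 1) updates it.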