Warning: file_get_contents(/data/phpspider/zhask/data//catemap/5/date/2.json): failed to open stream: No such file or directory in /data/phpspider/zhask/libs/function.php on line 167

Warning: Invalid argument supplied for foreach() in /data/phpspider/zhask/libs/tag.function.php on line 1116

Notice: Undefined index: in /data/phpspider/zhask/libs/function.php on line 180

Warning: array_chunk() expects parameter 1 to be array, null given in /data/phpspider/zhask/libs/function.php on line 181
Python PyMC3中的再生Hamilton-1989马尔可夫切换模型_Python_Pymc3_Hidden Markov Models - Fatal编程技术网

Python PyMC3中的再生Hamilton-1989马尔可夫切换模型

Python PyMC3 中复现 Hamilton (1989) 马尔可夫切换模型（标签：python, pymc3, hidden-markov-models）。我试图理解汉密尔顿 1989 年的马尔可夫切换自回归模型，并尝试用贝叶斯方法复现其结果。我编写了多个 PyMC3 模型，最新的一版见下文代码。

我试图理解汉密尔顿1989年的马尔可夫切换自回归模型。我试图用贝叶斯方法重现结果。我使用编写了许多PyMC3模型,最新的迭代可以在下面找到

在没有自回归的情况下,模型收敛到接近汉密尔顿(1.16和-0.36)和实际转移概率的μ值。 然而,当加入自回归时,该模型无法收敛,拟合系数接近汉密尔顿的结果。过渡概率的拟合特别差

我错过了什么

# %%
# Setup cell: load the Hamilton (1989) real-GNP growth-rate series used by
# the Stata/statsmodels Markov-switching examples, and plot it.
import pymc3 as pm
import theano.tensor as tt
import theano.tensor.slinalg as sla  # theano-wrapped scipy linear algebra
import theano.tensor.nlinalg as nla  # theano-wrapped numpy linear algebra (unused below)
import theano

# Work around a clang/gcc narrowing warning-as-error when theano compiles ops.
theano.config.gcc.cxxflags = "-Wno-c++11-narrowing"

import pandas as pd
# Drop the first row: the growth-rate series has no value for the first quarter.
dta = pd.read_stata('https://www.stata-press.com/data/r14/rgnp.dta').iloc[1:]
dta.index = pd.DatetimeIndex(dta.date, freq='QS')  # quarterly (quarter-start) index
dta_hamilton = dta.rgnp  # observed series modeled below

# Plot the data
dta_hamilton.plot(title='Growth rate of Real GNP', figsize=(12,3))

# %%
# NOTE(review): `plt` (matplotlib.pyplot) — and `np` (numpy), if the commented
# line is restored — are never imported in this snippet; this cell raises
# NameError as-is. Add `import matplotlib.pyplot as plt` (and numpy) above.
fig, ax = plt.subplots(figsize=(12, 4))
#plt.plot(np.round(trace["hmm_states"].mean(axis=0)), label="inferred")
plt.plot(dta_hamilton.values, label="true")

# %%
def solve_equilibrium(n_states, p_transition):
    """Return the stationary (equilibrium) distribution of the Markov chain.

    Solves the linear system A^T pi = 1 where
    A = I - P + J  (J = all-ones matrix), a standard trick for computing the
    stationary distribution pi of transition matrix P.  The result is
    registered as a pm.Deterministic, so this must be called inside a
    pm.Model context.

    Parameters
    ----------
    n_states : int
        Number of hidden states (size of the transition matrix).
    p_transition : theano tensor, shape (n_states, n_states)
        Row-stochastic transition matrix.

    Returns
    -------
    theano tensor of shape (n_states,): the stationary distribution.
    """
    # BUG FIX: the original first assigned `A = tt.dmatrix('A')`, a dangling
    # symbolic input that was immediately overwritten — dead code, removed.
    A = tt.eye(n_states) - p_transition + tt.ones(shape=(n_states, n_states))
    p_equilibrium = pm.Deterministic("p_equilibrium", sla.solve(A.T, tt.ones(shape=(n_states))))
    return p_equilibrium

class HMMStates(pm.Categorical):
    """Latent Markov-chain state path for an HMM.

    Subclasses pm.Categorical so PyMC3 treats the path as a discrete random
    vector, but the categorical machinery is bypassed (see __init__): only
    the chain log-probability defined in `logp` is used.
    """

    def __init__(self, p_transition, p_equilibrium, n_states, *args, **kwargs):
        # NOTE: super(pm.Categorical, self) deliberately skips
        # pm.Categorical.__init__ (which would require a `p` argument) and
        # calls the grandparent Discrete initializer instead.
        super(pm.Categorical, self).__init__(*args, **kwargs)
        self.p_transition = p_transition    # (n_states, n_states) row-stochastic matrix
        self.p_equilibrium = p_equilibrium  # stationary distribution for the initial state
        # `k` (number of categories) is required by the Categorical machinery.
        self.k = n_states
        # Discrete distributions must define a mode; state 0 is an arbitrary valid choice.
        self.mode = tt.cast(0,dtype='int64')

    def logp(self, x):
        """Log-probability of a full state path x under the Markov chain."""
        p_eq = self.p_equilibrium
        # Select the transition-matrix row for each previous state x[t-1],
        # so all per-step likelihoods are computed in one vectorized call.
        p_tr = self.p_transition[x[:-1]]

        # the logp of the initial state evaluated against the equilibrium probabilities
        initial_state_logp = pm.Categorical.dist(p_eq).logp(x[0])

        # the logp of the rest of the states: sum over t of log P(x[t] | x[t-1])
        x_i = x[1:]
        ou_like = pm.Categorical.dist(p_tr).logp(x_i)
        transition_logp = tt.sum(ou_like)
        return initial_state_logp + transition_logp

# %%
class HamiltonEmissions(pm.Continuous):
    """Markov-switching AR(4) emission distribution (Hamilton 1989).

    Each observation's mean and scale are selected by the latent state, and
    the state-centered series z = x - mu[state] follows an order-4
    autoregression.
    """

    def __init__(self, states, phi, sigma, mu, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.states = states  # latent state index per observation
        self.phi = phi        # AR coefficients, length 4
        self.mu = mu          # per-state mean
        # NOTE(review): originally commented as "variance", but this value is
        # passed as pm.Normal's `sigma` (a standard deviation) in logp below.
        self.sigma = sigma

    def logp(self, x):
        """
        x: observations
        """
        states = self.states
        sigma = self.sigma[states]  # per-observation scale, chosen by state
        mu = self.mu[states]        # per-observation mean, chosen by state
        phi = self.phi

        z = x - mu  # Centered version of x

        # AR(4) conditional mean for x[t], t >= 4.  Coefficient ordering:
        # z[0:-4] aligns with lag 4 of the target x[4:] and z[3:-1] with
        # lag 1, so phi[0] weights lag 4 and phi[3] weights lag 1.
        ar_mean = \
            phi[0] * z[0:-4] + \
                phi[1] * z[1:-3] + \
                    phi[2] * z[2:-2] + \
                        phi[3] * z[3:-1]

        # Likelihood of the observations that have a full set of 4 lags.
        ar_like = tt.sum(pm.Normal.dist(mu= ar_mean + mu[4:], sigma=sigma[4:]).logp(x[4:]))

        # First four observations have no lags; scored against a zero-mean
        # Normal.  This is a length-4 vector: PyMC3 sums elementwise logps,
        # so adding it to the scalar ar_like is fine.
        boundary_like = pm.Normal.dist(mu=0, sigma=sigma[:4]).logp(x[:4])
        return ar_like + boundary_like
# %%
# Model-specification cell: assemble the Markov-switching AR(4) model.
n_states = 2
with pm.Model() as model:
    # Priors for transition matrix: each row Dirichlet(1,...,1), i.e. uniform
    # over the probability simplex.
    p_transition = pm.Dirichlet("p_transition",
        a=tt.ones((n_states, n_states)),
        shape=(n_states, n_states))

    # Solve for the equilibrium (stationary) distribution; used as the
    # distribution of the initial hidden state.
    p_equilibrium = solve_equilibrium(n_states, p_transition)

    # Latent HMM state path, one state per observation.
    hmm_states = HMMStates(
        "hmm_states",
        p_transition=p_transition,
        p_equilibrium=p_equilibrium,
        n_states=n_states,
        shape=(len(dta_hamilton),)
    )

    # Priors for the per-state mean and scale, and the shared AR coefficients.
    mu = pm.Normal("mu", mu=0, sigma=1, shape=(n_states,))
    sigma = pm.Exponential("sigma", lam=2, shape=(n_states,))
    phi = pm.Normal("phi", 0, 0.5, shape=(4, ))

    # Observed emission likelihood (switching AR(4) on the centered series).
    obs = HamiltonEmissions(
        "emission",
        states=hmm_states,
        mu=mu,
        sigma=sigma,
        phi=phi,
        observed=dta_hamilton
    )
# %%
# Sampling cell: compound step — Metropolis for the continuous parameters,
# BinaryGibbsMetropolis for the binary latent state path.
with model:
    # NOTE(review): `start` is computed but never passed to pm.sample; pass
    # start=start if a MAP initialization is actually wanted.
    start = pm.find_MAP()
    # BUG FIX: the original listed an undefined name `emission` here (the
    # observed RV is bound to the Python variable `obs`), raising NameError.
    # Observed variables are never sampled, so it is simply removed from the
    # step's variable list.
    step1 = pm.Metropolis(vars=[mu, sigma, phi, p_transition])
    step2 = pm.BinaryGibbsMetropolis(vars=[hmm_states])
    trace = pm.sample(2500, cores=1, chains=2, step=[step1, step2], tune=1500)

# %%
# Posterior diagnostics: trace plot of the transition-matrix posterior.
import arviz as az
az.plot_trace(trace, var_names=["p_transition"])
该软件包（原文此处的链接在抓取时丢失）提供一个前向滤波、后向采样（forward-filter backward-sample）的采样器，这对你的问题可能更有效。