
Python TensorFlow Probability error: OperatorNotAllowedInGraphError: iterating over tf.Tensor is not allowed

I am trying to estimate a model in TensorFlow using NUTS by providing it a likelihood function. I have checked that the likelihood function returns reasonable values. I am setting up NUTS following the setup here:

And some examples of setting up priors, etc., here:

My code is in a Colab notebook here:

I am getting the error:

OperatorNotAllowedInGraphError: iterating over tf.Tensor is not allowed: AutoGraph did not convert this function. Try decorating it directly with @tf.function.
This is my first time working with TensorFlow and I am quite lost interpreting this error. It would also be ideal if I could pass the starting parameter values in as a single input (the example I am working from does not do this, but I assume it is possible).
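For context on what the message means: this error is raised when Python-level iteration (a for loop, tuple unpacking, zip(), etc.) is applied to a symbolic tf.Tensor inside graph-compiled code that AutoGraph has not converted. A minimal sketch that reproduces the same class of error (the function name here is illustrative, not from the model above):

import tensorflow as tf

@tf.function(autograph=False)  # with AutoGraph disabled, Python iteration over a Tensor fails
def bad_sum(x):
    total = tf.constant(0.)
    for element in x:  # raises OperatorNotAllowedInGraphError: iterating over tf.Tensor
        total += element
    return total

# bad_sum(tf.constant([1., 2., 3.]))  # triggers the error above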

Update: It looks like I had to change the placement of the @tf.function decorator. The sampler now runs, but it gives the same value for all samples of each parameter. Do I need to pass a joint distribution through a log_prob() function? I am clearly missing something. I can run the likelihood through BFGS optimization and get reasonable results (I have estimated the model via maximum likelihood with fixed parameters in other software). It looks like I need to define the function to return a joint distribution and call log_prob() on it. I can do that if I set it up as a logistic regression (the logit choice model is a difference of logistic distributions), but then I lose the standard closed form.
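For what it's worth, the direction described above would look roughly like the sketch below: express the priors as a tfd.JointDistributionSequential and hand its log_prob to the sampler, rather than calling .sample() on each prior inside the target function (which makes the target independent of the sampler's state). This is a simplified, hedged sketch with placeholder names, not the full model:

import tensorflow_probability as tfp

tfd = tfp.distributions

# Two stand-in parameters; the real model would include all priors and the data likelihood.
joint = tfd.JointDistributionSequential([
    tfd.Normal(loc=0., scale=1.),   # e.g. a_car
    tfd.Normal(loc=0., scale=1.),   # e.g. b_cost
])

def target_log_prob(a_car, b_cost):
    # prior log density; the choice-model log likelihood would be added here
    return joint.log_prob([a_car, b_cost])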

My function is as follows:

import tensorflow as tf
import tensorflow_probability as tfp

# aliases used throughout the code below
tfd = tfp.distributions
tfm = tf.math

@tf.function
def mmnl_log_prob(init_mu_b_time, init_sigma_b_time, init_a_car,
                  init_a_train, init_b_cost, init_scale):

    # Create priors for hyperparameters
    mu_b_time = tfd.Sample(tfd.Normal(loc=init_mu_b_time, scale=init_scale),sample_shape=1).sample()
    # HalfCauchy distributions are too wide for logit discrete choice

    sigma_b_time = tfd.Sample(tfd.Normal(loc=init_sigma_b_time, scale=init_scale),sample_shape=1).sample()


    # Create priors for parameters
    a_car = tfd.Sample(tfd.Normal(loc=init_a_car, scale=init_scale),sample_shape=1).sample()
    a_train = tfd.Sample(tfd.Normal(loc=init_a_train, scale=init_scale),sample_shape=1).sample()

    # a_sm = tfd.Sample(tfd.Normal(loc=init_a_sm, scale=init_scale),sample_shape=1).sample()

    b_cost = tfd.Sample(tfd.Normal(loc=init_b_cost, scale=init_scale),sample_shape=1).sample()
    # Define a heterogeneous random parameter model with MultivariateNormalDiag()
    # Use MultivariateNormalDiagPlusLowRank() to define nests, etc.

    b_time = tfd.Sample(tfd.MultivariateNormalDiag(  # b_time
          loc=mu_b_time,
          scale_diag=sigma_b_time),sample_shape=num_idx).sample()


    # Definition of the utility functions

    V1 = a_train + tfm.multiply(b_time,TRAIN_TT_SCALED) + b_cost * TRAIN_COST_SCALED
    V2 = tfm.multiply(b_time,SM_TT_SCALED) + b_cost * SM_COST_SCALED
    V3 = a_car + tfm.multiply(b_time,CAR_TT_SCALED) + b_cost * CAR_CO_SCALED
    print("Vs",V1,V2,V3)

    # Definition of loglikelihood
    eV1 = tfm.multiply(tfm.exp(V1),TRAIN_AV_SP)
    eV2 = tfm.multiply(tfm.exp(V2),SM_AV_SP)
    eV3 = tfm.multiply(tfm.exp(V3),CAR_AV_SP)
    eVD = eV1 + eV2 + eV3
    print("eVs",eV1,eV2,eV3,eVD)

    l1 = tfm.multiply(tfm.truediv(eV1,eVD),tf.cast(tfm.equal(CHOICE,1),tf.float32))
    l2 = tfm.multiply(tfm.truediv(eV2,eVD),tf.cast(tfm.equal(CHOICE,2),tf.float32))
    l3 = tfm.multiply(tfm.truediv(eV3,eVD),tf.cast(tfm.equal(CHOICE,3),tf.float32))
    ll = tfm.reduce_sum(tfm.log(l1+l2+l3))

    print("ll",ll)

    return ll
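One note on the print() calls in this function: under @tf.function a plain Python print fires only once, while the function is being traced, and shows symbolic tensors rather than values. To log actual values on every call, tf.print is the graph-aware alternative, e.g.:

    tf.print("ll", ll)  # runs inside the graph, printing the computed value on each call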

The sampler is then set up and called as follows:

nuts_samples = 1000
nuts_burnin = 500
chains = 4
## Initial step size
init_step_size=.3
init = [0.,0.,0.,0.,0.,.5]

##
## NUTS (using inner step size averaging step)
##
@tf.function
def nuts_sampler(init):
    nuts_kernel = tfp.mcmc.NoUTurnSampler(
        target_log_prob_fn=mmnl_log_prob,
        step_size=init_step_size,
    )
    adapt_nuts_kernel = tfp.mcmc.DualAveragingStepSizeAdaptation(
        inner_kernel=nuts_kernel,
        num_adaptation_steps=nuts_burnin,
        step_size_getter_fn=lambda pkr: pkr.step_size,
        log_accept_prob_getter_fn=lambda pkr: pkr.log_accept_ratio,
        step_size_setter_fn=lambda pkr, new_step_size: pkr._replace(step_size=new_step_size)
    )

    samples_nuts_, stats_nuts_ = tfp.mcmc.sample_chain(
        num_results=nuts_samples,
        current_state=init,
        kernel=adapt_nuts_kernel,
        num_burnin_steps=100,
        parallel_iterations=5)
    return samples_nuts_, stats_nuts_

samples_nuts, stats_nuts = nuts_sampler(init)
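As mentioned in the update, what got the sampler running was moving the @tf.function decorator onto the sampling routine (as above) rather than onto the log-probability function. A quick way to spot the "all samples identical" symptom is to check the spread of the draws; a minimal check, assuming samples_nuts comes back as a list of tensors, one per parameter:

for draws in samples_nuts:
    tf.print(tf.reduce_mean(draws), tf.math.reduce_std(draws))  # near-zero std means a stuck chain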

My question has been answered! It was just a matter of different nomenclature. I needed to define my model as a softmax function. I knew this was what I had been calling a "logit model", but it just didn't click for me. The following blog post gave me the aha moment:
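For anyone landing on this later: in TFP terms the reformulation amounts to treating the utilities as the logits of a categorical distribution, since the multinomial logit choice probabilities are exactly a softmax over the utilities, P(i) = exp(V_i) / sum_j exp(V_j). A hedged sketch of that likelihood (names illustrative; availability masking omitted):

import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions

def choice_log_likelihood(V1, V2, V3, choice):
    # Stack the utilities into logits; Categorical applies the softmax internally.
    logits = tf.stack([V1, V2, V3], axis=-1)
    # choice is assumed 0-based here (the data above codes choices as 1..3).
    return tf.reduce_sum(tfd.Categorical(logits=logits).log_prob(choice))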

Cross-posted here. See my updated question.