Python TensorFlow 1.x: TypeError: unsupported operand type(s) for /: 'NoneType' and 'int'

Tags: python, tensorflow, deep-learning, reinforcement-learning, tflearn

I am new to TensorFlow. I created the following neural network in TensorFlow 1.x:

import tensorflow as tf
import numpy as np

import tflearn

class ActorNetwork(object):
    """
    Input to the network is the state, output is the action
    under a deterministic policy.
    The output layer activation is a tanh to keep the action
    between -action_bound and action_bound
    """

    def __init__(self, sess, state_dim, action_dim, action_bound, learning_rate, tau, batch_size):
        self.sess = sess
        self.s_dim = state_dim
        self.a_dim = action_dim
        self.action_bound = action_bound
        self.learning_rate = learning_rate
        self.tau = tau
        self.batch_size = batch_size

        # Actor Network
        self.inputs, self.out, self.scaled_out = self.create_actor_network()

        self.network_params = tf.trainable_variables()

        # Target Network
        self.target_inputs, self.target_out, self.target_scaled_out = self.create_actor_network()

        self.target_network_params = tf.trainable_variables()[
            len(self.network_params):]

        # Op for periodically updating target network with online network
        # weights
        self.update_target_network_params = \
            [self.target_network_params[i].assign(tf.multiply(self.network_params[i], self.tau) +
                                                  tf.multiply(self.target_network_params[i], 1. - self.tau))
                for i in range(len(self.target_network_params))]

        # This gradient will be provided by the critic network
        self.action_gradient = tf.placeholder(tf.float32, [None, self.a_dim])

        # Combine the gradients here
        self.unnormalized_actor_gradients = tf.gradients(
            self.scaled_out, self.network_params, -self.action_gradient)
        self.actor_gradients = list(map(lambda x: tf.math.divide(x, self.batch_size), self.unnormalized_actor_gradients))

        # Optimization Op
        self.optimize = tf.train.AdamOptimizer(self.learning_rate).\
            apply_gradients(zip(self.actor_gradients, self.network_params))

        self.num_trainable_vars = len(
            self.network_params) + len(self.target_network_params)

    def create_actor_network(self):
        inputs = tflearn.input_data(shape=[None, self.s_dim])
        net = tflearn.fully_connected(inputs, 400)
        net = tflearn.layers.normalization.batch_normalization(net)
        net = tflearn.activations.relu(net)
        net = tflearn.fully_connected(net, 300)
        net = tflearn.layers.normalization.batch_normalization(net)
        net = tflearn.activations.relu(net)
        # Final layer weights are init to Uniform[-3e-3, 3e-3]
        w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)
        out = tflearn.fully_connected(
            net, self.a_dim, activation='tanh', weights_init=w_init)
        # Scale output to -action_bound to action_bound
        scaled_out = tf.multiply(out, self.action_bound)
        return inputs, out, scaled_out

    def train(self, inputs, a_gradient):
        self.sess.run(self.optimize, feed_dict={
            self.inputs: inputs,
            self.action_gradient: a_gradient
        })

    def predict(self, inputs):
        return self.sess.run(self.scaled_out, feed_dict={
            self.inputs: inputs
        })

    def predict_target(self, inputs):
        return self.sess.run(self.target_scaled_out, feed_dict={
            self.target_inputs: inputs
        })

    def update_target_network(self):
        self.sess.run(self.update_target_network_params)

    def get_num_trainable_vars(self):
        return self.num_trainable_vars
When I instantiate it once, it does not raise any error, but the second instantiation does. For example,

with tf.Session() as sess:
    actor1 = ActorNetwork(sess, 1, 2, 1, 0.01, 0.003, 200)
    actor2 = ActorNetwork(sess, 1, 2, 1, 0.01, 0.003, 200)
raises the following error for actor2 only:

TypeError: unsupported operand type(s) for /: 'NoneType' and 'int'

It has something to do with the None values in the lambda function. But why does the first call not raise the error?

Edit: stack trace:

---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-2-2323bc1d5028> in <module>()
      1 with tf.Session() as sess:
      2     actor1 = ActorNetwork(sess, 1, 2, 1, 0.01, 0.003, 200)
----> 3     actor2 = ActorNetwork(sess, 1, 2, 1, 0.01, 0.003, 200)

3 frames
<ipython-input-1-895268594a81> in __init__(self, sess, state_dim, action_dim, action_bound, learning_rate, tau, batch_size)
     48         self.unnormalized_actor_gradients = tf.gradients(
     49             self.scaled_out, self.network_params, -self.action_gradient)
---> 50         self.actor_gradients = list(map(lambda x: tf.math.divide(x, self.batch_size), self.unnormalized_actor_gradients))
     51 
     52         # Optimization Op

<ipython-input-1-895268594a81> in <lambda>(x)
     48         self.unnormalized_actor_gradients = tf.gradients(
     49             self.scaled_out, self.network_params, -self.action_gradient)
---> 50         self.actor_gradients = list(map(lambda x: tf.math.divide(x, self.batch_size), self.unnormalized_actor_gradients))
     51 
     52         # Optimization Op

/tensorflow-1.15.2/python3.6/tensorflow_core/python/util/dispatch.py in wrapper(*args, **kwargs)
    178     """Call target, and fall back on dispatchers if there is a TypeError."""
    179     try:
--> 180       return target(*args, **kwargs)
    181     except (TypeError, ValueError):
    182       # Note: convert_to_eager_tensor currently raises a ValueError, not a

/tensorflow-1.15.2/python3.6/tensorflow_core/python/ops/math_ops.py in divide(x, y, name)
    323     return DivideDelegateWithName(x, name) / y
    324   else:
--> 325     return x / y
    326 
    327 

TypeError: unsupported operand type(s) for /: 'NoneType' and 'int'

Your problem is this:

self.network_params = tf.trainable_variables()

And this will certainly cause you problems too:

self.target_network_params = tf.trainable_variables()[
    len(self.network_params):]
The problem is that you are creating two models in the same TensorFlow graph. When you call tf.trainable_variables(), you get all the trainable variables in the graph. The first time, if you have just created the model with self.create_actor_network(), those are exactly its variables. But the second time, the list will include the variables of the second ActorNetwork and those of the first one. Obviously, there is no gradient between the variables of the first network and the output of the second one, so tf.gradients produces some None results, which causes the error. The simplest solution is to put each network in a different graph, for example:

with tf.Graph().as_default() as graph1, tf.Session() as sess1:
    actor1 = ActorNetwork(sess1, 1, 2, 1, 0.01, 0.003, 200)
with tf.Graph().as_default() as graph2, tf.Session() as sess2:
    actor2 = ActorNetwork(sess2, 1, 2, 1, 0.01, 0.003, 200)
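To make the None part concrete, here is a minimal sketch (toy variables, assuming TensorFlow 1.x) of how tf.gradients behaves when a variable does not influence the requested output; dividing such a None by batch_size is exactly what raises the TypeError above:

import tensorflow as tf

with tf.Graph().as_default():
    a = tf.Variable(1.0)  # stands in for the first network's variables
    b = tf.Variable(2.0)  # stands in for the second network's variables
    y = 3.0 * b           # "output" that depends only on b
    grads = tf.gradients(y, [a, b])
    print(grads)          # [None, <tf.Tensor ...>]: no path from a to y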
You could also do this within the class, which would be safer, although then you could not create the session in advance:

def __init__(self, state_dim, action_dim, action_bound, learning_rate, tau, batch_size):
    self.graph = tf.Graph()
    with self.graph.as_default():  # Add at the beginning of all methods
        self.sess = tf.Session()
        # ...
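Following the "Add at the beginning of all methods" comment above, the other methods would get the same wrapper; for example, predict from the class in the question could become (a sketch under that assumption):

def predict(self, inputs):
    with self.graph.as_default():  # enter this instance's own graph
        return self.sess.run(self.scaled_out, feed_dict={
            self.inputs: inputs
        })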
However, if you want to have both models in the same graph, you need to change your code further to avoid using tf.trainable_variables(). For example, you could keep track of the variables yourself:

def create_actor_network(self):
    all_vars = []  # list of model variables
    inputs = tflearn.input_data(shape=[None, self.s_dim])
    net = tflearn.fully_connected(inputs, 400)
    # Save the layer variables
    all_vars.append(net.W)
    all_vars.append(net.b)
    # ...
    return inputs, out, scaled_out, all_vars  # also return the variable list
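With that change, __init__ could then take the variable lists returned by create_actor_network instead of slicing tf.trainable_variables(); a sketch of the affected lines:

# In __init__, replacing the tf.trainable_variables() calls:
self.inputs, self.out, self.scaled_out, self.network_params = \
    self.create_actor_network()
self.target_inputs, self.target_out, self.target_scaled_out, \
    self.target_network_params = self.create_actor_network()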

There is a module that offers some helpers to make this simpler, although it is nothing too complicated. In any case, I would suggest you avoid TFLearn if you can, as it is unmaintained and has been superseded by Keras (where you would simply use .weights / .trainable_weights).
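For illustration, each Keras model object tracks its own variables, so two instances in the same graph no longer get mixed together (a minimal sketch with a hypothetical toy model):

import tensorflow as tf
from tensorflow import keras

model1 = keras.Sequential([keras.layers.Dense(4, input_shape=(2,))])
model2 = keras.Sequential([keras.layers.Dense(4, input_shape=(2,))])
print(len(model1.trainable_weights))  # 2: only model1's kernel and bias
print(len(model2.trainable_weights))  # 2: model2 never sees model1's variables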

Please add the stack trace of the error.

@jdehesa Added it. Hi, following your suggestion I wrote the code in TF 2.0. Could you take a look and let me know whether the two are really doing the same thing?

@kosa As far as I can tell this looks good; unless I am missing some detail, I think it should do the same as what you had in TFLearn. I am not sure whether the final scaled_out = tf.multiply(...) can be done like that, or whether it would be better to do the multiply in a Keras Lambda layer; not sure whether that makes a difference. Keras also offers some utilities, such as inspecting the structure of the model, checking the number of parameters, their sizes, etc.

I am using the summary utility to check the parameters and visualize the layers. Why would using tf.multiply(...) be wrong?

@kosa As I said, I am not sure whether it makes a difference as a layer, probably not; it will appear as an element of the model structure (e.g. with summary), but since the operation has no weights or anything, I think that may be the only practical difference. If the model compiles and trains without complaint, it must be correct.

The TF 2.0 version referenced above:
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.layers import Input

class ActorNetwork(object):
  def __init__(self, state_dim, action_dim, action_bound, learning_rate, tau, batch_size):
    self.state_dim = state_dim
    self.action_dim = action_dim
    self.action_bound = action_bound
    self.learning_rate = learning_rate
    self.tau = tau
    self.batch_size = batch_size
    self.optimizer = tf.keras.optimizers.Adam(self.learning_rate)

    #actor network
    self.inputs, self.out, self.scaled_out = self.create_actor_network()
    self.actor_model = keras.Model(inputs=self.inputs, outputs=self.scaled_out, name='actor_network')
    self.network_params = self.actor_model.trainable_variables

    #target actor network
    self.target_inputs, self.target_out, self.target_scaled_out = self.create_actor_network()
    self.target_actor_model = keras.Model(inputs=self.target_inputs, outputs=self.target_scaled_out, name='target_actor_network')
    self.target_network_params = self.target_actor_model.trainable_variables


  def create_actor_network(self):
    inputs = Input(shape = (self.state_dim,), batch_size = None, name = "actor_input_state")

    net = layers.Dense(400, name = 'actor_dense_1a')(inputs)
    net = layers.BatchNormalization()(net)
    net = layers.Activation(activation=tf.nn.relu)(net)

    net = layers.Dense(300, name = 'actor_dense_1b')(net)
    net = layers.BatchNormalization()(net)
    net = layers.Activation(activation=tf.nn.relu)(net)

    # net = layers.Dense(20, name = 'actor_dense_1c')(net)
    # net = layers.BatchNormalization()(net)
    # net = layers.Activation(activation=tf.nn.relu)(net)

    # net = layers.Dense(10, name = 'actor_dense_1d')(net)
    # net = layers.BatchNormalization()(net)
    # net = layers.Activation(activation=tf.nn.relu)(net)
    
    w_init = tf.random_uniform_initializer(minval=-0.003, maxval=0.003, seed=None)
    out = layers.Dense(self.action_dim, activation='tanh', name = 'actor_dense_2', kernel_initializer = w_init)(net)
    scaled_out = tf.multiply(out, self.action_bound, name = "actions_scaling")
    return inputs, out, scaled_out
  
  def update_target_network(self):
    self.update_target_network_params = [self.target_network_params[i].assign(tf.multiply(self.network_params[i], self.tau) + tf.multiply(self.target_network_params[i], 1-self.tau)) for i in range(len(self.target_network_params))]
  
  def train(self, inputs, a_gradient):
    with tf.GradientTape() as self.tape:
      self.prediction = self.actor_model(inputs)
    self.unnormalized_actor_gradients = self.tape.gradient(self.prediction, self.network_params, output_gradients = -a_gradient)
    self.actor_gradients = list(map(lambda x: tf.math.divide(x, self.batch_size), self.unnormalized_actor_gradients))
    self.optimizer.apply_gradients(zip(self.actor_gradients, self.network_params))
    
  def predict(self, inputs):
    return self.actor_model(inputs)

  def predict_target(self, inputs):
    return self.target_actor_model(inputs)
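For reference, the Lambda-layer alternative discussed in the comments would only change the scaling step of create_actor_network; a self-contained sketch (hypothetical action_bound value, behavior expected to be equivalent):

import tensorflow as tf
from tensorflow.keras import layers

action_bound = 1.0  # hypothetical bound, matching the constructor argument
out = layers.Input(shape=(2,))
# Same scaling as tf.multiply(out, action_bound), but as an explicit Keras layer:
scaled_out = layers.Lambda(lambda x: x * action_bound, name="actions_scaling")(out)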