Python TensorFlow - InvalidArgumentError: You must feed a value for a placeholder tensor

Edit: I am fairly certain that the following line is the source of the error:

unstacked_observation_outputs = tf.nn.relu(tf.nn.xw_plus_b(unstacked_observation_outputs[
            tf.reduce_max(length(observation_history)).eval(session = self.sess) - 1], 
                weights['second_observation'], biases['second_observation']))
In particular, the problem is the .eval(session = self.sess) call applied to tf.reduce_max(length(observation_history)), i.e. the index expression tf.reduce_max(length(observation_history)).eval(session = self.sess) - 1.
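For context, here is a minimal sketch of the same failure mode (a hypothetical toy graph, not the classes below): calling .eval() on a tensor that depends on a placeholder forces TensorFlow to run the graph, and without a feed_dict for that placeholder it raises exactly this InvalidArgumentError.

import tensorflow as tf

sess = tf.Session()
x = tf.placeholder(tf.float32, [None, 3])           # nothing has been fed to x yet
max_value = tf.cast(tf.reduce_max(x), tf.int32)     # depends on the placeholder

# This raises InvalidArgumentError ("You must feed a value for placeholder
# tensor ...") because .eval() runs the graph without a value for x:
#   max_value.eval(session = sess)

# Feeding the placeholder makes the same call succeed:
print(max_value.eval(feed_dict = {x: [[1.0, 2.0, 3.0]]}, session = sess))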

I have two very similar classes. One is called Actor() and the other is called Critic(). When I instantiate an Actor object, as shown below:

class Actor(object):

    def __init__(self, sess, observation_dimension, action_dimension, learning_rate, tau):

        self.sess = sess
        self.observation_dimension = observation_dimension
        self.action_dimension = action_dimension
        self.learning_rate = learning_rate
        self.tau = tau
        self.lstm_cell = rnn.BasicLSTMCell(lstm_hidden_units)

        with tf.variable_scope('lstm') as scope:
            self.observation_history, self.action_history = self.depthless_lstm()
            self.network_parameters = tf.trainable_variables()
            scope.reuse_variables()
            self.target_observation_history, self.target_action_history = self.depthless_lstm()
            self.target_network_parameters = tf.trainable_variables()[len(self.network_parameters):]

    def depthless_lstm(self):
        state = tf.placeholder(tf.float32, [None, 3600, self.observation_dimension])

        weights = {
        '0': tf.get_variable('weights_0', [lstm_hidden_units, hidden_units_01], initializer = tf.random_normal_initializer()),
        '1': tf.get_variable('weights_1', [hidden_units_01, 1], initializer = tf.random_normal_initializer())
        }
        biases = {
        '0': tf.get_variable('biases_0', [hidden_units_01], initializer = tf.random_normal_initializer()),
        '1': tf.get_variable('biases_1', [1], initializer = tf.random_normal_initializer())
        }

        lstm_outputs, learned_state = tf.nn.dynamic_rnn(self.lstm_cell, state, dtype = tf.float32)

        unstacked_lstm_outputs = tf.unstack(lstm_outputs, axis = 1)

        list_of_hidden_01_outputs = [tf.nn.relu(tf.nn.xw_plus_b(matrix, weights['0'], biases['0'])) 
        for matrix in unstacked_lstm_outputs]

        actions = [tf.nn.softmax(tf.nn.xw_plus_b(matrix, weights['1'], biases['1'])) 
        for matrix in list_of_hidden_01_outputs]

        actions = [actions[n] for n in range(len(actions)) if (n % frequency) == 1]

        return state, actions

actor = Actor(sess, observation_dimension, action_dimension, learning_rate, tau)
I do not get any errors. However, when I instantiate a Critic object, as shown below:

class Critic(object):

    def __init__(self, sess, observation_dimension, action_dimension, learning_rate, tau,
                 model, number_actor_variables):

        self.sess = sess
        self.observation_dimension = observation_dimension
        self.action_dimension = action_dimension
        self.learning_rate = learning_rate
        self.tau = tau

        self.lstm_observation_cell = rnn.BasicLSTMCell(lstm_observation_units)

        with tf.variable_scope('next_lstm') as scope:
            self.observation_history, self.action, self.critique = self.depthless_lstm_critic()
            self.network_parameters = tf.trainable_variables()[number_actor_variables:]
            scope.reuse_variables()
            self.target_observation_history, self.target_action, self.target_critique = self.depthless_lstm_critic()
            self.target_network_parameters = tf.trainable_variables()[(len(self.network_parameters)
                + number_actor_variables):]

    def depthless_lstm_critic(self):
        observation_history = tf.placeholder(tf.float32, [None, 3600, self.observation_dimension])
        action = tf.placeholder(tf.float32, [None, self.action_dimension])

        weights = {
        'first_observation': tf.get_variable('first_observation', [self.observation_dimension, observation_hidden_units01], 
        initializer = tf.random_normal_initializer()),
        'first_action': tf.get_variable('first_action', [self.action_dimension, action_hidden_units01], 
        initializer = tf.random_normal_initializer()),
        'second_observation': tf.get_variable('second_observation', [lstm_observation_units, observation_hidden_units02], 
        initializer = tf.random_normal_initializer()),
        'second_action': tf.get_variable('second_action', [action_hidden_units01, action_hidden_units02], 
        initializer = tf.random_normal_initializer()),
        'combined_observation': tf.get_variable('combined_observation', [observation_hidden_units02, combined_units], 
        initializer = tf.random_normal_initializer()),
        'combined_action': tf.get_variable('combined_action', [action_hidden_units02, combined_units], 
        initializer = tf.random_normal_initializer()),
        'final': tf.get_variable('final', [combined_units, 1], 
        initializer = tf.random_normal_initializer())
        }
        biases = {
        'first_observation': tf.get_variable('_first_observation', [observation_hidden_units01], 
        initializer = tf.random_normal_initializer()),
        'first_action': tf.get_variable('_first_action', [action_hidden_units01], 
        initializer = tf.random_normal_initializer()),
        'second_observation': tf.get_variable('_second_observation', [observation_hidden_units02], 
        initializer = tf.random_normal_initializer()),
        'second_action': tf.get_variable('_second_action', [action_hidden_units01], 
        initializer = tf.random_normal_initializer()),
        'combined': tf.get_variable('_combined', [combined_units], 
        initializer = tf.random_normal_initializer()),
        'final': tf.get_variable('_final', [1], 
        initializer = tf.random_normal_initializer())
        }

        unstacked_observation_history = tf.unstack(observation_history, axis = 1)

        unstacked_observation_history = [tf.nn.relu(tf.nn.xw_plus_b(vector, 
        weights['first_observation'], biases['first_observation'])) 
            for vector in unstacked_observation_history]

        learned_action = tf.nn.relu(tf.nn.xw_plus_b(action, weights['first_action'], biases['first_action']))

        stacked_observation_history = tf.stack(unstacked_observation_history, axis = 1)

        lstm_observation_outputs, observation_states = tf.nn.dynamic_rnn(self.lstm_observation_cell, 
        stacked_observation_history, dtype = tf.float32, 
            sequence_length = length(stacked_observation_history))

        unstacked_observation_outputs = tf.unstack(lstm_observation_outputs, axis = 1)

        unstacked_observation_outputs = tf.nn.relu(tf.nn.xw_plus_b(unstacked_observation_outputs[
            tf.reduce_max(length(observation_history)).eval(session = self.sess) - 1], 
                weights['second_observation'], biases['second_observation']))

        learned_action = tf.nn.relu(tf.nn.xw_plus_b(learned_action, weights['second_action'], 
        biases['second_action']))

        combined_results = tf.nn.relu(tf.matmul(weights['combined_observation'], 
        unstacked_observation_outputs) + tf.nn.xw_plus_b(learned_action, weights['combined_action'], 
            biases['combined']))

        critique = tf.nn.xw_plus_b(combined_results, weights['final'], biases['final'])

        return observation_history, action, critique

critic = Critic(sess, observation_dimension, action_dimension, learning_rate, tau, 
    model, number_actor_variables)
I get the following error message:

InvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'next_lstm/Placeholder' with dtype float
[[Node: next_lstm/Placeholder = Placeholder[dtype=DT_FLOAT, shape=[], _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
and the traceback:

Caused by op 'next_lstm/Placeholder', defined at:
File "test_critic.py", line 89, in <module>
model, number_actor_variables)
File "test_critic.py", line 15, in __init__
self.observation_history, self.action, self.critique = 
self.depthless_lstm_critic()
File "test_critic.py", line 23, in depthless_lstm_critic
observation_history = tf.placeholder(tf.float32, [None, 3600, 
self.observation_dimension])

The traceback indicates that the problem is with the placeholder observation_history. But this part of the code appears to be identical to what I wrote for the Actor() class, which does not produce an error message.

observation_history is a placeholder, which means that while the graph is being built it is an empty node waiting for actual values. So you need to feed some values to this placeholder:

Tensor.eval({self.placeholderA : someValueA, self.placeholderB : someValueB ...}, self.sess)

In your case, you need to feed some actual values for observation_history.
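Concretely, here is a sketch of two ways to do that; obs_batch and act_batch are made-up numpy arrays, and the symbolic-index variant is only an illustration, assuming your length() helper returns an integer tensor of sequence lengths:

# Option 1: feed the placeholders whenever you run or evaluate anything that
# depends on them, e.g. when asking the graph for the critique value.
critique_value = sess.run(critic.critique,
                          feed_dict = {critic.observation_history: obs_batch,
                                       critic.action: act_batch})

# Option 2: do not call .eval() while the graph is still being built. Inside
# depthless_lstm_critic, keep the index symbolic so TensorFlow resolves it at
# run time instead of at construction time:
last_index = tf.reduce_max(length(observation_history)) - 1
last_output = tf.gather(tf.stack(unstacked_observation_outputs), last_index)
unstacked_observation_outputs = tf.nn.relu(tf.nn.xw_plus_b(
    last_output, weights['second_observation'], biases['second_observation']))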

Can someone tell me why this question was downvoted, so that I can change how I ask questions in the future?
Because your code is not formatted correctly: how can you define __init__ for a class without indentation? Also, you posted far too much code, and it does not seem to get to the key point.
@Sraw Oh, thank you for noticing that. The indentation is correct in my code, but it got scrambled when I copied it over. Next time I will post less code. I thought people would want to see everything, but I can see how hard it is to follow.