Python: TypeError in TensorFlow 2.0 when using tf.keras.layers.RNN

Tags: python, tensorflow, keras, recurrent-neural-network

I found that the error occurs because tensorflow.dynamic_rnn (now deprecated) and keras.layers.LSTM return their results in different formats. So I ended up rewriting almost all of the code, and it works fine now. Anyway, thanks for the comments :)
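For reference, a minimal sketch (not the poster's actual rewrite) of how model() could look after switching to keras.layers.LSTM. Unlike tf.nn.dynamic_rnn, the Keras layer is constructed first and then called on the input tensor, and it returns only the output tensor rather than an (outputs, states) tuple, so return_sequences=True is needed to keep the per-timestep outputs:

def model(self):
    # keras.layers.LSTM replaces the LSTMCell + dynamic_rnn pair;
    # return_sequences=True keeps one output per timestep, which is what
    # dynamic_rnn returned as the first element of its tuple
    lstm = tf.keras.layers.LSTM(self.hidden_dim, return_sequences=True)
    outputs = lstm(self.x)  # shape: [batch, seq_size, hidden_dim]
    num_examples = tf.shape(self.x)[0]
    W_repeated = tf.tile(tf.expand_dims(self.W_out, 0), [num_examples, 1, 1])
    out = tf.matmul(outputs, W_repeated) + self.b_out
    return tf.squeeze(out)  # shape: [batch, seq_size]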


I know nothing about TensorFlow, so I am working through the book by Nishant Shukla. Because the TensorFlow version has changed, some of the book's code no longer works. Below is the code. How can I fix this problem?

import numpy as np
import tensorflow
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

class SeriesPredictor:
    def __init__(self, input_dim=1, seq_size=4, hidden_dim=10):
        self.input_dim = input_dim
        self.seq_size = seq_size
        self.hidden_dim = hidden_dim
        self.W_out = tf.Variable(tf.random_normal([hidden_dim,1]), name='W_out')
        self.b_out = tf.Variable(tf.random_normal([1]), name='b_out')
        self.x = tf.placeholder(tf.float32, [None, seq_size, input_dim])
        self.y = tf.placeholder(tf.float32, [None, seq_size])

        self.cost = tf.reduce_mean(tf.square(self.model()-self.y))
        self.train_op = tf.train.AdamOptimizer().minimize(self.cost)

        self.saver = tf.train.Saver()

    def model(self):
        cell = tf.keras.layers.LSTMCell(self.hidden_dim)
        # TypeError raised here: tf.keras.layers.RNN is a layer class, so the
        # input tensor cannot be passed as a positional argument the way
        # tf.nn.dynamic_rnn accepted it
        outputs, states = tf.keras.layers.RNN(cell, self.x, dtype=tf.float32)
        # since some APIs changed between TF v1 and v2, Keras code had to be added here
        num_examples = tf.shape(self.x)[0]
        W_repeated = tf.tile(tf.expand_dims(self.W_out,0),[num_examples,1,1])
        out = tf.matmul(outputs,W_repeated)+self.b_out
        out = tf.squeeze(out)
        return out

    def train(self, train_x, train_y):
        with tf.Session() as sess:
            tf.get_variable_scope().reuse_variables()
            sess.run(tf.global_variables_initializer())
            for i in range(1000):
                _, mse = sess.run([self.train_op, self.cost], feed_dict={self.x:train_x,self.y:train_y})
                if i % 100 == 0:  # i % 1000 would only ever fire at i == 0
                    print(i, mse)
            save_path = self.saver.save(sess,'./model.ckpt')
            print('Model saved to {}'.format(save_path))

    def test(self,test_x):
        with tf.Session() as sess:
            tf.get_variable_scope().reuse_variables()
            self.saver.restore(sess,'./model.ckpt')
            output = sess.run(self.model(), feed_dict={self.x:test_x})
            print(output)

if __name__ == '__main__':
    predictor = SeriesPredictor(input_dim = 1, seq_size = 4, hidden_dim = 10)
    train_x = [[[1],[2],[5],[6]],
               [[5],[7],[7],[8]],
               [[3],[4],[5],[7]]]
    train_y = [[1,3,7,11],
               [5,12,14,15],
               [3,7,9,12]]
    predictor.train(train_x=train_x, train_y=train_y)
    test_x = [[[1],[2],[3],[4]],
              [[4],[5],[6],[7]]]
    actual_y = [[[1],[3],[5],[7]],
                [[4],[9],[11],[13]]]
    pred_y = predictor.test(test_x)
    print(pred_y)

What version of TF was this code written for? Could you mark more explicitly which lines you added yourself, or post your lines and the book's lines separately? You are not feeding any input to the RNN layer, which is why it raises the error.
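To illustrate the last comment: tf.keras.layers.RNN takes only the cell (plus options such as return_sequences) in its constructor, and the input tensor is supplied by calling the layer instance afterwards. A minimal sketch of that two-step pattern, where inputs stands for a tensor like self.x above:

cell = tf.keras.layers.LSTMCell(10)
rnn = tf.keras.layers.RNN(cell, return_sequences=True)  # construct the layer first
outputs = rnn(inputs)  # then call it on the input; it returns only the outputs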