Tensorflow ValueError: Structure of Python function inputs does not match input_signature

System information

  • OS Platform and Distribution: CentOS Linux 7.7.1908
  • TensorFlow version: 2.3.0
Trying to convert to TFLite models

I tried to convert the encoder and decoder models, both subclasses of tf.keras.Model, as follows:

import tensorflow as tf

embedding_dim = 256
units = 512
top_k = 5000
vocab_size = top_k + 1
features_shape = 2048
attention_features_shape = 64

class BahdanauAttention(tf.keras.Model):
    def __init__(self, units):
        super(BahdanauAttention, self).__init__()
        self.W1 = tf.keras.layers.Dense(units)
        self.W2 = tf.keras.layers.Dense(units)
        self.V = tf.keras.layers.Dense(1)
    def call(self, features, hidden):
        # features(CNN_encoder output) shape == (batch_size, 64, embedding_dim)

        # hidden shape == (batch_size, hidden_size)
        # hidden_with_time_axis shape == (batch_size, 1, hidden_size)
        hidden_with_time_axis = tf.expand_dims(hidden, 1)

        # score shape == (batch_size, 64, hidden_size)
        score = tf.nn.tanh(self.W1(features) + self.W2(hidden_with_time_axis))

        # attention_weights shape == (batch_size, 64, 1)
        # you get 1 at the last axis because you are applying score to self.V
        attention_weights = tf.nn.softmax(self.V(score), axis=1)

        # context_vector shape after sum == (batch_size, hidden_size)
        context_vector = attention_weights * features
        context_vector = tf.reduce_sum(context_vector, axis=1)

        return context_vector, attention_weights

class CNN_Encoder(tf.keras.Model):
    # since you have already extracted the features and dumped them with pickle,
    # this encoder passes those features through a fully connected layer
    def __init__(self, embedding_dim):
        super(CNN_Encoder, self).__init__()
        # shape after fc == (batch_size, 64, embedding_dim)
        self.fc = tf.keras.layers.Dense(embedding_dim)

    # @tf.function(input_signature=[tf.TensorSpec(shape=(1, 64, features_shape),dtype=tf.float32)])
    @tf.function
    def call(self, x):
        x = self.fc(x)
        x = tf.nn.relu(x)
        return x

class RNN_Decoder(tf.keras.Model):
    def __init__(self, embedding_dim, units, vocab_size):
        super(RNN_Decoder, self).__init__()
        self.units = units

        self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
        self.gru = tf.keras.layers.GRU(self.units,
                                       return_sequences=True,
                                       return_state=True,
                                       recurrent_initializer='glorot_uniform',
                                       unroll = True)
        self.fc1 = tf.keras.layers.Dense(self.units)
        self.fc2 = tf.keras.layers.Dense(vocab_size)

        self.attention = BahdanauAttention(self.units)


    @tf.function(input_signature=[tf.TensorSpec(shape=[1, 1], dtype=tf.int32, name='x'),
                                  tf.TensorSpec(shape=[1, 64, 256], dtype=tf.float32, name='feature'),
                                  tf.TensorSpec(shape=[1, 512], dtype=tf.float32, name='hidden')])
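    # note: the bare @tf.function below stacks a second wrapper on top of
    # the signature-carrying tf.function above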
    @tf.function
    def call(self, x , features, hidden):
        # defining attention as a separate model
        context_vector, attention_weights = self.attention(features, hidden)

        #x shape after passing through embedding == (batch_size, 1, embedding_dim)
        x = self.embedding(x)

        #x shape after concatenation == (batch_size, 1, embedding_dim + hidden_size)
        x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1)

        # passing the concatenated vector to the GRU
        output, state = self.gru(x)

        #shape == (batch_size, max_length, hidden_size)
        x = self.fc1(output)

        # x shape after reshape == (batch_size * max_length, hidden_size)
        x = tf.reshape(x, (-1, x.shape[2]))

        # output shape == (batch_size * max_length, vocab)
        x = self.fc2(x)

        return x, state, attention_weights

    def reset_states(self, batch_size):
        return tf.zeros((batch_size, self.units))

encoder = CNN_Encoder(embedding_dim)
decoder = RNN_Decoder(embedding_dim, units, vocab_size)

encoder._set_inputs(tf.TensorSpec(shape=(1, 64, features_shape),dtype=tf.float32))
decoder._set_inputs([tf.TensorSpec(shape=[1, 1], dtype=tf.int32, name='x'),
                                  tf.TensorSpec(shape=[1, 64, 256], dtype=tf.float32, name='feature'),
                                  tf.TensorSpec(shape=[1, 512], dtype=tf.float32, name='hidden')])
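# note: _set_inputs above receives the three TensorSpecs as one Python list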


encoder_converter = tf.lite.TFLiteConverter.from_keras_model(encoder)
decoder_converter = tf.lite.TFLiteConverter.from_keras_model(decoder)

encoder_model = encoder_converter.convert()
decoder_model = decoder_converter.convert()

open("encoder_model.tflite", "wb").write(encoder_model)
open("decoder_model.tflite", "wb").write(decoder_model)
The error message is:

ValueError: Structure of Python function inputs does not match input_signature:
  inputs: (
    [<tf.Tensor 'x:0' shape=(1, 1) dtype=int32>, <tf.Tensor 'feature:0' shape=(1, 64, 256) dtype=float32>, <tf.Tensor 'hidden:0' shape=(1, 512) dtype=float32>])
  input_signature: (
    TensorSpec(shape=(1, 1), dtype=tf.int32, name='x'),
    TensorSpec(shape=(1, 64, 256), dtype=tf.float32, name='feature'),
    TensorSpec(shape=(1, 512), dtype=tf.float32, name='hidden'))
I think the function inputs do match the input signature. How can I fix this?

Looks like this issue.
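Two things in the posted code are worth ruling out. First, RNN_Decoder.call ends up wrapped in two stacked @tf.function decorators, so the bare inner one is traced without the signature. Second, decoder._set_inputs is given the three TensorSpecs as a single Python list, so Keras traces call with one list argument where the input_signature declares three separate tensors, which is exactly the structure mismatch the error reports.

Below is a minimal sketch of a workaround, assuming TensorFlow 2.3: keep only the signature-carrying decorator on RNN_Decoder.call (delete the bare @tf.function under it), drop the _set_inputs calls, and convert from concrete functions instead of from_keras_model. The helper name encoder_fn is just for illustration, and the sketch is untested against the full model:

import tensorflow as tf

encoder = CNN_Encoder(embedding_dim)
decoder = RNN_Decoder(embedding_dim, units, vocab_size)

# wrap the encoder call with an explicit signature (the commented-out
# decorator inside CNN_Encoder would achieve the same thing)
@tf.function(input_signature=[tf.TensorSpec(shape=(1, 64, features_shape),
                                            dtype=tf.float32)])
def encoder_fn(x):
    return encoder(x)

# with input_signature set, get_concrete_function() needs no arguments;
# tracing here also builds the layers' variables
encoder_concrete = encoder_fn.get_concrete_function()
decoder_concrete = decoder.call.get_concrete_function()

encoder_model = tf.lite.TFLiteConverter.from_concrete_functions(
    [encoder_concrete]).convert()
decoder_model = tf.lite.TFLiteConverter.from_concrete_functions(
    [decoder_concrete]).convert()

with open("encoder_model.tflite", "wb") as f:
    f.write(encoder_model)
with open("decoder_model.tflite", "wb") as f:
    f.write(decoder_model)

Converting from concrete functions sidesteps _set_inputs entirely, so the list-versus-three-tensors mismatch never comes up. If from_keras_model is a hard requirement, call would instead have to accept a single nested structure (e.g. a list of x, features, hidden) so that the traced inputs and the declared signature agree.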