ValueError: Python inputs incompatible with input_signature


System information

  • OS platform and distribution: CentOS Linux 7.7.1908
  • TensorFlow version: 2.3.0

I am following this example:

It works as it should and saves checkpoints; now I want to convert it to a TF Lite model.

Here is the link to the full conversion code:

Here is the link to the full training code:

I also followed this:

Here is what I am running to save and convert the inference graph:

@tf.function
def evaluate(image):
    hidden = decoder.reset_states(batch_size=1)

    temp_input = tf.expand_dims(load_image(image)[0], 0)
    img_tensor_val = image_features_extract_model(temp_input)
    img_tensor_val = tf.reshape(img_tensor_val, (img_tensor_val.shape[0], -1, img_tensor_val.shape[3]))

    features = encoder(img_tensor_val)

    dec_input = tf.expand_dims([tokenizer.word_index['<start>']], 0)
    result = []

    for i in range(max_length):
        predictions, hidden, attention_weights = decoder(dec_input, features, hidden)

        predicted_id = tf.random.categorical(predictions, 1)[0][0]
        # print(tokenizer.index_word)
        print(predicted_id,predicted_id.dtype)

        # for key,value in tokenizer.index_word.items():
        #     key = tf.convert_to_tensor(key)
        #     tf.dtypes.cast(key,tf.int64)
        #     print(key)

        # print(tokenizer.index_word)

        result.append(predicted_id)

        # if tokenizer.index_word[predicted_id] == '<end>':
        #     return result

        dec_input = tf.expand_dims([predicted_id], 0)

    return result

export_dir = "./"
tflite_enc_input = ''
ckpt.f = evaluate
to_save = evaluate.get_concrete_function('')

converter = tf.lite.TFLiteConverter.from_concrete_functions([to_save])
tflite_model = converter.convert()
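
(For reference, once converter.convert() succeeds, the resulting flatbuffer is normally written out afterwards; a minimal sketch, with a hypothetical filename:)

# Hypothetical follow-up step: serialize the converted TF Lite model to disk.
with open("evaluate.tflite", "wb") as f:
    f.write(tflite_model)
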
Encoder model:

class CNN_Encoder(tf.keras.Model):
    def __init__(self, embedding):
        super(CNN_Encoder, self).__init__()
        # shape after fc == (batch_size, 64, embedding_dim)
        self.fc = tf.keras.layers.Dense(embedding_dim)

    @tf.function(input_signature=[tf.TensorSpec(shape=(1, 64, features_shape),dtype=tf.dtypes.float32)])
    def call(self, x):
        x = self.fc(x)
        x = tf.nn.relu(x)
        return x
Decoder model:

class RNN_Decoder(tf.keras.Model):
    def __init__(self, embedding_dim, units, vocab_size):
        super(RNN_Decoder, self).__init__()
        self.units = units

        self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
        self.gru = tf.keras.layers.GRU(self.units,
                                       return_sequences=True,
                                       return_state=True,
                                       recurrent_initializer='glorot_uniform',
                                       unroll = True)
        self.fc1 = tf.keras.layers.Dense(self.units)
        self.fc2 = tf.keras.layers.Dense(vocab_size)

        self.attention = BahdanauAttention(self.units)


    @tf.function(input_signature=[tf.TensorSpec(shape=[1, 1], dtype=tf.int64),
                                  tf.TensorSpec(shape=[1, 64, 256], dtype=tf.float32),
                                  tf.TensorSpec(shape=[1, 512], dtype=tf.float32)])
    def call(self, x , features, hidden):

        context_vector, attention_weights = self.attention(features, hidden)

        #x shape after passing through embedding == (batch_size, 1, embedding_dim)
        x = self.embedding(x)

        #x shape after concatenation == (batch_size, 1, embedding_dim + hidden_size)
        x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1)


        output, state = self.gru(x)

        #shape == (batch_size, max_length, hidden_size)
        x = self.fc1(output)

        #x shape == (batch_size, max_length, hidden_size)
        x = tf.reshape(x, (-1, x.shape[2]))

        # output shape == (batch_size * max_length, vocab)
        x = self.fc2(x)

        return x, state, attention_weights

    def reset_states(self, batch_size):
        return tf.zeros((batch_size, self.units))
I just changed the tf.function input signature to int32, like this:

@tf.function(input_signature=[tf.TensorSpec(shape=[1, 1], dtype=tf.int32),
                              tf.TensorSpec(shape=[1, 64, 256], dtype=tf.float32),
                              tf.TensorSpec(shape=[1, 512], dtype=tf.float32)])

But then another error appeared:

ValueError: Python inputs incompatible with input_signature:
  inputs: (
    Tensor("ExpandDims_2:0", shape=(1, 1), dtype=int64),
    Tensor("cnn__encoder/StatefulPartitionedCall:0", shape=(1, 64, 256), dtype=float32),
    Tensor("rnn__decoder/StatefulPartitionedCall:1", shape=(1, 512), dtype=float32))
  input_signature: (
    TensorSpec(shape=(1, 1), dtype=tf.int32, name=None),
    TensorSpec(shape=(1, 64, 256), dtype=tf.float32, name=None),
    TensorSpec(shape=(1, 512), dtype=tf.float32, name=None))
Why do the dtypes of the inputs change from int64 to int32?
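
One interpretation of the mismatch (a guess, not a confirmed answer): the first dec_input, built from the Python int tokenizer.word_index['<start>'], is traced as int32, while every later dec_input is rebuilt from tf.random.categorical, which returns int64 IDs, so whichever dtype the signature declares, one of the calls violates it. A minimal sketch of keeping every call consistent with the original int64 signature, reusing the names from the evaluate function above (illustrative only):

# Sketch (not a verified fix): keep the decoder's input_signature at tf.int64
# and cast the initial dec_input so both the first call and the looped calls
# feed int64 tensors. decoder, tokenizer, features, hidden, max_length and
# result are the objects defined in the question's code above.
dec_input = tf.cast(tf.expand_dims([tokenizer.word_index['<start>']], 0), tf.int64)

for i in range(max_length):
    predictions, hidden, attention_weights = decoder(dec_input, features, hidden)

    predicted_id = tf.random.categorical(predictions, 1)[0][0]  # tf.random.categorical returns int64
    result.append(predicted_id)

    dec_input = tf.expand_dims([predicted_id], 0)  # stays int64, matching the signature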
Tensor("ExpandDims_2:0", shape=(1, 1), dtype=int64),
Tensor("cnn__encoder/StatefulPartitionedCall:0", shape=(1, 64, 256), dtype=float32),
Tensor("rnn__decoder/StatefulPartitionedCall:1", shape=(1, 512), dtype=float32))
input_signature: (
TensorSpec(shape=(1, 1), dtype=tf.int32, name=None),
TensorSpec(shape=(1, 64, 256), dtype=tf.float32, name=None),
TensorSpec(shape=(1, 512), dtype=tf.float32, name=None))```
Why the dtypes of inputs change from int64 to int32?