Python: How to connect an encoder and a decoder

I built the following encoder-decoder architecture, and the encoder and decoder each work fine on their own:

from tensorflow.keras.layers import LSTM, Input, Reshape, Lambda
from tensorflow.keras.models import Model
from tensorflow.keras import backend as K

WORD_TO_INDEX = {"foo": 0, "bar": 1}

MAX_QUERY_WORD_COUNT = 10
QUERY_ENCODING_SIZE = 15

# ENCODER
query_encoder_input = Input(shape=(None, len(WORD_TO_INDEX)), name="query_encoder_input")
query_encoder_output = LSTM(QUERY_ENCODING_SIZE, name="query_encoder_lstm")(query_encoder_input)
query_encoder = Model(inputs=query_encoder_input, outputs=query_encoder_output)

# DECODER
query_decoder_input = Input(shape=(QUERY_ENCODING_SIZE,), name="query_decoder_input")
query_decoder_reshape = Reshape((1, QUERY_ENCODING_SIZE), name="query_decoder_reshape")(query_decoder_input)
query_decoder_lstm = LSTM(QUERY_ENCODING_SIZE, name="query_decoder_lstm", return_sequences=True, return_state=True)
recurrent_input, state_h, state_c = query_decoder_lstm(query_decoder_reshape)
states = [state_h, state_c]
query_decoder_outputs = []
for _ in range(MAX_QUERY_WORD_COUNT):
    recurrent_input, state_h, state_c = query_decoder_lstm(recurrent_input, initial_state=states)
    query_decoder_outputs.append(recurrent_input)
    states = [state_h, state_c]
query_decoder_output = Lambda(lambda x: K.concatenate(x, axis=1), name="query_decoder_concat")(query_decoder_outputs)
query_decoder = Model(inputs=query_decoder_input, outputs=query_decoder_output)
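
Each model can be exercised on its own with dummy data. Here is a minimal smoke-test sketch (not part of the original post; the batch size of 4 and sequence length of 6 are arbitrary):

import numpy as np

# The encoder maps a batch of one-hot query sequences to fixed-size encodings.
dummy_queries = np.random.rand(4, 6, len(WORD_TO_INDEX)).astype("float32")
encodings = query_encoder.predict(dummy_queries)
print(encodings.shape)  # (4, QUERY_ENCODING_SIZE) == (4, 15)

# The decoder expands each encoding back into a fixed-length sequence.
decoded = query_decoder.predict(encodings)
print(decoded.shape)  # (4, MAX_QUERY_WORD_COUNT, QUERY_ENCODING_SIZE) == (4, 10, 15)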
But when I try to join them together to build an autoencoder, I get a strange error that I can't explain:

# AUTOENCODER
# apply the reshape layer to the output of the encoder
query_autoencoder_output = query_decoder.layers[1](query_encoder_output)
# rebuild the autoencoder by applying each layer of the decoder to the output of the encoder
for decoder_layer in query_decoder.layers[2:]:
    # this fails and I don't know why
    query_autoencoder_output = decoder_layer(query_autoencoder_output)
# the code never gets here
query_autoencoder = Model(inputs=query_encoder_input, outputs=query_autoencoder_output)
This throws the error:

ValueError: Shape must be rank 3 but is rank 2 for '{{node
query_decoder_concat/concat_1}} = ConcatV2[N=3, T=DT_FLOAT,
Tidx=DT_INT32](query_decoder_lstm/PartitionedCall_11:1,
query_decoder_lstm/PartitionedCall_11:2,
query_decoder_lstm/PartitionedCall_11:3,
query_decoder_concat/concat_1/axis)' with input shapes:
[?,1,15], [?,15], [?,15], [].

This is the template I used for my decoder. (See the section "What if I don't want to use teacher forcing for training?")

I relied on these (especially the last one) to figure out how to fit the models together.


What does this error mean, and how can I fix it?

You can treat models as layers, *essentially*. With an autoencoder, it's going to be as simple as:

autoencoder = Sequential([encoder, decoder])
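
Applied to the models from the question, that would look something like the sketch below (the optimizer and loss are placeholder assumptions, not prescribed by this answer):

from tensorflow.keras.models import Sequential

# Each sub-model shows up as a single layer of the combined model.
query_autoencoder = Sequential([query_encoder, query_decoder])
query_autoencoder.compile(optimizer="adam", loss="mse")
query_autoencoder.summary()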

If you want some extra flexibility, you can subclass tf.keras.Model:

class AutoEncoder(tf.keras.Model):
    def __init__(self, encoder, decoder):
        super(AutoEncoder, self).__init__()
        self.encoder = encoder
        self.decoder = decoder

    def call(self, inputs, training=None, **kwargs):
        x = self.encoder(inputs)
        x = self.decoder(x)
        return x

ae = AutoEncoder(encoder, decoder)

ae.fit(...
Full reproducible example:

import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from tensorflow import keras
import tensorflow as tf
physical_devices = tf.config.list_physical_devices('GPU')
if physical_devices:  # enable memory growth only when a GPU is present
    tf.config.experimental.set_memory_growth(physical_devices[0], True)
import numpy as np

(xtrain, ytrain), (xtest, ytest) = keras.datasets.cifar10.load_data()

train_ix = np.where(ytrain.ravel() == 1)
test_ix = np.where(ytest.ravel() == 1)

cars_train = xtrain[train_ix]
cars_test = xtest[test_ix]

cars = np.vstack([cars_train, cars_test]).astype(np.float32)/255

X = tf.data.Dataset.from_tensor_slices(cars).batch(8)


class Encoder(keras.Model):
    def __init__(self):
        super(Encoder, self).__init__()
        self.flat = keras.layers.Flatten(input_shape=(32, 32, 3))
        self.dense1 = keras.layers.Dense(128)
        self.dense2 = keras.layers.Dense(32)

    def call(self, inputs, training=None, **kwargs):
        x = self.flat(inputs)
        x = keras.activations.selu(self.dense1(x))
        x = keras.activations.selu(self.dense2(x))
        return x


class Decoder(keras.Model):
    def __init__(self):
        super(Decoder, self).__init__()
        self.dense1 = keras.layers.Dense(128, input_shape=[32])
        self.dense2 = keras.layers.Dense(32 * 32 * 3)
        self.reshape = keras.layers.Reshape([32, 32, 3])

    def call(self, inputs, training=None, **kwargs):
        x = keras.activations.selu(self.dense1(inputs))
        x = keras.activations.sigmoid(self.dense2(x))
        x = self.reshape(x)
        return x


class AutoEncoder(keras.Model):
    def __init__(self, encoder, decoder):
        super(AutoEncoder, self).__init__()
        self.encoder = encoder
        self.decoder = decoder

    def call(self, inputs, training=None, **kwargs):
        x = self.encoder(inputs)
        x = self.decoder(x)
        return x


ae = AutoEncoder(Encoder(), Decoder())

loss_object = keras.losses.BinaryCrossentropy()

reconstruction_loss = keras.metrics.Mean(name='reconstruction_loss')

optimizer = keras.optimizers.Adam()


@tf.function
def reconstruct(inputs):
    with tf.GradientTape() as tape:
        out = ae(inputs)
        loss = loss_object(inputs, out)

    gradients = tape.gradient(loss, ae.trainable_variables)
    optimizer.apply_gradients(zip(gradients, ae.trainable_variables))

    reconstruction_loss(loss)


if __name__ == '__main__':
    template = 'Epoch {:2} Reconstruction Loss {:.4f}'
    for epoch in range(50):
        reconstruction_loss.reset_states()
        for input_batches in X:
            reconstruct(input_batches)
        print(template.format(epoch + 1, reconstruction_loss.result()))
Output:

Epoch 35 Reconstruction Loss 0.5794
Epoch 36 Reconstruction Loss 0.5793
Epoch 37 Reconstruction Loss 0.5792
Epoch 38 Reconstruction Loss 0.5791
Epoch 39 Reconstruction Loss 0.5790
Epoch 40 Reconstruction Loss 0.5789
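
As a hedged usage sketch (not part of the answer), the trained model can then be run on a batch to inspect its reconstructions:

# Pull one batch from the dataset and reconstruct it.
batch = next(iter(X))                      # shape (8, 32, 32, 3), floats in [0, 1]
reconstructed = ae(batch, training=False)  # same shape as the input
per_image_mse = tf.reduce_mean(tf.square(batch - reconstructed), axis=[1, 2, 3])
print(per_image_mse.numpy())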

Building on the answer above, but without using Sequential, you can do the following:

query_autoencoder = Model(inputs=query_encoder_input, outputs=query_decoder(query_encoder_output))
query_autoencoder.summary()
This summary also breaks the model down into more layers than M Z's answer does.
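
For contrast, here is a short sketch of the two combination styles side by side (assuming the query_encoder and query_decoder models from the question); both build the same computation and differ only in how the model is assembled and displayed by summary():

from tensorflow.keras.models import Model, Sequential

# Style 1: sub-models stacked as layers.
ae_stacked = Sequential([query_encoder, query_decoder])

# Style 2: functional composition through the encoder's original input
# tensor, as in this answer.
ae_functional = Model(inputs=query_encoder_input,
                      outputs=query_decoder(query_encoder_output))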

To join the two models, why not do something like Sequential([encoder, decoder])?

Because I didn't know that existed! Thank you so much. If you add this as an answer, I'll accept it. (It works for me.)