Warning: file_get_contents(/data/phpspider/zhask/data//catemap/2/tensorflow/5.json): failed to open stream: No such file or directory in /data/phpspider/zhask/libs/function.php on line 167

Warning: Invalid argument supplied for foreach() in /data/phpspider/zhask/libs/tag.function.php on line 1116

Notice: Undefined index: in /data/phpspider/zhask/libs/function.php on line 180

Warning: array_chunk() expects parameter 1 to be array, null given in /data/phpspider/zhask/libs/function.php on line 181
Tensorflow AttributeError:该层从未被调用,因此没有定义的输入形状_Tensorflow_Tf.keras_Tensorflow2.0 - Fatal编程技术网

Tensorflow AttributeError:该层从未被调用,因此没有定义的输入形状

Tensorflow AttributeError:该层从未被调用,因此没有定义的输入形状,tensorflow,tf.keras,tensorflow2.0,Tensorflow,Tf.keras,Tensorflow2.0,我试图通过创建三个类在TensorFlow 2.0中构建一个自动编码器:编码器、解码器和自动编码器。 因为我不想手动设置输入形状,所以我试图从编码器的输入形状推断解码器的输出形状 import os import shutil import numpy as np import tensorflow as tf from tensorflow.keras import Model from tensorflow.keras.layers import Dense, Layer def ms

我试图通过创建三个类在TensorFlow 2.0中构建一个自动编码器:编码器、解码器和自动编码器。 因为我不想手动设置输入形状,所以我试图从编码器的输入形状推断解码器的输出形状

import os
import shutil

import numpy as np
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import Dense, Layer


def mse(model, original):
    return tf.reduce_mean(tf.square(tf.subtract(model(original), original)))


def train_autoencoder(loss, model, opt, original):
    with tf.GradientTape() as tape:
        gradients = tape.gradient(
            loss(model, original), model.trainable_variables)
        gradient_variables = zip(gradients, model.trainable_variables)
        opt.apply_gradients(gradient_variables)


def log_results(model, X, max_outputs, epoch, prefix):
    loss_values = mse(model, X)

    sample_img = X[sample(range(X.shape[0]), max_outputs), :]
    original = tf.reshape(sample_img, (max_outputs, 28, 28, 1))
    encoded = tf.reshape(
        model.encode(sample_img), (sample_img.shape[0], 8, 8, 1))
    decoded = tf.reshape(
        model(tf.constant(sample_img)), (sample_img.shape[0], 28, 28, 1))
    tf.summary.scalar("{}_loss".format(prefix), loss_values, step=epoch + 1)
    tf.summary.image(
        "{}_original".format(prefix),
        original,
        max_outputs=max_outputs,
        step=epoch + 1)
    tf.summary.image(
        "{}_encoded".format(prefix),
        encoded,
        max_outputs=max_outputs,
        step=epoch + 1)
    tf.summary.image(
        "{}_decoded".format(prefix),
        decoded,
        max_outputs=max_outputs,
        step=epoch + 1)

    return loss_values


def preprocess_mnist(batch_size):
    (X_train, y_train), (X_test, y_test) = tf.keras.datasets.mnist.load_data()

    X_train = X_train / np.max(X_train)
    X_train = X_train.reshape(X_train.shape[0],
                              X_train.shape[1] * X_train.shape[2]).astype(
                                  np.float32)
    train_dataset = tf.data.Dataset.from_tensor_slices(X_train).batch(
        batch_size)

    y_train = y_train.astype(np.int32)
    train_labels = tf.data.Dataset.from_tensor_slices(y_train).batch(
        batch_size)

    X_test = X_test / np.max(X_test)
    X_test = X_test.reshape(
        X_test.shape[0], X_test.shape[1] * X_test.shape[2]).astype(np.float32)

    y_test = y_test.astype(np.int32)

    return X_train, X_test, train_dataset, y_train, y_test, train_labels


class Encoder(Layer):
    def __init__(self, units):
        super(Encoder, self).__init__()
        self.units = units

    def build(self, input_shape):
        self.output_layer = Dense(units=self.units, activation=tf.nn.relu)

    @tf.function
    def call(self, X):
        return self.output_layer(X)


class Decoder(Layer):
    def __init__(self, encoder):
        super(Decoder, self).__init__()
        self.encoder = encoder

    def build(self, input_shape):
        self.output_layer = Dense(units=self.encoder.input_shape)

    @tf.function
    def call(self, X):
        return self.output_layer(X)


class AutoEncoder(Model):
    def __init__(self, units):
        super(AutoEncoder, self).__init__()
        self.units = units

    def build(self, input_shape):
        self.encoder = Encoder(units=self.units)
        self.encoder.build(input_shape)
        self.decoder = Decoder(encoder=self.encoder)

    @tf.function
    def call(self, X):
        Z = self.encoder(X)
        return self.decoder(Z)

    @tf.function
    def encode(self, X):
        return self.encoder(X)

    @tf.function
    def decode(self, Z):
        return self.decode(Z)


def test_autoencoder(batch_size,
                     learning_rate,
                     epochs,
                     max_outputs=4,
                     seed=None):

    tf.random.set_seed(seed)

    X_train, X_test, train_dataset, _, _, _ = preprocess_mnist(
        batch_size=batch_size)

    autoencoder = AutoEncoder(units=64)
    opt = tf.optimizers.Adam(learning_rate=learning_rate)

    log_path = 'logs/autoencoder'
    if os.path.exists(log_path):
        shutil.rmtree(log_path)

    writer = tf.summary.create_file_writer(log_path)

    with writer.as_default():
        with tf.summary.record_if(True):
            for epoch in range(epochs):
                for step, batch in enumerate(train_dataset):
                    train_autoencoder(mse, autoencoder, opt, batch)

                # logs (train)
                train_loss = log_results(
                    model=autoencoder,
                    X=X_train,
                    max_outputs=max_outputs,
                    epoch=epoch,
                    prefix='train')

                # logs (test)
                test_loss = log_results(
                    model=autoencoder,
                    X=X_test,
                    max_outputs=max_outputs,
                    epoch=epoch,
                    prefix='test')

                writer.flush()

                template = 'Epoch {}, Train loss: {:.5f}, Test loss: {:.5f}'
                print(
                    template.format(epoch + 1, train_loss.numpy(),
                                    test_loss.numpy()))

    if not os.path.exists('saved_models'):
        os.makedirs('saved_models')
    np.savez_compressed('saved_models/encoder.npz',
                        *autoencoder.encoder.get_weights())


if __name__ == '__main__':
    test_autoencoder(batch_size=128, learning_rate=1e-3, epochs=20, seed=42)
由于编码器的输入形状用于解码器的构建功能,我希望在训练自动编码器时,首先构建编码器,然后构建解码器,但情况似乎并非如此。我还尝试在解码器的build函数开始时调用
self.encoder.build()
,在解码器的build函数中构建编码器,但没有任何区别。我做错了什么

我收到的错误信息:

AttributeError: The layer has never been called and thus has no defined input shape.

你就快到了,只是把事情复杂化了一点。您收到此错误是因为
解码器
层依赖于尚未构建的
编码器
层(因为调用
构建
失败),并且它的
输入形状
属性未设置

解决方案是从
AutoEncoder
对象传递正确的输出形状,如下所示:

class Decoder(Layer):
    def __init__(self, units):
        super(Decoder, self).__init__()
        self.units = units

    def build(self, _):
        self.output_layer = Dense(units=self.units)

    def call(self, X):
        return self.output_layer(X)


class AutoEncoder(Model):
    def __init__(self, units):
        super(AutoEncoder, self).__init__()
        self.units = units

    def build(self, input_shape):
        self.encoder = Encoder(units=self.units)
        self.decoder = Decoder(units=input_shape[-1])
class Encoder(Layer):
    def __init__(self, units):
        super(Encoder, self).__init__()
        self.output_layer = Dense(units=units, activation=tf.nn.relu)

    def call(self, X):
        return self.output_layer(X)


class Decoder(Layer):
    def __init__(self, units):
        super(Decoder, self).__init__()
        self.output_layer = Dense(units=units)

    def call(self, X):
        return self.output_layer(X)


class AutoEncoder(Model):
    def __init__(self, units):
        super(AutoEncoder, self).__init__()
        self.units = units

    def build(self, input_shape):
        self.encoder = Encoder(units=self.units)
        self.decoder = Decoder(units=input_shape[-1])

    def call(self, X):
        Z = self.encoder(X)
        return self.decoder(Z)

    def encode(self, X):
        return self.encoder(X)

    def decode(self, Z):
        return self.decode(Z)
请注意,我已经删除了
@tf,function
decorator,因为您不太可能获得任何效率提升(
keras
已经为您创建了引擎盖下的静态图形)

此外,可以看到,您的构建不依赖于
input\u shape
信息,因此所有创建都可以安全地移动到构造函数中,如下所示:

class Decoder(Layer):
    def __init__(self, units):
        super(Decoder, self).__init__()
        self.units = units

    def build(self, _):
        self.output_layer = Dense(units=self.units)

    def call(self, X):
        return self.output_layer(X)


class AutoEncoder(Model):
    def __init__(self, units):
        super(AutoEncoder, self).__init__()
        self.units = units

    def build(self, input_shape):
        self.encoder = Encoder(units=self.units)
        self.decoder = Decoder(units=input_shape[-1])
class Encoder(Layer):
    def __init__(self, units):
        super(Encoder, self).__init__()
        self.output_layer = Dense(units=units, activation=tf.nn.relu)

    def call(self, X):
        return self.output_layer(X)


class Decoder(Layer):
    def __init__(self, units):
        super(Decoder, self).__init__()
        self.output_layer = Dense(units=units)

    def call(self, X):
        return self.output_layer(X)


class AutoEncoder(Model):
    def __init__(self, units):
        super(AutoEncoder, self).__init__()
        self.units = units

    def build(self, input_shape):
        self.encoder = Encoder(units=self.units)
        self.decoder = Decoder(units=input_shape[-1])

    def call(self, X):
        Z = self.encoder(X)
        return self.decoder(Z)

    def encode(self, X):
        return self.encoder(X)

    def decode(self, Z):
        return self.decode(Z)
上面的问题是,是否真的需要单独的
解码器
编码器
层。在我看来,这些应该被忽略,这只留给我们一个简短易读的片段:

class AutoEncoder(Model):
    def __init__(self, units):
        super(AutoEncoder, self).__init__()
        self.units = units

    def build(self, input_shape):
        self.encoder = Dense(units=self.units, activation=tf.nn.relu)
        self.decoder = Dense(units=input_shape[-1])

    def call(self, X):
        Z = self.encoder(X)
        return self.decoder(Z)

    def encode(self, X):
        return self.encoder(X)

    def decode(self, Z):
        return self.decode(Z)

顺便说一句,你在
示例中有一个错误
但这是一个你可以自己处理的小错误。你能发布你的完整代码吗?我添加了完整的代码。它可能需要
self.output\u layer=Dense(units=input\u shape[-1])
你可能是对的,但它仍然不能解决我的问题。是的:),希望这是朝着正确方向迈出的一步谢谢,这正是我想要的。我知道我可以创建自动编码器(或使用顺序模型),但我想做一点实验,考虑到这是我第一次尝试TF2.0,我想让事情复杂一点。当然,没有双关语,很高兴我能帮上忙。