
Python 3.x: TensorFlow 2.1 full memory and tf.function called twice

Tags: python-3.x, tensorflow, tensorflow2.0, tf.keras

I am developing a convolutional autoencoder with TensorFlow 2.1.

Here is the code:

import cv2
import numpy as np
import tensorflow as tf
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import Input, Conv2D, Conv2DTranspose, Dense, Flatten, Reshape
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tqdm import tqdm


class ConvAutoencoder:

    def __init__(self, input_shape, latent_dim):
        self.input_shape = input_shape
        self.latent_dim = latent_dim
        self.__create_model()

    def __create_model(self):
        # Define Encoder
        encoder_input = Input(shape=self.input_shape, name='encoder_input')
        x = Conv2D(filters=16, kernel_size=5, activation='relu', padding='same')(encoder_input)
        x = Conv2D(filters=32, kernel_size=3, strides=2, activation='relu', padding='same')(x)
        x = Conv2D(filters=64, kernel_size=3, strides=2, activation='relu', padding='same')(x)
        x = Conv2D(filters=128, kernel_size=2, strides=2, activation='relu', padding='same')(x)
        last_conv_shape = x.shape
        x = Flatten()(x)
        x = Dense(256, activation='relu')(x)
        x = Dense(units=self.latent_dim, name='encoded_rep')(x)
        self.encoder = Model(encoder_input, x, name='encoder_model')
        self.encoder.summary()

        # Define Decoder
        decoder_input = Input(shape=self.latent_dim, name='decoder_input')
        x = Dense(units=256)(decoder_input)
        x = Dense(units=(last_conv_shape[1] * last_conv_shape[2] * last_conv_shape[3]), activation='relu')(x)
        x = Reshape(target_shape=(last_conv_shape[1], last_conv_shape[2], last_conv_shape[3]))(x)
        x = Conv2DTranspose(filters=128, kernel_size=2, activation='relu', padding='same')(x)
        x = Conv2DTranspose(filters=64, kernel_size=3, strides=2, activation='relu', padding='same')(x)
        x = Conv2DTranspose(filters=32, kernel_size=3, strides=2, activation='relu', padding='same')(x)
        x = Conv2DTranspose(filters=16, kernel_size=5, strides=2, activation='relu', padding='same')(x)
        x = Conv2DTranspose(filters=self.input_shape[2], kernel_size=5, activation='sigmoid', padding='same')(x)
        self.decoder = Model(decoder_input, x, name='decoder_model')
        self.decoder.summary()

        # Define Autoencoder from encoder input to decoder output
        self.autoencoder = Model(encoder_input, self.decoder(self.encoder(encoder_input)))
        self.optimizer = Adam()
        self.autoencoder.summary()


@tf.function
def compute_loss(model, batch):
    decoded = model.autoencoder(batch)
    return tf.reduce_mean(tf.reduce_sum(tf.square(batch - decoded), axis=[1, 2, 3]))


@tf.function
def train(train_data, model, epochs=2, batch_size=32):
    for epoch in range(epochs):
        for i in tqdm(range(0, len(train_data), batch_size)):
            batch = train_data[i: i + batch_size]
            with tf.GradientTape() as tape:
                loss = compute_loss(model, batch)
            gradients = tape.gradient(loss, model.autoencoder.trainable_variables)
            model.optimizer.apply_gradients(zip(gradients, model.autoencoder.trainable_variables))


if __name__ == "__main__":
    img_dim = 64
    channels = 1

    (x_train, _), (x_test, _) = mnist.load_data()
    # Resize images to (img_dim x img_dim)
    x_train = np.array([cv2.resize(img, (img_dim, img_dim)) for img in x_train])
    x_test = np.array([cv2.resize(img, (img_dim, img_dim)) for img in x_test])

    # Normalize images
    x_train = x_train.astype('float32') / 255.
    x_test = x_test.astype('float32') / 255.

    # Reshape datasets for tensorflow
    x_train = x_train.reshape((-1, img_dim, img_dim, channels))
    x_test = x_test.reshape((-1, img_dim, img_dim, channels))

    # Create autoencoder and fit the model
    autoenc = ConvAutoencoder(input_shape=(img_dim, img_dim, channels), latent_dim=4)

    # Train autoencoder
    train(train_data=x_train, model=autoenc, epochs=2, batch_size=32)
Now, there are two problems:

  • The train() function, which is decorated with @tf.function, is called twice. Without the @tf.function decorator this does not happen.
  • Every training run increases memory consumption by about 3 GB.

What am I doing wrong?

Additional information:

  • TensorFlow version: 2.1.0
  • Python version: 3.7.5
  • TensorFlow is not using the GPU, since I still have driver problems

There is not much to say beyond what is already on StackOverflow, but StackOverflow forces me to write something, so here goes. For your first question: when you use @tf.function, the function is executed and traced. During this tracing, eager execution is disabled in that context, so every tf.* method only defines a tf.Operation node that produces a tf.Tensor as output.
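As a minimal, standalone illustration of this tracing behaviour (the square function below is just an example and is not part of the code in the question):

import tensorflow as tf

@tf.function
def square(x):
    print("Python execution")    # runs only while the function is being traced
    tf.print("Graph execution")  # becomes a graph op, so it runs on every call
    return x * x

square(tf.constant(2.0))  # first call: trace + run -> both lines are printed
square(tf.constant(3.0))  # same input signature -> only "Graph execution" is printed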

Code Debug 1:

# Train autoencoder
    train(train_data=x_train, model=autoenc, epochs=5, batch_size=32)
Note: using a shorter dataset, the epochs were increased to 5 for better debugging.

Train function:

@tf.function
def train(train_data, model, epochs=2, batch_size=32):
    for epoch in range(epochs):
      print("Python execution: ", epoch)   ## This Line only Prints during Python Execution
      tf.print("Graph execution: ", epoch) ## This Line only Print during Graph Execution

      # for i in tqdm(range(0, len(train_data), batch_size)): ## RAISES ERROR
      for i in range(0, len(train_data), batch_size):
          batch = train_data[i: i + batch_size]
          with tf.GradientTape() as tape:
              loss = compute_loss(model, batch)
          gradients = tape.gradient(loss, model.autoencoder.trainable_variables)
          model.optimizer.apply_gradients(zip(gradients, model.autoencoder.trainable_variables))
In the output from debugging the original code with the Python and TensorFlow print statements, you can see that the function looks like it is "executed" twice, but that is the tracing plus execution used to build the graph; subsequent calls to the function already use the graph generated by AutoGraph.

Observing this, when optimizing with @tf.function it is better to keep the epochs outside of the traced training function, as shown below.

Code Debug 2:

    # Train autoencoder
    epochs = 5
    print('Loop Training using Dataset (Epochs : {})'.format(epochs))
    for epoch in range(epochs):
      train(train_data=x_train, model=autoenc, batch_size = 32)
Train function:

@tf.function
def train(train_data, model, batch_size=32):
      print("Python execution")   ## This Line only Prints during Python Execution
      tf.print("Graph execution") ## This Line only Print during Graph Execution

      # for i in tqdm(range(0, len(train_data), batch_size)):
      for i in range(0, len(train_data), batch_size):
          batch = train_data[i: i + batch_size]
          with tf.GradientTape() as tape:
              loss = compute_loss(model, batch)
          gradients = tape.gradient(loss, model.autoencoder.trainable_variables)
          model.optimizer.apply_gradients(zip(gradients, model.autoencoder.trainable_variables))
      print("#################") # For Debugging Purpose
In the output of the modified flow you can still see that the function looks like it is "executed" twice, and training then runs for the 5 epochs using the signature that was created. Here every subsequent call of the train function already executes inside the graph, which leads to shorter execution times thanks to TensorFlow's optimizations.


Regarding the second question, about running out of memory:

You can try using a TensorFlow Dataset generator instead of loading the whole dataset into memory.

You can read more about it in the documentation.
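As an illustration only (this sketch is not part of the original answer), the training loop above could be fed from a tf.data pipeline roughly like this; x_train, autoenc and compute_loss are the names defined in the question's code, and for data that truly does not fit in memory the Dataset would be built from a generator or from files rather than with from_tensor_slices:

import tensorflow as tf

batch_size = 32
epochs = 2

# Batches come from the tf.data pipeline instead of manual NumPy slicing
dataset = (tf.data.Dataset.from_tensor_slices(x_train)
           .shuffle(buffer_size=1024)
           .batch(batch_size)
           .prefetch(tf.data.experimental.AUTOTUNE))

@tf.function
def train_step(model, batch):
    # One optimization step; only this step is traced, not the epoch loop
    with tf.GradientTape() as tape:
        loss = compute_loss(model, batch)
    gradients = tape.gradient(loss, model.autoencoder.trainable_variables)
    model.optimizer.apply_gradients(zip(gradients, model.autoencoder.trainable_variables))
    return loss

for epoch in range(epochs):
    for batch in dataset:
        loss = train_step(autoenc, batch)

Tracing only the per-batch step keeps the generated graph small, while the Python-level epoch loop stays outside, in line with the observation above.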