TensorFlow on a Google Colab TPU instance: InternalError: Failed to serialize message


I'm trying to train a model for a university project on Google Colab using a TPU. I'm using TensorFlow 1.15.0. As I understood from the TPU examples, I convert my tf.keras.models.Model instance into a TPU-compatible one with the appropriate distribution strategy (code below).

Then comes the model creation call (code below).
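The two cells referred to above are not shown on this page. Roughly, they follow the standard TF 1.15 Colab TPU pattern, something like the sketch below (the strategy setup and the Adam/loss settings mirror the answer further down; this is a sketch rather than the exact notebook code):

import os
import tensorflow as tf

# resolve the Colab TPU address, initialize the TPU system and build a
# TPU distribution strategy
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
    tpu='grpc://' + os.environ['COLAB_TPU_ADDR'])
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.experimental.TPUStrategy(resolver)

# create and compile the generator under the strategy scope so its variables
# are created by the TPU strategy
with strategy.scope():
  model = define_generator()
  adam = tf.keras.optimizers.Adam(learning_rate=0.0002, beta_1=0.5, beta_2=0.999)
  model.compile(optimizer=adam, loss='mean_absolute_error')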

where the define_generator() function looks like this:

# (imports assumed; the original notebook's import cell is not shown in the question)
import tensorflow as tf
from tensorflow.keras.initializers import RandomNormal
from tensorflow.keras.layers import Conv2D

# define an encoder block
def define_encoder_block(layer_in, n_filters, batchnorm=True):
  # weight initialization
  init = RandomNormal(stddev=0.02)
  # add downsampling layers
  g = tf.keras.layers.Conv2D(int(n_filters/2), (3,3), padding='same', kernel_initializer=init)(layer_in)
  g = tf.keras.layers.Conv2D(int(n_filters/2), (3,3), strides=(2,2), padding='same', kernel_initializer=init)(g)
  g = tf.keras.layers.Conv2D(n_filters, (3,3), padding='same', kernel_initializer=init)(g)
  # conditionally add batch normalization
  if batchnorm:
      g = tf.keras.layers.BatchNormalization()(g, training=True)
  # ELU activation
  g = tf.keras.activations.elu(g)
  return g


# define a decoder block
def decoder_block(layer_in, skip_in, n_filters, dropout=True):
  # weight initialization
  init = RandomNormal(stddev=0.02)
  # add upsampling layer
  g = tf.keras.layers.Conv2D(int(n_filters/2), (3,3), padding='same', kernel_initializer=init)(layer_in)
  g = tf.keras.layers.Conv2D(int(n_filters/2), (3,3), padding='same', kernel_initializer=init)(layer_in)
  g = tf.keras.layers.Conv2DTranspose(n_filters, (3,3), strides=(2,2), padding='same', kernel_initializer=init)(g)
  # add batch normalization
  g = tf.keras.layers.BatchNormalization()(g, training=True)
  # conditionally add dropout
  if dropout:
      g = tf.keras.layers.Dropout(0.5)(g, training=True)
  # merge with skip connection
  g = tf.keras.layers.Concatenate()([g, skip_in])
  # ELU activation
  g = tf.keras.activations.elu(g)
  return g

# define complete model
def define_generator(image_shape=(256,256,3)):
  # weight initialization
  init = RandomNormal(stddev=0.02)
  # image input
  in_image = tf.keras.layers.Input(shape=image_shape)
  # encoder model: C64-C128-C256-C512-C512-C512-C512-C512
  e1 = define_encoder_block(in_image, 64, batchnorm=False)
  e2 = define_encoder_block(e1, 128)
  e3 = define_encoder_block(e2, 256)
  e4 = define_encoder_block(e3, 512)
  e5 = define_encoder_block(e4, 512)
  e6 = define_encoder_block(e5, 512)
  e7 = define_encoder_block(e6, 512)
  # bottleneck, no batch norm, ELU activation
  b = Conv2D(512, (3,3), strides=(2,2), padding='same', kernel_initializer=init)(e7)
  b = tf.keras.activations.elu(b)
  # decoder model: CD512-CD1024-CD1024-C1024-C1024-C512-C256-C128
  d1 = decoder_block(b, e7, 512)
  d2 = decoder_block(d1, e6, 512)
  d3 = decoder_block(d2, e5, 512)
  d4 = decoder_block(d3, e4, 512, dropout=False)
  d5 = decoder_block(d4, e3, 256, dropout=False)
  d6 = decoder_block(d5, e2, 128, dropout=False)
  d7 = decoder_block(d6, e1, 64, dropout=False)
  # output
  g = tf.keras.layers.Conv2DTranspose(3, (3,3), strides=(2,2), padding='same', kernel_initializer=init)(d7)
  out_image = tf.keras.activations.tanh(g)
  # define model
  model = tf.keras.models.Model(in_image, out_image)
  return model
However, I get InternalError: Failed to serialize message, which traces back to the model.fit() call. I've searched everywhere for a solution but couldn't find one. Can anyone help me?

Here is the link to my Colab notebook, where the full traceback can be found:


Support for Keras models on TPUs has improved significantly in recent releases. I've updated your code sample for TF 2.2. Most of the changes are simple renames; the biggest change is that I use tf.data to set up the input dataset. To get the best results on a TPU, don't feed numpy arrays directly to model.fit. If you already have your data in numpy, you can build a dataset from it with tf.data.Dataset.from_tensor_slices, though you may get better results with a different input pipeline. I don't have access to your original dataset, so I went ahead and substituted random tensors.

Here is the updated code:

%tensorflow_version 2.x
import os
import tensorflow as tf
import numpy as np

resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='grpc://' + os.environ['COLAB_TPU_ADDR'])
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.experimental.TPUStrategy(resolver)

# define an encoder block
def define_encoder_block(layer_in, n_filters, batchnorm=True):
  # weight initialization
  init = tf.keras.initializers.RandomNormal(stddev=0.02)
  # add downsampling layer    
  g = tf.keras.layers.Conv2D(int(n_filters/2), (3,3), padding='same', kernel_initializer=init)(layer_in)
  g = tf.keras.layers.Conv2D(int(n_filters/2), (3,3), strides=(2,2), padding='same', kernel_initializer=init)(g)    
  g = tf.keras.layers.Conv2D(n_filters, (3,3), padding='same', kernel_initializer=init)(g)  
  # conditionally add batch normalization
  if batchnorm:
      g = tf.keras.layers.BatchNormalization()(g, training=True)
  # ELU activation
  g = tf.keras.activations.elu(g)
  return g


# define a decoder block
def decoder_block(layer_in, skip_in, n_filters, dropout=True):
  # weight initialization
  init = tf.keras.initializers.RandomNormal(stddev=0.02)
  # add upsampling layer
  g = tf.keras.layers.Conv2D(int(n_filters/2), (3,3),  padding='same', kernel_initializer=init)(layer_in)
  g = tf.keras.layers.Conv2D(int(n_filters/2), (3,3),  padding='same', kernel_initializer=init)(layer_in)    
  g = tf.keras.layers.Conv2DTranspose(n_filters, (3,3), strides=(2,2),  padding='same', kernel_initializer=init)(g)    
  # add batch normalization
  g = tf.keras.layers.BatchNormalization()(g, training=True)
  # conditionally add dropout
  if dropout:
      g = tf.keras.layers.Dropout(0.5)(g, training=True)
  # merge with skip connection
  g = tf.keras.layers.Concatenate()([g, skip_in])
  # ELU activation
  g = tf.keras.activations.elu(g)
  return g

# define complete model
def define_generator(image_shape=(256,256,3)):
  # weight initialization
  init = tf.keras.initializers.RandomNormal(stddev=0.02)
  # image input
  in_image = tf.keras.layers.Input(shape=image_shape)
  # encoder model: C64-C128-C256-C512-C512-C512-C512-C512
  e1 = define_encoder_block(in_image, 64, batchnorm=False)
  e2 = define_encoder_block(e1, 128)
  e3 = define_encoder_block(e2, 256)
  e4 = define_encoder_block(e3, 512)
  e5 = define_encoder_block(e4, 512)
  e6 = define_encoder_block(e5, 512)
  e7 = define_encoder_block(e6, 512)
  # bottleneck, no batch norm, ELU activation
  b = tf.keras.layers.Conv2D(512, (3,3), strides=(2,2), padding='same', kernel_initializer=init)(e7)
  b = tf.keras.activations.elu(b)
  # decoder model: CD512-CD1024-CD1024-C1024-C1024-C512-C256-C128
  d1 = decoder_block(b, e7, 512)
  d2 = decoder_block(d1, e6, 512)
  d3 = decoder_block(d2, e5, 512)
  d4 = decoder_block(d3, e4, 512, dropout=False)
  d5 = decoder_block(d4, e3, 256, dropout=False)
  d6 = decoder_block(d5, e2, 128, dropout=False)
  d7 = decoder_block(d6, e1, 64, dropout=False)
  # output
  g = tf.keras.layers.Conv2DTranspose(3, (3,3), strides=(2,2), padding='same', kernel_initializer=init)(d7)
  out_image = tf.keras.activations.tanh(g)
  # define model
  model = tf.keras.models.Model(in_image, out_image)
  return model

# Values from original notebook
# shape = (11612,256,256,3) # this caused my notebook to OOM since it's huge
shape = (256,256,256,3)
batch_size = 8
epochs = 64

# Create fake random dataset
X_train = np.random.rand(*shape)
Y_train = np.random.rand(*shape)
dataset = (tf.data.Dataset.from_tensor_slices((X_train, Y_train))
    .repeat(epochs)
    .batch(batch_size, drop_remainder=True)
    .prefetch(16))

with strategy.scope():
  model = define_generator()
  adam = tf.keras.optimizers.Adam(learning_rate=0.0002, beta_1=0.5, beta_2=0.999)
  model.compile(optimizer=adam, loss='mean_absolute_error', metrics=['accuracy'])
  model.summary()

model.fit(dataset)

Comments:
Did you find a solution?
No. Are you facing the same error?
Yes, I ran into several errors after trying many of the suggestions, but each time it was a different set of errors, so I went back to the GPU. Have you checked Kaggle? They have also started experimental TPU support.
Kaggle feels a bit limiting because of the shorter runtimes and the caps, and I can't download any file without committing the kernel, but as for the TPUs, I haven't