Python TensorFlow:在另一个 tf.estimator 模型中使用由 tf.estimator 训练好的模型
问题:有没有办法在另一个模型 B 中使用由 tf.estimator 训练好的模型 A?情况是这样的:假设我有一个训练好的"模型 A",其模型函数为 model_a_fn()。"模型 A"以图像作为输入,输出一些向量浮点值,类似于 MNIST 分类器。另外还有一个"模型 B",定义在 model_b_fn() 中,它同样以图像作为输入,并且在训练"模型 B"时需要"模型 A"的向量输出。也就是说,我想训练"模型 B",它的输入是图像以及"模型 A"的预测输出(无需再训练"模型 A",只需在训练"模型 B"时获得它的预测输出)。我尝试了以下三种情况:
那么,关于在另一个 tf.estimator 中使用训练好的自定义 tf.estimator,您有什么想法吗?——我已经找到了这个问题的一个解决方案:如果你正在为同样的问题而挣扎,可以使用下面这种方法。
简单用例请参阅下面的示例代码。(追问:谢谢,我尝试过类似的方法,但之前漏掉了第 2 步!)
def model_a_fn(features, labels, mode, params):
    """Model function for Model A (body elided in the original post).

    Standard tf.estimator model_fn signature; in a real implementation this
    would build the network and return a tf.estimator.EstimatorSpec for the
    given `mode`. Here it is only a placeholder, so it returns None.
    """
    # ... (network definition elided by the original author)
    return
def main():
    """Train Model A and export it as a SavedModel.

    The export lands in a timestamped subdirectory, e.g. ./model_a/123456789.
    """
    # model checkpoint location
    model_a_dir = './model_a'
    # create estimator for Model A
    model_a = tf.estimator.Estimator(model_fn=model_a_fn, model_dir=model_a_dir)
    # train Model A
    # NOTE(review): the original passed `lambda: input_fn_a`, which hands
    # Estimator.train a callable that returns the input function object itself
    # instead of the input data. train() expects the input function directly
    # (it will call it once to build the input pipeline).
    # `input_fn_a` is assumed to be defined elsewhere in the original project.
    model_a.train(input_fn=input_fn_a)
    # ... (evaluation etc. elided by the original author)
    # export model a
    # `serving_input_receiver_fn` is assumed to be defined elsewhere.
    model_a.export_savedmodel(model_a_dir, serving_input_receiver_fn=serving_input_receiver_fn)
    # exported to ./model_a/123456789
    return


if __name__ == '__main__':
    main()
# follows model_a's input format
def bypass_input_fn(x):
    """Wrap `x` in the {'x': ...} feature dict layout Model A expects.

    Returns a plain dict; used to feed a tensor straight through as the
    features for Model A's model_fn.
    """
    features = {
        'x': x,
    }
    return features
def model_b_fn(features, labels, mode, params):
    """Model B's model_fn, case 1: query Model A via Estimator.predict.

    This approach fails: Estimator.predict() builds its own graph and
    session, so it cannot consume `inputs`, which is a symbolic tensor that
    belongs to Model B's (the caller's) graph.
    """
    # parse input
    inputs = tf.reshape(features['x'], shape=[-1, 28, 28, 1])
    # get Model A's response
    model_a = params['model_a']
    predictions = model_a.predict(
        input_fn=lambda: bypass_input_fn(inputs)
    )
    for results in predictions:
        # Error occurs!!!
        model_a_output = results['class_id']
    # build Model B
    # NOTE(review): the original had a bare name `same`; conv2d's padding
    # argument takes the string 'same'.
    layer1 = tf.layers.conv2d(inputs, 32, 5, padding='same', activation=tf.nn.relu)
    layer1 = tf.layers.max_pooling2d(layer1, pool_size=[2, 2], strides=2)
    # ...
    # some layers added... (`prev_layer` is the last of these elided layers)
    # ...
    flatten = tf.layers.flatten(prev_layer)
    # NOTE(review): tf.layers.dense requires the input tensor as its first
    # argument; the original `tf.layers.dense(10)` omitted it.
    layern = tf.layers.dense(flatten, 10)
    # let's say layern's output shape and model_a_output's shape are the same,
    # so (per the author's own comment) the add combines layern, not flatten.
    add_layer = tf.add(layern, model_a_output)
    # ...
    # do more... stuff
    # ...
    return
def main():
    """Case 1 driver: pass the Model A Estimator object through params.

    Fails inside model_b_fn (see the note there): predict() cannot be used
    on a symbolic tensor from another graph.
    """
    # load pretrained model A
    model_a_dir = './model_a'
    model_a = tf.estimator.Estimator(model_fn=model_a_fn, model_dir=model_a_dir)
    # model checkpoint location
    model_b_dir = './model_b/'
    # create estimator for Model B
    model_b = tf.estimator.Estimator(
        model_fn=model_b_fn,
        model_dir=model_b_dir,
        params={
            'model_a': model_a,
        }
    )
    # train Model B
    # NOTE(review): pass the input function itself, not a lambda that
    # returns the function object (see Model A's training script).
    model_b.train(input_fn=input_fn_b)
    return


if __name__ == '__main__':
    main()
def model_b_fn(features, labels, mode, params):
    """Model B's model_fn, case 2: use a predictor fn passed via params.

    The predictor (tf.contrib.predictor) runs in its own graph/session, so
    feeding it the symbolic `inputs` tensor of Model B's graph does not work;
    the failure in this case actually surfaces when the Estimator is built
    (see the case-2 main()).
    """
    # parse input
    inputs = tf.reshape(features['x'], shape=[-1, 28, 28, 1])
    # get Model A's response
    model_a_predict_fn = params['model_a_predict_fn']
    model_a_prediction = model_a_predict_fn(
        {
            'x': inputs
        }
    )
    model_a_output = model_a_prediction['output']
    # build Model B
    # NOTE(review): conv2d's padding argument is the string 'same'; the
    # original used a bare (undefined) name `same`.
    layer1 = tf.layers.conv2d(inputs, 32, 5, padding='same', activation=tf.nn.relu)
    layer1 = tf.layers.max_pooling2d(layer1, pool_size=[2, 2], strides=2)
    # ...
    # some layers added... (`prev_layer` is the last of these elided layers)
    # ...
    flatten = tf.layers.flatten(prev_layer)
    # NOTE(review): dense needs its input tensor; original omitted it.
    layern = tf.layers.dense(flatten, 10)
    # let's say layern's output shape and model_a_output's shape are the same,
    # so the add combines layern (not flatten) with model_a_output.
    add_layer = tf.add(layern, model_a_output)
    # ...
    # do more... stuff
    # ...
    return
def main():
    """Case 2 driver: load Model A's SavedModel as a predictor up front.

    Fails when constructing Model B's Estimator: the predictor object
    (bound to its own graph/session) cannot be passed through params.
    """
    # load pretrained model A from its exported SavedModel directory
    model_a_dir = './model_a/123456789'
    model_a_predict_fn = tf.contrib.predictor.from_saved_model(export_dir=model_a_dir)
    # model checkpoint location
    model_b_dir = './model_b/'
    # create estimator for Model B
    # Error occurs!!!
    model_b = tf.estimator.Estimator(
        model_fn=model_b_fn,
        model_dir=model_b_dir,
        params={
            'model_a_predict_fn': model_a_predict_fn,
        }
    )
    # train Model B
    # NOTE(review): pass the input function itself, not a lambda returning it.
    model_b.train(input_fn=input_fn_b)
    return


if __name__ == '__main__':
    main()
def model_b_fn(features, labels, mode, params):
    """Model B's model_fn, case 3: load the predictor inside the model_fn.

    Also fails: from_saved_model loads Model A into its own graph/session,
    and calling the predictor on the symbolic `inputs` tensor of Model B's
    graph raises an error.
    """
    # parse input
    inputs = tf.reshape(features['x'], shape=[-1, 28, 28, 1])
    # get Model A's response by loading its SavedModel here
    model_a_predict_fn = tf.contrib.predictor.from_saved_model(export_dir=params['model_a_dir'])
    # Error occurs!!!
    model_a_prediction = model_a_predict_fn(
        {
            'x': inputs
        }
    )
    model_a_output = model_a_prediction['output']
    # build Model B
    # NOTE(review): conv2d's padding argument is the string 'same'; the
    # original used a bare (undefined) name `same`.
    layer1 = tf.layers.conv2d(inputs, 32, 5, padding='same', activation=tf.nn.relu)
    layer1 = tf.layers.max_pooling2d(layer1, pool_size=[2, 2], strides=2)
    # ...
    # some layers added... (`prev_layer` is the last of these elided layers)
    # ...
    flatten = tf.layers.flatten(prev_layer)
    # NOTE(review): dense needs its input tensor; original omitted it.
    layern = tf.layers.dense(flatten, 10)
    # let's say layern's output shape and model_a_output's shape are the same,
    # so the add combines layern (not flatten) with model_a_output.
    add_layer = tf.add(layern, model_a_output)
    # ...
    # do more... stuff
    # ...
    return
def main():
    """Case 3 driver: pass only the SavedModel directory through params.

    The model_fn then loads the predictor itself — this also fails (see the
    note in the case-3 model_b_fn).
    """
    # load pretrained model A from its exported SavedModel directory
    model_a_dir = './model_a/123456789'
    # model checkpoint location
    model_b_dir = './model_b/'
    # create estimator for Model B
    # Error occurs!!!
    model_b = tf.estimator.Estimator(
        model_fn=model_b_fn,
        model_dir=model_b_dir,
        params={
            'model_a_dir': model_a_dir,
        }
    )
    # train Model B
    # NOTE(review): pass the input function itself, not a lambda returning it.
    model_b.train(input_fn=input_fn_b)
    return


if __name__ == '__main__':
    main()