Python: How to save/restore a TensorFlow tensor_forest model?


I am using TensorFlow to run a random forest model (code below).

Question: how do I save the model and restore it for prediction? This is the current version of TF's random forest; I'm on TF 1.2 and it works. I've seen people use TensorForestEstimator, but it doesn't work with TF 1.2 because TF updates so frequently.

Saving the model is easy, but restoring it was killing me. No matter what I tried, I kept getting a "FertileStatsResourceHandleOp" error. In the end I added two lines of code before restoring, and it works:

hparams = tensor_forest.ForestHParams(num_classes=num_classes,
                                      num_features=num_features,
                                      num_trees=num_trees,
                                      max_nodes=max_nodes).fill()
forest_graph = tensor_forest.RandomForestGraphs(params=hparams)
The complete code looks like this:

# Imports needed to make this snippet runnable (not shown in the original post).
import tensorflow as tf
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.ops import resources
from tensorflow.examples.tutorials.mnist import input_data

# Example hyperparameters for MNIST; the original post does not list its values.
mnist = input_data.read_data_sets("/tmp/data/", one_hot=False)
num_classes = 10      # digits 0-9
num_features = 784    # 28x28 pixels
num_trees = 10
max_nodes = 1000
num_steps = 500
batch_size = 1024

X = tf.placeholder(tf.float32, shape=[None, num_features], name="input_x")
Y = tf.placeholder(tf.int32, shape=[None], name="input_y")
hparams = tensor_forest.ForestHParams(num_classes=num_classes,
                                      num_features=num_features,
                                      num_trees=num_trees,
                                      max_nodes=max_nodes).fill()
forest_graph = tensor_forest.RandomForestGraphs(params=hparams)
train_op = forest_graph.training_graph(X, Y)
loss_op = forest_graph.training_loss(X, Y)

# The original snippet uses infer_op without defining it; it is the forest's
# inference (class-probability) op. In some TF versions inference_graph returns
# a tuple, in which case take its first element.
infer_op = forest_graph.inference_graph(X)

correct_prediction = tf.argmax(infer_op, 1, name="predictions")
accuracy_op = tf.reduce_mean(
    tf.cast(tf.equal(correct_prediction, tf.cast(Y, tf.int64)), tf.float32),
    name="accuracy")

# tensor_forest keeps its tree state in shared resources, which need their own init.
init_vars = tf.group(tf.global_variables_initializer(),
                     resources.initialize_resources(resources.shared_resources()))
sess = tf.Session()
sess.run(init_vars)
test_x, test_y = mnist.test.images, mnist.test.labels
saver = tf.train.Saver(save_relative_paths=True, max_to_keep=10)
checkpoint_prefix = 'checkpoints/model'   # the 'checkpoints' directory must exist
for i in range(1, num_steps + 1):
    batch_x, batch_y = mnist.train.next_batch(batch_size=batch_size)
    _, l = sess.run([train_op, loss_op], feed_dict={X: batch_x, Y: batch_y})
    if i % 10 == 0 or i == 1:
        acc = sess.run(accuracy_op, feed_dict={X: batch_x, Y: batch_y})
        print('step %i, loss: %f, acc: %f' % (i, l, acc))
    if i % 10 == 0:
        print("Test Accuracy:", sess.run(accuracy_op, feed_dict={X: test_x, Y: test_y}))
        path = saver.save(sess, checkpoint_prefix, global_step=i)
        print("Saved model checkpoint to {} at step {}".format(path, i))
print("Test Accuracy:", sess.run(accuracy_op, feed_dict={X: test_x, Y: test_y}))
Restoring the model:
Also, is there any way to calculate AUC here?
# These are the two lines added before restoring: building the forest graph again
# loads the tensor_forest ops (e.g. FertileStatsResourceHandleOp) into the process,
# so that import_meta_graph can find them.
hparams = tensor_forest.ForestHParams(num_classes=num_classes,
                                      num_features=num_features,
                                      num_trees=num_trees,
                                      max_nodes=max_nodes).fill()
forest_graph = tensor_forest.RandomForestGraphs(params=hparams)

checkpoint_file = tf.train.latest_checkpoint('checkpoints')
graph = tf.Graph()
with graph.as_default():
    session_conf = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
    sess = tf.Session(config=session_conf)
    with sess.as_default():
        # Rebuild the saved graph from the .meta file and load the trained weights.
        saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file), clear_devices=True)
        saver.restore(sess, checkpoint_file)
        # Look up the tensors that were given explicit names in the training graph.
        input_x = graph.get_operation_by_name("input_x").outputs[0]
        input_y = graph.get_operation_by_name("input_y").outputs[0]
        predictions_op = graph.get_operation_by_name("predictions").outputs[0]
        accuracy_op = graph.get_operation_by_name("accuracy").outputs[0]
        acc = sess.run(accuracy_op, {input_x: test_x, input_y: test_y})
        print("Restored model test accuracy:", acc)
        predictions = sess.run(predictions_op, {input_x: test_x})
        print(predictions)
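
On the AUC question above: the restored graph only exposes the arg-max predictions, but AUC needs per-class scores. A minimal sketch, assuming the training graph additionally named the inference output, e.g. tf.identity(infer_op, name="probabilities") (that name, and the use of scikit-learn's roc_auc_score, are my additions, not part of the original post):

from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import label_binarize

# Hypothetical: assumes the training graph contained
#   tf.identity(infer_op, name="probabilities")
# so the per-class scores can be fetched by name after restoring.
probs_op = graph.get_operation_by_name("probabilities").outputs[0]
probs = sess.run(probs_op, {input_x: test_x})   # shape [n_samples, num_classes]

# Macro-averaged one-vs-rest AUC over the 10 MNIST classes.
y_true = label_binarize(test_y, classes=list(range(10)))
print("Test AUC (macro, one-vs-rest):", roc_auc_score(y_true, probs, average="macro"))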