
Python: how to use a saved model in TensorFlow


First of all, I have tried to restore the model following other people's instructions, but I haven't found any clue yet. Below is the code I use to save the model; the model is saved successfully.

import tensorflow as tf
from sklearn.utils import shuffle
EPOCHS = 10
BATCH_SIZE = 128

x = tf.placeholder(tf.float32, (None, 32, 32, 3),name='x')
y = tf.placeholder(tf.int32, (None),name='y')
one_hot_y = tf.one_hot(y, 43)

rate = 0.001

logits = LeNet(x)  # LeNet is defined elsewhere (not shown in the question)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_y, logits=logits)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate = rate)
training_operation = optimizer.minimize(loss_operation)

correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()

def evaluate(TrainX, trainLabels):
    num_examples = len(TrainX)
    total_accuracy = 0
    sess = tf.get_default_session()
    for offset in range(0, num_examples, BATCH_SIZE):
        batch_x, batch_y = TrainX[offset:offset+BATCH_SIZE], trainLabels[offset:offset+BATCH_SIZE]
        accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y})
        total_accuracy += (accuracy * len(batch_x))
    return total_accuracy / num_examples

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    num_examples = len(TrainX)  # TrainX/trainLabels are the training arrays used below

    print("Training...")
    print()
    for i in range(EPOCHS):
        TrainX, trainLabels = shuffle(TrainX, trainLabels)
        for offset in range(0, num_examples, BATCH_SIZE):
            end = offset + BATCH_SIZE
            batch_x, batch_y = TrainX[offset:end], trainLabels[offset:end]
            sess.run(training_operation, feed_dict={x: batch_x, y: batch_y})

        validation_accuracy = evaluate(TrainX, trainLabels)
        print("EPOCH {} ...".format(i+1))
        print("Validation Accuracy = {:.3f}".format(validation_accuracy))
        print()

    saver.save(sess, './lenet')
    print("Model saved")
Training output:

EPOCH 1 ...
Validation Accuracy = 0.765

EPOCH 2 ...
Validation Accuracy = 0.911

EPOCH 3 ...
Validation Accuracy = 0.933

EPOCH 4 ...
Validation Accuracy = 0.958

EPOCH 5 ...
Validation Accuracy = 0.965

EPOCH 6 ...
Validation Accuracy = 0.973

EPOCH 7 ...
Validation Accuracy = 0.978

EPOCH 8 ...
Validation Accuracy = 0.986

EPOCH 9 ...
Validation Accuracy = 0.985

EPOCH 10 ...
Validation Accuracy = 0.980

Model saved


My question is: how do I use this model on new test data? Say I feed in 5 test samples and want to see how accurately the trained model classifies them, i.e. run the test data and labels through the trained model and get their accuracy. Thanks for your time, and I'm happy to provide more details if anything is unclear.

In short, I'd suggest using the tf.data and tf.saved_model APIs. There are two mechanisms: tf.train.Saver(), or the higher-level tf.saved_model API built on top of it. You can find the differences between them in other posts. I started writing pseudocode of the latter for you, but I ended up with all the code snippets below (the "Training part" and "Prediction part" sections). Hope it helps:
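
Before that, for completeness, here is a minimal sketch of the first mechanism (tf.train.Saver) applied to the checkpoint your training script already writes. It assumes the './lenet' checkpoint files exist, that the accuracy op was given an explicit name when the graph was built (e.g. tf.reduce_mean(..., name='accuracy'), which your posted code does not do yet), and testX/testLabels stand in for your own test set:

import tensorflow as tf

with tf.Session() as sess:
    # rebuild the graph from the .meta file, then load the trained weights
    saver = tf.train.import_meta_graph('./lenet.meta')
    saver.restore(sess, './lenet')
    graph = tf.get_default_graph()
    x = graph.get_tensor_by_name('x:0')
    y = graph.get_tensor_by_name('y:0')
    accuracy_operation = graph.get_tensor_by_name('accuracy:0')  # assumed name, see above
    test_accuracy = sess.run(accuracy_operation, feed_dict={x: testX, y: testLabels})
    print("Test Accuracy = {:.3f}".format(test_accuracy))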

Training part:

# imports assumed by all the snippets below
import os
import multiprocessing as mp

import tensorflow as tf
from tqdm import tqdm

# create list of file names: ['0.csv', '1.csv', ...]
totrain_features = [os.path.join('./TrainX/', f) for f in os.listdir('./TrainX/') if f.endswith('.csv')]
totrain_img = [os.path.join('./TrainLabels/', f) for f in os.listdir('./TrainLabels/') if f.endswith('.csv')]
epoch_length = len(totrain_features)

# model starts here
file_names_ph = tf.placeholder(tf.string, shape=(None), name='file_name_ph') #create a ph to put list of file paths
in_pipeline = input_pipeline(file_names_ph) # check standalone code below
nodes = model(in_pipeline['img_next_op'], in_pipeline['label_next_op'])


with tf.Session() as sess:
    sess.run([tf.global_variables_initializer(), in_pipeline['iter_init_op']], feed_dict={file_names_ph: totrain_features})  # the file list created above
    for step in tqdm(range(epoch_length)):
        # run train_op
        _ = sess.run(nodes['train_op'])
        # use saver to save weights
        if step % epoch_length == epoch_length - 1: #save last step
            # prepare args for simple_save
            in_dict = {
                'file_names': file_names_ph,
            }
            out_dict = {
                'predict': nodes['predict_op'],
                'diff_op': nodes['diff_op']
            }
            tf.saved_model.simple_save(sess, './savedmodel', in_dict, out_dict) # This is what you need, the pb file of the graph and variables are saved in savedmodel folder
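
Once the savedmodel folder is written, you can sanity-check what was exported with the saved_model_cli tool that ships with the TensorFlow pip package, e.g.:

saved_model_cli show --dir ./savedmodel --all

This prints the signature(s), i.e. the input and output tensors you asked simple_save to expose.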
Prediction part:

# input pipeline for predict
# create list of file names: ['0.csv', '1.csv', ...]
topredict_files = [os.path.join('./predict/', f) for f in os.listdir('./predict/') if f.endswith('.csv')]
epoch_length = len(topredict_files)

# save prediction images to /results folder
if not os.path.exists('./results'):
    os.makedirs('./results')

# reset another graph as the default graph
graph2 = tf.Graph()
with graph2.as_default():
    with tf.Session() as sess:
        tf.saved_model.loader.load(
            sess,
            [tf.saved_model.tag_constants.SERVING], './savedmodel'
        ) # here's what you need
        # get operation and so on
        file_names_ph = graph2.get_tensor_by_name('file_name_ph:0')
        predict_tensor = graph2.get_tensor_by_name('Conv1/prediction:0')
        diff_tensor = graph2.get_tensor_by_name('Operations/difference:0')
        iter_init_op = graph2.get_operation_by_name('iter_init_op')

        sess.run(iter_init_op, feed_dict={file_names_ph: topredict_files})
        for step in tqdm(range(epoch_length)):
            predict, difference = sess.run([predict_tensor, diff_tensor])
            # then save your prediction and comparison
            ...
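
If you are unsure of a tensor's exact name when loading the graph back, you can list every node in the restored graph, using only the graph2 handle from above:

for n in graph2.as_graph_def().node:
    print(n.name)  # append ':0' to a node name to fetch its first output tensor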

Here is how I define the pipeline and the model:

def input_pipeline(file_names_ph):
    # create new dataset for predict
    dataset = tf.data.Dataset.from_tensor_slices(file_names_ph)

    # apply list of file names to the py function wrapper for reading files
    dataset = dataset.map(_pyfn_wrapper, num_parallel_calls=mp.cpu_count()) # use the tf built-in csv reader or take a look how to use py_func:https://stackoverflow.com/questions/55363728/how-to-feed-h5-files-in-tf-data-pipeline-in-tensorflow-model

    # construct batch size
    dataset = dataset.batch(1).prefetch(mp.cpu_count())

    # initialize iterator
    iterator = tf.data.Iterator.from_structure(dataset.output_types, dataset.output_shapes)
    iterator_initialize_op = iterator.make_initializer(dataset, name='iter_init_op')

    # get image and labels
    image_getnext_op, label_getnext_op = iterator.get_next()
    return {'img_next_op': image_getnext_op, 'label_next_op': label_getnext_op, 'iter_init_op': iterator_initialize_op}


def model(in_ds, out_ds):
    # a simple model
    with tf.name_scope("Conv1"):
        W = tf.get_variable("W", shape=[3, 3, 1, 1],
                             initializer=tf.contrib.layers.xavier_initializer())
        b = tf.get_variable("b", shape=[1], initializer=tf.contrib.layers.xavier_initializer())
        layer1 = tf.nn.conv2d(in_ds, W, strides=[1, 1, 1, 1], padding='SAME') + b
        prediction = tf.nn.relu(layer1, name='prediction')

    with tf.name_scope("Operations"):
        global_step = tf.Variable(0, name='global_step', trainable=False)
        loss = tf.reduce_mean(tf.losses.mean_squared_error(labels=out_ds, predictions=prediction), name='loss')
        train_op = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(loss, name='train_op', global_step=global_step)
        difference_op = tf.cast(tf.equal(prediction, out_ds), dtype=tf.int32, name='difference')
    # I really like dictionary, it's easy to handle
    return {'global_step': global_step, 'loss': loss, 'train_op': train_op, 'diff_op': difference_op, 'predict_op': prediction}
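
The _pyfn_wrapper used in dataset.map above is defined in the post linked in the code comment. Purely as an illustration, a hypothetical version could look like the following; the csv layout, the 32x32x1 shape, and the file-name pairing are my assumptions, not the author's actual reader:

import numpy as np

def _pyfn_wrapper(file_name):
    def _read_csv(fn):
        # hypothetical: each feature csv holds one 32x32 image, and the label
        # csv is assumed to live under ./TrainLabels/ with the same base name
        fn = fn.decode('utf-8')
        img = np.genfromtxt(fn, delimiter=',').astype(np.float32).reshape(32, 32, 1)
        label = np.genfromtxt(fn.replace('TrainX', 'TrainLabels'), delimiter=',').astype(np.float32).reshape(32, 32, 1)
        return img, label
    img, label = tf.py_func(_read_csv, [file_name], [tf.float32, tf.float32])
    # py_func loses static shape information, so set it back for downstream ops
    img.set_shape([32, 32, 1])
    label.set_shape([32, 32, 1])
    return img, label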

You can also check the following question: . It is explained very clearly there.

Hi, I tried your code, but it says

name 'accuracy_operation' is not defined

Does that mean I have to execute that code all over again? I'd like to know whether I have to run everything in the main script, such as accuracy_operation or correct_prediction, in order to use the saved model.
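
That error means the ops from the training script do not exist in your new Python session: you must either rebuild the graph and then restore the weights, or load everything from the .meta file as sketched earlier. A minimal rebuild-then-restore sketch, reusing the names from the question (testX/testLabels stand in for your own test set):

import tensorflow as tf

# first re-run the graph-definition code from the question (placeholders,
# LeNet, accuracy_operation, saver) so those Python names exist again, then:
with tf.Session() as sess:
    saver.restore(sess, './lenet')  # loads the trained weights; no initializer needed
    test_accuracy = evaluate(testX, testLabels)  # evaluate() from the question
    print("Test Accuracy = {:.3f}".format(test_accuracy))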