Python: How do I interact with the training metrics (current validation accuracy, training accuracy, etc.) when using TFLearn?


Below is an example from the TFLearn documentation. It shows how to combine TFLearn and TensorFlow, using a TFLearn trainer with a regular TensorFlow graph. However, the training, test, and validation accuracies computed during training are not accessible.

import tensorflow as tf
import tflearn
    ...   
# User defined placeholders
with tf.Graph().as_default():
    # Placeholders for data and labels
    X = tf.placeholder(shape=(None, 784), dtype=tf.float32)
    Y = tf.placeholder(shape=(None, 10), dtype=tf.float32)

    net = tf.reshape(X, [-1, 28, 28, 1])

    # Using TFLearn wrappers for network building
    net = tflearn.conv_2d(net, 32, 3, activation='relu')
    .
    .
    .
    net = tflearn.fully_connected(net, 10, activation='linear')

    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(
            logits=net, 
            labels=Y))
    optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(loss)

    # Initializing the variables
    ...
    # Launch the graph
    with tf.Session() as sess:
        sess.run(init)
        ...
        for epoch in range(2):  # 2 epochs
            ...
            for i in range(total_batch):
                batch_xs, batch_ys = mnist_data.train.next_batch(batch_size)
                sess.run(optimizer, feed_dict={X: batch_xs, Y: batch_ys})
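
(For reference, a per-step training accuracy could be computed with an explicit op on this graph; a minimal sketch of that follows, assuming the net, X and Y tensors defined above. But what I am really after are the values TFLearn/TensorBoard already computes.)

# Sketch (my own, not from the TFLearn docs): an explicit in-graph accuracy op.
correct = tf.equal(tf.argmax(net, 1), tf.argmax(Y, 1))
accuracy_op = tf.reduce_mean(tf.cast(correct, tf.float32))
...
# Inside the batch loop, fetch it alongside the optimizer:
_, train_acc = sess.run([optimizer, accuracy_op],
                        feed_dict={X: batch_xs, Y: batch_ys})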
How do I access the training and validation accuracies computed at each step inside the nested for loop?


Update for clarity:

A possible solution: using the fit_batch method of the Trainer class, I believe I am computing the training and validation accuracies inside the nested loop:

Does this code compute the running accuracy as the model trains? Is there a better way to do this with TFLearn?

I understand that TensorBoard uses these values. Could I retrieve them from the event logs?

import numpy as np

def accuracy(predictions, labels):
    return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
            / predictions.shape[0])
...    
network = input_data(shape=[None, image_size, image_size, num_channels],
                     data_preprocessing=feature_normalization,
                     data_augmentation=None,
                     name='input_d')
.
.
.
network = regression(network, optimizer='SGD',
                     loss='categorical_crossentropy',
                     learning_rate=0.05, name='targets')

model_dnn_tr = tflearn.DNN(network, tensorboard_verbose=0)
...
with tf.Session(graph=graph) as session:
...
    for step in range(num_steps):
    ...
        batch_data = train_dataset[offset:(offset + batch_size), :, :, :]
        batch_labels = train_labels[offset:(offset + batch_size), :]

        loss = model_dnn_tr.fit_batch({'input_d': batch_data},
                                      {'targets': batch_labels})

        if step % 50 == 0:
            trainAccr = accuracy(model_dnn_tr.predict({'input_d': batch_data}),
                                 batch_labels)

            validAccr = accuracy(model_dnn_tr.predict({'input_d': valid_dataset}),
                                 valid_labels)

testAccr = accuracy(model_dnn_tr.predict({'input_d': test_dataset}), test_labels)
Update with the correct answer

Could I retrieve the values from the event logs?

TensorBoard does have ways to download the accuracy data, but using them during training is problematic.
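
That said, the event files can be read back after the fact. Here is a minimal sketch using TensorBoard's EventAccumulator; the log directory (TFLearn defaults to /tmp/tflearn_logs/<run_id>) and the scalar tag name are assumptions, so inspect Tags() to see what your run actually logged:

from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

# Path and tag name below are assumptions -- point this at your run directory.
events = EventAccumulator('/tmp/tflearn_logs/run_0')
events.Reload()                     # parse the event files on disk
print(events.Tags()['scalars'])     # list the scalar tags that were logged
for scalar in events.Scalars('Accuracy'):   # hypothetical tag name
    print(scalar.step, scalar.value)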

Does that code compute the running accuracy as the model trains?

In short: yes.

The fit_batch method works as one would expect, and so does the initial solution I posted below.

However, neither is the prescribed approach.

Is there a better way to do this in TFLearn?

Yes!

To track and interact with the training metrics, you should implement a tflearn.callbacks.Callback:

from tflearn import callbacks as cb

class BiasVarianceStrategyCallback(cb.Callback):
    def __init__(self, train_acc_thresh, run_id, rel_err=.1):
        """Note: we are free to define our init function however we please."""
        def errThrshld(Tran_accuracy=train_acc_thresh, relative_err=rel_err):
            Tran_err = round(1 - Tran_accuracy, 2)
            Test_err = ...
            Vald_err = ...
            Diff_err = ...
            return {'Tr': Tran_err, 'Vl': Vald_err, 'Ts': Test_err, 'Df': Diff_err}

    def update_acc_df(self, training_state, state):
        # Record the metrics carried by training_state for later analysis.
        ...

    def on_epoch_begin(self, training_state):
        ...
        variance_found = ...
        if trn_acc_stall or vld_acc_stall:
            print("accuracy increase stalled. training epoch:", ...)
            if trn_lss_mvNup or vld_lss_mvNup:
                print("loss began to increase. training:", ...)
                raise StopIteration  # stop training early
            if variance_found or bias_found:
                print("bias:", bias_found, "variance:", variance_found)
                raise StopIteration

    def on_batch_end(self, training_state, snapshot=False):
        self.update_acc_df(training_state, "batch")

    def on_epoch_end(self, training_state):
        self.update_acc_df(training_state, "epoch")

    def on_train_end(self, training_state):
        self.update_acc_df(training_state, "train")
        self.df = self.df.iloc[0:0]  # reset the accumulated metrics DataFrame
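
A minimal usage sketch: the callback is handed to fit via its callbacks argument, and the metrics live on the training_state object passed to each hook. The attribute names below (acc_value, val_acc, loss_value) come from TFLearn's TrainingState; verify them against your installed version:

import tflearn
from tflearn import callbacks as cb

class AccuracyPrinter(cb.Callback):
    """Print the metrics TFLearn tracks internally."""
    def on_epoch_end(self, training_state):
        # acc_value, val_acc and loss_value are assumed TrainingState attributes.
        print("epoch", training_state.epoch,
              "train acc:", training_state.acc_value,
              "val acc:", training_state.val_acc,
              "loss:", training_state.loss_value)

model = tflearn.DNN(network)  # `network` built as in the OP
model.fit(X, Y, n_epoch=10, validation_set=0.1,
          callbacks=AccuracyPrinter())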

Initial solution

The most satisfactory solution I have found so far:

  • use a Dataset object and an iterator to feed the data;
  • otherwise not much different from the fit_batch approach in the OP.

    import numpy as np
    import tensorflow as tf
    from tflearn.layers.core import input_data, fully_connected

    def accuracy(predictions, labels):
        return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
                / predictions.shape[0])
    ...
    
    graph = tf.Graph()
    
    with graph.as_default():
    ...
    
        # create a placeholder to dynamically switch between
        # validation and training batch sizes
        batch_size_x = tf.placeholder(tf.int64)
    
        data_placeholder =  tf.placeholder(tf.float32, 
            shape=(None, image_size, image_size, num_channels))
    
        labels_placeholder = tf.placeholder(tf.float32, shape=(None, num_labels))
    
        # create dataset: one for training and one for test etc
        dataset = tf.data.Dataset.from_tensor_slices((data_placeholder,labels_placeholder)).batch(batch_size_x).repeat()
    
    
        # create a iterator
        iterator = tf.data.Iterator.from_structure(dataset.output_types, dataset.output_shapes)
        # get the tensor that will contain data
        feature, label = iterator.get_next()
    
        # create the initialisation operations
        init_op = iterator.make_initializer(dataset)   
    
        valid_data_x = tf.constant(valid_data)
        test_data_x = tf.constant(test_data)
    
        # Model.
    
        network = input_data(shape=[None, image_size, image_size, num_channels],
                            placeholder=data_placeholder,
                            data_preprocessing=feature_normalization,
                            data_augmentation=None,
                            name='input_d')
        .
        .
        .
        logits = fully_connected(network,...
    
        # Training computation.
        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=labels_placeholder,logits=logits))
        # Optimizer.
        optimizer = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
    
        prediction = tf.nn.softmax(logits) 
    ...
    with tf.Session(graph=graph) as session:
        tf.global_variables_initializer().run()
        # initialise the iterator with the training data
        feed_dict = {data_placeholder: train_data,
                     labels_placeholder: train_data_labels,
                     batch_size_x: batch_size}
        session.run(init_op, feed_dict=feed_dict)

        for step in range(num_steps):

            batch_data, batch_labels = session.run([feature, label],
                                                   feed_dict=feed_dict)

            feed_dict2 = {data_placeholder: batch_data,
                          labels_placeholder: batch_labels}
            _, l, predictions = session.run([optimizer, loss, prediction],
                                            feed_dict=feed_dict2)
            if step % 50 == 0:

                trainAccrMb = accuracy(predictions, batch_labels)

                feed_dict = {data_placeholder: valid_data_x.eval(),
                             labels_placeholder: valid_data_labels}
                valid_prediction = session.run(prediction, feed_dict=feed_dict)
                validAccr = accuracy(valid_prediction, valid_data_labels)

        # evaluate on the test set while the session is still open
        feed_dict = {data_placeholder: test_data_x.eval(),
                     labels_placeholder: test_data_labels}  # , batch_size_x: len(valid_data)}

        test_prediction = session.run(prediction, feed_dict=feed_dict)

        testAccr = accuracy(test_prediction, test_data_labels)
    

You should ask more specific questions. You are asking us to write code for you; the answer should be about 3-4 lines of code. If you understand the problem, it is very specific.