
Python TensorBoard: cannot find a named scope

Tags: python, python-3.x, tensorflow, deep-learning, tensorboard

I have a scope that I named 'Pred/Accuracy' which I can't seem to find in TensorBoard. I will include my full code below, but specifically, in my definition of the cost function I have:

def compute_cost(z, Y, parameters, l2_reg=False):

    with tf.name_scope('cost'):
        logits = tf.transpose(z)
        labels = tf.transpose(Y)

        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = logits,
                                                                      labels = labels))
        if l2_reg == True:

            reg = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)

            cost = cost + tf.reduce_sum(reg)

    with tf.name_scope('Pred/Accuracy'):

        prediction = tf.argmax(z)
        correct_prediction = tf.equal(tf.argmax(z), tf.argmax(Y))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

    return cost, prediction, accuracy
But in TensorBoard I cannot see that scope, even when I click into the cost block of the graph.
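As a sanity check (not part of the training code, just a minimal sketch assuming TensorFlow 1.x), one can list the op names in the default graph after building it, to see which scope the accuracy ops actually landed in:

import tensorflow as tf

# After the graph has been built (e.g. after calling compute_cost above),
# print every op whose name starts with the scope in question.
# If nothing prints, the ops were never created under 'Pred/Accuracy'.
for op in tf.get_default_graph().get_operations():
    if op.name.startswith('Pred/Accuracy'):
        print(op.name)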

Below is my full code, excluding the imports and data preprocessing:

# Create X and Y placeholders

def create_xy_placeholder(n_x, n_y):
    X = tf.placeholder(tf.float32, shape = [n_x, None], name = 'X')
    Y = tf.placeholder(tf.float32, shape = [n_y, None], name = 'Y')

    return X, Y


# initialize parameters hidden layers

def initialize_parameters(n_x, scale, hidden_units):

    hidden_units= [n_x] + hidden_units
    parameters = {}
    regularizer = tf.contrib.layers.l2_regularizer(scale)

    for i in range(0, len(hidden_units[1:])):
        with tf.variable_scope('hidden_parameters_'+str(i+1)):
            w = tf.get_variable("W"+str(i+1), [hidden_units[i+1], hidden_units[i]], 
                                    initializer=tf.contrib.layers.xavier_initializer(),
                                    regularizer=regularizer)

            b = tf.get_variable("b"+str(i+1), [hidden_units[i+1], 1], 
                                    initializer = tf.constant_initializer(0.1))

            parameters.update({"W"+str(i+1): w})
            parameters.update({"b"+str(i+1): b})

    return parameters


# forward propagation with batch norm and dropout

def forward_propagation(X, parameters, batch_norm=False, keep_prob=1):

    a_new = X   

    for i in range(0, int(len(parameters)/2)-1):

        with tf.name_scope('forward_pass_'+str(i+1)):

            w = parameters['W'+str(i+1)]
            b = parameters['b'+str(i+1)]

            z = tf.matmul(w, a_new) + b

            if batch_norm == True:
                z = tf.layers.batch_normalization(z, momentum=0.99, axis=0)

            a = tf.nn.relu(z)

            if keep_prob < 1:
                a = tf.nn.dropout(a, keep_prob)  

            a_new = a

            tf.summary.histogram('act_'+str(i+1), a_new)

    # calculating final Z before input into cost as logit 

    with tf.name_scope('forward_pass_'+str(int(len(parameters)/2))):
        w = parameters['W'+str(int(len(parameters)/2))]
        b = parameters['b'+str(int(len(parameters)/2))]

        z = tf.matmul(w, a_new) + b

        if batch_norm == True:
                z = tf.layers.batch_normalization(z, momentum=0.99, axis=0)

    return z

# compute cost with option for l2 regularization

def compute_cost(z, Y, parameters, l2_reg=False):

    with tf.name_scope('cost'):
        logits = tf.transpose(z)
        labels = tf.transpose(Y)

        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = logits, 
                                                                      labels = labels))
        if l2_reg == True:

            reg = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)

            cost = cost + tf.reduce_sum(reg)

    with tf.name_scope('Pred/Accuracy'):

        prediction=tf.argmax(z)
        correct_prediction = tf.equal(tf.argmax(z), tf.argmax(Y))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

    return cost, prediction, accuracy


# defining the model (need to add keep_prob for dropout)

def model(X_train, Y_train, X_test, Y_test, 
          hidden_units=[30, 50, 50, 30, 4],            # hidden units/layers
          learning_rate = 0.0001,                       # Learning rate
          num_epochs = 2000, minibatch_size = 30,       # minibatch/ number epochs
          keep_prob=0.5,                                # dropout
          batch_norm=True,                              # batch normalization
          l2_reg=True, scale = 0.01,                    # L2 regularization/scale is lambda
          print_cost = True):

    ops.reset_default_graph()                         # to be able to rerun the model without overwriting tf variables
    tf.set_random_seed(1)                             # to keep consistent results
    seed = 3                                          # to keep consistent results
    (n_x, m) = X_train.shape                          # (n_x: input size, m : number of examples in the train set)
    n_y = Y_train.shape[0]                            # n_y : output size
    costs = []                                        # To keep track of the cost

    logs_path = '/tmp/tensorflow_logs/example/'

    # Create Placeholders of shape (n_x, n_y)
    X, Y = create_xy_placeholder(n_x, n_y)

    # Initialize parameters
    parameters = initialize_parameters(n_x, scale, hidden_units)

    # Forward propagation: Build the forward propagation in the tensorflow graph
    z = forward_propagation(X, parameters, keep_prob, batch_norm)

    # Cost function: Add cost function to tensorflow graph
    cost, prediction, accuracy = compute_cost(z, Y, parameters, l2_reg)

    # Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer.
    with tf.name_scope('optimizer'):

        optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate)

        # Op to calculate every variable gradient
        grads = tf.gradients(cost, tf.trainable_variables())
        grads = list(zip(grads, tf.trainable_variables()))
        # Op to update all variables according to their gradient
        apply_grads = optimizer.apply_gradients(grads_and_vars = grads)    


    # Initialize all the variables
    init = tf.global_variables_initializer()

    # to view in tensorboard
    tf.summary.scalar('loss', cost)
    tf.summary.scalar('accuracy', accuracy)

    # Create summaries to visualize weights
    for var in tf.trainable_variables():
        tf.summary.histogram(var.name, var)
    # Summarize all gradients
    for grad, var in grads:
        tf.summary.histogram(var.name + '/gradient', grad)

    merged_summary_op = tf.summary.merge_all()

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    # Start the session to compute the tensorflow graph
    with tf.Session(config=config) as sess:
        # Run the initialization
        sess.run(init)

        # define writer
        summary_writer = tf.summary.FileWriter(logs_path, 
                                               graph=tf.get_default_graph())

        # Do the training loop
        for epoch in range(num_epochs):

            epoch_cost = 0.                       # Defines a cost related to an epoch
            num_minibatches = int(m / minibatch_size) # number of minibatches of size minibatch_size in the train set
            seed = seed + 1
            minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)

            count = 0

            for minibatch in minibatches:

                # Select a minibatch
                (minibatch_X, minibatch_Y) = minibatch

                # IMPORTANT: The line that runs the graph on a minibatch.
                # Run the session to execute the "optimizer" and the "cost", the feedict should contain a minibatch for (X,Y).

                _ , minibatch_cost, summary = sess.run([apply_grads, cost, 
                                                        merged_summary_op], 
                                              feed_dict = {X: minibatch_X, Y: minibatch_Y})

                epoch_cost += minibatch_cost / num_minibatches

                # Write logs at every iteration
                summary_writer.add_summary(summary, epoch * num_minibatches + count)

                count += 1

            # Print the cost every epoch
            if print_cost == True and epoch % 100 == 0:
                print ("Cost after epoch %i: %f" % (epoch, epoch_cost))
                prediction1=tf.argmax(z)
#                print('Z5: ', Z5.eval(feed_dict={X: minibatch_X, Y: minibatch_Y}))
                print('prediction: ', prediction1.eval(feed_dict={X: minibatch_X, 
                                                                  Y: minibatch_Y}))

                correct1=tf.argmax(Y)
#                print('Y: ', Y.eval(feed_dict={X: minibatch_X, 
#                                                            Y: minibatch_Y}))
                print('correct: ', correct1.eval(feed_dict={X: minibatch_X, 
                                                            Y: minibatch_Y}))

            if print_cost == True and epoch % 5 == 0:
                costs.append(epoch_cost)

        # plot the cost
        plt.plot(np.squeeze(costs))
        plt.ylabel('cost')
        plt.xlabel('iterations (per tens)')
        plt.title("Learning rate =" + str(learning_rate))
        plt.show()

        # lets save the parameters in a variable
        parameters = sess.run(parameters)
        print ("Parameters have been trained!")

        # Calculate the correct predictions
        correct_prediction = tf.equal(tf.argmax(z), tf.argmax(Y))

        # Calculate accuracy on the test set

        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

        print ("Train Accuracy:", accuracy.eval({X: X_train, Y: Y_train}))
        print ("Test Accuracy:", accuracy.eval({X: X_test, Y: Y_test}))

        print("Run the command line:\n" \
          "--> tensorboard --logdir=/tmp/tensorflow_logs " \
          "\nThen open http://0.0.0.0:6006/ into your web browser")



        return parameters


# run model on test data


parameters = model(x_train, y_train, x_test, y_test, keep_prob=1)