Python: How do I check whether my TensorFlow code is correct?


The following is from:

Referring to the explanation above, I wrote the code below:

import os
import tensorflow as tf
import numpy as np

# hyper-params 
learning_rate = 0.0002
epochs = 250
batch_size = 16
N_w = 11 #number of frames concatenated together 
channels = 9*N_w
drop_out = [0.5, 0.5, 0.5, 0, 0, 0, 0, 0]

# input_tensor X  
X = tf.placeholder(tf.float32, [batch_size, 256, 256, channels]) # batch_size x height x width x channels (channels = 9*N_w)

def conv_down(x, N, stride, count): #Conv [4x4, str_2] > Batch_Normalization > Leaky_ReLU
    with tf.variable_scope("conv_down_{}_{}".format(N, count)) : #N == depth of tensor 
        with tf.variable_scope("conv_down_4x4_str{}".format(stride)) : #this's used for downsampling
            x = tf.layers.conv2d(x, N, kernel_size=4, strides=stride, padding='same', kernel_initializer=tf.truncated_normal_initializer(stddev=np.sqrt(0.2)))
            x = tf.contrib.layers.batch_norm(x) 
            x = tf.nn.relu(x) #change to leaky_relu in TF 1.8; currently on 1.1
    return x

def conv_up(x, N, drop_rate, stride, count): #Conv_transpose [4x4, str_2] > Batch_Normalizaiton > DropOut > ReLU
    with tf.variable_scope("conv_up_{}_{}".format(N, count)) : #N == depth of tensor
        with tf.variable_scope("conv_up_4x4_str{}".format(stride)) :
            x = tf.layers.conv2d_transpose(x, N, kernel_size=4, strides=stride, padding='same', kernel_initializer=tf.truncated_normal_initializer(stddev=np.sqrt(0.2)))
            x = tf.contrib.layers.batch_norm(x)
            if drop_rate != 0: #use != rather than `is not`; `is not` compares identity, not value
                x = tf.nn.dropout(x, keep_prob=1.0 - drop_rate) #drop_rate is the fraction dropped; tf.nn.dropout takes the fraction kept
            x = tf.nn.relu(x)
        return x

def conv_refine(x, N, drop_rate, index, count): #Conv [3x3, str_1] > Batch_Normalization > DropOut > ReLU
    #the two refinement convs were identical functions; merged into one with an index parameter
    with tf.variable_scope("conv_refine_{}_{}_{}".format(index, N, count)) :
        with tf.variable_scope("conv_refine_3x3_str1") :
            x = tf.layers.conv2d(x, N, kernel_size=3, strides=1, padding='same', kernel_initializer=tf.truncated_normal_initializer(stddev=np.sqrt(0.2)))
            x = tf.contrib.layers.batch_norm(x)
            if drop_rate != 0: #use != rather than `is not`; `is not` compares identity, not value
                x = tf.nn.dropout(x, keep_prob=1.0 - drop_rate) #drop_rate is the fraction dropped; tf.nn.dropout takes the fraction kept
            x = tf.nn.relu(x)
        return x

def conv_upsample(x, N, drop_rate, stride, count):
    with tf.variable_scope("conv_upsamp_{}_{}".format(N,count)) :
        x = conv_up(x, 2*N, drop_rate, stride, count)
        x = conv_refine(x, N, drop_rate, 1, count)
        x = conv_refine(x, N, drop_rate, 2, count)
    return x 

def biLinearDown(x, N):
    return tf.image.resize_images(x, [N, N])

def finalTanH(x):
    return tf.nn.tanh(x)

def T(x):

    #channel_output_structure
    down_channel_output = [64, 128, 256, 512, 512, 512, 512, 512]
    up_channel_output= [512, 512, 512, 512, 256, 128, 64, 3]
    biLinearDown_output= [32, 64, 128] #for skip-connection 

    #down_sampling
    conv1 = conv_down(x, down_channel_output[0], 2, 1)
    conv2 = conv_down(conv1, down_channel_output[1], 2, 2)
    conv3 = conv_down(conv2, down_channel_output[2], 2, 3)
    conv4 = conv_down(conv3, down_channel_output[3], 1, 4)
    conv5 = conv_down(conv4, down_channel_output[4], 1, 5)
    conv6 = conv_down(conv5, down_channel_output[5], 1, 6)
    conv7 = conv_down(conv6, down_channel_output[6], 1, 7)
    conv8 = conv_down(conv7, down_channel_output[7], 1, 8)
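    # spatial sizes: the three stride-2 convs take 256 -> 128 -> 64 -> 32; conv4..conv8 use stride 1, so the tensor stays 32x32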

    #upsampling 
    dconv1 = conv_upsample(conv8, up_channel_output[0], drop_out[0], 1, 1)
    dconv2 = conv_upsample(dconv1, up_channel_output[1], drop_out[1], 1, 2)
    dconv3 = conv_upsample(dconv2, up_channel_output[2], drop_out[2], 1, 3)
    dconv4 = conv_upsample(dconv3, up_channel_output[3], drop_out[3], 1, 4)
    dconv5 = conv_upsample(dconv4, up_channel_output[4], drop_out[4], 1, 5)
    dconv6 = conv_upsample(tf.concat([dconv5, biLinearDown(x, biLinearDown_output[0])], axis=3), up_channel_output[5], drop_out[5], 2, 6)
    dconv7 = conv_upsample(tf.concat([dconv6, biLinearDown(x, biLinearDown_output[1])], axis=3), up_channel_output[6], drop_out[6], 2, 7)
    dconv8 = conv_upsample(tf.concat([dconv7, biLinearDown(x, biLinearDown_output[2])], axis=3), up_channel_output[7], drop_out[7], 2, 8)
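    # dconv1..dconv5 stay at 32x32; the three stride-2 transposed convs go 32 -> 64 -> 128 -> 256,
    # with x bilinearly resized to 32/64/128 and concatenated in as skip connections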

    #final_tanh
    T_x = finalTanH(dconv8)

    return T_x

pseudo_np = np.random.uniform(low=-1., high=1., size=[16, 256, 256, 11]) # note: 11 channels here, while the placeholder X above expects channels = 9*N_w

pseudo_input = tf.Variable(np.float32(pseudo_np))

T_x = T(pseudo_input)
The shape of the final variable T_x is (16, 256, 256, 3), so I think the dimensions are fine. How can I go further and check that the network structure itself is coded correctly, and not just the dimensions?
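To check the shape programmatically rather than by eye, the static shape can also be asserted directly on the graph (a small sketch using the names from the code above):

# quick static-shape check, before ever running a session
out_shape = T_x.get_shape().as_list()
print(out_shape)                               # expect [16, 256, 256, 3]
assert out_shape == [batch_size, 256, 256, 3]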


Is there a practitioner's guide to deep learning with TensorFlow?

Yes - welcome to the world of TensorFlow, where everything is kept at a distance from you and you mostly have no idea whether it is really doing what you want. I think this has always been one of the big complaints about the platform, which does not offer a stellar workspace for this kind of introspection. Nonetheless, here is a non-exhaustive list of options for finding out what is going on inside your network:

  • Call sess.run on any variable you are interested in.
    • This prints the tensors flowing through your network as numpy arrays, which are easy to print and read (a small sketch follows this list).
  • Create TensorBoard summaries for every quantity you care about.
    • I recommend sending every loss to TensorBoard, along with the gradients and the sum of the weights (for the whole network at least, if not per layer). These show you how any quantity of interest evolves over training; I find this invaluable for insight into what is happening under the hood (see the sketch after this list).
  • TensorFlow's eager execution mode lets you look at the values in your graph in real time.
    • I admit I have not used it yet; there are so many TF APIs to keep up with that I simply have not had the chance. From what I understand it works reasonably well but is somewhat limited. I wish I could offer more, but that is where things stand (a sketch is included after this list all the same).
  • Finally, I like to write small test scripts to double-check that all my reshape/tile/concat operations behave exactly the way I expect.

    • That means a simple script like:

      • A = tf.constant([[1, 2], [3, 4]]); B = tf.reshape(A, [-1]); sess = tf.Session(); print(sess.run(B))
    • It is a tiny example, but you get the idea. Sometimes a transpose + tile + reshape + whatever else comes out differently than you expect, so this is an easy way to double-check that the ops in your graph do what you intend.
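For the first option, a minimal sketch of fetching a tensor with sess.run, reusing T_x and pseudo_input from the question's code (run after building the graph; the statistics printed at the end are just one way to eyeball the result):

# evaluate T_x and inspect it as a plain numpy array
sess = tf.Session()
sess.run(tf.global_variables_initializer())  # pseudo_input is a tf.Variable, so initialize first
out = sess.run(T_x)                          # any tensor in the graph can be fetched this way
print(out.shape, out.min(), out.max())       # expect (16, 256, 256, 3), values in (-1, 1) after tanh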
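For the TensorBoard option, a minimal sketch; the loss below is a stand-in made up purely for illustration (the question's code defines none), and the ./logs directory is arbitrary:

# write loss, weight, and gradient summaries for TensorBoard (run after building the graph above)
loss = tf.reduce_mean(tf.square(T_x))                  # stand-in loss, for illustration only
tf.summary.scalar("loss", loss)
grads = tf.gradients(loss, tf.trainable_variables())
for v, g in zip(tf.trainable_variables(), grads):
    name = v.name.replace(":", "_")                    # ':' is not allowed in summary names
    tf.summary.histogram(name + "_weights", v)
    if g is not None:
        tf.summary.histogram(name + "_grad", g)
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter("./logs", tf.get_default_graph())

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    summary = sess.run(merged)
    writer.add_summary(summary, global_step=0)         # view with: tensorboard --logdir ./logs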
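For the eager-execution option, a minimal sketch, assuming a TensorFlow version that ships tf.enable_eager_execution (1.7 or later; the question is on 1.1, where eager mode does not exist yet):

import tensorflow as tf
tf.enable_eager_execution()            # must be called once, before any graph ops are created

a = tf.constant([[1., 2.], [3., 4.]])
b = tf.reshape(a, [-1])
print(b)                               # values are visible immediately, no Session needed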

Happy debugging!

Use the TensorFlow Debugger (tfdbg). It shows the values and shapes of all tensors, lets you step through the graph, and comes with nice filters.
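A minimal sketch of hooking it in (TF 1.x, where tfdbg lives under tensorflow.python.debug; T_x is reused from the question's code):

from tensorflow.python import debug as tf_debug

# wrap the session so every run drops into the tfdbg command-line UI
sess = tf.Session()
sess = tf_debug.LocalCLIDebugWrapperSession(sess)
sess.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)  # one of the built-in filters
sess.run(tf.global_variables_initializer())
sess.run(T_x)   # inside the CLI you can list tensors, inspect values and shapes, and step through nodes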