Python: Tensorflow image segmentation based on linear regression


Previously, I built a network that does binary image segmentation -- foreground and background, i.e. two classes. Now, instead of a binary classification, I want to do a linear regression on each pixel.

Imagine there is a 3D surface in the image view, and I want to segment the exact middle of that surface with a linear value of 10. Say the edge of the surface has a value of 5; naturally, all the voxels in between fall in the 5-10 range. Then, as voxels move away from the surface, the values quickly drop off to zero.

In the binary classification I had one image with 1s at the foreground locations and another with 1s at the background locations -- in other words, a classification :) Now I just want a single ground-truth image with values like the following.
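
For illustration only, here is a minimal numpy sketch (with made-up values) of what one row of such a regression ground truth might look like, as opposed to a two-channel classification label:

import numpy as np

# Hypothetical 1-D slice through the surface: zero far away, ramping up
# through ~5 at the edges to 10 at the exact middle (illustrative values only).
row = np.array([0., 0., 1., 3., 5., 8., 10., 8., 5., 3., 1., 0., 0.])
# The actual ground truth would be a 396x396 float array of such values.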

With this linear regression example, I assumed I could simply change the cost function to a least-squares one --
cost = tf.square(y - pred)
-- and, of course, change the ground truth accordingly.
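
For reference, a minimal sketch of the kind of least-squares cost I mean, reduced to a scalar (assuming pred and y are both flattened to shape [batch, n_input_x * n_input_y], as in the code below):

# Element-wise squared error, averaged over all pixels and the batch
# so the optimizer sees a single scalar of moderate magnitude.
squared_error = tf.square(y - pred)
cost = tf.reduce_mean(squared_error)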

However, when I do this, my predicted output is
NaN
. The last layer is a linear sum of weight matrices multiplied by the final outputs, and I suspect this has something to do with it. I can't apply a
tf.nn.softmax()
to it, because that would normalize the values between 0 and 1.
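
One thing I have not tried yet, but which is often suggested when a regression loss blows up to NaN, is clipping the gradients before applying them -- roughly like this, instead of the plain minimize() call:

# Sketch only: clip each gradient to [-1, 1] before applying it.
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
grads_and_vars = optimizer.compute_gradients(cost)
clipped = [(tf.clip_by_value(g, -1.0, 1.0), v)
           for g, v in grads_and_vars if g is not None]
train_op = optimizer.apply_gradients(clipped)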

So I believe
cost = tf.square(y - pred)
is the source of the problem. Next I tried
cost = tf.reduce_sum(tf.square(y - pred))
but that didn't work either.

Then I tried the (recommended)
cost = tf.reduce_sum(tf.pow(pred - y, 2)) / (2 * batch_size)
, but with no success.
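
For scale, here is a back-of-the-envelope calculation (with a made-up per-pixel error) of how large that summed cost gets for a single 396x396 image, which may be part of why the updates diverge:

# If a typical per-pixel error is around 5, the squared error summed over
# one 396x396 image is already huge compared to a per-pixel mean:
per_pixel_sq_err = 5.0 ** 2
summed = per_pixel_sq_err * 396 * 396   # ~3.9 million per image with reduce_sum
mean = per_pixel_sq_err                 # ~25 with reduce_mean
print(summed, mean)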

Should I be initializing the weights differently? Normalizing the weights?
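
By "differently" I mean, for example, something with a much smaller scale than the default-stddev tf.random_normal used in my code below -- just a sketch:

# Sketch: initialize with a small standard deviation (0.1) instead of the
# default stddev of 1.0 that tf.random_normal uses.
wc1 = tf.Variable(tf.truncated_normal([5, 5, 1, 32], stddev=0.1))
bc1 = tf.Variable(tf.constant(0.1, shape=[32]))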

The full code is below:

import tensorflow as tf
import pdb
import numpy as np
from numpy import genfromtxt
from PIL import Image
from tensorflow.python.ops import rnn, rnn_cell
from tensorflow.contrib.learn.python.learn.datasets.scroll import scroll_data

# Parameters
learning_rate = 0.001
training_iters = 1000000
batch_size = 2
display_step = 1

# Network Parameters
n_input_x = 396 # Input image x-dimension
n_input_y = 396 # Input image y-dimension
n_classes = 1 # Binary classification -- on a surface or not
n_steps = 396
n_hidden = 128
n_output = n_input_y * n_classes

dropout = 0.75 # Dropout, probability to keep units

# tf Graph input
x = tf.placeholder(tf.float32, [None, n_input_x, n_input_y])
y = tf.placeholder(tf.float32, [None, n_input_x * n_input_y], name="ground_truth")
keep_prob = tf.placeholder(tf.float32) #dropout (keep probability)

# Create some wrappers for simplicity
def conv2d(x, W, b, strides=1):
    # Conv2D wrapper, with bias and relu activation
    x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
    x = tf.nn.bias_add(x, b)
    return tf.nn.relu(x)

def maxpool2d(x, k=2):
    # MaxPool2D wrapper
    return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],
                          padding='SAME')

def deconv2d(prev_layer, w, b, output_shape, strides):
    # Deconv layer
    deconv = tf.nn.conv2d_transpose(prev_layer, w, output_shape=output_shape, strides=strides, padding="VALID")
    deconv = tf.nn.bias_add(deconv, b)
    deconv = tf.nn.relu(deconv)
    return deconv

# Create model
def net(x, cnn_weights, cnn_biases, dropout):
    # Reshape input picture
    x = tf.reshape(x, shape=[-1, 396, 396, 1])

    with tf.name_scope("conv1") as scope:
    # Convolution Layer
        conv1 = conv2d(x, cnn_weights['wc1'], cnn_biases['bc1'])
        # Max Pooling (down-sampling)
        #conv1 = tf.nn.local_response_normalization(conv1)
        conv1 = maxpool2d(conv1, k=2)

    # Convolution Layer
    with tf.name_scope("conv2") as scope:
        conv2 = conv2d(conv1, cnn_weights['wc2'], cnn_biases['bc2'])
        # Max Pooling (down-sampling)
        # conv2 = tf.nn.local_response_normalization(conv2)
        conv2 = maxpool2d(conv2, k=2)

    # Convolution Layer
    with tf.name_scope("conv3") as scope:
        conv3 = conv2d(conv2, cnn_weights['wc3'], cnn_biases['bc3'])
        # Max Pooling (down-sampling)
        # conv3 = tf.nn.local_response_normalization(conv3)
        conv3 = maxpool2d(conv3, k=2)


    temp_batch_size = tf.shape(x)[0] #batch_size shape
    with tf.name_scope("deconv1") as scope:
        output_shape = [temp_batch_size, 99, 99, 64]
        strides = [1,2,2,1]
        # conv4 = deconv2d(conv3, weights['wdc1'], biases['bdc1'], output_shape, strides)
        deconv = tf.nn.conv2d_transpose(conv3, cnn_weights['wdc1'], output_shape=output_shape, strides=strides, padding="SAME")
        deconv = tf.nn.bias_add(deconv, cnn_biases['bdc1'])
        conv4 = tf.nn.relu(deconv)

        # conv4 = tf.nn.local_response_normalization(conv4)

    with tf.name_scope("deconv2") as scope:
        output_shape = [temp_batch_size, 198, 198, 32]
        strides = [1,2,2,1]
        conv5 = deconv2d(conv4, cnn_weights['wdc2'], cnn_biases['bdc2'], output_shape, strides)
        # conv5 = tf.nn.local_response_normalization(conv5)

    with tf.name_scope("deconv3") as scope:
        output_shape = [temp_batch_size, 396, 396, 1]
        #this time don't use ReLu -- since output layer
        conv6 = tf.nn.conv2d_transpose(conv5, cnn_weights['wdc3'], output_shape=output_shape, strides=[1,2,2,1], padding="VALID")
        x = tf.nn.bias_add(conv6, cnn_biases['bdc3'])

    # Include dropout
    #conv6 = tf.nn.dropout(conv6, dropout)

    x = tf.reshape(conv6, [-1, n_input_x, n_input_y])

    # Prepare data shape to match `rnn` function requirements
    # Current data input shape: (batch_size, n_steps, n_input)
    # Permuting batch_size and n_steps
    x = tf.transpose(x, [1, 0, 2])
    # Reshaping to (n_steps*batch_size, n_input)

    x = tf.reshape(x, [-1, n_input_x])
    # Split to get a list of 'n_steps' tensors of shape (batch_size, n_hidden)
    # This input shape is required by `rnn` function
    x = tf.split(0, n_steps, x)
    # Define a lstm cell with tensorflow
    lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0, state_is_tuple=True, activation=tf.nn.relu)
    # lstm_cell = rnn_cell.MultiRNNCell([lstm_cell] * 12, state_is_tuple=True)
    # lstm_cell = rnn_cell.DropoutWrapper(lstm_cell, output_keep_prob=0.8)
    outputs, states = rnn.rnn(lstm_cell, x, dtype=tf.float32)
    # Linear activation, using rnn inner loop last output
    # pdb.set_trace()
    output = []
    for i in xrange(396):
        output.append(tf.matmul(outputs[i], lstm_weights[i]) + lstm_biases[i])

    return output


cnn_weights = {
    # 5x5 conv, 1 input, 32 outputs
    'wc1' : tf.Variable(tf.random_normal([5, 5, 1, 32])),
    # 5x5 conv, 32 inputs, 64 outputs
    'wc2' : tf.Variable(tf.random_normal([5, 5, 32, 64])),
    # 5x5 conv, 32 inputs, 64 outputs
    'wc3' : tf.Variable(tf.random_normal([5, 5, 64, 128])),

    'wdc1' : tf.Variable(tf.random_normal([2, 2, 64, 128])),

    'wdc2' : tf.Variable(tf.random_normal([2, 2, 32, 64])),

    'wdc3' : tf.Variable(tf.random_normal([2, 2, 1, 32])),
}

cnn_biases = {
    'bc1': tf.Variable(tf.random_normal([32])),
    'bc2': tf.Variable(tf.random_normal([64])),
    'bc3': tf.Variable(tf.random_normal([128])),
    'bdc1': tf.Variable(tf.random_normal([64])),
    'bdc2': tf.Variable(tf.random_normal([32])),
    'bdc3': tf.Variable(tf.random_normal([1])),
}

lstm_weights = {}
lstm_biases = {}

for i in xrange(396):
    lstm_weights[i] = tf.Variable(tf.random_normal([n_hidden, n_output]))
    lstm_biases[i] = tf.Variable(tf.random_normal([n_output]))


# Construct model
# with tf.name_scope("net") as scope:
pred = net(x, cnn_weights, cnn_biases, keep_prob)
# pdb.set_trace()
pred = tf.pack(pred)
pred = tf.transpose(pred, [1,0,2])
pred = tf.reshape(pred, [-1, n_input_x * n_input_y])

with tf.name_scope("opt") as scope:
    # cost = tf.reduce_sum(tf.square(y-pred))
    cost = tf.reduce_sum(tf.pow((pred-y),2)) / (2*batch_size)
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Evaluate model
with tf.name_scope("acc") as scope:
    # accuracy is the difference between prediction and ground truth matrices
    correct_pred = tf.equal(0,tf.cast(tf.sub(cost,y), tf.int32))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Initializing the variables
init = tf.initialize_all_variables()
saver = tf.train.Saver()
# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    summary = tf.train.SummaryWriter('/tmp/logdir/', sess.graph) #initialize graph for tensorboard
    step = 1
    # Import data
    data = scroll_data.read_data('/home/kendall/Desktop/')
    # Keep training until reach max iterations
    while step * batch_size < training_iters:
        batch_x, batch_y = data.train.next_batch(batch_size)
        # Run optimization op (backprop)
        # pdb.set_trace()
        batch_x = batch_x.reshape((batch_size, n_input_x, n_input_y))
        batch_y = batch_y.reshape(batch_size, n_input_x * n_input_y)
        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
        step = step + 1
        if step % display_step == 0:
            batch_y = batch_y.reshape(batch_size, n_input_x * n_input_y)
            loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_x,
                                                              y: batch_y})


            # Make prediction
            im = Image.open('/home/kendall/Desktop/cropped/temp data0001.tif')
            batch_x = np.array(im)
            batch_x = batch_x.reshape((1, n_input_x, n_input_y))
            batch_x = batch_x.astype(float)
            prediction = sess.run(pred, feed_dict={x: batch_x})
            prediction = prediction.reshape((1, n_input_x * n_input_y))
            prediction = tf.nn.softmax(prediction)
            prediction = prediction.eval()
            prediction = prediction.reshape((n_input_x, n_input_y))

            # my_accuracy = accuracy_custom(temp_arr1,batch_y[0,:,:,0])
            #
            # print "Step = " + str(step) + "  |  Accuracy = " + str(my_accuracy)
            print "Step = " + str(step) + "  |  Accuracy = " + str(acc)

            # csv_file = "CNN-LSTM-reg/CNNLSTMreg-step-" + str(step) + "-accuracy-" + str(my_accuracy) + ".csv"
            csv_file = "CNN-LSTM-reg/CNNLSTMreg-step-" + str(step) + "-accuracy-" + str(acc) + ".csv"
            np.savetxt(csv_file, prediction, delimiter=",")