This is not a good neural network (the data is not normalized, the learning rate is about twice as high as it should be, and training accuracy has not been coded), but the image-processing code works.
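A minimal sketch of the two quickest fixes (the names mirror the script below; the exact values are assumptions, not part of the original answer):

import numpy as np

learning_rate = 0.0005  # e.g. half of the 0.001 used in the script

def normalize_images(images):
    # scale flattened 8-bit grayscale pixels from [0, 255] into [0.0, 1.0]
    return np.asarray(images, dtype=np.float32) / 255.0

# usage: cv_img = normalize_images(cv_img) after the image-loading loop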


Hope this helps.

You have basically described what you need: you have to find a new set of images that come with labels. You can either a) find an existing set of images and labels, or b) download your own images and label them yourself.

Do the existing 60,000 images (50,000 train / 10,000 test) not fit your use case? That is a very good dataset to start doing computer vision with.

Please give us more information about the cifar-10 dataset so that we can help you create the same structure; people who do not know how the cifar-10 dataset is built will not have the time to research it in order to help you.
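For reference, each batch file in the python version of cifar-10 unpickles to a dict with a 'data' array (10000 x 3072 uint8; each row is the red, green, then blue plane of one 32x32 image) and a 'labels' list of ints in 0-9. A minimal sketch of reading one batch (the path is just an example):

import cPickle

def load_cifar10_batch(path):
    # each cifar-10 (python version) batch file is a pickled dict
    with open(path, 'rb') as f:
        batch = cPickle.load(f)
    images = batch['data'].reshape(-1, 3, 32, 32)  # (N, channel, row, col)
    labels = batch['labels']                       # list of ints in 0-9
    return images, labels

# images, labels = load_cifar10_batch('./cifar-10-batches-py/data_batch_1')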
#!/usr/bin/python
import cv2
import numpy as np
import tensorflow as tf
import glob
import re
import random


# Parameters
learning_rate = 0.001
training_iters = 20000
batch_size = 120
display_step = 10

# Network Parameters
n_input = 784 # input size (images are resized to 28*28 = 784 pixels)
n_classes = 1 # number of outputs (a single angle bucket here, not MNIST's 10 digits)
dropout = 0.75 # Dropout, probability to keep units

#image = np.reshape(np.asarray(mnist.train.images[0]), (28,28))  # disabled: unused, and `mnist` is never loaded in this script

#Process Images

cv_img = []
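#Each JPEG in ./images is resized to 28x28, converted to grayscale, and
#flattened into a length-784 vector so it matches n_input above.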
for img in glob.glob("./images/*.jpeg"):
    n  = cv2.cvtColor(cv2.resize(cv2.imread(img), (28,28)), cv2.COLOR_BGR2GRAY)
    n = np.asarray(n)
    n = np.reshape(n, n_input)
    cv_img.append(n)

#Process the angle file: read the text line by line into a list
with open("./images/allinfo.txt") as f:
    content = f.readlines()

#Initialize arrays to unpack data file
angle = []
image_number = []


#Iterate through the lines and split each one on the commas separating the values.
#Field 1 holds the image number and field 7 the angle; both are parsed as floats.
for i in range(len(content)):
    content[i] = content[i][:-1].split(',')
    image_number.append(float(content[i][1]))
    angle.append(float(content[i][7]))

#Reshape the angles into a column vector (the train/test split happens further down)
angle = np.atleast_2d(angle).T


##Encode angle into n_classes buckets (angles range from -1 to 1)
##NOTE: the random.uniform call overwrites the real angles with random values;
##it looks like a testing stand-in and should be removed for real training
for i in range(len(angle)):
    angle[i] = random.uniform(-1,1)
    angle[i] = int((angle[i]+1.0)*n_classes/2.)


#Create a one-hot version of angle
angle_one_hot = np.zeros((len(angle),n_classes))

for c in range(len(angle)):
    one_hot = np.zeros(n_classes)
    #clamp the bucket index so an angle of exactly 1.0 stays in range
    one_hot[min(int(angle[c]), n_classes - 1)] = 1
    angle_one_hot[c] = one_hot


image_number = np.atleast_2d(image_number).T
test_data =  np.hstack((image_number, angle))
#print test_data
train_percent = .8
train_number = int(len(test_data)*train_percent)
train_data = np.zeros((train_number, 2))
for i in range(train_number):
    rand = random.randrange(0,len(test_data))
    train_data[i] = test_data[rand]
    test_data = np.delete(test_data, rand, 0)
test_data_images = test_data[:,0]
test_data_angles = test_data[:,1]
train_data_images, train_data_angles = train_data[:,0], train_data[:,1]



def gen_batch(angles, images, batch_size, image_array=cv_img):
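    """Draw batch_size random (image, angle) pairs.

    images holds file numbers that index into image_array (the flattened
    images loaded above); angles holds the matching target values.
    """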
    indices = random.sample(xrange(0,len(images)), batch_size)
    batch_images = []
    batch_angles = []
 #   print angles
    for i in range(batch_size):
        batch_images.append(image_array[int(images[indices[i]])][:])
        batch_angles.append(angles[indices[i]])
    batch_images = np.asarray(batch_images)
    batch_angles = np.asarray(batch_angles)

    return batch_images, batch_angles


# tf Graph input
x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32)
keep_prob = tf.placeholder(tf.float32) #dropout (keep probability)

# Create some wrappers for simplicity
def conv2d(x, W, b, strides=1):
    # Conv2D wrapper, with bias and relu activation
    x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
    x = tf.nn.bias_add(x, b)
    return tf.nn.relu(x)


def maxpool2d(x, k=2):
    # MaxPool2D wrapper
    return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],
                          padding='SAME')


# Create model
def conv_net(x, weights, biases, dropout):
    # Reshape input picture
    x = tf.reshape(x, shape=[-1, 28, 28, 1])

    # Convolution Layer
    conv1 = conv2d(x, weights['wc1'], biases['bc1'])
    # Max Pooling (down-sampling)
    conv1 = maxpool2d(conv1, k=2)

    # Convolution Layer
    conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])
    # Max Pooling (down-sampling)
    conv2 = maxpool2d(conv2, k=2)

    # Fully connected layer
    # Reshape conv2 output to fit fully connected layer input
    fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]])
    fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
    fc1 = tf.nn.relu(fc1)
    # Apply Dropout
    fc1 = tf.nn.dropout(fc1, dropout)

    # Output, class prediction
    out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])
    return out

# Store layers weight & bias
weights = {
    # 5x5 conv, 1 input, 32 outputs
    'wc1': tf.Variable(tf.random_normal([5, 5, 1, 32])),
    # 5x5 conv, 32 inputs, 64 outputs
    'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64])),
    # fully connected, 7*7*64 inputs, 1024 outputs
    'wd1': tf.Variable(tf.random_normal([7*7*64, 1024])),
    # 1024 inputs, 10 outputs (class prediction)
    'out': tf.Variable(tf.random_normal([1024, n_classes]))
}

biases = {
    'bc1': tf.Variable(tf.random_normal([32])),
    'bc2': tf.Variable(tf.random_normal([64])),
    'bd1': tf.Variable(tf.random_normal([1024])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}
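#Note: tf.random_normal samples with stddev=1.0 by default, which is a large
#initialization for a net this size; tf.truncated_normal with a small stddev
#(e.g. 0.1) is a common alternative starting point.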

# Construct model
pred = conv_net(x, weights, biases, keep_prob)

# Define loss and optimizer
#cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
cost = tf.reduce_mean(tf.square(pred - tf.reshape(y, [-1, 1]))) #mean squared error; the reshape keeps pred and y aligned as (batch, 1) instead of broadcasting to (batch, batch)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Evaluate model
# NOTE: accuracy is only a placeholder here -- it just casts the labels to
# floats, so it does not measure prediction quality (as noted above the code)
correct_pred = y
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Initializing the variables (tf.initialize_all_variables is the pre-1.0 API;
# later TensorFlow versions use tf.global_variables_initializer)
init = tf.initialize_all_variables()

# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    step = 1
    print(y)
    # Keep training until reach max iterations
    while step * batch_size < training_iters:
        batch_x, batch_y = gen_batch(train_data_angles, train_data_images, batch_size)
        #cv2.imshow('trash', batch_x[0,:].reshape((28,28)))
        #cv2.waitKey(0)
        #print(batch_y)
        # Run optimization op (backprop)
        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y,
                                       keep_prob: dropout})
        if step % display_step == 0:
            # Calculate batch loss and accuracy
            loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_x,
                                                              y: batch_y,
                                                              keep_prob: 1.})
            print "Iter " + str(step*batch_size) + ", Minibatch Loss= " + \
                  "{:.6f}".format(loss) + ", Training Accuracy= " + \
                  "{:.5f}".format(acc)
        step += 1
    print "Optimization Finished!"

    # Calculate accuracy for all test images
    img, lbls = gen_batch(test_data_angles, test_data_images, len(test_data_angles))
    print "Testing Accuracy:", \
        sess.run(accuracy, feed_dict={x: img,
                                      y: lbls,
                                      keep_prob: 1.})
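
Since the accuracy op above is only a placeholder, a minimal sketch of a real metric for this setup (assuming the same graph and feeds; not part of the original answer) would be a mean absolute error op:

# mean absolute error between predicted and true angle buckets
mae = tf.reduce_mean(tf.abs(pred - tf.reshape(y, [-1, 1])))
# evaluated the same way as the placeholder accuracy, inside the session:
# print "Test MAE:", sess.run(mae, feed_dict={x: img, y: lbls, keep_prob: 1.})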