Python: Understanding CNN hyperparameters


I have a few questions about convolutional neural networks. My code is not clean at all, so I apologize in advance.

First, I have a dataset of 10,000 images with dimensions (28, 28, 1). I want to build a convolutional neural network that classifies these images into 5 classes (this is half of the well-known Zalando dataset).
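The data loading is not shown in the post; here is a minimal sketch of how the train_X / train_y / test_X / test_y names used in the training loop below might be prepared, assuming the tf.keras Fashion-MNIST loader and keeping only the first five classes (this preparation step is my assumption, not part of the original post):

import numpy as np
import tensorflow as tf

# Load Fashion-MNIST (the Zalando dataset) and keep only classes 0-4.
(train_X, train_y), (test_X, test_y) = tf.keras.datasets.fashion_mnist.load_data()

def prepare(images, labels, num_classes=5):
    mask = labels < num_classes                      # keep the first five classes
    images = images[mask].reshape(-1, 784) / 255.0   # flatten to 784 and scale to [0, 1]
    labels = np.eye(num_classes)[labels[mask]]       # one-hot encode
    return images.astype(np.float32), labels.astype(np.float32)

train_X, train_y = prepare(train_X, train_y)
test_X, test_y = prepare(test_X, test_y)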

Here is my code:

from abc import ABC
import tensorflow as tf


class layers(ABC):  # note: this class is defined but never used below
    def __init__(self, filter_size, number_of_neurons, fully_conn_neurons):
        self.filter_size = filter_size #placeholder for filter 
        self.number_of_neurons = number_of_neurons #The number of neurons
        self.fully_conn_neurons = fully_conn_neurons #Amount of neurons in the last layer

        return

class new_conv_layer(ABC):
    def __init__(self, filters, number_of_filters, initial_input, namew, nameb, defrel):
        self.filters = filters
        self.number_of_filters = number_of_filters #16 is amount of filters
        self.color_chan = 1
        self.shape = [filters, filters, self.color_chan, number_of_filters]
        self.defrel = False

        self.weight = tf.get_variable(name=namew, shape =self.shape, initializer = tf.initializers.glorot_normal)
        self.bias = tf.Variable(tf.constant(0.05, shape = [number_of_filters], name=nameb))

        self.layer = tf.nn.conv2d(input = initial_input, filter = self.weight, strides=[1,2,2,1], padding="SAME")
        self.layer += self.bias 

        self.layer = tf.nn.max_pool(value=self.layer, ksize = [1,2,2,1], strides = [1,2,2,1], padding="SAME")

        if defrel == True:
            self.layer = tf.nn.relu(self.layer)


    def flatten(self):
        flat_shape = self.layer.shape
        self.features = flat_shape[1:].num_elements()
        self.layer = tf.reshape(self.layer, [-1, self.features])

        return self.layer, self.features

x = tf.placeholder(tf.float32, shape=[None, 784], name='x')
x_image = tf.reshape(x, [-1, 28, 28, 1])
y = tf.placeholder(tf.float32, [None, 5])



layer1 = new_conv_layer(filters=4,number_of_filters=16, initial_input= x_image, namew ="w", nameb="b", defrel=True)
layer2 = new_conv_layer(filters=4,number_of_filters=32, initial_input=layer1.layer, namew="fuckoff", nameb="fuck", defrel=False)


layer_flat, num_features = layer2.flatten()


class fully_connected(ABC):
    def __init__(self, previous_layer, inp, outp, namea, nameb):

        self.previous_layer = previous_layer
        self.weights = tf.get_variable(shape =[inp, outp], initializer = tf.initializers.glorot_normal, name=namea)
        self.biases = tf.Variable(tf.constant(0.05, shape = [outp], name = nameb))
        self.temp_layer = tf.matmul(self.previous_layer, self.weights) + self.biases
        self.new_layer = tf.nn.relu(self.temp_layer)






layer_fc1 = fully_connected(layer_flat, inp=num_features, outp=128, namea = "t", nameb= "u")
layer_fc2 = fully_connected(layer_fc1.new_layer, inp=128, outp=5, nameb="h", namea="z")



epochs = 300
learning_rate = 0.05
batch_size = 128


pred = tf.nn.softmax(layer_fc2.new_layer)
print(pred.shape)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = pred, labels = y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))


# drop out, regularization 
# call back 

init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init) 
    train_loss = []
    test_loss = []
    train_accuracy = []
    test_accuracy = []
    summary_writer = tf.summary.FileWriter('./Output', sess.graph)
    for i in range(epochs):
        # train_X, train_y, test_X, test_y are assumed to be prepared beforehand (see the sketch near the top)
        for batch in range(len(train_X)//batch_size):
            batch_x = train_X[batch*batch_size:min((batch+1)*batch_size,len(train_X))]
            batch_y = train_y[batch*batch_size:min((batch+1)*batch_size,len(train_y))]    
            opt = sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
            loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_x, y: batch_y})
        print("Iter " + str(i) + ", Loss= " + \
                      "{:.6f}".format(loss) + ", Training Accuracy= " + \
                      "{:.5f}".format(acc))
        print("Optimization Finished!")


        test_acc,valid_loss = sess.run([accuracy,cost], feed_dict={x: test_X,y : test_y})
        train_loss.append(loss)
        test_loss.append(valid_loss)
        train_accuracy.append(acc)
        test_accuracy.append(test_acc)
        print("Testing Accuracy:","{:.5f}".format(test_acc))
    summary_writer.close()
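The train_loss / test_loss and accuracy lists collected above are never used in the posted code; a minimal sketch for inspecting them after training, assuming matplotlib is available:

import matplotlib.pyplot as plt

# Plot the per-epoch loss curves collected during training.
plt.plot(train_loss, label='train loss')
plt.plot(test_loss, label='test loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()
plt.show()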

I am running into two different problems. First, I cannot change the filters, because it gives me the error: InvalidArgumentError: input and filter must have the same depth: 16 vs 1. Second, my test accuracy is only 50%, which is not good at all.


I know this is very broad, but am I badly missing something?

self.color_chan = 1
This is wrong. For the initial input the image has one channel, but after the first layer there are 16 channels (because the first layer has 16 filters). Change it to something like self.color_chan = int(initial_input.shape[3]).
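A minimal sketch of how this suggested fix reads inside the posted new_conv_layer constructor (only these two lines change; everything else stays as posted):

# Derive the channel count from the incoming tensor instead of hard-coding 1.
self.color_chan = int(initial_input.shape[3])  # 1 for the image input, 16 after layer 1
self.shape = [filters, filters, self.color_chan, number_of_filters]
# layer2's filter is now shaped [4, 4, 16, 32] instead of [4, 4, 1, 32], which
# resolves the "input and filter must have the same depth: 16 vs 1" error.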


Regarding the accuracy, I am not sure how you were getting any result at all if you also had this error, but in any case, note that while 50% accuracy is not great, it is better than random (which would be about 20% for five classes). You may need to experiment further with filter sizes etc. to improve it (since this is a well-known dataset, there are several examples you could try to reproduce). The dataset was specifically designed to be "compatible" with MNIST, but it is considerably harder in any case.
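For the filter-size experiments suggested above, one approach (a sketch, not from the thread; the variable names "w2"/"b2" are mine) is to rebuild the graph once per candidate size, since the posted classes create named variables with tf.get_variable, which cannot be redefined within a single graph:

# Sketch: compare a few candidate filter sizes by rebuilding the graph each time.
for size in [3, 4, 5]:
    tf.reset_default_graph()  # the posted classes use fixed variable names
    x = tf.placeholder(tf.float32, shape=[None, 784], name='x')
    x_image = tf.reshape(x, [-1, 28, 28, 1])
    y = tf.placeholder(tf.float32, [None, 5])
    layer1 = new_conv_layer(filters=size, number_of_filters=16,
                            initial_input=x_image, namew="w", nameb="b", defrel=True)
    layer2 = new_conv_layer(filters=size, number_of_filters=32,
                            initial_input=layer1.layer, namew="w2", nameb="b2", defrel=False)
    # ...rebuild the dense layers, loss, and training loop as above,
    # then record the test accuracy for each candidate size.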

@jdehsea Thanks for your reply! It did fix the first problem (I could run it with just that one change). However, changing this makes the accuracy exactly 20% at every iteration. Strange.
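A plausible cause of the accuracy pinning at exactly 20% (chance level for five classes), though not raised in the thread: the posted code applies tf.nn.softmax to the output and then passes the result into tf.nn.softmax_cross_entropy_with_logits, which expects raw, unscaled logits, so softmax is effectively applied twice and the gradients become very flat. The final fully_connected layer also passes its output through tf.nn.relu, which is normally omitted for the logits layer. A minimal sketch of the conventional wiring, reusing the pre-activation output the posted class already stores in temp_layer:

logits = layer_fc2.temp_layer  # pre-ReLU output of the last dense layer, used as raw logits
pred = tf.nn.softmax(logits)   # probabilities, for inspection/prediction only

cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# argmax over logits and over softmax(logits) agree, since softmax is monotone.
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))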