Tensorflow: Cannot feed value of shape (64, 150, 150, 3) for Tensor 'x_4:0', which has shape '(?, 22500)'


I am trying to implement my own convolutional network on custom images, but I keep running into a size error. Can anyone help me figure out what is going wrong? The error is raised inside the optimize function and reports a shape mismatch.
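
For reference, here is the mismatch in miniature (my own reproduction of the shapes, not code from the project): the batch being fed is a 4-D image tensor while the placeholder is 2-D, and even flattening each image would give 150*150*3 = 67500 values rather than 22500.

import numpy as np

batch = np.zeros((64, 150, 150, 3), dtype=np.float32)  # shape reported in the error
flat = batch.reshape(batch.shape[0], -1)               # flatten each image
print(flat.shape)  # (64, 67500) -- still not compatible with (?, 22500)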

The figure below illustrates the basic idea of processing an image in the first convolutional layer. The input image depicts the digit 7, and four copies of it are shown so we can see more clearly how the filter is moved to different positions of the image. For each position of the filter, the dot product between the filter and the image pixels beneath it is computed, producing a single pixel in the output image. Moving the filter across the entire input image therefore produces a new image.
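
To make the sliding dot product concrete, here is a toy NumPy sketch (illustrative only, written by me rather than taken from the project):

import numpy as np

def conv2d_valid(image, filt):
    # Slide the filter over the image; each output pixel is the dot
    # product of the filter with the patch of pixels beneath it.
    H, W = image.shape
    fh, fw = filt.shape
    out = np.zeros((H - fh + 1, W - fw + 1))
    for i in range(H - fh + 1):
        for j in range(W - fw + 1):
            out[i, j] = np.sum(image[i:i+fh, j:j+fw] * filt)
    return out

image = np.random.rand(7, 7)
sobel_x = np.array([[1, 0, -1], [2, 0, -2], [1, 0, -1]], dtype=float)
print(conv2d_valid(image, sobel_x).shape)  # (5, 5)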

bee_dir = "/beeVSwasp/bee/*.jpg"
wasp_dir = "/beeVSwasp/wasp/*.jpg"

bee = np.array(imread_collection(bee_dir))
wasp = np.array(imread_collection(wasp_dir))

for counter,i in enumerate(bee):
    bee[counter] = cv2.resize(i,(150,150))

for counter,i in enumerate(wasp):
    wasp[counter] = cv2.resize(i,(150,150))
    
bee_y = np.tile(1,bee.shape[0])
wsp_y = np.tile(0,wasp.shape[0])

resp = np.append(bee_y,wsp_y)
data = np.append(bee,wasp)

tmp = list(zip(data, resp))
random.shuffle(tmp)
data, resp = zip(*tmp)

x_train, x_test, y_train, y_test = train_test_split(data,resp,test_size=0.33)
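
One caveat about the pipeline above (my observation): because the source images have varying sizes, np.array(imread_collection(...)) yields a 1-D object array, and np.append keeps it 1-D, so each element of data is a separate (150, 150, 3) array; a list of 64 of them therefore arrives as exactly the (64, 150, 150, 3) batch named in the error. A sketch of packing the split into one contiguous 4-D array instead (x_train_arr is my name; this assumes every image really is 150x150x3 after resizing):

x_train_arr = np.stack(x_train)                       # (N_train, 150, 150, 3)
x_train_arr = x_train_arr.astype(np.float32) / 255.0  # optional [0, 1] scaling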

#conv 1
filter_size1 = 5
num_filters1 = 16

#conv2
filter_size2 = 5
num_filters2 = 32

# dense layer
dense_size = 128

img_size = 150
img_size_flat = img_size*img_size
img_shape = (img_size,img_size)
num_channels = 3
num_classes = 2
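
A quick sanity check on these constants (my arithmetic): img_size_flat = 150*150 = 22500 counts only a single channel, which is exactly the 22500 in the error message; a flattened RGB image actually has 150*150*3 values.

print(img_size * img_size)                 # 22500 -> the '(?, 22500)' in the error
print(img_size * img_size * num_channels)  # 67500 -> length of one flattened RGB image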

def new_weights(shape):
    return tf.Variable(tf.truncated_normal(shape, stddev=0.05))

def new_biases(length):
    return tf.Variable(tf.constant(0.05, shape=[length]))
    
def new_conv_layer(input,              # The previous layer.
                   num_input_channels, # Num. channels in prev. layer.
                   filter_size,        # Width and height of each filter.
                   num_filters,        # Number of filters.
                   use_pooling=True):  # Use 2x2 max-pooling.
    shape = [filter_size, filter_size, num_input_channels, num_filters]
    weights = new_weights(shape=shape)
    biases = new_biases(length=num_filters)
    layer = tf.nn.conv2d(input=input,
                         filter=weights,
                         strides=[1, 1, 1, 1],
                         padding='SAME')
    layer += biases
    if use_pooling:
        layer = tf.nn.max_pool(value=layer,
                               ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1],
                               padding='SAME')
    layer = tf.nn.relu(layer)
    return layer, weights
    
def flatten_layer(layer):
    layer_shape = layer.get_shape()
    num_features = layer_shape[1:4].num_elements()
    layer_flat = tf.reshape(layer, [-1, num_features])
    return layer_flat, num_features
    
def new_fc_layer(input,          # The previous layer.
                 num_inputs,     # Num. inputs from prev. layer.
                 num_outputs,    # Num. outputs.
                 use_relu=True): # Use Rectified Linear Unit (ReLU)?
    weights = new_weights(shape=[num_inputs, num_outputs])
    biases = new_biases(length=num_outputs)

    layer = tf.matmul(input, weights) + biases

    # Use ReLU?
    if use_relu:
        layer = tf.nn.relu(layer)

    return layer
    
x = tf.placeholder(tf.float32, shape=[None, img_size_flat], name='x')
x_image = tf.reshape(x, [-1, img_size, img_size, num_channels])
y_true = tf.placeholder(tf.float32, shape=[None, num_classes], name='y_true')
y_true_cls = tf.argmax(y_true, axis=1)
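
One more shape note (my observation): y_true is declared as one-hot with shape (?, num_classes), while resp above holds scalar 0/1 labels, so the labels would also need converting before being fed. A minimal sketch of the conversion I would expect (y_onehot is my name, not in the original):

y_onehot = np.eye(num_classes)[np.asarray(resp, dtype=int)]  # rows like [0., 1.]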
    
layer_conv1, weights_conv1 = \
    new_conv_layer(input=x_image,
                   num_input_channels=num_channels,
                   filter_size=filter_size1,
                   num_filters=num_filters1,
                   use_pooling=True)

layer_conv2, weights_conv2 = \
    new_conv_layer(input=layer_conv1,
                   num_input_channels=num_filters1,
                   filter_size=filter_size2,
                   num_filters=num_filters2,
                   use_pooling=True)
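
For orientation, the shapes at this point (my arithmetic, assuming the 'SAME' padding above): each 2x2 pooling halves the spatial size, rounding up, so 150 -> 75 -> 38, and layer_conv2 has shape (?, 38, 38, 32).

print(layer_conv2.get_shape())  # (?, 38, 38, 32)
print(38 * 38 * num_filters2)   # 46208 features once flattened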
    
layer_flat, num_features = flatten_layer(layer_conv2)

layer_fc1 = new_fc_layer(input=layer_flat,
                         num_inputs=num_features,
                         num_outputs=dense_size,
                         use_relu=True)

layer_fc2 = new_fc_layer(input=layer_fc1,
                         num_inputs=dense_size,
                         num_outputs=num_classes,
                         use_relu=False)
    
y_pred = tf.nn.softmax(layer_fc2)
y_pred_cls = tf.argmax(y_pred, axis=1)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=layer_fc2,
                                                        labels=y_true)

cost = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cost)
correct_prediction = tf.equal(y_pred_cls, y_true_cls)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
session = tf.Session()
session.run(tf.global_variables_initializer())
train_batch_size = 64
def get_random_batch(batch_size):
    # Sample indices so images and labels stay paired.
    idx = random.sample(range(len(x_train)), batch_size)
    return [x_train[i] for i in idx], [y_train[i] for i in idx], 0

total_iterations = 0
def optimize(num_iterations):
    global total_iterations
    start_time = time.time()
    for i in range(total_iterations,
                   total_iterations + num_iterations):
        x_batch, y_true_batch, _ = get_random_batch(batch_size=train_batch_size)
        feed_dict_train = {x: x_batch,
                           y_true: y_true_batch}
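        # This is the call that raises the reported error: x_batch arrives as
        # a (64, 150, 150, 3) batch, but placeholder x expects (?, 22500).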
        session.run(optimizer, feed_dict=feed_dict_train)
        if i % 100 == 0:
            acc = session.run(accuracy, feed_dict=feed_dict_train)
            msg = "Optimization Iteration: {0:>6}, Training Accuracy: {1:>6.1%}"
            print(msg.format(i + 1, acc))
    total_iterations += num_iterations
    end_time = time.time()
    time_dif = end_time - start_time
    print("Time usage: " + str(timedelta(seconds=int(round(time_dif)))))

optimize(1)