Warning: file_get_contents(/data/phpspider/zhask/data//catemap/1/hibernate/5.json): failed to open stream: No such file or directory in /data/phpspider/zhask/libs/function.php on line 167

Warning: Invalid argument supplied for foreach() in /data/phpspider/zhask/libs/tag.function.php on line 1116

Notice: Undefined index: in /data/phpspider/zhask/libs/function.php on line 180

Warning: array_chunk() expects parameter 1 to be array, null given in /data/phpspider/zhask/libs/function.php on line 181
Tensorflow 层顺序_10的输入0与层不兼容::预期最小值ndim=4,发现ndim=2_Tensorflow_Keras_Reshape_Cnn - Fatal编程技术网

Tensorflow 层顺序_10的输入0与层不兼容::预期最小值ndim=4,发现ndim=2

Tensorflow 层顺序_10的输入0与层不兼容::预期最小值ndim=4,发现ndim=2,tensorflow,keras,reshape,cnn,Tensorflow,Keras,Reshape,Cnn,在重塑xtraindata和xtest数据之前,我遇到了错误: “层顺序_10的输入0与层不兼容::预期最小值_ndim=4,发现ndim=2。”。按顺序将xtraindata和xtestdata重塑为(1400,24,24,1)和(600,24,24,1)后。然后我得到了这样的错误: “不兼容的形状:[32,1]与[32,6,6,1] [[node mean_squared_error/SquaredDifference(定义于C:\Users\User\Documents\car_perso

在重塑xtraindata和xtest数据之前,我遇到了错误: “层顺序_10的输入0与层不兼容::预期最小值_ndim=4,发现ndim=2。”。按顺序将xtraindata和xtestdata重塑为(1400,24,24,1)和(600,24,24,1)后。然后我得到了这样的错误: “不兼容的形状:[32,1]与[32,6,6,1] [[node mean_squared_error/SquaredDifference(定义于C:\Users\User\Documents\car_person.py:188)][Op:[推理测试函数]

函数调用堆栈: 测试功能“ 我无法使求值函数在创建的模型上工作。为了使测试数据与模型兼容,我应该做什么

import numpy as np
import matplotlib.pyplot as plt
import os
import time
import cv2
import pandas as pd
import tensorflow as tf
import itertools as it
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
  try:
    tf.config.experimental.set_virtual_device_configuration(gpus[0], [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=4096)])
  except RuntimeError as e:
    print(e)


#gpu_options=K.tf.GPUOptions(per_process_gpu_memory_fraction=0.35)

path = "C:/Users/User/Desktop/tunel_data"
training_data=[]

def create_training_data(training_data, path):
    categories = ["tunel_data_other", "tunel_data_car"]
    for category in categories:
        path=os.path.join(path, category)
        for img in os.listdir(path):
            print(img)
            if category=="tunel_data_other":
                class_num= 0
                #image=Image.open(img)
                #new_image = image.resize((50, 50))
                #new_image.save('car'+img.index())
                #try:
                image_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)/255
                new_array = cv2.resize(image_array, (24, 24))
                print(new_array.shape)
                training_data.append([new_array, class_num])
                #except:
                    #pass
            elif category=="tunel_data_car":
                class_num = 1
                #image=Image.open(img)
                #new_image = image.resize((50, 50))
                #new_image.save('person'+img.index())
                #try:
                image_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)/255
                new_array = cv2.resize(image_array, (24, 24))
                print(new_array.shape)
                training_data.append([new_array, class_num])
                #except:
                    #pass
        path = "C:/Users/User/Desktop/tunel_data"
    return training_data

create_training_data(training_data, path)

x=[]
y=[]

for i in range(len(training_data)):
    x.append(training_data[i][0])
    y.append(training_data[i][1])
#print(x)
#print(y)
     
x = np.array(x).reshape(2000, 576)
"""
principle_features = PCA(n_components=250)
feature = principle_features.fit_transform(x)
"""
feature = x
label = y

feature_df = pd.DataFrame(feature)

#df = DataFrame (People_List,columns=['First_Name','Last_Name','Age'])

label_df = pd.DataFrame(label)


data = pd.concat([feature_df, label_df], axis=1).to_csv('complete.csv')


data = pd.read_csv("complete.csv")

data = data.sample(frac=1).reset_index(drop=True)

print(data)

x_test, x_train, y_test, y_train = train_test_split(x, y, test_size=0.7, random_state=65)
xtraindata=pd.DataFrame(data=x_train[:,:])
xtestdata=pd.DataFrame(data=x_test[:,:])
print(xtraindata)

ytraindata=pd.DataFrame(data=y_train[:])
ytestdata=pd.DataFrame(data=y_test[:])
print(ytraindata)

xtraindata = np.asarray(xtraindata)
ytraindata = np.asarray(ytraindata)
xtestdata = np.asarray(xtestdata)
ytestdata = np.asarray(ytestdata)
x=np.asarray(x)
y=np.asarray(y)


xtraindata = xtraindata.reshape(1400,24,24,1)
xtestdata = xtestdata.reshape(600,24,24,1)

activation = ["tanh", "relu", "sigmoid", "softmax"]
input_size1 = range(10)
input_size2 = range(10)
k_scores = []
in_size = []

possible = list(it.permutations(activation, 4))

for c in possible:
    for i in input_size1:
        for a in input_size2:
            model = tf.keras.Sequential([tf.keras.layers.Conv2D(256, kernel_size=(3,3), padding='same', activation='relu'),
                                         tf.keras.layers.MaxPooling2D(pool_size=(2,2)),
                                         tf.keras.layers.Conv2D(512, kernel_size=(3,3), padding='same', activation='relu'),
                                         tf.keras.layers.MaxPooling2D(pool_size=(2,2)),
                                         tf.keras.layers.Dense(250, activation=c[0]),
                                         tf.keras.layers.Dense(i, activation=c[1]),
                                         tf.keras.layers.Dense(a, activation=c[2]),
                                         tf.keras.layers.Dense(1, activation=c[3])])
            model.compile(optimizer='sgd', loss='mse')
            val_loss = model.evaluate(xtestdata, ytestdata, verbose=1)
            k_scores.append(val_loss)
            in_size.append([i,a])
            
print(k_scores)
print("Best activation functions for each layer:", possible[(k_scores.index((min(k_scores)))) % len(possible)],
      "/n Best input sizes:", "840", in_size[k_scores.index((min(k_scores)))][0], in_size[k_scores.index((min(k_scores)))][1], "1")

model = tf.keras.Sequential()
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(250, activation=possible[(k_scores.index((min(k_scores)))) % len(possible)][0]))
model.add(tf.keras.layers.Dense(in_size[k_scores.index((min(k_scores)))][0], activation=possible[(k_scores.index((min(k_scores)))) % len(possible)][1]))
model.add(tf.keras.layers.Dense(in_size[k_scores.index((min(k_scores)))][1], activation=possible[(k_scores.index((min(k_scores)))) % len(possible)][2]))
model.add(tf.keras.layers.Dense(1, activation=possible[(k_scores.index((min(k_scores)))) % len(possible)][3]))
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy", "mse"])
model.fit(x, y, batch_size=16, epochs=5)
predictions = model.predict([x_test])
print(predictions)
print(predictions.shape)

输出层大小不同。您需要大小
(32,1)
,但模型的输出是
(32,6,6,1)

MaxPooling2D
Dense()
之间插入
flant()
,也许这项工作很好

这是提示<代码>.evaluate方法仅适用于经过培训的模型。您应该首先使用
.fit