Python 多尺度 CNN 网络的数组传递问题

Python 多尺度can网络的阵列通过问题,python,keras,deep-learning,cnn,Python,Keras,Deep Learning,Cnn,我正在处理音频数据(.wav)文件,我创建了两个CNN网络来传递数据。因此,最后的两个模型将使用连接这两个模型的输出的功能,并给出分类。但我在训练它时遇到了问题,它说该模型预计会看到2个阵列,但它得到了一个阵列。任何能帮我解决这个问题的人我都会感激的,我已经挣扎了好几个星期了 这是我的配置文件 class Config: def __init__(self, mode='conv', nfilt=26, nfeat=40, nfft=4096, rate=16000): self.mod

我正在处理音频数据(.wav)文件,我创建了两个CNN网络来传递数据。因此,最后的模型将把这两个子模型的输出连接起来作为特征,并给出分类结果。但我在训练时遇到了问题,报错说该模型预期接收 2 个数组,但只得到了 1 个数组。如果有人能帮我解决这个问题,我将不胜感激——我已经为此纠结了好几个星期了。

这是我的配置文件

class Config:
    """Configuration for MFCC feature extraction and model/pickle paths.

    Parameters
    ----------
    mode : str
        Network type, e.g. 'conv' (2-D CNN) or 'time'; also used to name
        the saved model and pickle files.
    nfilt : int
        Number of mel filterbank filters passed to `mfcc`.
    nfeat : int
        Number of cepstral coefficients (`numcep`) per frame.
    nfft : int
        FFT size for the MFCC computation.
    rate : int
        Audio sample rate in Hz.
    """

    def __init__(self, mode='conv', nfilt=26, nfeat=40, nfft=4096, rate=16000):
        self.mode = mode
        self.nfilt = nfilt
        self.nfeat = nfeat
        self.nfft = nfft
        self.rate = rate
        # One tenth of a second of audio per random sample window.
        self.step = int(rate / 10)
        self.model_path = os.path.join('models', mode + '.model')
        self.p_path = os.path.join('pickles', mode + '.p')
我为我的mfcc构建了一个函数

下面是我为 MFCC 特征构建随机样本的函数 `build_rand_feat()`:

这是我的多尺度网络

def get_conv_model():

def build_rand_feat():
    """Build a randomly sampled MFCC feature set for training.

    Draws `n_sample` random fixed-length windows from the audio files
    listed in `df` (class-balanced via `prob_dist`), converts each to
    MFCC features, min-max normalizes the whole set, and one-hot
    encodes the labels.

    NOTE(review): relies on module-level globals (`n_sample`,
    `class_dist`, `prob_dist`, `df`, `config`, `classes`, `sf`,
    `mfcc`, `tqdm`, `to_categorical`) defined elsewhere in the script.

    Returns
    -------
    (X, y) : tuple of np.ndarray
        X is (n_sample, frames, nfeat[, 1]) depending on config.mode;
        y is one-hot with 10 classes.
    """
    X = []
    y = []
    _min, _max = float('inf'), -float('inf')
    for _ in tqdm(range(n_sample)):
        # Pick a class according to the class distribution, then a
        # random file of that class and a random window inside it.
        rand_class = np.random.choice(class_dist.index, p=prob_dist)
        file = np.random.choice(df[df.label == rand_class].index)
        data, rate = sf.read('audios/' + file)
        label = df.at[file, 'label']
        rand_index = np.random.randint(0, data.shape[0] - config.step)
        sample = data[rand_index:rand_index + config.step]
        X_sample = mfcc(sample, rate, winlen=0.05, winstep=0.02,
                        numcep=config.nfeat, nfilt=config.nfilt,
                        nfft=config.nfft)
        # Track global min/max for normalization below.
        _min = min(np.amin(X_sample), _min)
        _max = max(np.amax(X_sample), _max)
        X.append(X_sample)
        y.append(classes.index(label))
    # FIX: the original stored only config.min; _max is equally needed
    # to reproduce/undo the normalization later, so persist both.
    config.min = _min
    config.max = _max
    X = np.array(X)
    y = np.array(y)
    X = (X - _min) / (_max - _min)
    if config.mode == 'conv':
        # Add a trailing channel dimension for Conv2D input.
        X = X.reshape(X.shape[0], X.shape[1], X.shape[2], 1)
        print(X.shape)
    elif config.mode == 'time':
        # NOTE(review): this reshape is a no-op (same shape) — kept for
        # parity with the original; confirm the intended 'time' layout.
        X = X.reshape(X.shape[0], X.shape[1], X.shape[2])
    y = to_categorical(y, num_classes=10)
    config.data = (X, y)
    return X, y
def get_conv_model():
    """Build, compile, and fit a two-branch multi-scale CNN.

    Two parallel Sequential branches (an "upper" conv stack and a
    "lower" pooled conv stack) are concatenated and fed through dense
    layers to a 10-way softmax. Because the merged `Model` declares
    TWO inputs, `fit` must receive a list of two arrays.

    NOTE(review): reads module-level globals `input_shape`, `X`, `y`.
    NOTE(review): the original code first built an unused
    main_model/second_model pair that was immediately shadowed by the
    definitions below; that dead code has been removed.

    Returns
    -------
    (main_model, lower_model1, merged_model)
        The two branch models and the concatenated tensor.
    """
    # first model: upper branch
    main_model = Sequential()
    main_model.add(Conv2D(64, (3, 3), activation='relu', strides=(1, 1),
                          padding='same', input_shape=input_shape))
    main_model.add(BatchNormalization())
    main_model.add(Dropout(0.3))
    main_model.add(Conv2D(128, (3, 3), activation='relu', strides=(1, 1),
                          padding='same'))
    main_model.add(BatchNormalization())
    main_model.add(Dropout(0.3))
    main_model.add(Conv2D(256, (3, 3), activation='relu', strides=(1, 1),
                          padding='same'))
    main_model.add(BatchNormalization())
    main_model.add(Dropout(0.3))
    main_model.add(Flatten())

    # second model: lower branch (pool first, then deeper convs)
    lower_model1 = Sequential()
    lower_model1.add(MaxPool2D(strides=(1, 1), padding='same',
                               input_shape=input_shape))
    lower_model1.add(Conv2D(128, (3, 3), activation='relu', strides=(1, 1),
                            padding='same', input_shape=input_shape))
    lower_model1.add(BatchNormalization())
    lower_model1.add(Dropout(0.3))
    lower_model1.add(Conv2D(256, (3, 3), activation='relu', strides=(1, 1),
                            padding='same'))
    lower_model1.add(BatchNormalization())
    lower_model1.add(Dropout(0.3))
    lower_model1.add(Conv2D(512, (3, 3), activation='relu', strides=(1, 1),
                            padding='same'))
    lower_model1.add(Flatten())

    # merged models: concatenate branch outputs, then classify
    merged_model = Concatenate()([main_model.output, lower_model1.output])
    x = Dense(256, activation='relu')(merged_model)
    x = Dropout(0.3)(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.3)(x)
    x = Dense(128, activation='relu')(x)
    x = Dropout(0.3)(x)
    output = Dense(10, activation='softmax')(x)
    final_model = Model(inputs=[main_model.input, lower_model1.input],
                        outputs=[output])
    final_model.summary()
    final_model.compile(loss="categorical_crossentropy",
                        optimizer=Adam(0.001), metrics=['acc'])
    print(K.eval(final_model.optimizer.lr))
    # class_weight = compute_class_weight('balanced', np.unique(y_flat), y_flat)
    # FIX: final_model has two inputs (one per branch), so the same
    # feature array must be supplied to both. Passing a bare `X` raises
    # "the model expected to see 2 array(s), but got 1".
    final_model.fit([X, X], y, epochs=10, batch_size=64, shuffle=True,
                    validation_split=0.3)
    return main_model, lower_model1, merged_model