Python 基于scikit学习的多输入Keras模型交叉验证
我想对我的神经网络模型应用 K-Fold 交叉验证,代码如下所示:
from sklearn.model_selection import StratifiedKFold
from numpy import *
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import cross_val_score
import numpy

# Columns 0..10079 are the time-series channel, columns 10080..10164 the
# 85 statistical features (10165 columns total).
# NOTE(review): assumes df has 921 rows — confirm against the data source.
X = df.iloc[:, 0:10165]
X = X.to_numpy()
X = X.reshape([X.shape[0], X.shape[1], 1])
X_train_1 = X[:, 0:10080, :]
# reshape(-1, 85) instead of the hard-coded (921, 85): works for any row count.
X_train_2 = X[:, 10080:10165, :].reshape(-1, 85)
Y = df.iloc[:, 10168:10170]
Y = Y.to_numpy()


def my_model():
    """Build and compile the two-input (time series + statistics) classifier."""
    inputs_1 = keras.Input(shape=(10080, 1))
    layer1 = Conv1D(64, 14)(inputs_1)
    layer2 = layers.MaxPool1D(5)(layer1)
    layer3 = Conv1D(64, 14)(layer2)
    layer4 = layers.GlobalMaxPooling1D()(layer3)
    inputs_2 = keras.Input(shape=(85,))
    layer5 = layers.concatenate([layer4, inputs_2])
    layer6 = Dense(128, activation='relu')(layer5)
    layer7 = Dense(2, activation='softmax')(layer6)
    # FIX: the Model keyword argument is `outputs`, not `output`.
    model_2 = keras.models.Model(inputs=[inputs_1, inputs_2], outputs=[layer7])
    model_2.summary()
    adam = keras.optimizers.Adam(lr=0.0001)  # newer Keras: use `learning_rate`
    model_2.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['acc'])
    return model_2


model_2 = KerasClassifier(build_fn=my_model, epochs=150, batch_size=10, verbose=0)
kfold = StratifiedKFold(n_splits=10, shuffle=True)
# BUG (the subject of the question): cross_val_score expects X to be a single
# indexable array, so a two-element list of input arrays is seen as 2 samples
# against y's 921 — hence "inconsistent numbers of samples: [2, 921]".
results = cross_val_score(model_2, [X_train_1, X_train_2], Y, cv=kfold)
print(results.mean())
我遇到了以下错误:
ValueError Traceback (most recent call last)
<ipython-input-44-297145425a53> in <module>()
42 # evaluate using 10-fold cross validation
43 kfold = StratifiedKFold(n_splits=10, shuffle=True)
---> 44 results = cross_val_score(model_2, [X_train_1,X_train_2], Y, cv=kfold)
45 print(results.mean())
3 frames
/usr/local/lib/python3.6/dist-packages/sklearn/utils/validation.py in check_consistent_length(*arrays)
203 if len(uniques) > 1:
204 raise ValueError("Found input variables with inconsistent numbers of"
--> 205 " samples: %r" % [int(l) for l in lengths])
206
207
ValueError: Found input variables with inconsistent numbers of samples: [2, 921]
当我不进行 K-Fold 交叉验证时,模型可以完美运行。也就是说,如果我直接调用 fit 训练:
# Compiling and fitting the two-input model directly works: unlike
# sklearn's cross_val_score, Keras' Model.fit accepts a list of input arrays.
model_2.compile(loss = 'categorical_crossentropy', optimizer = adam, metrics = ['acc'])
history = model_2.fit([X_train_1,X_train_2], y_train, epochs = 120, batch_size = 256, validation_split = 0.2, callbacks = [keras.callbacks.EarlyStopping(monitor='val_loss', patience=20)])
因此,我不明白这条错误消息的含义。感谢您的帮助,谢谢!
编辑: 这是原始模型:
# Two-input functional model: a Conv1D branch over the 10081-step time series,
# concatenated with the 85 raw statistical features before the dense head.
inputs_1 = keras.Input(shape=(10081, 1))
layer1 = Conv1D(64, 14)(inputs_1)
layer2 = layers.MaxPool1D(5)(layer1)
layer3 = Conv1D(64, 14)(layer2)
layer4 = layers.GlobalMaxPooling1D()(layer3)
inputs_2 = keras.Input(shape=(85,))
layer5 = layers.concatenate([layer4, inputs_2])
layer6 = Dense(128, activation='relu')(layer5)
layer7 = Dense(2, activation='softmax')(layer6)
# FIX: the Model keyword argument is `outputs`, not `output`.
model_2 = keras.models.Model(inputs=[inputs_1, inputs_2], outputs=[layer7])
model_2.summary()

X_train, X_test, y_train, y_test = train_test_split(
    df.iloc[:, 0:10166], df[['Result1', 'Result2']].values, test_size=0.2)

# Split each partition into the two model inputs.  reshape(-1, 85) avoids
# hard-coding the split-dependent sample counts (736 / 185), which would
# break for any other test_size or row count.
X_train = X_train.to_numpy()
X_train = X_train.reshape([X_train.shape[0], X_train.shape[1], 1])
X_train_1 = X_train[:, 0:10081, :]
X_train_2 = X_train[:, 10081:10166, :].reshape(-1, 85)
X_test = X_test.to_numpy()
X_test = X_test.reshape([X_test.shape[0], X_test.shape[1], 1])
X_test_1 = X_test[:, 0:10081, :]
X_test_2 = X_test[:, 10081:10166, :].reshape(-1, 85)

adam = keras.optimizers.Adam(lr=0.0005)  # newer Keras: use `learning_rate`
model_2.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['acc'])
history = model_2.fit([X_train_1, X_train_2], y_train,
                      epochs=120, batch_size=256, validation_split=0.2,
                      callbacks=[keras.callbacks.EarlyStopping(monitor='val_loss',
                                                               patience=20)])
Scikit-learn 的 cross_val_score 之所以报错,是因为它检测到 X 和 y 的长度不一致:你传入的
[X_train_1, X_train_2]
是一个包含两个元素的列表,因此在第 0 轴上只有 2 个"伪样本";而 y 在第 0 轴上有 921 个样本。
编辑: 经过一些研究,我发现sklearn的
split()
方法既不支持多输入数据,也不支持独热编码(one-hot)标签。
解决方案: 因此,作为一种解决方法,您可以使用sklearn进行自己的交叉验证,如下所示: 首先导入并定义我们需要的所有内容:
from sklearn.model_selection import StratifiedKFold
import numpy as np
import keras
from keras import layers
from keras.layers import Conv1D, Dense
from keras.utils.np_utils import to_categorical

# ---- dummy data, stands in for the real df-derived arrays ----------------
X_train_1 = np.random.randint(0, 10000, (921, 10080, 1))
X_train_2 = np.random.randint(0, 10000, (921, 85))
Y_kat = np.random.randint(0, 2, (921))
Y = to_categorical(Y_kat, num_classes=2)
# --------------------------------------------------------------------------


def my_model():
    """Build and compile a fresh two-input classifier (one instance per fold)."""
    inputs_1 = keras.Input(shape=(10080, 1))
    layer1 = Conv1D(64, 14)(inputs_1)
    layer2 = layers.MaxPool1D(5)(layer1)
    layer3 = Conv1D(64, 14)(layer2)
    layer4 = layers.GlobalMaxPooling1D()(layer3)
    inputs_2 = keras.Input(shape=(85,))
    layer5 = layers.concatenate([layer4, inputs_2])
    layer6 = Dense(128, activation='relu')(layer5)
    layer7 = Dense(2, activation='softmax')(layer6)
    # FIX: the Model keyword argument is `outputs`, not `output`.
    model_2 = keras.models.Model(inputs=[inputs_1, inputs_2], outputs=[layer7])
    # model_2.summary()
    adam = keras.optimizers.Adam(lr=0.0001)  # newer Keras: use `learning_rate`
    model_2.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['acc'])
    return model_2


# StratifiedKFold.split needs integer class labels, not one-hot vectors.
Y_kat = np.argmax(Y, axis=1)

n_folds = 5
skf = StratifiedKFold(n_splits=n_folds, shuffle=True)

cv_score = []
for i, (train, test) in enumerate(skf.split(X_train_1, Y_kat)):
    # Keras has no model.reset(), so the easiest way to start each fold from
    # fresh weights is to rebuild (and recompile) the model inside the loop.
    model_2 = my_model()
    print("Running Fold", i + 1, "/", n_folds)
    model_2.fit([X_train_1[train], X_train_2[train]], Y[train],
                epochs=150, batch_size=10)
    result = model_2.evaluate([X_train_1[test], X_train_2[test]], Y[test])
    cv_score.append(result[1])  # index 1 is the accuracy metric
    # Clear the previous graph so weights (and memory) don't carry over.
    keras.backend.clear_session()
print("\nMean accuracy of the crossvalidation: {}".format(np.mean(cv_score)))
Mean accuracy of the crossvalidation: 0.5049177408218384
现在我们来看实际的解决方案:
from sklearn.model_selection import StratifiedKFold
import numpy as np
import keras
from keras import layers
from keras.layers import Conv1D, Dense
from keras.utils.np_utils import to_categorical

# ---- dummy data, stands in for the real df-derived arrays ----------------
X_train_1 = np.random.randint(0, 10000, (921, 10080, 1))
X_train_2 = np.random.randint(0, 10000, (921, 85))
Y_kat = np.random.randint(0, 2, (921))
Y = to_categorical(Y_kat, num_classes=2)
# --------------------------------------------------------------------------


def my_model():
    """Build and compile a fresh two-input classifier (one instance per fold)."""
    inputs_1 = keras.Input(shape=(10080, 1))
    layer1 = Conv1D(64, 14)(inputs_1)
    layer2 = layers.MaxPool1D(5)(layer1)
    layer3 = Conv1D(64, 14)(layer2)
    layer4 = layers.GlobalMaxPooling1D()(layer3)
    inputs_2 = keras.Input(shape=(85,))
    layer5 = layers.concatenate([layer4, inputs_2])
    layer6 = Dense(128, activation='relu')(layer5)
    layer7 = Dense(2, activation='softmax')(layer6)
    # FIX: the Model keyword argument is `outputs`, not `output`.
    model_2 = keras.models.Model(inputs=[inputs_1, inputs_2], outputs=[layer7])
    # model_2.summary()
    adam = keras.optimizers.Adam(lr=0.0001)  # newer Keras: use `learning_rate`
    model_2.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['acc'])
    return model_2


# StratifiedKFold.split needs integer class labels, not one-hot vectors.
Y_kat = np.argmax(Y, axis=1)

n_folds = 5
skf = StratifiedKFold(n_splits=n_folds, shuffle=True)

cv_score = []
for i, (train, test) in enumerate(skf.split(X_train_1, Y_kat)):
    # Keras has no model.reset(), so the easiest way to start each fold from
    # fresh weights is to rebuild (and recompile) the model inside the loop.
    model_2 = my_model()
    print("Running Fold", i + 1, "/", n_folds)
    model_2.fit([X_train_1[train], X_train_2[train]], Y[train],
                epochs=150, batch_size=10)
    result = model_2.evaluate([X_train_1[test], X_train_2[test]], Y[test])
    cv_score.append(result[1])  # index 1 is the accuracy metric
    # Clear the previous graph so weights (and memory) don't carry over.
    keras.backend.clear_session()
print("\nMean accuracy of the crossvalidation: {}".format(np.mean(cv_score)))
Mean accuracy of the crossvalidation: 0.5049177408218384
Out:
from sklearn.model_selection import StratifiedKFold
import numpy as np
import keras
from keras import layers
from keras.layers import Conv1D, Dense
from keras.utils.np_utils import to_categorical

# ---- dummy data, stands in for the real df-derived arrays ----------------
X_train_1 = np.random.randint(0, 10000, (921, 10080, 1))
X_train_2 = np.random.randint(0, 10000, (921, 85))
Y_kat = np.random.randint(0, 2, (921))
Y = to_categorical(Y_kat, num_classes=2)
# --------------------------------------------------------------------------


def my_model():
    """Build and compile a fresh two-input classifier (one instance per fold)."""
    inputs_1 = keras.Input(shape=(10080, 1))
    layer1 = Conv1D(64, 14)(inputs_1)
    layer2 = layers.MaxPool1D(5)(layer1)
    layer3 = Conv1D(64, 14)(layer2)
    layer4 = layers.GlobalMaxPooling1D()(layer3)
    inputs_2 = keras.Input(shape=(85,))
    layer5 = layers.concatenate([layer4, inputs_2])
    layer6 = Dense(128, activation='relu')(layer5)
    layer7 = Dense(2, activation='softmax')(layer6)
    # FIX: the Model keyword argument is `outputs`, not `output`.
    model_2 = keras.models.Model(inputs=[inputs_1, inputs_2], outputs=[layer7])
    # model_2.summary()
    adam = keras.optimizers.Adam(lr=0.0001)  # newer Keras: use `learning_rate`
    model_2.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['acc'])
    return model_2


# StratifiedKFold.split needs integer class labels, not one-hot vectors.
Y_kat = np.argmax(Y, axis=1)

n_folds = 5
skf = StratifiedKFold(n_splits=n_folds, shuffle=True)

cv_score = []
for i, (train, test) in enumerate(skf.split(X_train_1, Y_kat)):
    # Keras has no model.reset(), so the easiest way to start each fold from
    # fresh weights is to rebuild (and recompile) the model inside the loop.
    model_2 = my_model()
    print("Running Fold", i + 1, "/", n_folds)
    model_2.fit([X_train_1[train], X_train_2[train]], Y[train],
                epochs=150, batch_size=10)
    result = model_2.evaluate([X_train_1[test], X_train_2[test]], Y[test])
    cv_score.append(result[1])  # index 1 is the accuracy metric
    # Clear the previous graph so weights (and memory) don't carry over.
    keras.backend.clear_session()
print("\nMean accuracy of the crossvalidation: {}".format(np.mean(cv_score)))
Mean accuracy of the crossvalidation: 0.5049177408218384
希望能有所帮助。
—— 您传入 [X_train_1, X_train_2] 的意图是什么?
—— @Geeocode 输入分为 inputs_1 和 inputs_2:前者是时间序列数据,后者是统计数据。考虑到数据性质不同,我把它们放进了不同的输入分支。请参见下面我编辑后的答案。
—— 是的,我想是的。这是否意味着 cross_val_score 不允许把两份训练数据组合成一个输入?有什么好的替代方案?请参考问题中关于原始模型的编辑,我已经补充了更多模型背景信息。
—— @nilsinelabore 你能试试 list(zip(X_train_1, X_train_2)) 吗?我不确定,但可能有用。不过你可能会遇到另一个错误:长度不一致的问题也许能解决,但 Keras 可能不接受这种输入。
—— 谢谢 @Geeocode 的回答。我很欣赏自己实现交叉验证的思路,尽管它引发了另一个问题(请参阅问题更新),输入数据似乎也存在类似的问题。我会从那里继续尝试,如果你有其他建议,请告诉我。
—— @nilsinelabore 不幸的是,list(zip()) 在这里不起作用。
?我不确定,但可能有用。不过,您可能会遇到另一个错误,但这确实是个问题。长度不一致可能会得到解决,但keras可能不会接受。谢谢@Geeocode的回答。我真的很欣赏自己进行交叉验证的想法,尽管它引发了另一个问题(请参阅问题更新)。输入数据似乎也存在类似的问题。我会从那里继续努力。如果你有其他建议,请告诉我。Thanks@nilsinelabore不幸的是,列表(zip())在这里不起作用。