Python: how do I use a predefined/trained (hdf5) file and weights to predict the class of new EEG data?


I have a predefined file named (bestmodel.hdf5), created with the Keras library (Python) on the Theano backend.

The trained model was created with the following code:

# imports needed by the training snippet
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, GaussianNoise, BatchNormalization
from keras.regularizers import l1_l2
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, EarlyStopping

# set parameters
batch_size = 1280
nb_epoch = 3000 #6000
l1_decay=0.00
l2_decay=0 # .5
# 0.01  0.06
sigma=0.005
in_drop_rate = .2
drop_rate = .5

print (tr_X.shape[1])
# set network layout
model = Sequential()
model.add(Dense(2184, input_shape=(tr_X.shape[1],)
                , init='he_normal', W_regularizer=l1_l2(l1=l1_decay, l2=l2_decay)))
model.add(GaussianNoise(sigma))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(in_drop_rate))


model.add(Dense(1310, init='he_normal', W_regularizer=l1_l2(l1=l1_decay, l2=l2_decay)))
model.add(GaussianNoise(sigma))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(drop_rate))

model.add(Dense(786, init='he_normal', W_regularizer=l1_l2(l1=l1_decay, l2=l2_decay)))
model.add(GaussianNoise(sigma))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(drop_rate))

model.add(Dense(472, init='he_normal', W_regularizer=l1_l2(l1=l1_decay, l2=l2_decay)))
model.add(GaussianNoise(sigma))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(drop_rate))


model.add(Dense(4, W_regularizer=l1_l2(l1=l1_decay, l2=l2_decay)))
model.add(Activation('softmax'))

# Callbacks
model_checkpoint = ModelCheckpoint('best_model.hdf5', monitor='val_loss', save_best_only=True)
early = EarlyStopping(monitor='val_loss', patience=600, verbose=0)

# fit and evaluate the model
model.compile(loss='categorical_crossentropy',
              optimizer=Adam(lr=0.001))#SGD(lr=0.0019, momentum=0.9, decay=0.0, nesterov=True))
history = model.fit(tr_X, tr_y, batch_size=batch_size,
                    nb_epoch=nb_epoch, verbose=0,  callbacks=[early, model_checkpoint],
                    validation_data=(va_X, va_y))
model.load_weights('best_model.hdf5')
tr_pr = model.predict(tr_X, batch_size=batch_size, verbose=0)
However, when testing on real data (from an experiment), my input has a different size (for example, 552 features instead of 2184).

So, to read the hdf5 weights file and use it to predict the class of the data, I wrote the following:

# imports needed by the prediction snippet
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, GaussianNoise, BatchNormalization
from keras import regularizers

# set parameters
batch_size = 4
l1_decay=0.00
l2_decay=0 # .5
# 0.01  0.06
sigma=0.005
in_drop_rate = .2
drop_rate = .5

# set network layout
model = Sequential()
model.add(Dense(552, input_shape=(552,)
                , init='he_normal', W_regularizer=regularizers.l1_l2(l1=l1_decay, l2=l2_decay)))
model.add(GaussianNoise(sigma))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(in_drop_rate))


model.add(Dense(331, init='he_normal', W_regularizer=regularizers.l1_l2(l1=l1_decay, l2=l2_decay)))
model.add(GaussianNoise(sigma))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(drop_rate))

model.add(Dense(189, init='he_normal', W_regularizer=regularizers.l1_l2(l1=l1_decay, l2=l2_decay)))
model.add(GaussianNoise(sigma))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(drop_rate))

model.add(Dense(119, init='he_normal', W_regularizer=regularizers.l1_l2(l1=l1_decay, l2=l2_decay)))
model.add(GaussianNoise(sigma))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(drop_rate))

model.add(Dense(4, W_regularizer=regularizers.l1_l2(l1=l1_decay, l2=l2_decay)))
model.add(Activation('softmax'))


model.load_weights('best_model.hdf5')
te_pr = model.predict(X, batch_size=batch_size, verbose=0)
When I run the code, I get the following exception:

C:\Users\M\Desktop\Dr Abeer Folder\Emotion Project_code and dataset\End User\Experiment_Calculation.py:106: UserWarning: Update your `Dense` call to the Keras 2 API: `Dense(119, kernel_regularizer=<keras.reg..., kernel_initializer="he_normal")`
  model.add(Dense(119, init='he_normal', W_regularizer=regularizers.l1_l2(l1=l1_decay, l2=l2_decay)))
C:\Users\M\Desktop\Dr Abeer Folder\Emotion Project_code and dataset\End User\Experiment_Calculation.py:112: UserWarning: Update your `Dense` call to the Keras 2 API: `Dense(4, kernel_regularizer=<keras.reg...)`
  model.add(Dense(4, W_regularizer=regularizers.l1_l2(l1=l1_decay, l2=l2_decay)))

Traceback (most recent call last):
  File "C:\Users\M\Desktop\Dr Abeer Folder\Emotion Project_code and dataset\End User\main2.py", line 88, in BrowseFileHandler
    expcal.calclate_Experiment()
  File "C:\Users\M\Desktop\Dr Abeer Folder\Emotion Project_code and dataset\End User\Experiment_Calculation.py", line 66, in calclate_Experiment
    predictions = DNN(X)
  File "C:\Users\M\Desktop\Dr Abeer Folder\Emotion Project_code and dataset\End User\Experiment_Calculation.py", line 117, in DNN
    te_pr = model.predict(X, batch_size=batch_size, verbose=0)
  File "C:\Users\M\AppData\Roaming\Python\Python27\site-packages\keras\models.py", line 902, in predict
    return self.model.predict(x, batch_size=batch_size, verbose=verbose)
  File "C:\Users\M\AppData\Roaming\Python\Python27\site-packages\keras\engine\training.py", line 1585, in predict
    batch_size=batch_size, verbose=verbose)
  File "C:\Users\M\AppData\Roaming\Python\Python27\site-packages\keras\engine\training.py", line 1212, in _predict_loop
    batch_outs = f(ins_batch)
  File "C:\Users\M\AppData\Roaming\Python\Python27\site-packages\keras\backend\theano_backend.py", line 1158, in __call__
    return self.function(*inputs)
  File "C:\Users\M\AppData\Roaming\Python\Python27\site-packages\theano\compile\function_module.py", line 898, in __call__
    storage_map=getattr(self.fn, 'storage_map', None))
  File "C:\Users\M\AppData\Roaming\Python\Python27\site-packages\theano\gof\link.py", line 325, in raise_with_op
    reraise(exc_type, exc_value, exc_trace)
  File "C:\Users\M\AppData\Roaming\Python\Python27\site-packages\theano\compile\function_module.py", line 884, in __call__
    self.fn() if output_subset is None else\
ValueError: dimension mismatch in args to gemm (4,552)x(2184,2184)->(4,2184)
Apply node that caused the error: GpuDot22(GpuFromHost.0, dense_1/kernel)
Toposort index: 28
Inputs types: [CudaNdarrayType(float32, matrix), CudaNdarrayType(float32, matrix)]
Inputs shapes: [(4, 552), (2184, 2184)]
Inputs strides: [(552, 1), (2184, 1)]
Inputs values: ['not shown', 'not shown']
Outputs clients: [[GpuElemwise{Add}[(0, 0)](GpuDot22.0, GpuDimShuffle{x,0}.0), GpuElemwise{Composite{(i0 + i1 + (i2 * i3))}}[(0, 3)](GpuDot22.0, GpuDimShuffle{x,0}.0, CudaNdarrayConstant{[[ 0.005]]}, GpuReshape{2}.0)]]

HINT: Re-running with most Theano optimization disabled could give you a back-trace of when this node was created. This can be done with by setting the Theano flag 'optimizer=fast_compile'. If that does not work, Theano optimizations can be disabled with 'optimizer=None'.
HINT: Use the Theano flag 'exception_verbosity=high' for a debugprint and storage map footprint of this apply node.

This is very straightforward.

You trained a model whose first layer is a 2184x2184 matrix. The weights you saved were therefore trained for 2184-dimensional inputs and are tuned to the kind of inputs you trained on.
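
A quick way to confirm this is to open best_model.hdf5 with h5py and print the shape of every stored array. The exact group layout depends on the Keras version that wrote the file, so the sketch below simply walks all datasets; it should show a (2184, 2184) kernel for the first Dense layer, matching the traceback above:

import h5py

def print_weight_shapes(path='best_model.hdf5'):
    # walk every dataset stored in the HDF5 file and print its name and shape
    with h5py.File(path, 'r') as f:
        def visit(name, obj):
            if isinstance(obj, h5py.Dataset):
                print('%s %s' % (name, obj.shape))
        f.visititems(visit)

print_weight_shapes()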

If I understand correctly, you want to apply this matrix to inputs of length 552... You are building a model whose first layer is a 552x552 matrix and trying to load a 2184x2184 matrix into it... There is no way around it: it won't work, and your inputs must have exactly the same shape. You cannot change a model once it has been trained.


I hope you see why it doesn't work :-) If not, please ask for clarification.
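
In other words, the model built at prediction time has to repeat the training-time layout exactly, so the saved 2184x2184 first-layer kernel fits. A minimal sketch, assuming the test data has already been brought up to 2184 features (X_test below is only a placeholder, not part of the original code):

import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, GaussianNoise, BatchNormalization
from keras.regularizers import l1_l2

def build_trained_model():
    # same layer sizes as the training script: 2184 -> 1310 -> 786 -> 472 -> 4
    model = Sequential()
    model.add(Dense(2184, input_shape=(2184,), init='he_normal',
                    W_regularizer=l1_l2(l1=0.0, l2=0.0)))
    model.add(GaussianNoise(0.005))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    for units in (1310, 786, 472):
        model.add(Dense(units, init='he_normal',
                        W_regularizer=l1_l2(l1=0.0, l2=0.0)))
        model.add(GaussianNoise(0.005))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))
    model.add(Dense(4, W_regularizer=l1_l2(l1=0.0, l2=0.0)))
    model.add(Activation('softmax'))
    return model

model = build_trained_model()
model.load_weights('best_model.hdf5')

# X_test is a dummy placeholder; real test data must have 2184 columns
X_test = np.zeros((4, 2184), dtype='float32')
te_pr = model.predict(X_test, batch_size=4, verbose=0)

Since ModelCheckpoint saves the full model by default (not only the weights), keras.models.load_model('best_model.hdf5') may also rebuild the trained network directly, without repeating the layer definitions by hand.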

Thanks @NassimBen for the answer. I got it, but I want to do the opposite (the model was trained on 2184x2184, while the test input for prediction is 552x552). After I tried padding the input up to the size the model expects, I got predictions and it worked. So for now I am trying to make the training and test data the same size, so that I don't run into this problem.
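
For reference, a rough numpy sketch of the padding workaround described above: zero-fill each 552-feature sample up to the 2184 features the trained network expects. This only makes the shapes match; whether zero-padded EEG features give meaningful predictions is a separate question.

import numpy as np

def pad_features(X, target_dim=2184):
    # zero-pad each sample's feature vector up to target_dim columns
    X = np.asarray(X, dtype='float32')
    padded = np.zeros((X.shape[0], target_dim), dtype='float32')
    padded[:, :X.shape[1]] = X
    return padded

# example: X with shape (n_samples, 552) becomes (n_samples, 2184),
# which can then be passed to model.predict(...)
X = np.zeros((4, 552), dtype='float32')
X_padded = pad_features(X)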