Python 为什么我的CNN keras模型的预测总是一样的?

Python 为什么我的CNN keras模型的预测总是一样的?,python,keras,deep-learning,classification,conv-neural-network,Python,Keras,Deep Learning,Classification,Conv Neural Network,我训练了一个CNN模型,将ECG信号分为4个不同的类别,然后保存。现在我加载了它,并试图用它预测一些事情,我给它32个心电图信号。所以第一个预测总是给我4个不同的百分比,但是接下来的31个是相同的。有人知道为什么吗? 整个代码可在此处找到: 对于预测,我稍微修改了批处理生成器: def __data_generation(h5file, list_IDs, batch_size, dim, nperseg, noverlap, n_channels, sequence_length, n_cla

我训练了一个CNN模型,将ECG信号分为4个不同的类别,然后保存。现在我加载了它,并试图用它预测一些事情,我给它32个心电图信号。所以第一个预测总是给我4个不同的百分比,但是接下来的31个是相同的。有人知道为什么吗? 整个代码可在此处找到:

对于预测,我稍微修改了批处理生成器:

def __data_generation(h5file, list_IDs, batch_size, dim, nperseg, noverlap, n_channels, sequence_length, n_classes, shuffle):
    'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
    # Initialization
    X = np.empty((batch_size, *dim, n_channels), dtype = float)
    y = np.empty((batch_size), dtype = int)
    # Generate data
    for i, ID in enumerate(list_IDs):
        data = extend_ts(h5file[ID]['ecgdata'][:, 0], sequence_length)
        data = np.reshape(data, (1, len(data)))

        # Generate spectrogram
        data_spectrogram = spectrogram(data, nperseg = nperseg, noverlap = noverlap)[2]   

        # Normalize
        data_norm = (data_spectrogram - np.mean(data_spectrogram))/np.std(data_spectrogram)

        X[i,] = np.expand_dims(data_norm, axis = 3)

        return X
我给它一些参数,在val_发生器中输入32个信号(我知道它现在不是发生器)

然后当我试图预测它的时候

from keras.models import load_model
model = load_model("Physionet17_ECG_CNN.model")
prediction = model.predict(val_generator)
print(prediction)
我得到了这些预测

[[1.9367344e-03 9.4601721e-01 6.0286120e-02 1.1672693e-03]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]]
有人知道为什么会这样吗? 谢谢

以防有人想查看预测的完整代码

import tensorflow as tf
import numpy as np
import pandas as pd
import os
import h5py
import matplotlib
import scipy.io
import cv2
import numpy as np
from matplotlib import pyplot as plt

# Magic
%matplotlib inline
matplotlib.style.use('ggplot')

from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder

# Keras5
import keras
from keras.models import Sequential
from keras import layers
from keras import optimizers
from keras import backend as K
from keras import regularizers

# Tensorflow -- the info from the devices used
import tensorflow as tf
from tensorflow.python.client import device_lib

# Custom imports
from physionet_processing import (fetch_h5data, spectrogram, 
                                  special_parameters, transformed_stats, extend_ts)

from physionet_generator_Copy1 import DataGeneratorAdib

### Open hdf5 file, load the labels and define training/validation splits ###

# Data folder and hdf5 dataset file
data_root = os.path.normpath('.')
hd_file = os.path.join(data_root, 'physioANDptb.h5')

# Open hdf5 file
h5file =  h5py.File(hd_file, 'r')
print(h5file)
# Get a list of dataset names -- keys()-gives the list of all attributes
dataset_list = list(h5file.keys())
#print(dataset_list)
# Encode labels to integer numbers
label_set = ['A', 'N', 'O', '~']
encoder = LabelEncoder().fit(label_set)
label_set_codings = [0, 1, 2, 3]

### Set up batch generators ###

# Parameters needed for the batch generator
# Maximum sequence length
max_length = 18286

# Output dimensions
sequence_length = max_length
spectrogram_nperseg = 64 # Spectrogram window
spectrogram_noverlap = 32 # Spectrogram overlap
n_classes = len(label_set)

batch_size = 32

# import ipdb; ipdb.set_trace() # debugging starts here

# calculate image dimensions
data = fetch_h5data(h5file, [0], sequence_length)# fetch some raw sequences from the hdf5 file
_, _, Sxx = spectrogram(data, nperseg = spectrogram_nperseg, noverlap = spectrogram_noverlap)
dim = Sxx[0].shape
print(data.shape)

# print(dataset_list)

def __data_generation(h5file, list_IDs, batch_size, dim, nperseg, noverlap, n_channels, sequence_length, n_classes, shuffle):
    'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
    # Initialization
    X = np.empty((batch_size, *dim, n_channels), dtype = float)
    y = np.empty((batch_size), dtype = int)
    # Generate data
    for i, ID in enumerate(list_IDs):
        data = extend_ts(h5file[ID]['ecgdata'][:, 0], sequence_length)
        data = np.reshape(data, (1, len(data)))

        # Generate spectrogram
        data_spectrogram = spectrogram(data, nperseg = nperseg, noverlap = noverlap)[2]   

        # Normalize
        data_norm = (data_spectrogram - np.mean(data_spectrogram))/np.std(data_spectrogram)

        X[i,] = np.expand_dims(data_norm, axis = 3)

        return X

params = {'batch_size': batch_size,
          'dim': dim,
          'nperseg': spectrogram_nperseg,
          'noverlap': spectrogram_noverlap,
          'n_channels': 1,
          'sequence_length': sequence_length,
          'n_classes': n_classes,
          'shuffle': True}


val_generator = __data_generation(h5file, dataset_list, **params)

for i, batch in enumerate(val_generator):
    if i == 1:
        break


from keras.models import load_model
model = load_model("Physionet17_ECG_CNN_E50_SPE200.model")

val_generator = np.arange(18810*3)
val_generator = val_generator.reshape(3, 570, 33, 1)
# bla1 = np.arange(18810)
# bla1 = bla1.reshape(1, 570, 33, 1)
# val_generator(2,)= bla1
val_generator.shape

val_generator = np.random.random((32, 570, 33, 1))


prediction = model.predict(val_generator)
print(prediction)

这是不同版本的代码吗?似乎val_生成器是由完全随机数组成的这是不同版本的代码吗?似乎val_生成器是由完全随机的数字组成的
[[1.9367344e-03 9.4601721e-01 6.0286120e-02 1.1672693e-03]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]
 [3.3482336e-04 6.3787904e-03 5.1438063e-02 9.9722642e-01]]
import tensorflow as tf
import numpy as np
import pandas as pd
import os
import h5py
import matplotlib
import scipy.io
import cv2
import numpy as np
from matplotlib import pyplot as plt

# Magic
%matplotlib inline
matplotlib.style.use('ggplot')

from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder

# Keras5
import keras
from keras.models import Sequential
from keras import layers
from keras import optimizers
from keras import backend as K
from keras import regularizers

# Tensorflow -- the info from the devices used
import tensorflow as tf
from tensorflow.python.client import device_lib

# Custom imports
from physionet_processing import (fetch_h5data, spectrogram, 
                                  special_parameters, transformed_stats, extend_ts)

from physionet_generator_Copy1 import DataGeneratorAdib

### Open hdf5 file, load the labels and define training/validation splits ###

# Data folder and hdf5 dataset file
data_root = os.path.normpath('.')
hd_file = os.path.join(data_root, 'physioANDptb.h5')

# Open hdf5 file
h5file =  h5py.File(hd_file, 'r')
print(h5file)
# Get a list of dataset names -- keys()-gives the list of all attributes
dataset_list = list(h5file.keys())
#print(dataset_list)
# Encode labels to integer numbers
label_set = ['A', 'N', 'O', '~']
encoder = LabelEncoder().fit(label_set)
label_set_codings = [0, 1, 2, 3]

### Set up batch generators ###

# Parameters needed for the batch generator
# Maximum sequence length
max_length = 18286

# Output dimensions
sequence_length = max_length
spectrogram_nperseg = 64 # Spectrogram window
spectrogram_noverlap = 32 # Spectrogram overlap
n_classes = len(label_set)

batch_size = 32

# import ipdb; ipdb.set_trace() # debugging starts here

# calculate image dimensions
data = fetch_h5data(h5file, [0], sequence_length)# fetch some raw sequences from the hdf5 file
_, _, Sxx = spectrogram(data, nperseg = spectrogram_nperseg, noverlap = spectrogram_noverlap)
dim = Sxx[0].shape
print(data.shape)

# print(dataset_list)

def __data_generation(h5file, list_IDs, batch_size, dim, nperseg, noverlap, n_channels, sequence_length, n_classes, shuffle):
    'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
    # Initialization
    X = np.empty((batch_size, *dim, n_channels), dtype = float)
    y = np.empty((batch_size), dtype = int)
    # Generate data
    for i, ID in enumerate(list_IDs):
        data = extend_ts(h5file[ID]['ecgdata'][:, 0], sequence_length)
        data = np.reshape(data, (1, len(data)))

        # Generate spectrogram
        data_spectrogram = spectrogram(data, nperseg = nperseg, noverlap = noverlap)[2]   

        # Normalize
        data_norm = (data_spectrogram - np.mean(data_spectrogram))/np.std(data_spectrogram)

        X[i,] = np.expand_dims(data_norm, axis = 3)

        return X

params = {'batch_size': batch_size,
          'dim': dim,
          'nperseg': spectrogram_nperseg,
          'noverlap': spectrogram_noverlap,
          'n_channels': 1,
          'sequence_length': sequence_length,
          'n_classes': n_classes,
          'shuffle': True}


val_generator = __data_generation(h5file, dataset_list, **params)

for i, batch in enumerate(val_generator):
    if i == 1:
        break


from keras.models import load_model
model = load_model("Physionet17_ECG_CNN_E50_SPE200.model")

val_generator = np.arange(18810*3)
val_generator = val_generator.reshape(3, 570, 33, 1)
# bla1 = np.arange(18810)
# bla1 = bla1.reshape(1, 570, 33, 1)
# val_generator(2,)= bla1
val_generator.shape

val_generator = np.random.random((32, 570, 33, 1))


prediction = model.predict(val_generator)
print(prediction)