Machine learning: how to efficiently train a CNN model on a large image dataset

Tags: machine-learning, keras, computer-vision, conv-neural-network

I am a beginner in machine learning. I am building a CNN model with Keras to detect pests from leaf images. During training I exceed the memory limit and cannot train. I have tried Kaggle and Google Colab, but I ran into memory problems on both. Someone suggested I use a data generator, but when I tried to use one I couldn't get anywhere. Is there another way to train efficiently, or an example of how to use a data generator? (I have already seen many examples, but I run into problems when adding one to my code.)

import numpy as np
import pickle
import cv2
from os import listdir
from sklearn.preprocessing import LabelBinarizer
from keras.models import Sequential
from keras.layers import BatchNormalization
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Flatten, Dropout, Dense
from keras import backend as K
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adam
from keras.preprocessing.image import img_to_array
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt



EPOCHS = 25
INIT_LR = 1e-3
BS = 32
default_image_size = (256, 256)
image_size = 0
directory_root = 'PlantVillage/'
width=256
height=256
depth=3

#Function to convert images to array

def convert_image_to_array(image_dir):
    try:
        image = cv2.imread(image_dir)
        if image is not None:
            image = cv2.resize(image,default_image_size)
            return img_to_array(image)
        else:
            return np.array([])
    except Exception as e:
        print(f"Error : {e}")
        return None


image_list, label_list = [], []
try:
    print("[INFO] Loading images ...")
    root_dir = listdir(directory_root)

    #Remove .DS_Store entries from the root directory listing
    root_dir = [d for d in root_dir if d != ".DS_Store"]

    for plant_folder in root_dir :
        plant_disease_folder_list = listdir(f"{directory_root}/{plant_folder}")
        print(f"[INFO] Processing {plant_folder} ...")

        #Remove .DS_Store entries from the per-plant image listing
        plant_disease_folder_list = [f for f in plant_disease_folder_list if f != ".DS_Store"]

        #If the whole dataset is loaded at once there is not enough memory to train
        for images in plant_disease_folder_list:
            image_directory = f"{directory_root}/{plant_folder}/{images}"
            if image_directory.lower().endswith(".jpg"):
                image_array = convert_image_to_array(image_directory)
                #Skip images that could not be read
                if image_array is not None and image_array.size > 0:
                    image_list.append(image_array)
                    label_list.append(plant_folder)



    print("[INFO] Image loading completed")  
except Exception as e:
    print(f"Error : {e}")



#Number of processed images
image_size = len(image_list)

#Converting multi-class labels to binary label vectors (belongs / does not belong to each class)

label_binarizer = LabelBinarizer()
image_labels = label_binarizer.fit_transform(label_list)

#Saving label binarizer instance using pickle
pickle.dump(label_binarizer,open('label_transform.pkl','wb'))
n_classes = len(label_binarizer.classes_)

print(label_binarizer.classes_)

#Normalizing images from [0,255] to [0,1]; float32 halves memory use compared to float64
np_image_list = np.array(image_list, dtype = np.float32)/255.0

#Splitting data into training and test set 80:20
print('Splitting data to train,test')
x_train, x_test, y_train, y_test = train_test_split(np_image_list, image_labels, test_size=0.2, random_state = 42)

#Creating image generator object which performs random rotations, shifts, shears, zooms and flips
aug = ImageDataGenerator(
        rotation_range = 25, width_shift_range=0.1,
        height_shift_range=0.1, shear_range=0.2,
        zoom_range=0.2, horizontal_flip = True,
        fill_mode="nearest")

model = Sequential()
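#Network: stacked Conv2D + BatchNormalization blocks with max pooling and dropout, followed by a dense softmax classifier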

inputShape = (height, width, depth)
chanDim = -1
if K.image_data_format() == "channels_first":
    inputShape = (depth, height, width)
    chanDim = 1

model.add(Conv2D(32, (3, 3), padding="same",input_shape=inputShape))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(Conv2D(64, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(128, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(Conv2D(128, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())

model.add(Dense(32))
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(n_classes))
model.add(Activation("softmax"))

#model.summary()
#Compiling the CNN
opt = Adam(lr= INIT_LR, decay= INIT_LR/EPOCHS)

#categorical_crossentropy matches the one-hot labels and softmax output of this multi-class model
model.compile(loss="categorical_crossentropy", optimizer = opt, metrics=["accuracy"])

#training the Model
print("Training Model.....")

history = model.fit_generator(
        aug.flow(x_train, y_train, batch_size= BS),
        validation_data = (x_test, y_test),
        steps_per_epoch = len(x_train) // BS,
        epochs = EPOCHS, verbose = 1
        )

You can also find the code there.

The problem here is that you have loaded the complete dataset into the workspace, which takes a lot of memory and puts a large extra load on the process.

Instead, you can use a data generator's flow_from_directory, which lets you define an augmentation and preprocessing pipeline and feeds the data on the fly. The advantage is that there is no extra data load on the workspace. You can find an example in the sketch below.
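A minimal sketch of that approach, assuming the images are laid out as PlantVillage/<class_name>/*.jpg and reusing model, BS and EPOCHS from the question; the validation_split value mirrors the question's 80:20 split:

from keras.preprocessing.image import ImageDataGenerator

#One generator defines the augmentation and an 80:20 train/validation split
datagen = ImageDataGenerator(
        rescale=1./255,                  #normalize [0,255] -> [0,1] on the fly
        rotation_range=25, width_shift_range=0.1,
        height_shift_range=0.1, shear_range=0.2,
        zoom_range=0.2, horizontal_flip=True,
        fill_mode="nearest",
        validation_split=0.2)            #reserve 20% of each class folder

#Batches are read from disk on demand, so the full dataset never sits in RAM
train_gen = datagen.flow_from_directory(
        'PlantVillage/',                 #one sub-folder per class
        target_size=(256, 256),
        batch_size=BS,
        class_mode="categorical",
        subset="training")

val_gen = datagen.flow_from_directory(
        'PlantVillage/',
        target_size=(256, 256),
        batch_size=BS,
        class_mode="categorical",
        subset="validation")

history = model.fit_generator(
        train_gen,
        validation_data=val_gen,
        steps_per_epoch=train_gen.samples // BS,
        validation_steps=val_gen.samples // BS,
        epochs=EPOCHS, verbose=1)

Note that with a single generator the augmentation is applied to the validation subset as well; if that is a concern, use two ImageDataGenerator objects (an augmented one for training and a rescale-only one for validation) pointed at pre-split directories.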


Feel free to ask questions.

You can convert the images into a binary format that TensorFlow understands, called the "tfrecord" format; a sketch is shown below.

Please refer to the following links.
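A minimal sketch of that idea, assuming TensorFlow 2.x in eager mode; the file name plants.tfrecord and the image_paths / integer_labels lists are hypothetical placeholders:

import tensorflow as tf

#Writing: store the raw JPEG bytes and an integer label per example
def serialize_example(image_path, label):
    image_bytes = tf.io.read_file(image_path).numpy()    #raw bytes, no decoding
    feature = {
        "image": tf.train.Feature(bytes_list=tf.train.BytesList(value=[image_bytes])),
        "label": tf.train.Feature(int64_list=tf.train.Int64List(value=[label])),
    }
    return tf.train.Example(features=tf.train.Features(feature=feature)).SerializeToString()

with tf.io.TFRecordWriter("plants.tfrecord") as writer:
    for path, label in zip(image_paths, integer_labels):  #hypothetical lists
        writer.write(serialize_example(path, label))

#Reading: decode, resize and normalize on the fly, batch by batch
feature_spec = {
    "image": tf.io.FixedLenFeature([], tf.string),
    "label": tf.io.FixedLenFeature([], tf.int64),
}

def parse_example(record):
    parsed = tf.io.parse_single_example(record, feature_spec)
    image = tf.io.decode_jpeg(parsed["image"], channels=3)
    image = tf.image.resize(image, [256, 256]) / 255.0
    return image, parsed["label"]

dataset = (tf.data.TFRecordDataset("plants.tfrecord")
           .map(parse_example)
           .shuffle(1024)
           .batch(32))

Because the labels here are integers rather than one-hot vectors, a model consuming this dataset would use sparse_categorical_crossentropy.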


How many images are you training on? For training on 1-2M images I used chunks of 10k images. So: read 10k images into memory, fit the model, load the next chunk, fit the model again, until all images have passed through one epoch, then iterate for the desired number of epochs. I don't know if a generator has a smarter way of doing this, or whether it affects accuracy.

from sklearn.utils import shuffle

def sgd(X_train, y_train, n_iter, mini_batch_size=200):
    while True:
        #n_iter: number of shuffled passes per cycle
        for it in range(n_iter):
            print('Iteration {}'.format(it))
            #Shuffle the data points
            X_train, y_train = shuffle(X_train, y_train)
            for i in range(0, X_train.shape[0], mini_batch_size):
                #Get the (X, y) pair of the current mini-batch/chunk
                X_train_mini = X_train[i:i + mini_batch_size]
                y_train_mini = y_train[i:i + mini_batch_size]
                yield X_train_mini, y_train_mini
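If you go this route, the chunk generator plugs straight into Keras; a hedged usage sketch, reusing x_train, y_train, BS and EPOCHS from the question (n_iter=1 and the step count are illustrative):

history = model.fit_generator(
        sgd(x_train, y_train, n_iter=1, mini_batch_size=BS),
        steps_per_epoch=len(x_train) // BS,
        epochs=EPOCHS, verbose=1)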