Python VGG16 net validation accuracy

Tags: python, tensorflow, deep-learning

I am trying to classify data labeled with 16 image classes. I get a validation accuracy of 60%, but when I try to predict the class labels on that same validation data, my accuracy drops to 6%.

The weights are loaded from a saved file.

Can you tell me why this is happening?

I could not find the code you used to manually compute the accuracy after obtaining predictions with predict_generator.
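
For reference, here is a minimal sketch of how such a manual accuracy check typically looks for a multi-class model (assuming a 16-class softmax output and a Keras ImageDataGenerator; the generator, directory, and size names are illustrative):

    import numpy as np

    # Build the validation generator with shuffle=False so that the order of
    # val_gen.classes matches the order in which predictions are produced.
    val_gen = validation_image_generator.flow_from_directory(
        directory=validation_dir,                  # illustrative path
        target_size=(IMG_HEIGHT, IMG_WIDTH),       # illustrative size
        class_mode='categorical',
        shuffle=False)

    pred = model.predict(val_gen)                  # shape: (num_samples, 16)
    predicted = np.argmax(pred, axis=1)            # index of the highest score

    manual_acc = np.mean(predicted == val_gen.classes)
    print('Manual validation accuracy: {:.2f} %'.format(manual_acc * 100))

A mismatch between the epoch-end validation accuracy and such a manual computation is very often caused by a shuffled validation generator, which misaligns the predictions with the labels.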

Below is a simple cats-vs-dogs classification program that I have run for one epoch:

  • The validation accuracy is displayed at the end of the epoch
  • The weights are then saved with save_weights
  • The model is subsequently loaded with load_weights
  • A confusion matrix is built with sklearn's confusion_matrix, and the accuracy is computed from it
  • The validation accuracy reported at the end of the epoch matches the manually computed accuracy

    Code -

    import tensorflow as tf
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
    from tensorflow.keras.preprocessing.image import ImageDataGenerator
    from tensorflow.keras.optimizers import Adam
    
    import os
    import numpy as np
    import matplotlib.pyplot as plt
    
    _URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip'
    
    path_to_zip = tf.keras.utils.get_file('cats_and_dogs.zip', origin=_URL, extract=True)
    
    PATH = os.path.join(os.path.dirname(path_to_zip), 'cats_and_dogs_filtered')
    
    train_dir = os.path.join(PATH, 'train')
    validation_dir = os.path.join(PATH, 'validation')
    
    train_cats_dir = os.path.join(train_dir, 'cats')  # directory with our training cat pictures
    train_dogs_dir = os.path.join(train_dir, 'dogs')  # directory with our training dog pictures
    validation_cats_dir = os.path.join(validation_dir, 'cats')  # directory with our validation cat pictures
    validation_dogs_dir = os.path.join(validation_dir, 'dogs')  # directory with our validation dog pictures
    
    num_cats_tr = len(os.listdir(train_cats_dir))
    num_dogs_tr = len(os.listdir(train_dogs_dir))
    
    num_cats_val = len(os.listdir(validation_cats_dir))
    num_dogs_val = len(os.listdir(validation_dogs_dir))
    
    total_train = num_cats_tr + num_dogs_tr
    total_val = num_cats_val + num_dogs_val
    
    batch_size = 128
    epochs = 1
    IMG_HEIGHT = 150
    IMG_WIDTH = 150
    
    train_image_generator = ImageDataGenerator(rescale=1./255,brightness_range=[0.5,1.5]) # Generator for our training data
    validation_image_generator = ImageDataGenerator(rescale=1./255,brightness_range=[0.5,1.5]) # Generator for our validation data
    
    train_data_gen = train_image_generator.flow_from_directory(batch_size=batch_size,
                                                               directory=train_dir,
                                                               shuffle=True,
                                                               target_size=(IMG_HEIGHT, IMG_WIDTH),
                                                               class_mode='binary')
    
    val_data_gen = validation_image_generator.flow_from_directory(batch_size=batch_size,
                                                                  directory=validation_dir,
                                                                  target_size=(IMG_HEIGHT, IMG_WIDTH),
                                                                  class_mode='binary')
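    # Note: flow_from_directory shuffles by default. If predictions are later
    # compared against val_data_gen.classes, pass shuffle=False here so the
    # label order matches the order in which predictions are generated.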
    
    model = Sequential([
        Conv2D(16, 3, padding='same', activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH ,3)),
        MaxPooling2D(),
        Conv2D(32, 3, padding='same', activation='relu'),
        MaxPooling2D(),
        Conv2D(64, 3, padding='same', activation='relu'),
        MaxPooling2D(),
        Flatten(),
        Dense(512, activation='relu'),
        Dense(1)
    ])
    
    optimizer = 'SGD'
    
    model.compile(optimizer=optimizer, 
              loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
              metrics=['accuracy'])
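    # from_logits=True: the final Dense(1) layer outputs a raw logit and the
    # sigmoid is applied inside the loss function.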
    
    history = model.fit_generator(
              train_data_gen,
              steps_per_epoch=total_train // batch_size,
              epochs=epochs,
              validation_data=val_data_gen,
              validation_steps=total_val // batch_size)
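    # Note: fit_generator and predict_generator are deprecated in newer TF2
    # releases; model.fit and model.predict accept generators directly.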
    
    # Save the weights
    model.save_weights('my_model.hdf5')
    
    from sklearn.metrics import confusion_matrix
    
    # Load the weights
    model.load_weights('my_model.hdf5')
    
    # Reset 
    val_data_gen.reset()
    
    # Predictions
    pred = model.predict_generator(val_data_gen)
    predicted = np.argmax(pred,axis=1)
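    # Caution: with a single output unit, pred has shape (N, 1), so
    # np.argmax(pred, axis=1) returns 0 for every sample; thresholding the
    # logit, e.g. (pred > 0).astype(int).ravel(), is the usual way to get
    # binary labels.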
    
    # Actual Labels
    labels = val_data_gen.classes
    
    # Compute Accuracy
    conf_mat = confusion_matrix(predicted, labels)
    acc = np.sum(conf_mat.diagonal()) / np.sum(conf_mat)
    print('Validation accuracy: {} %'.format(acc*100))
    
    Output -

    Found 2000 images belonging to 2 classes.
    Found 1000 images belonging to 2 classes.
    15/15 [==============================] - 29s 2s/step - loss: 0.6917 - accuracy: 0.4952 - val_loss: 0.6906 - val_accuracy: 0.5045
    Validation accuracy: 50.0 %
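
As a cross-check, sklearn's accuracy_score gives the same number as summing the diagonal of the confusion matrix (note that confusion_matrix conventionally takes y_true as its first argument, although the accuracy value is unaffected by the order):

    from sklearn.metrics import accuracy_score

    # Equivalent to conf_mat.diagonal().sum() / conf_mat.sum()
    acc = accuracy_score(labels, predicted)
    print('Validation accuracy: {} %'.format(acc * 100))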
