Python 从Kagglectanddog训练数据集,加载数据集有一些类型的BMP错误

Python 从Kagglectanddog训练数据集,加载数据集有一些类型的BMP错误,python,tensorflow,Python,Tensorflow,但当我试图找出错误时,我做到了: def process_path(img_path, label): label = tf.one_hot(label, depth=class_num) image = tf.io.read_file(img_path) # image = tf.image.decode_bmp(image) image = tf.image.decode_jpeg(image) image = tf.image.convert_ima

但当我试图找出错误时,我做到了:

def process_path(img_path, label):
    label = tf.one_hot(label, depth=class_num)
    image = tf.io.read_file(img_path)
#     image = tf.image.decode_bmp(image)
    image = tf.image.decode_jpeg(image)
    image = tf.image.convert_image_dtype(image, tf.float32)
    image = tf.image.resize(image, [im_height, im_width])
    return image, label

AUTOTUNE = tf.data.experimental.AUTOTUNE

# load train dataset
train_dataset = tf.data.Dataset.from_tensor_slices((train_image_list, train_label_list))
train_dataset = train_dataset.shuffle(buffer_size=train_num)\
    .map(process_path, num_parallel_calls=AUTOTUNE).repeat().\
    batch(batch_size).prefetch(AUTOTUNE)

# load val dataset
val_dataset = tf.data.Dataset.from_tensor_slices((val_image_list, val_label_list))
val_dataset = val_dataset.map(process_path, num_parallel_calls=AUTOTUNE)\
    .repeat().batch(batch_size)
我想打印“train_数据集”中的第一个项目

它有时给我错误的提示,有时我没有错误

InvalidArgumentError:尝试使用错误的op解码BMP格式。请改用
decode\u BMP
decode\u image
。使用的操作:解码JPEG
[{{node DecodeJpeg}}]]

我所有的密码

for i, img in enumerate(train_dataset):
    try:
        if i==0:
            break
        print(img)
        print('*********************',i,'***********************')
        
    except Exception as e:
        print(e)
        print('0000000000000000',i,'0000000000000000')

#!/usr/bin/env python
# coding: utf-8

# In[52]:


import matplotlib.pyplot as plt
from model import AlexNet_v1, AlexNet_v2
import tensorflow as tf
import json
import os
import time
import glob
import random

os.environ['CUDA_DEVICE_ORDER'] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"



gpus = tf.config.experimental.list_physical_devices("GPU")
if gpus:
    try:
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
    except RuntimeError as e:
        print(e)
        exit(-1)

data_root = os.path.abspath(os.path.join(os.getcwd()))
image_path = os.path.join(data_root, 'datasets')
train_dir = os.path.join(image_path, 'train')
validation_dir = os.path.join(image_path, 'val')

assert os.path.exists(train_dir), "cannot find {}".format(train_dir)
assert os.path.exists(validation_dir), "cannot find {}".format(validation_dir)

# create direction for saving weights
if not os.path.exists('save_weights'):
    os.makedirs('save_weights')

im_height = 224
im_width = 224
batch_size = 128
epochs = 10

# class dict
data_class = [cla for cla in os.listdir(train_dir) if os.path.isdir(os.path.join(train_dir, cla))]
class_num = len(data_class)
class_dict = dict((value, index) for index, value in enumerate(data_class))

# reverse value and key of dict
inverse_dict = dict((val, key) for key, val in class_dict.items())

json_str = json.dumps(inverse_dict, indent=4)
with open('class_indices.json', 'w') as json_file:
    json_file.write(json_str)

# load train images list
train_image_list = glob.glob(train_dir + "/*/*.jpg")
random.shuffle(train_image_list)
train_num = len(train_image_list)
assert train_num > 0, "cannot find any .jpg file in {}".format(train_dir)
train_label_list = [class_dict[path.split(os.path.sep)[-2]] for path in train_image_list]

# load validation images list
val_image_list = glob.glob(validation_dir+'/*/*.jpg')
random.shuffle(val_image_list)
val_num = len(val_image_list)
assert train_num > 0, "cannot find any .jpg file in {}".format(validation_dir)
val_label_list = [class_dict[path.split(os.path.sep)[-2]] for path in val_image_list]

print("using {} images for training, {} images for validation.".format(train_num,
                                                                       val_num))


def process_path(img_path, label):
    label = tf.one_hot(label, depth=class_num)
    image = tf.io.read_file(img_path)
#     image = tf.image.decode_bmp(image)
    image = tf.image.decode_jpeg(image)
    image = tf.image.convert_image_dtype(image, tf.float32)
    image = tf.image.resize(image, [im_height, im_width])
    return image, label

AUTOTUNE = tf.data.experimental.AUTOTUNE

# load train dataset
train_dataset = tf.data.Dataset.from_tensor_slices((train_image_list, train_label_list))
train_dataset = train_dataset.shuffle(buffer_size=train_num)    .map(process_path, num_parallel_calls=AUTOTUNE).repeat().    batch(batch_size).prefetch(AUTOTUNE)

# load val dataset
val_dataset = tf.data.Dataset.from_tensor_slices((val_image_list, val_label_list))
val_dataset = val_dataset.map(process_path, num_parallel_calls=AUTOTUNE)    .repeat().batch(batch_size)










model = AlexNet_v1(im_height=im_height, im_width=im_width, num_classes=2)
model.summary()

model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
              loss=tf.keras.losses.CategoricalCrossentropy(from_logits=False),
              metrics=['accuracy'])
callbacks = [tf.keras.callbacks.ModelCheckpoint(filepath='./save_weights/myAlex_{epoch}.h5',
                                                save_best_only=True,
                                                save_weights_only=True,
                                                monitor='val_loss')]
history = model.fit(x=train_dataset,
                    steps_per_epoch=train_num//batch_size,
                    epochs=epochs,
                    validation_data=val_dataset,
                    validation_steps=val_num//batch_size,
                    callbacks=callbacks)



history_dict = history.history



train_loss = history_dict['loss']
train_acc = history_dict['accuracy']
val_loss = history_dict['val_loss']
val_acc = history_dict['val_accuracy']



# figure 1
plt.figure()
plt.plot(range(epochs), train_loss, label = 'train_loss')
plt.plot(range(epochs), val_loss, label = 'val_loss')
plt.legend()
plt.xlabel('epochs')
plt.ylabel('loss')

# figure 2
plt.figure()
plt.plot(range(epochs), train_acc, label = 'train_acc')
plt.plot(range(epochs), val_acc, label = 'val_acc')
plt.legend()
plt.xlabel('epochs')
plt.ylabel('accuracy')

from tensorflow.keras import layers, models, Model, Sequential


def AlexNet_v1(im_height=224, im_width=224, num_classes=1000):
    # tensorflow中的tensor通道排序是NHWC
    input_image = layers.Input(shape=(im_height, im_width, 3), dtype="float32")  # output(None, 224, 224, 3)
    x = layers.ZeroPadding2D(((1, 2), (1, 2)))(input_image)                      # output(None, 227, 227, 3)
    x = layers.Conv2D(48, kernel_size=11, strides=4, activation="relu")(x)       # output(None, 55, 55, 48)
    x = layers.MaxPool2D(pool_size=3, strides=2)(x)                              # output(None, 27, 27, 48)
    x = layers.Conv2D(128, kernel_size=5, padding="same", activation="relu")(x)  # output(None, 27, 27, 128)
    x = layers.MaxPool2D(pool_size=3, strides=2)(x)                              # output(None, 13, 13, 128)
    x = layers.Conv2D(192, kernel_size=3, padding="same", activation="relu")(x)  # output(None, 13, 13, 192)
    x = layers.Conv2D(192, kernel_size=3, padding="same", activation="relu")(x)  # output(None, 13, 13, 192)
    x = layers.Conv2D(128, kernel_size=3, padding="same", activation="relu")(x)  # output(None, 13, 13, 128)
    x = layers.MaxPool2D(pool_size=3, strides=2)(x)                              # output(None, 6, 6, 128)

    x = layers.Flatten()(x)                         # output(None, 6*6*128)
    x = layers.Dropout(0.2)(x)
    x = layers.Dense(2048, activation="relu")(x)    # output(None, 2048)
    x = layers.Dropout(0.2)(x)
    x = layers.Dense(2048, activation="relu")(x)    # output(None, 2048)
    x = layers.Dense(num_classes)(x)                  # output(None, 5)
    predict = layers.Softmax()(x)

    model = models.Model(inputs=input_image, outputs=predict)
    return model