Python 3.x: Mask R-CNN not working with high-resolution images


Following this reference article, I have been using the tool to train on a set of high-resolution images (e.g. 2400 x 1920). Here I edited balloon.py, with the code below:

import os
import sys
import json
import datetime
import numpy as np
import skimage.draw
import skimage.io  # needed for skimage.io.imread below

# Root directory of the project
ROOT_DIR = os.path.abspath("../../")

# Import Mask RCNN
sys.path.append(ROOT_DIR)  # To find local version of the library
from mrcnn.config import Config
from mrcnn import model as modellib, utils

# Path to trained weights file
COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")

if not os.path.exists(COCO_WEIGHTS_PATH):
    print('weights not available')
else:
    print('weights available')


DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")

#  Configurations
class NeuralCodeConfig(Config):
    NAME = "screens"

    # Number of images to train with on each GPU. A 12 GB GPU can typically
    # fit two images; adjust down if you use a smaller GPU.
    IMAGES_PER_GPU = 1

    # Number of classes (including background)
    NUM_CLASSES = 1 + 10 # Background + other region classes

    # Number of training steps per epoch
    STEPS_PER_EPOCH = 30

    # Skip detections with < 90% confidence
    DETECTION_MIN_CONFIDENCE = 0.9


#  Dataset
class NeuralCodeDataset(utils.Dataset):
    def load_screen(self, dataset_dir, subset):
        """Load a subset of the screens dataset.
        dataset_dir: Root directory of the dataset.
        subset: Subset to load: train or val
        """
        # Add classes.
        self.add_class("screens",1,"logo")
        self.add_class("screens",2,"slider")
        self.add_class("screens",3,"navigation")
        self.add_class("screens",4,"forms")
        self.add_class("screens",5,"social_media_icons")
        self.add_class("screens",6,"video")
        self.add_class("screens",7,"map")
        self.add_class("screens",8,"pagination")
        self.add_class("screens",9,"pricing_table_block")
        self.add_class("screens",10,"gallery")

        
        # Train or validation dataset?
        assert subset in ["train", "val"]
        dataset_dir = os.path.join(dataset_dir, subset)
    
         # Load annotations
        # VGG Image Annotator saves each image in the form:
        # { 'filename': '28503151_5b5b7ec140_b.jpg',
        #   'regions': {
        #       '0': {
        #           'region_attributes': {},
        #           'shape_attributes': {
        #               'all_points_x': [...],
        #               'all_points_y': [...],
        #               'name': 'polygon'}},
        #       ... more regions ...
        #   },
        #   'size': 100202
        # }
        # We mostly care about the x and y coordinates of each region
        annotations = json.load(open(os.path.join(dataset_dir, "via_region_data.json")))
        if not annotations:
            print("region data json is empty")
        else:
            print("region data json loaded")
        # print(annotations)
        annotations = list(annotations.values())  # don't need the dict keys

        # The VIA tool saves images in the JSON even if they don't have any
        # annotations. Skip unannotated images.
        annotations = [a for a in annotations if a['regions']]

        # Add images
        for a in annotations:
            # Get the x, y coordinates of the points of the polygons that
            # make up the outline of each object instance. These are stored
            # in the shape_attributes and region_attributes (see JSON format above)
            polygons = [r['shape_attributes'] for r in a['regions']]
            screens = [r['region_attributes'] for r in a['regions']]

            # Get the file name from the annotated path
            file_name = a['filename'].split("/")[-1]

            # Map each region's class name (stored here in the custom 'html'
            # region attribute) to its integer class ID, so that load_mask()
            # can return a proper class_ids array.
            name_to_id = {c["name"]: c["id"] for c in self.class_info}
            class_ids = [name_to_id[s['html']] for s in screens]

            # load_mask() needs the image size to convert polygons to masks.
            # Unfortunately, VIA doesn't include it in the JSON, so we must
            # read the image. This is only manageable since the dataset is tiny.
            image_path = os.path.join(dataset_dir,file_name)
            image = skimage.io.imread(image_path)
            # (Resizing the image here with utils.resize_image() was also tried.)
            height, width = image.shape[:2]
            self.add_image(
                "screens",
                image_id=file_name,  # use file name as a unique image id
                path=image_path,
                width=width, height=height,
                polygons=polygons,
                class_ids=class_ids)

    def load_mask(self, image_id):
        """Generate instance masks for an image.
       Returns:
        masks: A bool array of shape [height, width, instance count] with
            one mask per instance.
        class_ids: a 1D array of class IDs of the instance masks.
        """
        # If not a screens dataset image, delegate to parent class.
        image_info = self.image_info[image_id]
        if image_info["source"] != "screens":
            return super(self.__class__, self).load_mask(image_id)

        # Convert polygons to a bitmap mask of shape
        # [height, width, instance_count]
        info = self.image_info[image_id]
        mask = np.zeros([info["height"], info["width"], len(info["polygons"])],
                        dtype=np.uint8)
        for i, p in enumerate(info["polygons"]):
            # Get indexes of pixels inside the polygon and set them to 1
            rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])
            mask[rr, cc, i] = 1

        # Return the mask and the array of class IDs of each instance,
        # read back from the info stored by add_image() above.
        class_ids = np.array(info["class_ids"], dtype=np.int32)
        return mask.astype(np.bool), class_ids

    def image_reference(self, image_id):
        """Return the path of the image."""
        info = self.image_info[image_id]
        if info["source"] == "screens":
            return info["path"]
        else:
            return super(self.__class__, self).image_reference(image_id)



def train(model):
    # Train the model.
    # Training dataset.

    dataset_train = NeuralCodeDataset()
    dataset_train.load_screen(args.dataset, "train")
    dataset_train.prepare()

    # Validation dataset
    dataset_val = NeuralCodeDataset()
    dataset_val.load_screen(args.dataset, "val")
    dataset_val.prepare()

    # *** This training schedule is an example. Update to your needs ***
    # Since we're using a very small dataset, and starting from
    # COCO trained weights, we don't need to train too long. Also,
    # no need to train all layers, just the heads should do it.
    print("Training network heads")
    model.train(dataset_train, dataset_val,
                learning_rate=config.LEARNING_RATE,
                epochs=30,
                layers='heads')

#  Training
if __name__ == '__main__':
    import argparse

    # Parse command line arguments
    parser = argparse.ArgumentParser(
        description='Train Mask R-CNN to detect screens.')
    parser.add_argument("command",
                        metavar="<command>",
                        help="'train' or 'splash'")
    parser.add_argument('--dataset', required=True,
                        metavar="../../datasets/screens",
                        help='Directory of the screens dataset')
    parser.add_argument('--weights', required=True,
                        metavar="/weights.h5",
                        help="Path to weights .h5 file or 'coco'")
    parser.add_argument('--logs', required=False,
                        default=DEFAULT_LOGS_DIR,
                        metavar="../../logs/",
                        help='Logs and checkpoints directory (default=logs/)')
    parser.add_argument('--image', required=False,
                        metavar="path or URL to image",
                        help='Image to apply the color splash effect on')
    parser.add_argument('--video', required=False,
                        metavar="path or URL to video",
                        help='Video to apply the color splash effect on')
    args = parser.parse_args()

    # Validate arguments
    if args.command == "train":
        assert args.dataset, "Argument --dataset is required for training"
    elif args.command == "splash":
        assert args.image or args.video,\
               "Provide --image or --video to apply color splash"

    print("Weights: ", args.weights)
    print("Dataset: ", args.dataset)
    print("Logs: ", args.logs)

    # Configurations
    if args.command == "train":
        config = NeuralCodeConfig()
    else:
        class InferenceConfig(NeuralCodeConfig):
            # Set batch size to 1 since we'll be running inference on
            # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
            GPU_COUNT = 1
            IMAGES_PER_GPU = 1
        config = InferenceConfig()
    config.display()

    # Create model
    if args.command == "train":
        model = modellib.MaskRCNN(mode="training", config=config,
                                  model_dir=args.logs)
    else:
        model = modellib.MaskRCNN(mode="inference", config=config,
                                  model_dir=args.logs)

    # Select weights file to load
    if args.weights.lower() == "coco":
        weights_path = COCO_WEIGHTS_PATH
        # Download weights file
        if not os.path.exists(weights_path):
            utils.download_trained_weights(weights_path)
    elif args.weights.lower() == "last":
        # Find last trained weights
        weights_path = model.find_last()
    elif args.weights.lower() == "imagenet":
        # Start from ImageNet trained weights
        weights_path = model.get_imagenet_weights()
    else:
        weights_path = args.weights

    # Load weights
    print("Loading weights ", weights_path)
    if args.weights.lower() == "coco":
        # Exclude the last layers because they require a matching
        # number of classes
        model.load_weights(weights_path, by_name=True, exclude=[
            "mrcnn_class_logits", "mrcnn_bbox_fc",
            "mrcnn_bbox", "mrcnn_mask"])
    else:
        model.load_weights(weights_path, by_name=True)

    # Train or evaluate
    if args.command == "train":
        train(model)
    # elif args.command == "splash":
    #     detect_and_color_splash(model, image_path=args.image,
    #                             video_path=args.video)
    else:
        print("'{}' is not recognized. "
              "Use 'train' or 'splash'".format(args.command))
My laptop's graphics specs are as follows:

NVIDIA GeForce 830M (2 GB), with 250 CUDA cores

CPU specs:

Intel Core i5 (4th generation), 8 GB RAM


What could be going on here? Is it the resolution of the images, or is my GPU not up to the task? Should I train on the CPU instead?

I am sharing my observations with Mask R-CNN from training my own custom dataset.

My dataset comprises images of various dimensions (i.e. the smallest image is around 1700 x 1600 pixels and the largest is around 8500 x 4600 pixels).

I am training on an NVIDIA RTX 2080 Ti with 32 GB DDR4 RAM. I get the following warnings during training, but the training process still completes:

UserWarning: Converting sparse IndexedSlices to a dense Tensor of unknown shape. This may consume a large amount of memory.

2019-05-23 15:25:23.433774: W T:\src\github\tensorflow\tensorflow\core\common_runtime\bfc_allocator.cc:219] Allocator (GPU_0_bfc) ran out of memory trying to allocate 3.14GiB. The caller indicates that this is not a failure, but may mean that there could be performance gains if more memory were available.

A few months ago I tried the same thing on my laptop, which has 12 GB RAM and an NVIDIA 920M (2 GB GPU), and I ran into similar memory errors.

So we can suspect that the size of the GPU memory is a contributing factor in this error.
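
Before blaming the resolution alone, it is worth checking how much GPU memory TensorFlow actually sees. A minimal sketch, assuming the TF 1.x stack this Mask R-CNN code base targets (device_lib is a TF 1.x API):

from tensorflow.python.client import device_lib

# List every device TensorFlow can use. memory_limit is in bytes, so a
# 2 GB card such as the GeForce 830M reports a figure well below 2e9.
for device in device_lib.list_local_devices():
    print(device.name, device.device_type, device.memory_limit)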

Furthermore, the batch size is another contributing factor; but I see that you have already set IMAGES_PER_GPU = 1. If you search for BATCH_SIZE in the config.py file inside the mrcnn folder, you will find:

self.BATCH_SIZE = self.IMAGES_PER_GPU * self.GPU_COUNT

So in your case, the batch size is already 1.
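
You can verify the effective batch size directly; a minimal sketch, assuming the NeuralCodeConfig class from the question is importable:

config = NeuralCodeConfig()
# Config.__init__ in mrcnn computes BATCH_SIZE = IMAGES_PER_GPU * GPU_COUNT
print(config.GPU_COUNT, config.IMAGES_PER_GPU, config.BATCH_SIZE)
# With the default GPU_COUNT = 1 and IMAGES_PER_GPU = 1, this prints: 1 1 1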


In conclusion, I would suggest trying the same code on a more powerful GPU.
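
Short of new hardware, you can also make the model itself cheaper to run. The attributes below all exist in mrcnn/config.py, but the specific values are an illustrative, untested sketch:

class LowMemoryConfig(NeuralCodeConfig):
    # Downscale and pad every image to at most 512 x 512 before it enters
    # the network ("square" mode pads up to IMAGE_MAX_DIM x IMAGE_MAX_DIM).
    IMAGE_RESIZE_MODE = "square"
    IMAGE_MIN_DIM = 400
    IMAGE_MAX_DIM = 512

    # A smaller backbone than the default resnet101 saves a lot of memory.
    BACKBONE = "resnet50"

    # Fewer ROIs per training image shrinks the memory use of the heads.
    TRAIN_ROIS_PER_IMAGE = 100

Training would then be started with config = LowMemoryConfig() in place of NeuralCodeConfig().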

Comments:

These are warnings, not errors. I don't see any problem here, other than that large images need a lot of GPU memory.

@MatiasValdenegro: It stops iterating right after showing the UserWarning. I also resized the images (below 1024 x 800), but the same thing happens.

Then please include the full log from running the software, including all errors and warnings.

@MatiasValdenegro: I have included the full error log. Please take a look and help me with the code.