Using Mask R-CNN for detection applications

Over the past few days I have been digging into Mask R-CNN, from the underlying principles to the detection data, including building and annotating a dataset and training the model on a single GTX 1080 Ti. Some problems remain unsolved as of this writing; I searched the issues of the matterport/Mask_RCNN GitHub repository but did not find an answer that resolves mine.

My task is to modify the balloon example to recognize and instance-segment data annotated with multiple classes; the balloon sample detects only a single class. I also tried training on just one of my classes, and that trains fine and produces detection results. Real applications, however, usually need detection and recognition across multiple classes: pedestrians, cars, obstacles, traffic lights and so on are all detection targets in autonomous driving. Any vision solution that experiments with Mask R-CNN therefore has to handle multiple classes, so multi-class instance segmentation is worth working out. The relevant code is posted below for reference.
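
For context, the training code below parses VIA-style annotations in which each region carries a region_attributes entry naming its class (a "type" attribute in my dataset). A sketch of what one entry in via_region_data.json looks like, with illustrative file names and coordinates:

{
  "image1.jpg123456": {
    "filename": "image1.jpg",
    "size": 123456,
    "regions": [
      {
        "shape_attributes": {"name": "polygon",
                             "all_points_x": [10, 50, 30],
                             "all_points_y": [20, 25, 60]},
        "region_attributes": {"type": "porosity"}
      }
    ],
    "file_attributes": {}
  }
}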

Training code


# Mask R-CNN multi-class training script, adapted from the matterport
# balloon example.


import os
import sys
import json
import datetime
import numpy as np
import skimage.draw
import skimage.io
import skimage.color
# Root directory of the project
ROOT_DIR = os.path.abspath("../../")

# Import Mask RCNN
sys.path.append(ROOT_DIR)  # To find local version of the library
from mrcnn.config import Config
from mrcnn import model as modellib, utils

# Path to trained weights file
COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")

# Directory to save logs and model checkpoints, if not provided
# through the command line argument --logs
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")

############################################################
#  Configurations
############################################################


class DefectConfig(Config):
    """Configuration for training on the defect dataset.
    Derives from the base Config class and overrides some values.
    """
    # Give the configuration a recognizable name
    NAME = "defect"

    # We train on a single GTX 1080 Ti (11GB); one image per GPU keeps
    # memory usage safe. Adjust up if your GPU can fit more images.
    IMAGES_PER_GPU = 1

    # Number of classes (including background)
    NUM_CLASSES = 1 + 6  # Background + 6 defect classes

    # Number of training steps per epoch
    STEPS_PER_EPOCH = 100

    # Skip detections with < 90% confidence
    DETECTION_MIN_CONFIDENCE = 0.9


############################################################
#  Dataset
############################################################

class DefectDataset(utils.Dataset):

    def load_defect(self, dataset_dir, subset):
        """Load a subset of the defect dataset.
        dataset_dir: Root directory of the dataset.
        subset: Subset to load: train or val
        """
        # Add classes. We have 6 defect classes. The source name ("defect")
        # must match the source passed to add_image() and checked in
        # load_mask(); the class names must match the name_dict below.
        self.add_class("defect", 1, "porosity")
        self.add_class("defect", 2, "crack")
        self.add_class("defect", 3, "porosity array")
        self.add_class("defect", 4, "lacking of sintering")
        self.add_class("defect", 5, "surface hollow")
        self.add_class("defect", 6, "surface scratch")

        # Train or validation dataset
        assert subset in ["train", "val"]
        dataset_dir = os.path.join(dataset_dir, subset)

        # Load annotations
        # We mostly care about the x and y coordinates of each region
        # Note: In VIA 2.0, regions was changed from a dict to a list.
        annotations = json.load(open(os.path.join(dataset_dir, "via_region_data.json")))
        annotations = list(annotations.values())  # don't need the dict keys

        # The VIA tool saves images in the JSON even if they don't have any
        # annotations. Skip unannotated images.
        annotations = [a for a in annotations if a['regions']]
        # Map each region's "type" attribute to a numeric class ID. These
        # names must match the region_attributes written by the VIA tool.
        name_dict = {"porosity": 1,
                     "crack": 2,
                     "porosity array": 3,
                     "lacking of sintering": 4,
                     "surface hollow": 5,
                     "surface scratch": 6}

        # Add images
        for a in annotations:
            # Get the x, y coordinates of points of the polygons that make
            # up the outline of each object instance. These are stored in
            # the shape_attributes (see the JSON format above). In VIA 2.0,
            # regions is a list; for VIA 1.x use a['regions'].values().
            polygons = [r['shape_attributes'] for r in a['regions']]

            # Build the per-image list of class IDs. It must be rebuilt for
            # every image; a single list accumulated across images would
            # misalign class IDs with instances. A KeyError here flags an
            # unexpected label rather than silently skipping it, which would
            # leave the list shorter than polygons.
            class_names = [r['region_attributes'] for r in a['regions']]
            idlist = [name_dict[c["type"]] for c in class_names]

            # load_mask() needs the image size to convert polygons to masks.
            # Unfortunately, VIA doesn't include it in the JSON, so we must
            # read the image. This is only manageable since the dataset is
            # tiny.
            image_path = os.path.join(dataset_dir, a['filename'])
            image = skimage.io.imread(image_path)
            height, width = image.shape[:2]

            self.add_image(
                "defect",
                image_id=a['filename'],  # use file name as a unique image id
                path=image_path,
                class_id=np.array(idlist),
                width=width, height=height,
                polygons=polygons)

    def load_mask(self, image_id):
        """Generate instance masks for an image.
        Returns:
        masks: A bool array of shape [height, width, instance count] with
            one mask per instance.
        class_ids: a 1D array of class IDs of the instance masks.
        """
        # If not a defect dataset image, delegate to parent class.
        image_info = self.image_info[image_id]
        if image_info["source"] != "defect":
            return super(self.__class__, self).load_mask(image_id)

        name_id = image_info["class_id"]

        # Convert polygons to a bitmap mask of shape
        # [height, width, instance_count]
        info = self.image_info[image_id]
        mask = np.zeros([info["height"], info["width"], len(info["polygons"])],
                        dtype=np.uint8)
        class_ids = np.array(name_id, dtype=np.int32)

        for i, p in enumerate(info["polygons"]):
            # Get indexes of pixels inside the polygon and set them to 1
            rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])
            mask[rr, cc, i] = 1

        # Return the mask and the array of class IDs of each instance.
        # Unlike the single-class balloon example, the class IDs were stored
        # per instance in load_defect(), so we return them instead of an
        # array of 1s.
        return mask.astype(bool), class_ids

    def image_reference(self, image_id):
        """Return the path of the image."""
        info = self.image_info[image_id]
        if info["source"] == "defect":
            return info["path"]
        else:
            return super(self.__class__, self).image_reference(image_id)
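
    # Before launching a full training run, it helps to sanity-check the
    # dataset class interactively (a sketch; the dataset path below is a
    # placeholder):
    #
    #   dataset = DefectDataset()
    #   dataset.load_defect("/path/to/defect/dataset", "train")
    #   dataset.prepare()
    #   print("Class names:", dataset.class_names)
    #   masks, class_ids = dataset.load_mask(dataset.image_ids[0])
    #   # one mask layer per instance, one class ID per mask layer
    #   assert masks.shape[-1] == len(class_ids)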


def train(model):
    """Train the model."""
    # Training dataset.
    dataset_train = DefectDataset()
    dataset_train.load_defect(args.dataset, "train")
    dataset_train.prepare()

    # Validation dataset
    dataset_val = DefectDataset()
    dataset_val.load_defect(args.dataset, "val")
    dataset_val.prepare()

    # *** This training schedule is an example. Update to your needs ***
    # Since we're using a very small dataset, and starting from
    # COCO trained weights, we don't need to train too long. Also,
    # no need to train all layers, just the heads should do it.
    print("Training network heads")
    model.train(dataset_train, dataset_val,
                learning_rate=config.LEARNING_RATE,
                epochs=30,
                layers='heads')


def color_splash(image, mask):
    """Apply color splash effect.
    image: RGB image [height, width, 3]
    mask: instance segmentation mask [height, width, instance count]

    Returns result image.
    """
    # Make a grayscale copy of the image. The grayscale copy still
    # has 3 RGB channels, though.
    gray = skimage.color.gray2rgb(skimage.color.rgb2gray(image)) * 255
    # Copy color pixels from the original color image where mask is set
    if mask.shape[-1] > 0:
        # We're treating all instances as one, so collapse the mask into one layer
        mask = (np.sum(mask, -1, keepdims=True) >= 1)
        splash = np.where(mask, image, gray).astype(np.uint8)
    else:
        splash = gray.astype(np.uint8)
    return splash


def detect_and_color_splash(model, image_path=None, video_path=None):
    assert image_path or video_path

    # Image or video?
    if image_path:
        # Run model detection and generate the color splash effect
        print("Running on {}".format(image_path))
        # Read image
        image = skimage.io.imread(image_path)
        # Detect objects
        r = model.detect([image], verbose=1)[0]
        # Color splash
        splash = color_splash(image, r['masks'])
        # Save output
        file_name = "splash_{:%Y%m%dT%H%M%S}.png".format(datetime.datetime.now())
        skimage.io.imsave(file_name, splash)
    elif video_path:
        import cv2
        # Video capture
        vcapture = cv2.VideoCapture(video_path)
        width = int(vcapture.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(vcapture.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = vcapture.get(cv2.CAP_PROP_FPS)

        # Define codec and create video writer
        file_name = "splash_{:%Y%m%dT%H%M%S}.avi".format(datetime.datetime.now())
        vwriter = cv2.VideoWriter(file_name,
                                  cv2.VideoWriter_fourcc(*'MJPG'),
                                  fps, (width, height))

        count = 0
        success = True
        while success:
            print("frame: ", count)
            # Read next image
            success, image = vcapture.read()
            if success:
                # OpenCV returns images as BGR, convert to RGB
                image = image[..., ::-1]
                # Detect objects
                r = model.detect([image], verbose=0)[0]
                # Color splash
                splash = color_splash(image, r['masks'])
                # RGB -> BGR to save image to video
                splash = splash[..., ::-1]
                # Add image to video writer
                vwriter.write(splash)
                count += 1
        vwriter.release()
    print("Saved to ", file_name)


############################################################
#  Training
############################################################

if __name__ == '__main__':
    import argparse

    # Parse command line arguments
    parser = argparse.ArgumentParser(
        description='Train Mask R-CNN to detect defects.')
    parser.add_argument("command",
                        metavar="<command>",
                        help="'train' or 'splash'")
    parser.add_argument('--dataset', required=False,
                        metavar="/path/to/defect/dataset/",
                        help='Directory of the defect dataset')
    parser.add_argument('--weights', required=True,
                        metavar="/path/to/weights.h5",
                        help="Path to weights .h5 file or 'coco'")
    parser.add_argument('--logs', required=False,
                        default=DEFAULT_LOGS_DIR,
                        metavar="/path/to/logs/",
                        help='Logs and checkpoints directory (default=logs/)')
    parser.add_argument('--image', required=False,
                        metavar="path or URL to image",
                        help='Image to apply the color splash effect on')
    parser.add_argument('--video', required=False,
                        metavar="path or URL to video",
                        help='Video to apply the color splash effect on')
    args = parser.parse_args()

    # Validate arguments
    if args.command == "train":
        assert args.dataset, "Argument --dataset is required for training"
    elif args.command == "splash":
        assert args.image or args.video,\
               "Provide --image or --video to apply color splash"

    print("Weights: ", args.weights)
    print("Dataset: ", args.dataset)
    print("Logs: ", args.logs)

    # Configurations
    if args.command == "train":
        config = DefectConfig()
    else:
        class InferenceConfig(DefectConfig):
            # Set batch size to 1 since we'll be running inference on
            # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
            GPU_COUNT = 1
            IMAGES_PER_GPU = 1
        config = InferenceConfig()
    config.display()

    # Create model
    if args.command == "train":
        model = modellib.MaskRCNN(mode="training", config=config,
                                  model_dir=args.logs)
    else:
        model = modellib.MaskRCNN(mode="inference", config=config,
                                  model_dir=args.logs)

    # Select weights file to load
    if args.weights.lower() == "coco":
        weights_path = COCO_WEIGHTS_PATH
        # Download weights file
        if not os.path.exists(weights_path):
            utils.download_trained_weights(weights_path)
    elif args.weights.lower() == "last":
        # Find last trained weights
        weights_path = model.find_last()
    elif args.weights.lower() == "imagenet":
        # Start from ImageNet trained weights
        weights_path = model.get_imagenet_weights()
    else:
        weights_path = args.weights

    # Load weights
    print("Loading weights ", weights_path)
    if args.weights.lower() == "coco":
        # Exclude the last layers because they require a matching
        # number of classes
        model.load_weights(weights_path, by_name=True, exclude=[
            "mrcnn_class_logits", "mrcnn_bbox_fc",
            "mrcnn_bbox", "mrcnn_mask"])
    else:
        model.load_weights(weights_path, by_name=True)

    # Train or evaluate
    if args.command == "train":
        train(model)
    elif args.command == "splash":
        detect_and_color_splash(model, image_path=args.image,
                                video_path=args.video)
    else:
        print("'{}' is not recognized. "
              "Use 'train' or 'splash'".format(args.command))

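For reference, typical invocations of this script look like the following (assuming the file is saved as defect.py; paths are placeholders):

# Train on the defect dataset, starting from COCO weights
python3 defect.py train --dataset=/path/to/defect/dataset --weights=coco

# Resume training from the last saved checkpoint
python3 defect.py train --dataset=/path/to/defect/dataset --weights=last

# Apply the color splash effect with trained weights
python3 defect.py splash --weights=/path/to/weights.h5 --image=/path/to/image.jpg
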
Reference solution 1

def load_student(self, dataset_dir, subset):
    self.add_class("student", 1, "student")
    self.add_class("student", 2, "bag")

    # Train or validation dataset?
    assert subset in ["train", "val"]
    dataset_dir = os.path.join(dataset_dir, subset)

    annotations = json.load(open(os.path.join(dataset_dir, "via_region_data.json")))
    annotations = list(annotations.values())  # don't need the dict keys

    # Skip unannotated images.
    annotations = [a for a in annotations if a['regions']]

    # Add images
    for a in annotations:
        # VIA 1.x dict format; for VIA 2.x, regions is a list and
        # .values() must be dropped.
        polygons = [r['shape_attributes'] for r in a['regions'].values()]
        objects = [s['region_attributes'] for s in a['regions'].values()]

        # Map the 'object_name' attribute of each region to a class ID.
        num_ids = []
        for n in objects:
            try:
                if n['object_name'] == 'student':
                    num_ids.append(1)
                elif n['object_name'] == 'bag':
                    num_ids.append(2)
            except KeyError:
                pass

        image_path = os.path.join(dataset_dir, a['filename'])
        image = skimage.io.imread(image_path)
        height, width = image.shape[:2]

        self.add_image(
            "student",
            image_id=a['filename'],  # use file name as a unique image id
            path=image_path,
            width=width, height=height,
            polygons=polygons,
            num_ids=num_ids)
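
Note that this solution silently skips regions whose region_attributes lack an 'object_name' key, which can leave num_ids shorter than polygons and misalign class IDs with masks. It also needs a matching load_mask() that returns info['num_ids'], as shown in reference solution 2 below.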

Reference solution 2

def load_multi_number(self, dataset_dir, subset):
    """Load a subset of the number dataset.
    dataset_dir: Root directory of the dataset.
    subset: Subset to load: train or val
    """
    # Add classes
    self.add_class("object", 1, "A")
    self.add_class("object", 2, "B")
    self.add_class("object", 3, "C")
    self.add_class("object", 4, "D")
    self.add_class("object", 5, "E")
    self.add_class("object", 6, "F")
    self.add_class("object", 7, "G")
    self.add_class("object", 8, "H")
    self.add_class("object", 9, "I")
    self.add_class("object", 10, "J")
    self.add_class("object", 11, "K")
    self.add_class("object", 12, "browl")

    # Train or validation dataset?
    assert subset in ["train", "val"]
    dataset_dir = os.path.join(dataset_dir, subset)

    annotations = json.load(open(os.path.join(dataset_dir, ".../train/via_region_data.json")))
    annotations = list(annotations.values())  # don't need the dict keys

    # The VIA tool saves images in the JSON even if they don't have any
    # annotations. Skip unannotated images.
    annotations = [a for a in annotations if a['regions']]

    # Add images
    for a in annotations:
        # Get the x, y coordinates of points of the polygons that make up
        # the outline of each object instance. These are stored in the
        # shape_attributes (VIA 1.x dict format; drop .values() for VIA 2.x).
        polygons = [r['shape_attributes'] for r in a['regions'].values()]
        objects = [s['region_attributes'] for s in a['regions'].values()]
        # Here the 'object' region attribute stores the class ID directly
        # as a string, so it can be cast to int.
        num_ids = [int(n['object']) for n in objects]

        # load_mask() needs the image size to convert polygons to masks.
        # Unfortunately, VIA doesn't include it in the JSON, so we must read
        # the image. This is only manageable since the dataset is tiny.
        image_path = os.path.join(dataset_dir, a['filename'])
        image = skimage.io.imread(image_path)
        height, width = image.shape[:2]

        self.add_image(
            "object",
            image_id=a['filename'],  # use file name as a unique image id
            path=image_path,
            width=width, height=height,
            polygons=polygons,
            num_ids=num_ids)


def load_mask(self, image_id):
    """Generate instance masks for an image.
    Returns:
    masks: A bool array of shape [height, width, instance count] with
        one mask per instance.
    class_ids: a 1D array of class IDs of the instance masks.
    """
    # If not a number dataset image, delegate to parent class.
    info = self.image_info[image_id]
    if info["source"] != "object":
        return super(self.__class__, self).load_mask(image_id)
    num_ids = info['num_ids']
    # Convert polygons to a bitmap mask of shape
    # [height, width, instance_count]
    mask = np.zeros([info["height"], info["width"], len(info["polygons"])],
                    dtype=np.uint8)

    for i, p in enumerate(info["polygons"]):
        # Get indexes of pixels inside the polygon and set them to 1
        rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])
        mask[rr, cc, i] = 1
    # Return the per-instance class IDs as a numpy array, and cast the
    # mask to bool as promised by the docstring.
    num_ids = np.array(num_ids, dtype=np.int32)
    return mask.astype(bool), num_ids

def image_reference(self, image_id):
    """Return the path of the image."""
    info = self.image_info[image_id]
    if info["source"] == "object":
        return info["path"]
    else:
        return super(self.__class__, self).image_reference(image_id)
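
Once a model is trained, model.detect() returns per-instance class IDs that can be mapped back to names in the same order as the add_class() calls. A minimal sketch for the defect model above, assuming a trained inference model and an RGB image loaded with skimage.io as in the splash code:

# Map detected class IDs back to defect names (sketch)
r = model.detect([image], verbose=0)[0]
class_names = ['BG', 'porosity', 'crack', 'porosity array',
               'lacking of sintering', 'surface hollow', 'surface scratch']
for class_id, score in zip(r['class_ids'], r['scores']):
    print(class_names[class_id], score)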

 
