Detectron2 Getting Started Example

Following the official documentation, run the example script.

Run it inside Jupyter.


"""
# Install detectron2
"""

# install dependencies: (use cu101 because colab has CUDA 10.1)

!pip install -U torch==1.5 torchvision==0.6 -f https://download.pytorch.org/whl/cu101/torch_stable.html

!pip install cython pyyaml==5.1

!pip install -U 'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI'

import torch, torchvision

print(torch.__version__, torch.cuda.is_available())

!gcc --version

# opencv is pre-installed on colab

# install detectron2:

!pip install detectron2==0.1.2 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu101/index.html

# You may need to restart your runtime prior to this, to let your installation take effect

# Some basic setup:

# Setup detectron2 logger

import detectron2

from detectron2.utils.logger import setup_logger

setup_logger()

# import some common libraries

import numpy as np

import cv2

import random

from google.colab.patches import cv2_imshow

# import some common detectron2 utilities

from detectron2 import model_zoo

from detectron2.engine import DefaultPredictor

from detectron2.config import get_cfg

from detectron2.utils.visualizer import Visualizer

from detectron2.data import MetadataCatalog

"""

# Run a pre-trained detectron2 model

We first download an image from the COCO dataset:

"""

!wget http://images.cocodataset.org/val2017/000000439715.jpg -O input.jpg

im = cv2.imread("./input.jpg")

cv2_imshow(im)

"""Then, we create a detectron2 config and a detectron2 `DefaultPredictor` to run inference on this image."""


cfg = get_cfg()

# add project-specific config (e.g., TensorMask) here if you're not running a model in detectron2's core library

cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))

cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5  # set threshold for this model

# Find a model from detectron2's model zoo. You can use the https://dl.fbaipublicfiles... url as well

cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")

predictor = DefaultPredictor(cfg)

outputs = predictor(im)

# look at the outputs. See https://detectron2.readthedocs.io/tutorials/models.html#model-output-format for specification

outputs["instances"].pred_classes

outputs["instances"].pred_boxes

# We can use `Visualizer` to draw the predictions on the image.

v = Visualizer(im[:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.2)

v = v.draw_instance_predictions(outputs["instances"].to("cpu"))

cv2_imshow(v.get_image()[:, :, ::-1])

"""

# Train on a custom dataset

In this section, we show how to train an existing detectron2 model on a custom dataset in a new format.

We use [the balloon segmentation dataset](https://github.com/matterport/Mask_RCNN/tree/master/samples/balloon), which has only one class: balloon.

We'll train a balloon segmentation model from an existing model pre-trained on the COCO dataset, available in detectron2's model zoo.

Note that the COCO dataset does not have the "balloon" category. We'll be able to recognize this new class in a few minutes.

## Prepare the dataset

"""

# download, decompress the data

!wget https://github.com/matterport/Mask_RCNN/releases/download/v2.1/balloon_dataset.zip

!unzip balloon_dataset.zip > /dev/null

"""Register the balloon dataset to detectron2, following the [detectron2 custom dataset tutorial](https://detectron2.readthedocs.io/tutorials/datasets.html).

Here, the dataset is in its custom format, therefore we write a function to parse it and prepare it into detectron2's standard format. See the tutorial for more details.

"""

# if your dataset is in COCO format, this cell can be replaced by the following three lines:

# from detectron2.data.datasets import register_coco_instances

# register_coco_instances("my_dataset_train", {}, "json_annotation_train.json", "path/to/image/dir")

# register_coco_instances("my_dataset_val", {}, "json_annotation_val.json", "path/to/image/dir")

import os

import numpy as np

import json

from detectron2.structures import BoxMode

def get_balloon_dicts(img_dir):
    json_file = os.path.join(img_dir, "via_region_data.json")
    with open(json_file) as f:
        imgs_anns = json.load(f)

    dataset_dicts = []
    for idx, v in enumerate(imgs_anns.values()):
        record = {}

        filename = os.path.join(img_dir, v["filename"])
        height, width = cv2.imread(filename).shape[:2]

        record["file_name"] = filename
        record["image_id"] = idx
        record["height"] = height
        record["width"] = width

        annos = v["regions"]
        objs = []
        for _, anno in annos.items():
            assert not anno["region_attributes"]
            anno = anno["shape_attributes"]
            px = anno["all_points_x"]
            py = anno["all_points_y"]
            poly = [(x + 0.5, y + 0.5) for x, y in zip(px, py)]
            poly = [p for x in poly for p in x]

            obj = {
                "bbox": [np.min(px), np.min(py), np.max(px), np.max(py)],
                "bbox_mode": BoxMode.XYXY_ABS,
                "segmentation": [poly],
                "category_id": 0,
                "iscrowd": 0
            }
            objs.append(obj)
        record["annotations"] = objs
        dataset_dicts.append(record)
    return dataset_dicts

from detectron2.data import DatasetCatalog, MetadataCatalog

for d in ["train", "val"]:
    DatasetCatalog.register("balloon_" + d, lambda d=d: get_balloon_dicts("balloon/" + d))
    MetadataCatalog.get("balloon_" + d).set(thing_classes=["balloon"])

balloon_metadata = MetadataCatalog.get("balloon_train")

"""To verify the data loading is correct, let's visualize the annotations of randomly selected samples in the training set:"""

dataset_dicts = get_balloon_dicts("balloon/train")

for din random.sample(dataset_dicts,3):

img = cv2.imread(d["file_name"])

visualizer = Visualizer(img[:, :, ::-1],metadata=balloon_metadata,scale=0.5)

vis = visualizer.draw_dataset_dict(d)

cv2_imshow(vis.get_image()[:, :, ::-1])

"""

*(Images: ground-truth annotations drawn on three randomly selected training samples.)*

## Train!

Now, let's fine-tune a COCO-pretrained R50-FPN Mask R-CNN model on the balloon dataset. It takes ~6 minutes to train 300 iterations on Colab's K80 GPU, or ~2 minutes on a P100 GPU.

"""

from detectron2.engine import DefaultTrainer

from detectron2.config import get_cfg

cfg = get_cfg()

cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))

cfg.DATASETS.TRAIN = ("balloon_train",)

cfg.DATASETS.TEST = ()

cfg.DATALOADER.NUM_WORKERS = 2

cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")  # Let training initialize from model zoo

cfg.SOLVER.IMS_PER_BATCH = 2

cfg.SOLVER.BASE_LR = 0.00025  # pick a good LR

cfg.SOLVER.MAX_ITER = 300    # 300 iterations seems good enough for this toy dataset; you may need to train longer for a practical dataset

cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128   # faster, and good enough for this toy dataset (default: 512)

cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1  # only has one class (balloon)

os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)

trainer = DefaultTrainer(cfg)

trainer.resume_or_load(resume=False)

trainer.train()

# Commented out IPython magic to ensure Python compatibility.

# Look at training curves in tensorboard:

# %load_ext tensorboard

# %tensorboard --logdir output

"""

## Inference & evaluation using the trained model

Now, let's run inference with the trained model on the balloon validation dataset. First, let's create a predictor using the model we just trained:

"""

cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")

cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7   # set the testing threshold for this model

cfg.DATASETS.TEST = ("balloon_val", )

predictor = DefaultPredictor(cfg)

"""Then, we randomly select several samples to visualize the prediction results."""

from detectron2.utils.visualizer import ColorMode

dataset_dicts = get_balloon_dicts("balloon/val")

for din random.sample(dataset_dicts,3):

im = cv2.imread(d["file_name"])

outputs = predictor(im)

v = Visualizer(im[:, :, ::-1],

metadata=balloon_metadata,

scale=0.8,

instance_mode=ColorMode.IMAGE_BW# remove the colors of unsegmented pixels

    )

v = v.draw_instance_predictions(outputs["instances"].to("cpu"))

cv2_imshow(v.get_image()[:, :, ::-1])

*(Images: prediction results visualized on three randomly selected validation samples.)*

"""We can also evaluate its performance using AP metric implemented in COCO API.

This gives an AP of ~70%. Not bad!

"""

from detectron2.evaluation import COCOEvaluator, inference_on_dataset

from detectron2.data import build_detection_test_loader

evaluator = COCOEvaluator("balloon_val", cfg, False, output_dir="./output/")

val_loader = build_detection_test_loader(cfg, "balloon_val")

inference_on_dataset(trainer.model, val_loader, evaluator)

# another equivalent way is to use trainer.test
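# A minimal sketch of that alternative (using the evaluator created above):
# DefaultTrainer.test(cfg, trainer.model, evaluators=[evaluator])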

"""

# Other types of builtin models

"""

# Inference with a keypoint detection model

cfg = get_cfg()

cfg.merge_from_file(model_zoo.get_config_file("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml"))

cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7  # set threshold for this model

cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml")

predictor = DefaultPredictor(cfg)

outputs = predictor(im)

v = Visualizer(im[:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.2)

v = v.draw_instance_predictions(outputs["instances"].to("cpu"))

cv2_imshow(v.get_image()[:, :, ::-1])

# Inference with a panoptic segmentation model

cfg = get_cfg()

cfg.merge_from_file(model_zoo.get_config_file("COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml"))

cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml")

predictor = DefaultPredictor(cfg)

panoptic_seg, segments_info = predictor(im)["panoptic_seg"]

v = Visualizer(im[:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.2)

v = v.draw_panoptic_seg_predictions(panoptic_seg.to("cpu"), segments_info)

cv2_imshow(v.get_image()[:, :, ::-1])
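# Per the model-output-format docs, panoptic_seg is an (H, W) tensor of segment
# ids and segments_info is a list of dicts (keys include "id", "isthing",
# "category_id"). A quick check:
print(panoptic_seg.shape, segments_info[0])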

"""

# Run panoptic segmentation on a video

(Author's note: this part could not be run due to a network issue.)

"""

# This is the video we're going to process

from IPython.display import YouTubeVideo, display

video = YouTubeVideo("ll8TgCZ0plk", width=500)

display(video)

# Install dependencies, download the video, and crop 5 seconds for processing

!pip install youtube-dl

!pip uninstall -y opencv-python opencv-contrib-python

!apt install python3-opencv  # the pre-installed one has some issues

!youtube-dl https://www.youtube.com/watch?v=ll8TgCZ0plk -f 22 -o video.mp4

!ffmpeg -i video.mp4 -t 00:00:06 -c:v copy video-clip.mp4

# Run frame-by-frame inference demo on this video (takes 3-4 minutes)

# Using a model trained on COCO dataset

!git clone https://github.com/facebookresearch/detectron2

!python detectron2/demo/demo.py --config-file detectron2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml --video-input video-clip.mp4 --confidence-threshold 0.6 --output video-output.mkv \
  --opts MODEL.WEIGHTS detectron2://COCO-PanopticSegmentation/panoptic_fpn_R_101_3x/139514519/model_final_cafdb1.pkl

# Download the results

from google.colab import files

files.download('video-output.mkv')
