OpenCV+Python: Detecting a Specified Object

This article introduces an object detection method based on HoG + Pyramids + Sliding Windows + SVM.

Basic Pipeline

(1) Decide the smallest object you want to detect, and scale the original image img by (sliding-window size / minimum object size).
(2) Build an image pyramid from the scaled image.
(3) At every pyramid level, extract patches with a sliding window (similar in spirit to an ROI), normalize each patch, and feed it to a trained object detector; record the position and score of every window that fires. (Features: HoG; classifier: SVM.)
(4) Map each detected window back to its position in the original image img, keeping its score unchanged.
(5) Apply NMS to the overlapping windows. A sketch that strings these steps together follows; the helpers it uses are developed in the sections below.
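In the sketch, pyramid, sliding_window, and non_max_suppression_fast are the helpers defined later in this article, and detector is a hypothetical object with a predict(patch) method returning (label, score), standing in for the trained HoG+SVM classifier:

import numpy as np

def detect_objects(img, detector, win_size=(64, 128), step=16,
                   scale=1.5, score_thresh=0.5, overlap_thresh=0.5):
    boxes = []
    # steps (2)-(3): scan every pyramid layer with a sliding window
    for layer in pyramid(img, scale, minSize=win_size):
        # how much this layer was shrunk relative to the original image
        factor = img.shape[1] / float(layer.shape[1])
        for (x, y, patch) in sliding_window(layer, step, win_size):
            if patch.shape[0] != win_size[1] or patch.shape[1] != win_size[0]:
                continue  # skip truncated patches at the image border
            # hypothetical API; HoG extraction happens inside the detector
            label, score = detector.predict(patch)
            if label == 1 and score > score_thresh:
                # step (4): map the window back to original-image coordinates
                boxes.append([x * factor, y * factor,
                              (x + win_size[0]) * factor,
                              (y + win_size[1]) * factor, score])
    if not boxes:
        return []
    # step (5): suppress overlapping windows
    return non_max_suppression_fast(np.array(boxes), overlap_thresh)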

HoG

Histogram of Oriented Gradients (HoG) is one member of the family of feature-descriptor algorithms, alongside SIFT, SURF, and ORB.
It characterizes a local image region by the statistics of the gradient orientations inside that region.

Basic steps
  1. Convert to grayscale. For a color image, each pixel's gradient magnitude is the maximum over the three channels, and its gradient orientation comes from the channel with the largest magnitude.
  2. Normalize. Apply gamma correction to the image, typically by taking the square root of each pixel's intensity.
  3. Compute the gradient at every pixel. The orientation is usually quantized into 9 bins: 20°/bin for unsigned gradients (0~180°), 40°/bin for signed gradients (0~360°).
  4. Build a gradient-orientation histogram for each cell. A cell is typically 8*8 pixels, producing 9 feature values, one per bin.
  5. Build a histogram for each block. A block is typically 2*2 cells, producing a 36-dimensional vector that is then normalized.
  6. Assemble the HoG feature vector. With n blocks in total, concatenate them into a 36*n-dimensional vector (checked numerically in the sketch below).
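As a quick check of the arithmetic above, OpenCV's built-in cv2.HOGDescriptor uses exactly these defaults (a 64×128 window, 16×16 blocks with an 8×8 stride, 8×8 cells, 9 bins), giving 7×15 = 105 block positions and a 105×36 = 3780-dimensional descriptor:

import cv2
import numpy as np

hog = cv2.HOGDescriptor()  # default 64x128 detection window
patch = np.zeros((128, 64), dtype=np.uint8)  # dummy grayscale patch of window size
features = hog.compute(patch)
print(features.size)  # 3780 = 105 blocks * 4 cells * 9 bins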
Limitations of HoG
  1. By itself it does not tell you where in the image the object is.
  2. By itself it does not tell you the object's scale.

Image Pyramids

Image pyramids address the problem of scale.
The basic steps of an image pyramid in OpenCV:

  1. Resize the image by a scale factor.
  2. Smooth the image.

cv2.pyrDown performs both steps at once; the code below resizes with cv2.resize directly and skips the explicit smoothing.
import cv2


def resize(img, scaleFactor):
    # shrink the image by 1/scaleFactor; INTER_AREA works well for downsampling
    w = int(img.shape[1] / scaleFactor)
    h = int(img.shape[0] / scaleFactor)
    return cv2.resize(img, (w, h), interpolation=cv2.INTER_AREA)


def pyramid(image, scale=1.5, minSize=(200, 80)):
    # yield the original image first, then successively smaller layers
    yield image

    while True:
        image = resize(image, scale)
        # stop when the layer becomes smaller than the minimum (width, height)
        if image.shape[0] < minSize[1] or image.shape[1] < minSize[0]:
            break

        yield image
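A quick check of the generator ("image.jpg" is a placeholder for your own test image):

img = cv2.imread("image.jpg")
for i, layer in enumerate(pyramid(img, scale=1.5, minSize=(200, 80))):
    print(i, layer.shape)  # each layer is ~1.5x smaller than the previous one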

Sliding Window

Scanning different regions of the image with a sliding window solves the problem of where the object is located.

def sliding_window(image, step, window_size):
    # scan the image left-to-right, top-to-bottom in increments of `step` pixels
    for y in range(0, image.shape[0], step):
        for x in range(0, image.shape[1], step):
            # window_size is (width, height); patches at the right/bottom
            # border may come out smaller and are usually filtered by the caller
            yield (x, y, image[y:y + window_size[1], x:x + window_size[0]])
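Combining the two generators scans every position at every scale. A sketch that only counts the candidate windows (step and window size are illustrative values; img is the image loaded in the pyramid example above):

count = 0
for layer in pyramid(img, scale=1.5, minSize=(64, 128)):
    for (x, y, patch) in sliding_window(layer, step=16, window_size=(64, 128)):
        # only full-size patches would be passed to the classifier
        if patch.shape[0] == 128 and patch.shape[1] == 64:
            count += 1
print(count, "windows to classify")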

Non-Maximum Suppression

NMS1
NMS addresses how to keep only the highest-confidence box among a set of overlapping detections.

import numpy as np


# Malisiewicz et al.
# Python port by Adrian Rosebrock
def non_max_suppression_fast(boxes, overlapThresh):
    # if there are no boxes, return an empty list
    if len(boxes) == 0:
        return []

    # if the bounding boxes are integers, convert them to floats --
    # this is important since we'll be doing a bunch of divisions
    if boxes.dtype.kind == "i":
        boxes = boxes.astype("float")

    # initialize the list of picked indexes 
    pick = []

    # grab the coordinates of the bounding boxes
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]
    scores = boxes[:, 4]
    # compute the area of the bounding boxes and sort the indexes by
    # score, ascending, so the highest-scoring box sits at the end of
    # the list and is picked first
    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    idxs = np.argsort(scores)

    # keep looping while some indexes still remain in the indexes
    # list
    while len(idxs) > 0:
        # grab the last index in the indexes list and add the
        # index value to the list of picked indexes
        last = len(idxs) - 1
        i = idxs[last]
        pick.append(i)

        # find the largest (x, y) coordinates for the start of
        # the bounding box and the smallest (x, y) coordinates
        # for the end of the bounding box
        xx1 = np.maximum(x1[i], x1[idxs[:last]])
        yy1 = np.maximum(y1[i], y1[idxs[:last]])
        xx2 = np.minimum(x2[i], x2[idxs[:last]])
        yy2 = np.minimum(y2[i], y2[idxs[:last]])

        # compute the width and height of the bounding box
        w = np.maximum(0, xx2 - xx1 + 1)
        h = np.maximum(0, yy2 - yy1 + 1)

        # compute the ratio of overlap
        overlap = (w * h) / area[idxs[:last]]

        # delete the picked index and all indexes whose overlap with
        # the picked box exceeds the threshold
        idxs = np.delete(idxs, np.concatenate(([last],
                                               np.where(overlap > overlapThresh)[0])))

    # return only the bounding boxes that were picked, cast back to
    # the integer data type
    return boxes[pick].astype("int")
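A small sanity check with made-up boxes in [x1, y1, x2, y2, score] format:

boxes = np.array([
    [10, 10, 60, 60, 0.9],      # highest score
    [12, 12, 62, 62, 0.8],      # overlaps the first box heavily -> suppressed
    [100, 100, 150, 150, 0.7],  # disjoint -> kept
])
print(non_max_suppression_fast(boxes, overlapThresh=0.5))
# [[ 10  10  60  60   0]
#  [100 100 150 150   0]]  (scores are truncated by the final integer cast)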

SVM

Key idea: find the points that lie closest to the separating hyperplane and make their distance to the hyperplane as large as possible. The distance from a point to the separating hyperplane is called the margin, and we want this margin to be as large as possible. The support vectors are precisely those points closest to the separating hyperplane; the goal is to maximize the distance from the support vectors to the hyperplane.
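In standard textbook form (supplied here for completeness, not taken from the original post), a hard-margin SVM maximizes the margin 2/‖w‖ by solving

    \min_{w,\,b}\ \tfrac{1}{2}\lVert w \rVert^2
    \quad \text{subject to} \quad y_i\,(w \cdot x_i + b) \ge 1,\quad i = 1, \dots, n

where the training points that satisfy the constraint with equality are exactly the support vectors.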

Examples

Pedestrian Detection

import cv2


def draw_person(image, person):
    # person is an (x, y, w, h) rectangle as returned by detectMultiScale
    x, y, w, h = person
    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)


img = cv2.imread("people1.jpg")
hog = cv2.HOGDescriptor()
# load the coefficients of OpenCV's built-in pedestrian detector (a linear SVM)
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
rects, weights = hog.detectMultiScale(img)
for person in rects:
    draw_person(img, person)
cv2.imshow("people detection", img)
cv2.waitKey(0)
cv2.destroyAllWindows()

"""
        detectMultiScale(img[, hitThreshold[, winStride[, padding[, scale[, finalThreshold[, useMeanshiftGrouping]]]]]]) -> foundLocations, foundWeights
        .   @brief Detects objects of different sizes in the input image. The detected objects are returned as a list
        .   of rectangles.
        .   @param img Matrix of the type CV_8U or CV_8UC3 containing an image where objects are detected.
        .   @param foundLocations Vector of rectangles where each rectangle contains the detected object.
        .   @param foundWeights Vector that will contain confidence values for each detected object.
        .   @param hitThreshold Threshold for the distance between features and SVM classifying plane.
        .   Usually it is 0 and should be specfied in the detector coefficients (as the last free coefficient).
        .   But if the free coefficient is omitted (which is allowed), you can specify it manually here.
        .   @param winStride Window stride. It must be a multiple of block stride. 步长
        .   @param padding Padding 填充
        .   @param scale Coefficient of the detection window increase.
        .   @param finalThreshold Final threshold
        .   @param useMeanshiftGrouping indicates grouping algorithm
"""
[Figure: pedestrian detection result]

Detecting Generic Objects


import cv2
import numpy as np

datapath = "TrainImages/"


def path(cls, i):
    # path of the i-th training image for the class prefix `cls` ("pos-"/"neg-")
    return "%s/%s%d.pgm" % (datapath, cls, i + 1)


def extract_sift(fn):
    # raw SIFT descriptors of one image, used to build the vocabulary
    im = cv2.imread(fn, 0)
    return extract.compute(im, detect.detect(im))[1]


def bow_features(fn):
    # bag-of-words descriptor of one image against the trained vocabulary
    im = cv2.imread(fn, 0)
    return extract_bow.compute(im, detect.detect(im))


def predict(fn):
    # classify one image and print its label (1 = positive, -1 = negative)
    f = bow_features(fn)
    p = svm.predict(f)
    print(fn, "\t", p[1][0][0])
    return p


pos, neg = "pos-", "neg-"
# create the SIFT keypoint detector and descriptor extractor
# (cv2.xfeatures2d requires opencv-contrib; newer releases expose cv2.SIFT_create)
detect = cv2.xfeatures2d.SIFT_create()
extract = cv2.xfeatures2d.SIFT_create()

# create a FLANN-based matcher
flann_params = dict(algorithm=1, trees=5)
matcher = cv2.FlannBasedMatcher(flann_params, {})
# create the BOW k-means trainer with a 40-word vocabulary
bow_kmeans_trainer = cv2.BOWKMeansTrainer(40)
# create the bag-of-words descriptor extractor
extract_bow = cv2.BOWImgDescriptorExtractor(extract, matcher)
# feed positive and negative samples to the vocabulary trainer
for i in range(8):
    bow_kmeans_trainer.add(extract_sift(path(pos, i)))
    bow_kmeans_trainer.add(extract_sift(path(neg, i)))
# cluster() runs k-means and returns the vocabulary, which is then
# assigned to extract_bow so it can compute BOW descriptors
voc = bow_kmeans_trainer.cluster()
extract_bow.setVocabulary(voc)
# build the training data and the positive/negative labels for the SVM
traindata, trainlabels = [], []
for i in range(20):
    traindata.extend(bow_features(path(pos, i)))
    trainlabels.append(1)
    traindata.extend(bow_features(path(neg, i)))
    trainlabels.append(-1)
# create and train an SVM model
svm = cv2.ml.SVM_create()
svm.train(np.array(traindata), cv2.ml.ROW_SAMPLE, np.array(trainlabels))

# test two images (the file names below have no extensions in the original;
# adjust them to your actual test files)
car, notcar = "car1", "car2"
car_img = cv2.imread(car)
notcar_img = cv2.imread(notcar)
car_predict = predict(car)
not_car_predict = predict(notcar)

font = cv2.FONT_HERSHEY_SIMPLEX

if car_predict[1][0][0] == 1.0:
    cv2.putText(car_img, 'Car Detected', (10, 30), font, 1, (0, 255, 0), 2, cv2.LINE_AA)

if not_car_predict[1][0][0] == -1.0:
    cv2.putText(notcar_img, 'Car Not Detected', (10, 30), font, 1, (0, 0, 255), 2, cv2.LINE_AA)

cv2.imshow('BOW + SVM Success', car_img)
cv2.imshow('BOW + SVM Failure', notcar_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
