【opencv-python: detecting the rectangular contour in an image and restoring its original proportions】

Use opencv-python to detect the rectangular contour in an image and warp it back to its original proportions.

First, grab the input image (here the frames come from a camera):

import cv2
import numpy as np

cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
# get() reads a capture property, set() changes it;
# property 3 is the frame width, property 4 is the frame height
cap.set(3, 640)
cap.set(4, 480)
while True:
    success, img = cap.read()
    imgContour = img.copy()     # copy for drawing the detected contour
    res = preprocess(img)       # preprocessing, defined below
    getContours(res)            # contour detection, defined below
    cv2.imshow("res", imgContour)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
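If you want to process a single image file instead of a camera feed, the same pipeline applies. A minimal sketch (the file name sample.jpg is just a placeholder; preprocess() and getContours() are the functions defined below):

import cv2

img = cv2.imread("sample.jpg")          # placeholder path, replace with your own file
if img is None:
    raise FileNotFoundError("sample.jpg not found")

imgContour = img.copy()                 # getContours() draws onto this copy
res = preprocess(img)
getContours(res)

cv2.imshow("res", imgContour)
cv2.waitKey(0)
cv2.destroyAllWindows()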

Preprocess the image: convert it to grayscale, apply a Gaussian blur, run Canny edge detection, then dilate and erode.

def preprocess(img):
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    imgBlur = cv2.GaussianBlur(imgGray, (3, 3), 0)
    imgCanny = cv2.Canny(imgBlur, 200, 200)
    kernel = np.ones((5, 5), np.uint8)                  # structuring element for dilate/erode
    imgdialation = cv2.dilate(imgCanny, kernel, iterations=1)
    imgerode = cv2.erode(imgdialation, kernel, iterations=1)
    # stackImages() (defined in the complete code below) shows every stage side by side
    res = stackImages(0.3, ([img, imgGray, imgBlur], [imgCanny, imgdialation, imgerode]))
    cv2.imshow("yy", res)
    return imgerode
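Dilation followed by erosion with the same kernel (and iterations=1 on both) is exactly a morphological closing, so the last two steps of preprocess() could equivalently be written in one call; a minimal sketch:

kernel = np.ones((5, 5), np.uint8)
# dilate then erode with the same kernel == morphological closing
imgClosed = cv2.morphologyEx(imgCanny, cv2.MORPH_CLOSE, kernel)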

Next, find the contours of the preprocessed image, keep only the shapes whose polygonal approximation has four corners, and pick the largest such rectangle. Contour/shape detection was covered in detail in an earlier post; here we only need to output the largest rectangle (a small sanity check on a synthetic image follows the function).


def getContours(img):
    # contours: a list with one entry per contour; each entry is an ndarray of shape
    # (x, 1, 2), where x is the number of points on that contour and the last
    # dimension holds the point's x and y coordinates.
    # hierarchy: an ndarray describing the nesting relationship of the contours
    # (one row of four indices per contour).
    contours, hierarchy = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    maxarea = 0
    bigcontour = np.array([])
    bigcnt = np.array([])
    for cnt in contours:
        # area enclosed by the contour
        area = cv2.contourArea(cnt)
        print(area)
        if area > 5000:
            # cv2.drawContours(imgContour, cnt, -1, (255, 0, 0), 3)
            # perimeter of the (closed) contour
            peri = cv2.arcLength(cnt, True)
            # print(peri)
            approx = cv2.approxPolyDP(cnt, 0.02 * peri, True)
            # number of corner points of the approximated polygon
            objCor = len(approx)
            x, y, w, h = cv2.boundingRect(approx)       # bounding box (not used further here)
            if objCor == 4 and area > maxarea:
                maxarea = area
                bigcnt = cnt
                bigcontour = approx
    # imgContour is the global copy created in the main loop
    cv2.drawContours(imgContour, bigcontour, -1, (255, 0, 0), 15)   # the four corner points
    cv2.drawContours(imgContour, bigcnt, -1, (255, 0, 0), 3)        # the full contour
    return bigcontour
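To convince yourself that the four-corner test behaves as expected, you can run the same findContours / approxPolyDP steps on a synthetic image containing a single white rectangle; a small self-contained sketch (all names here are only for illustration):

import cv2
import numpy as np

# one filled white rectangle on a black canvas
canvas = np.zeros((400, 400), np.uint8)
cv2.rectangle(canvas, (50, 80), (350, 300), 255, -1)

contours, hierarchy = cv2.findContours(canvas, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
peri = cv2.arcLength(contours[0], True)
approx = cv2.approxPolyDP(contours[0], 0.02 * peri, True)
print(len(approx))   # 4 for a clean rectangle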

Then warp the largest rectangle with a perspective transform to restore its original proportions:

width, height = 200, 300
# the four corners of the rectangle in the source image,
# ordered top-left, top-right, bottom-left, bottom-right
pts1 = np.float32([[651, 511], [749, 535], [614, 594], [717, 621]])
# the corresponding corners of the output image
pts2 = np.float32([[0, 0], [width, 0], [0, height], [width, height]])

matrix = cv2.getPerspectiveTransform(pts1, pts2)
imgput = cv2.warpPerspective(img, matrix, (width, height))

cv2.imshow("majiang", imgput)
cv2.waitKey(0)
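Here the output size (200 x 300) and the four source points are hard-coded, so the original proportions are only restored if you already know them. To keep the aspect ratio of the detected rectangle itself, the output size can instead be derived from the distances between the ordered corners; a sketch, assuming pts1 is already ordered top-left, top-right, bottom-left, bottom-right (the order produced by reorder() below):

tl, tr, bl, br = pts1
# take the longer of the two opposite edges for each dimension
width  = int(max(np.linalg.norm(tr - tl), np.linalg.norm(br - bl)))
height = int(max(np.linalg.norm(bl - tl), np.linalg.norm(br - tr)))

pts2 = np.float32([[0, 0], [width, 0], [0, height], [width, height]])
matrix = cv2.getPerspectiveTransform(pts1, pts2)
imgput = cv2.warpPerspective(img, matrix, (width, height))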

(Figure 1)

Note that with a live video feed the corner coordinates of the detected rectangle cannot simply be hard-coded, and the order in which the four corners come back varies from frame to frame, so they first have to be put into a fixed order:

def reorder(myPoints):
    myPoints = myPoints.reshape((4, 2))
    myPointsNew = np.zeros((4, 1, 2), np.int32)
    add = myPoints.sum(1)
    # print("add", add)
    myPointsNew[0] = myPoints[np.argmin(add)]   # top-left: smallest x + y
    myPointsNew[3] = myPoints[np.argmax(add)]   # bottom-right: largest x + y
    diff = np.diff(myPoints, axis=1)            # y - x for each point
    myPointsNew[1] = myPoints[np.argmin(diff)]  # top-right: smallest y - x
    myPointsNew[2] = myPoints[np.argmax(diff)]  # bottom-left: largest y - x
    # print("NewPoints", myPointsNew)
    return myPointsNew
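The sum of a point's coordinates is smallest at the top-left corner and largest at the bottom-right, while the difference y - x is smallest at the top-right and largest at the bottom-left, so the output order matches the destination points pts2 used for the warp. A quick illustrative call with the corner values from earlier, fed in an arbitrary order:

pts = np.array([[[749, 535]], [[614, 594]], [[651, 511]], [[717, 621]]])
print(reorder(pts).reshape(4, 2))
# [[651 511]    top-left     (smallest x + y)
#  [749 535]    top-right    (smallest y - x)
#  [614 594]    bottom-left  (largest  y - x)
#  [717 621]]   bottom-right (largest  x + y)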

The result looks like this:
(Figure 2)

The complete code:

import cv2
import numpy as np


###################################
widthImg=540
heightImg =640
#####################################

cap = cv2.VideoCapture(1)
cap.set(10, 150)    # property 10 is the brightness


def preProcessing(img):
    imgGray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    imgBlur = cv2.GaussianBlur(imgGray,(5,5),1)
    imgCanny = cv2.Canny(imgBlur,200,200)
    kernel = np.ones((5,5))
    imgDial = cv2.dilate(imgCanny,kernel,iterations=2)
    imgThres = cv2.erode(imgDial,kernel,iterations=1)
    return imgThres

def getContours(img):
    biggest = np.array([])
    maxArea = 0
    contours,hierarchy = cv2.findContours(img,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area>5000:
            #cv2.drawContours(imgContour, cnt, -1, (255, 0, 0), 3)
            peri = cv2.arcLength(cnt,True)
            approx = cv2.approxPolyDP(cnt,0.02*peri,True)
            if area >maxArea and len(approx) == 4:
                biggest = approx
                maxArea = area
    cv2.drawContours(imgContour, biggest, -1, (255, 0, 0), 20)
    return biggest

def reorder (myPoints):
    myPoints = myPoints.reshape((4,2))
    myPointsNew = np.zeros((4,1,2),np.int32)
    add = myPoints.sum(1)
    #print("add", add)
    myPointsNew[0] = myPoints[np.argmin(add)]
    myPointsNew[3] = myPoints[np.argmax(add)]
    diff = np.diff(myPoints,axis=1)
    myPointsNew[1]= myPoints[np.argmin(diff)]
    myPointsNew[2] = myPoints[np.argmax(diff)]
    #print("NewPoints",myPointsNew)
    return myPointsNew

def getWarp(img,biggest):
    biggest = reorder(biggest)
    pts1 = np.float32(biggest)
    pts2 = np.float32([[0, 0], [widthImg, 0], [0, heightImg], [widthImg, heightImg]])
    matrix = cv2.getPerspectiveTransform(pts1, pts2)
    imgOutput = cv2.warpPerspective(img, matrix, (widthImg, heightImg))

    imgCropped = imgOutput[20:imgOutput.shape[0]-20,20:imgOutput.shape[1]-20]
    imgCropped = cv2.resize(imgCropped,(widthImg,heightImg))

    return imgCropped


def stackImages(scale,imgArray):
    rows = len(imgArray)
    cols = len(imgArray[0])
    rowsAvailable = isinstance(imgArray[0], list)
    width = imgArray[0][0].shape[1]
    height = imgArray[0][0].shape[0]
    if rowsAvailable:
        for x in range ( 0, rows):
            for y in range(0, cols):
                if imgArray[x][y].shape[:2] == imgArray[0][0].shape [:2]:
                    imgArray[x][y] = cv2.resize(imgArray[x][y], (0, 0), None, scale, scale)
                else:
                    imgArray[x][y] = cv2.resize(imgArray[x][y], (imgArray[0][0].shape[1], imgArray[0][0].shape[0]), None, scale, scale)
                if len(imgArray[x][y].shape) == 2: imgArray[x][y]= cv2.cvtColor( imgArray[x][y], cv2.COLOR_GRAY2BGR)
        imageBlank = np.zeros((height, width, 3), np.uint8)
        hor = [imageBlank]*rows
        hor_con = [imageBlank]*rows
        for x in range(0, rows):
            hor[x] = np.hstack(imgArray[x])
        ver = np.vstack(hor)
    else:
        for x in range(0, rows):
            if imgArray[x].shape[:2] == imgArray[0].shape[:2]:
                imgArray[x] = cv2.resize(imgArray[x], (0, 0), None, scale, scale)
            else:
                imgArray[x] = cv2.resize(imgArray[x], (imgArray[0].shape[1], imgArray[0].shape[0]), None,scale, scale)
            if len(imgArray[x].shape) == 2: imgArray[x] = cv2.cvtColor(imgArray[x], cv2.COLOR_GRAY2BGR)
        hor= np.hstack(imgArray)
        ver = hor
    return ver

while True:
    success, img = cap.read()
    img = cv2.resize(img,(widthImg,heightImg))
    imgContour = img.copy()

    imgThres = preProcessing(img)
    biggest = getContours(imgThres)
    if biggest.size !=0:
        imgWarped=getWarp(img,biggest)
        # imageArray = ([img,imgThres],
        #           [imgContour,imgWarped])
        imageArray = ([imgContour, imgWarped])
        cv2.imshow("ImageWarped", imgWarped)
    else:
        # imageArray = ([img, imgThres],
        #               [img, img])
        imageArray = ([imgContour, img])

    stackedImages = stackImages(0.6,imageArray)
    cv2.imshow("WorkFlow", stackedImages)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
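One compatibility note: the code above assumes OpenCV 4.x, where cv2.findContours returns two values; under OpenCV 3.x it returns three (image, contours, hierarchy). A version-agnostic call can simply take the last two return values:

# works under both OpenCV 3.x (3 return values) and 4.x (2 return values)
contours, hierarchy = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2:]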
