2021SC@SDUSC
通过对于该项目的分析以及相关博客文章的阅读与理解,小组成员暂时确定核心代码在于HyperLPR-master文件夹下的HyperLPRLite.py文件的函数SimpleRecognizePlateByE2E()、hyperlpr_py3文件夹下的pipline.py文件的函数SimpleRecognizePlateByE2E()和函数SimpleRecognizePlate(),我们将对这几个函数进行分析与对比出有利的数据,以为后面的分析能做出更有效的调整。
我将与其中一个小组成员共同分析SimpleRecognizePlate()函数,并对底层进行探索。SimpleRecognizePlate()函数含义为简易车牌识别函数,主要用于简单地识别车牌
#coding=utf-8
from . import detect
from . import finemapping as fm
from . import segmentation
import cv2
import time
import numpy as np
from PIL import ImageFont
from PIL import Image
from PIL import ImageDraw
import json
import sys
from . import typeDistinguish as td
import imp
def SimpleRecognizePlate(image):
    """Detect and recognize licence plates in *image*.

    Pipeline: rough cascade detection -> plate-type prediction ->
    contour-based fine mapping -> vertical fine mapping -> e2e
    recognition -> sliding-window character segmentation/recognition.

    Args:
        image: colour image (numpy array) possibly containing plates.

    Returns:
        (image, res_set): the input image, annotated in place with boxes
        and character blocks for confident detections, and the list of
        plate strings whose confidence passed the 0.7 threshold.

    NOTE(review): ``fv``, ``cache``, ``e2e`` and ``drawRectBox`` are not
    imported in the visible part of this file; in the original project
    they come from ``finemapping_vertical``, ``cache``, ``e2e`` and a
    drawing helper — confirm those imports exist in the full module.
    """
    t0 = time.time()
    # Each rough candidate is a (plate_img, rect, origin_plate) triple.
    images = detect.detectPlateRough(
        image, image.shape[0], top_bottom_padding_rate=0.1)
    res_set = []
    for j, candidate in enumerate(images):
        plate, rect, origin_plate = candidate
        # Normalize the candidate size for the classifier / fine mapping.
        plate = cv2.resize(plate, (136, 36 * 2))
        t1 = time.time()
        ptype = td.SimplePredict(plate)
        # Types 1-4 presumably have inverted polarity — flip so the
        # characters end up light on dark (TODO confirm type semantics).
        if 0 < ptype < 5:
            plate = cv2.bitwise_not(plate)
        image_rgb = fm.findContoursAndDrawBoundingBox(plate)
        image_rgb = fv.finemappingVertical(image_rgb)
        cache.verticalMappingToFolder(image_rgb)
        print("e2e:", e2e.recognizeOne(image_rgb))
        image_gray = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2GRAY)
        cv2.imshow("image_gray", image_gray)
        cv2.imwrite("./" + str(j) + ".jpg", image_gray)
        print("校正", time.time() - t1, "s")
        t2 = time.time()
        val = segmentation.slidingWindowsEval(image_gray)
        print("分割和识别", time.time() - t2, "s")
        if len(val) == 3:
            blocks, res, confidence = val
            if confidence / 7 > 0.7:
                image = drawRectBox(image, rect, res)
                res_set.append(res)
                # Paste the segmented character tiles onto the image for
                # visual debugging.
                for i, block in enumerate(blocks):
                    block_ = cv2.resize(block, (25, 25))
                    block_ = cv2.cvtColor(block_, cv2.COLOR_GRAY2BGR)
                    # BUG FIX: the original assigned first and checked the
                    # slice shape afterwards, so a partial slice at the
                    # image border raised a broadcast error before the
                    # check ever ran. Guard the assignment instead.
                    target = image[j * 25:(j * 25) + 25, i * 25:(i * 25) + 25]
                    if target.shape == block_.shape:
                        image[j * 25:(j * 25) + 25,
                              i * 25:(i * 25) + 25] = block_
            if confidence > 0:
                print("车牌:", res, "置信度:", confidence / 7)
    print(time.time() - t0, "s")
    return image, res_set
此次文章主要分析SimpleRecognizePlate()函数部分代码,如下
t0 = time.time()
images = detect.detectPlateRough(image,image.shape[0],top_bottom_padding_rate=0.1)
t0 = time.time()
为实现time.time(), pipline.py文件调用了库time
time库是Python中处理时间的标准库,用于计算机时间的表达,提供获取系统时间并格式化输出功能与提供系统级精确计时功能,用于程序性能分析
time():获取当前时间戳,即计算机内部时间值,返回一个浮点数
则t0 = time.time()含义为通过time对象调用的time(),将获取到的当前时间戳存入t0变量
images = detect.detectPlateRough(image,image.shape[0],top_bottom_padding_rate=0.1)
为实现detect.detectPlateRough(),当前文件引入了hyperlpr_py3文件夹下的detect.py文件的detectPlateRough()函数,含义为粗略检测车牌,对图片进行车牌的粗定位并裁剪出候选车牌区域
def detectPlateRough(image_gray, resize_h=720, en_scale=1.08,
                     top_bottom_padding_rate=0.05):
    """Roughly locate licence-plate candidates with the cascade detector.

    Args:
        image_gray: input colour image (despite the name — it is
            converted to gray internally before detection).
        resize_h: height the image is scaled to before detection.
        en_scale: scale-factor argument passed to ``detectMultiScale``.
        top_bottom_padding_rate: fraction of the height stripped from the
            top and the bottom before detection; must not exceed 0.2.

    Returns:
        A list of ``[cropped, [x, y, w, h], cropped_origin]`` entries, one
        per candidate: the expanded crop, its rectangle in resized-image
        coordinates (y re-offset by the stripped padding), and the
        un-expanded crop of the raw detection.

    NOTE(review): ``watch_cascade`` (the loaded cascade classifier) and
    ``cropped_from_image`` are defined elsewhere in the original module —
    confirm they are in scope.
    """
    print(image_gray.shape)
    if top_bottom_padding_rate > 0.2:
        # NOTE(review): exiting the process from a library function is
        # harsh; raising ValueError would be friendlier, but callers may
        # rely on the exit — kept as in the original.
        print("error:top_bottom_padding_rate > 0.2:", top_bottom_padding_rate)
        exit(1)
    height = image_gray.shape[0]
    padding = int(height * top_bottom_padding_rate)
    # Preserve the aspect ratio while resizing to the requested height.
    scale = image_gray.shape[1] / float(image_gray.shape[0])
    image = cv2.resize(image_gray, (int(scale * resize_h), resize_h))
    # Strip the padded top/bottom bands.
    # NOTE(review): the right bound uses the ORIGINAL width, not the
    # resized one; numpy clamps out-of-range slices, so this only matters
    # when the original is narrower than the resized image — confirm
    # whether the resized width was intended here.
    image_color_cropped = image[padding:resize_h - padding,
                                0:image_gray.shape[1]]
    image_gray = cv2.cvtColor(image_color_cropped, cv2.COLOR_RGB2GRAY)
    watches = watch_cascade.detectMultiScale(
        image_gray, en_scale, 2, minSize=(36, 9), maxSize=(36 * 40, 9 * 40))
    cropped_images = []
    for (x, y, w, h) in watches:
        # Keep an un-expanded crop of the raw detection rectangle.
        cropped_origin = cropped_from_image(
            image_color_cropped, (int(x), int(y), int(w), int(h)))
        # Expand the box (14% each side horizontally; 60% up, 50% down)
        # so the fine-mapping stage has context around the plate.
        x -= w * 0.14
        w += w * 0.28
        y -= h * 0.6
        h += h * 1.1
        cropped = cropped_from_image(
            image_color_cropped, (int(x), int(y), int(w), int(h)))
        cropped_images.append([cropped, [x, y + padding, w, h], cropped_origin])
    return cropped_images
传入的形参image_gray,resize_h = 720,en_scale =1.08 ,top_bottom_padding_rate = 0.05的含义分别为图片本身,图片调整大小的高度,图片缩放比例以及头部底部与内边距的比率
if top_bottom_padding_rate>0.2:
print("error:top_bottom_padding_rate > 0.2:",top_bottom_padding_rate)
exit(1)
对头部底部与内边距的比率进行判断,若超出规定范围(大于0.2)则输出错误信息并退出程序
height = image_gray.shape[0]
padding = int(height*top_bottom_padding_rate)
scale = image_gray.shape[1]/float(image_gray.shape[0])
获取图片的垂直高度,内边距大小以及图片宽与高之比
image = cv2.resize(image_gray, (int(scale*resize_h), resize_h))
引用了库cv2,函数resize()是根据传入的图片本身image_gray,图片调整的宽度以及图片调整的高度对图片进行缩放,返回一个完成缩放的图片
image_color_cropped = image[padding:resize_h-padding,0:image_gray.shape[1]]
image_color_cropped是通过NumPy数组切片对缩放后的图片进行上下裁剪得到的彩色图片,目的是去除图片上下边缘的内边距区域等无关部分,为之后的车牌检测做出有效处理
image_gray = cv2.cvtColor(image_color_cropped,cv2.COLOR_RGB2GRAY)
引用库cv2,函数cvtColor()是根据已裁剪完成的图片image_color_cropped并将其转换成灰度图片
watches = watch_cascade.detectMultiScale(image_gray, en_scale, 2, minSize=(36, 9),maxSize=(36*40, 9*40))
#watch_cascade来自
watch_cascade = cv2.CascadeClassifier('./model/cascade.xml')
引用库cv2,调用了函数CascadeClassifier()与函数detectMultiScale()
函数CascadeClassifier():是OpenCV中的级联分类器,常用于目标检测(如人脸检测),既可以使用Haar特征,也可以使用LBP特征;此处加载的是车牌检测模型cascade.xml
函数detectMultiScale():是OpenCV中级联分类器进行多尺度目标检测的函数,它可以检测出图片中所有符合模型的目标,并将各个目标的坐标、大小(用矩形表示)以向量形式返回
则通过传输的数据可知,调用这两个函数的目的为对于经过处理后的图片,识别出车牌的具体的位置和大小并且作为数组储存起来
cropped_images = []
for (x, y, w, h) in watches:
cropped_origin = cropped_from_image(image_color_cropped, (int(x), int(y), int(w), int(h)))
x -= w * 0.14
w += w * 0.28
y -= h * 0.6
h += h * 1.1;
cropped = cropped_from_image(image_color_cropped, (int(x), int(y), int(w), int(h)))
cropped_images.append([cropped,[x, y+padding, w, h],cropped_origin])
return cropped_images
通过这几个操作将识别出来的车牌具体的位置和大小进行数据上的处理并且返回这些值