Parking space detection in a garage with OpenCV and VGG16

Overall workflow:
1: Grab one or a few still frames from the video.
2: Mark out the rough area of the parking lot with a few points.
3: Detect the parking-spot lines inside that area with line detection.
4: Manually fine-tune the detected line regions, split them into individual spots, and save each spot's coordinates.

Data source:
Using steps 1-4, crop out and save an image of each parking spot, label the crops, train the VGG16 model on them, and save the trained model.

Prediction:
For each video frame (frames can be skipped to reduce the load), run the same image operations, crop out each spot, feed the crops to the trained VGG16 model, and render the results back onto the video.
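Step 1 of the workflow (grabbing a still frame from the video) is not shown in the code below. A minimal sketch, assuming the video path used later in the prediction code and the still-image path the cropping code reads:

#sketch: save the first frame of the parking video as a still image
import cv2

vc = cv2.VideoCapture('./data/parking_video.mp4')
ret, frame = vc.read()
if ret:
    cv2.imwrite('../park/test_images/scene1380.jpg', frame)
vc.release()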

Parking-spot cropping code and spot map

import operator
import os.path

import cv2
import numpy as np
import pickle
def cv_show(img):
    #helper: display an image and wait for a key press
    # img=cv2.pyrDown(img)
    cv2.imshow('name',img)
    cv2.waitKey()
    cv2.destroyAllWindows()
img=cv2.imread('../park/test_images/scene1380.jpg')
lower=np.uint8([120,120,120])
upper=np.uint8([255,255,255])
#1: inRange keeps pixels whose channels all lie in [120, 255] (set to 255) and zeroes the rest, filtering out the darker background
masked=cv2.inRange(img,lower,upper)
#cv_show(masked)
#2: keep the original pixels only where the mask is 255, which gives cleaner contours
mask=cv2.bitwise_and(img,img,mask=masked)
#cv_show(mask)
#grayscale (OpenCV loads images in BGR order, so use COLOR_BGR2GRAY)
cvt=cv2.cvtColor(mask,cv2.COLOR_BGR2GRAY)
#Canny edge detection
can=cv2.Canny(cvt,50,200)
#cv_show(can)
#3: pick a few points on the image to outline the region we need
rows,cols=can.shape[:2]
pt_1  = [cols*0.05, rows*0.90]
pt_2 = [cols*0.05, rows*0.70]
pt_3 = [cols*0.30, rows*0.55]
pt_4 = [cols*0.6, rows*0.15]
pt_5 = [cols*0.90, rows*0.15]
pt_6 = [cols*0.90, rows*0.90]
vertices = np.array([[pt_1, pt_2, pt_3, pt_4, pt_5, pt_6]], dtype=np.int32)
poin_img=img.copy()
#draw the selected points on a copy of the image
#poin_img=cv2.cvtColor(poin_img,cv2.COLOR_GRAY2RGB)
for point in vertices[0]:
    cv2.circle(poin_img,(point[0],point[1]),10,(0,255,0),4)
cv_show(poin_img)
#4: use the point coordinates to keep only the usable region
#start from an all-black mask the same size as the edge image
mask=np.zeros_like(can)
if len(mask.shape)==2:
    #fill the polygon spanned by the vertices with white
    cv2.fillPoly(mask,vertices,255)
#keep only the edge pixels that fall inside the polygon
img2=cv2.bitwise_and(can,mask)
cv_show(img2)
#5: find candidate line segments with the probabilistic Hough transform
#rho: distance resolution; theta: angular resolution; threshold: minimum votes before a segment is detected
#minLineLength: segments shorter than this are ignored; maxLineGap: two segments closer than this are merged into one
lines=cv2.HoughLinesP(img2,rho=0.1,theta=np.pi/10,threshold=15,minLineLength=9,maxLineGap=4)
#filter out the unwanted segments (keep near-horizontal ones 25-55 px long) and draw the rest
chaned=[]
lines_img=img.copy()
for line in lines:
    for x1,y1,x2,y2 in line:
        if abs(y2-y1)<=1 and abs(x2-x1)>=25 and abs(x2-x1)<=55:
            chaned.append((x1,y1,x2,y2))
            cv2.line(lines_img,(x1,y1),(x2,y2),[255,0,0],2)
print(len(chaned))
cv_show(lines_img)
#sort the line segments by x, then y
list1=sorted(chaned,key=operator.itemgetter(0,1))
#cluster the segments into columns; each cluster corresponds to one column of spots
clusters={}
index=0
#with the cluster distance set to 20 we get exactly the 12 clusters visible in the frame; at least one cluster spans two columns and is handled separately
clus_dist=20
for i in range(len(list1)-1):
    distance=abs(list1[i+1][0]-list1[i][0])
    if distance<=clus_dist:
        if index not in clusters:
            clusters[index]=[]
        #note: segments end up appended twice; duplicates are removed later with set()
        clusters[index].append(list1[i])
        clusters[index].append(list1[i+1])
    else:
        index +=1

#Find each cluster's extent and draw its bounding rectangle
    #attempt 1: take the min/max extremes (results were poor)
# for k,v in clusters.items():
#
#     #corner a: (min x, min y)
#     #corner b: (max x, min y)
#     #corner c: (max x, max y)
#     #corner d: (min x, max y)
#     #(the other option, used below, is to average instead)
#     maxX=-1
#     maxy=-1
#     minx=sys.maxsize
#     miny=sys.maxsize
#
#     for (x1,y1,x2,y2) in v:
#         maxX=max(max(x1,x2),maxX)
#         maxy=max(max(y1,y2),maxy)
#         minx=min(min(x1,x2),minx)
#         miny=min(min(y1,y2),miny)
#     cv2.rectangle(img,(minx,miny),(maxX,maxy),(0,255,0),2)
#     cv2.circle(img,(minx,miny),10,(0,255,0),4)
#     cv2.circle(img,(minx,maxy),10,(0,255,0),4)
#     cv2.circle(img,(maxX,miny),10,(0,255,0),4)
#     cv2.circle(img,(maxX,maxy),10,(0,255,0),4)
#
#     print("corner coordinates:",minx,miny,maxX,maxy)
#
#     cv_show(img)
#     maxX=-1
#     maxy=-1
#     minx=sys.maxsize
#     miny=sys.maxsize
# #Find each cluster's extent and draw its rectangle
# #attempt 2: average the x values, take the extreme y values
#
# maxY=-1
# for k,v in clusters.items():
#     #average the x values, take the maximum y
#     if k!=0 and k!=12:
#         for (x1,y1,x2,y2) in v:
#             maxY=max(max(y1,y2),maxY)
# for k,v in clusters.items():
#     sumx1=0
#     sumx2=0
#     miny=sys.maxsize
#     index2=0
#     for (x1,y1,x2,y2) in v:
#         sumx1 +=x1
#         sumx2 +=x2
#         index2 +=1
#         miny=min(miny,min(y1,y2))
#     sumx1=int( sumx1 / index2)
#     sumx2=int( sumx2 / index2)
#     index2=0
#
#     #the four bounds: sumx1, sumx2, miny, maxY
#     cv2.rectangle(img,(sumx1-5,miny),(sumx2+5,maxY),(0,255,0),2)
# cv_show(img)
#average x1 and x2 within each cluster to get the two corner x values of its rectangle
rects = {} #cluster index -> (avg x1, top y, avg x2, bottom y)
i = 0
print(clusters.keys())
for key in clusters:
    all_list = clusters[key]
    cleaned = list(set(all_list))
    if len(cleaned) > 5:
        cleaned = sorted(cleaned, key=lambda tup: tup[1])
        avg_y1 = cleaned[0][1]
        avg_y2 = cleaned[-1][1]
        avg_x1 = 0
        avg_x2 = 0
        for tup in cleaned:
            avg_x1 += tup[0]
            avg_x2 += tup[2]
        avg_x1 = avg_x1/len(cleaned)
        avg_x2 = avg_x2/len(cleaned)
        rects[i] = (avg_x1, avg_y1, avg_x2, avg_y2)
        i += 1

print("Num Parking Lanes: ", len(rects))

#Step 5: draw the column rectangles
buff = 7
new_image = np.copy(img)
for key in rects:
    tup_topLeft = (int(rects[key][0] - buff), int(rects[key][1]))
    tup_botRight = (int(rects[key][2] + buff), int(rects[key][3]))
    cv2.rectangle(new_image, tup_topLeft,tup_botRight,(0,255,0),3)
#cv_show(new_image)
#the detected column rectangles are not accurate enough, so the coordinates in rects are fine-tuned by hand
adj_y1 = {0: -15, 1:-10, 2:0, 3:-11, 4:28, 5:5, 6:-15, 7:-15, 8:-10, 9:-30, 10:9, 11:-32}
adj_y2 = {0: 30, 1: 50, 2:15, 3:10, 4:-15, 5:15, 6:15, 7:-20, 8:15, 9:15, 10:0, 11:30}

adj_x1 = {0: -8, 1:-15, 2:-15, 3:-15, 4:-15, 5:-15, 6:-15, 7:-15, 8:-10, 9:-10, 10:-10, 11:0}
adj_x2 = {0: 0, 1: 15, 2:15, 3:15, 4:15, 5:15, 6:15, 7:15, 8:10, 9:10, 10:10, 11:0}

spot_dict={} #maps each spot's (x1, y1, x2, y2) coordinates to a spot index
spot_tot=0

#one parking spot is roughly 15 pixels tall in this view
gap=15
new_image=img.copy()
for key in rects:
    tup = rects[key]
    x1 = int(tup[0]+ adj_x1[key])
    x2 = int(tup[2]+ adj_x2[key])
    y1 = int(tup[1] + adj_y1[key])
    y2 = int(tup[3] + adj_y2[key])
    cv2.rectangle(new_image, (x1, y1),(x2,y2),(0,255,0),2)
    #at this point we have the adjusted column rectangles; split them into spots, then fine-tune again

    #split each column rectangle into individual spots
    num_splits=int(abs(y2-y1)//gap)
    #redraw the horizontal separator lines
    for i in range(0,num_splits+1):
        y=int(y1+i*gap)
        cv2.line(new_image,(x1,y),(x2,y),[0,0,255],2)
    #draw the vertical middle line: interior columns hold two spots side by side
    if key > 0 and key < len(rects) - 1:
        x = int((x1 + x2) / 2)
        cv2.line(new_image, (x, y1), (x, y2), [0, 0, 255], 2)

    #record every spot's coordinates in spot_dict
    #(the original post is cut off at the if above; the rest of this loop is a
    # plausible completion matching the (x1, y1, x2, y2) -> index map that the
    # prediction code loads from './data/spotmap')
    for i in range(num_splits):
        y = int(y1 + i * gap)
        if key > 0 and key < len(rects) - 1:
            x = int((x1 + x2) / 2)
            spot_dict[(x1, y, x, y + gap)] = spot_tot
            spot_tot += 1
            spot_dict[(x, y, x2, y + gap)] = spot_tot
            spot_tot += 1
        else:
            spot_dict[(x1, y, x2, y + gap)] = spot_tot
            spot_tot += 1

cv_show(new_image)
print("total spots:", spot_tot)

#save the spot map for the data-generation and prediction stages
with open('./data/spotmap', 'wb') as f:
    pickle.dump(spot_dict, f)
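The per-spot training images described under "Data source" are not produced by the code above. A minimal sketch of that step, assuming the spot map saved above; the unlabeled staging folder is a made-up name, and load_data() below only checks whether "empty" appears in a file's path, so the hand-sorted folder names just need to reflect that:

#sketch: crop each spot from a reference frame and save it for hand labeling
import os
import pickle

import cv2

frame = cv2.imread('../park/test_images/scene1380.jpg')
with open('./data/spotmap', 'rb') as f:
    spot_dict = pickle.load(f)

out_dir = './data/train/unlabeled'  #hypothetical staging folder
os.makedirs(out_dir, exist_ok=True)
for (x1, y1, x2, y2), idx in spot_dict.items():
    crop = frame[int(y1):int(y2), int(x1):int(x2)]
    #after saving, sort the crops by hand into folders whose paths do or do
    #not contain "empty" (e.g. data/train/empty/ vs. data/train/occupied/)
    cv2.imwrite('%s/spot_%d.jpg' % (out_dir, idx), crop)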

VGG16 training code



from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization
from keras.optimizers import SGD
from keras import regularizers
import keras
import numpy as np
import os
import matplotlib.pyplot as plt
import cv2

resize = 32  #side length the spot crops are resized to for the network
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
def load_data():
    Pathlist=[]
    #1: collect every training image path under data/train/
    for (filepath,dirnames,filenames)  in os.walk(os.getcwd()+'/data/train/'):
        for filename in filenames:
            Pathlist.append(filepath+'/'+filename)
    data_train=[]
    data_label=[]
    for p in Pathlist:
        path=p.split('mypark/')[1]

        img=cv2.imread(path)
        if img is None:
            continue
        img=cv2.resize(img,(resize,resize))

        data_train.append(img)
        #label 0 = empty spot, label 1 = occupied
        if "empty" in path:
            data_label.append(0)
        else:
            data_label.append(1)
    data_train=np.array(data_train,dtype='float32')

    print(len(data_train))
    trainx=[]
    testx=[]
    trainy=[]
    testy=[]
    #every 100th sample becomes a test sample (~1% of the data); see the
    #sketch after this function for a more conventional split
    for i in range(len(data_train)):
        if i%100!=0:
            trainx.append(data_train[i])
            trainy.append(data_label[i])
        else:
            testx.append(data_train[i])
            testy.append(data_label[i])
    return trainx, trainy, testx, testy
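The modulo split above holds out only about 1% of the samples for testing and keeps their on-disk ordering. A shuffled, stratified split is usually safer; a sketch using scikit-learn (an extra dependency the original does not use):

from sklearn.model_selection import train_test_split

#hold out 20% for testing, keeping the empty/occupied ratio in both halves
trainx, testx, trainy, testy = train_test_split(
    data_train, data_label,
    test_size=0.2,
    stratify=data_label,
    random_state=42)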

def vgg16():
    weight_decay = 0.0005  #L2 regularization strength
    #(epochs and batch size are set in model.fit() below)
    # layer1
    model = Sequential()
    model.add(Conv2D(64, (3, 3), padding='same',
                     input_shape=(32, 32, 3), kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.3))
    # layer2
    model.add(Conv2D(64, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # layer3
    model.add(Conv2D(128, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.4))
    # layer4
    model.add(Conv2D(128, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # layer5
    model.add(Conv2D(256, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.4))
    # layer6
    model.add(Conv2D(256, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.4))
    # layer7
    model.add(Conv2D(256, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # layer8
    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.4))
    # layer9
    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.4))
    # layer10
    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # layer11
    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.4))
    # layer12
    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.4))
    # layer13
    model.add(Conv2D(512, (3, 3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))
    # layer14
    model.add(Flatten())
    model.add(Dense(512, kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    # layer15
    model.add(Dense(512, kernel_regularizer=regularizers.l2(weight_decay)))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    # layer16
    model.add(Dropout(0.5))
    model.add(Dense(2))
    model.add(Activation('softmax'))

    return model
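An aside: the network above has the VGG16 shape but is trained from scratch. keras.applications also ships VGG16 with pretrained ImageNet weights, and with this little data transfer learning is usually the stronger baseline. A minimal sketch (32x32 is the smallest input size keras.applications.VGG16 accepts; Flatten and Dense are already imported above):

from keras.applications import VGG16
from keras.models import Model

def vgg16_pretrained():
    #reuse frozen ImageNet convolutional features, train only a small new head
    base = VGG16(weights='imagenet', include_top=False, input_shape=(32, 32, 3))
    for layer in base.layers:
        layer.trainable = False
    x = Flatten()(base.output)
    x = Dense(256, activation='relu')(x)
    out = Dense(2, activation='softmax')(x)
    return Model(inputs=base.input, outputs=out)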

if __name__ == '__main__':
    # import data
    train_data, train_label, test_data, test_label = load_data()
    print(len(train_data))
    train_data = np.array(train_data,dtype='float32')
    test_data = np.array(test_data,dtype='float32')
    train_label = keras.utils.to_categorical(train_label, 2) #one-hot encode the labels
    test_label = keras.utils.to_categorical(test_label, 2)

    model = vgg16()
    #note: if lr is set too high, loss and val_loss can blow up to NaN
    sgd = SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True) #SGD optimizer
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
    history = model.fit(train_data, train_label,
                        batch_size=40,
                        epochs=40,
                        validation_split=0.2,  #hold out 20% of the training set for validation
                        shuffle=True)
    model.save('vgg16dogcat.h5')

    ###plot the training curves
    acc = history.history['acc']  #training accuracy per epoch
    val_acc = history.history['val_acc']  #validation accuracy per epoch
    loss = history.history['loss']  #training loss per epoch
    val_loss = history.history['val_loss']  #validation loss per epoch
    epochs = range(1, len(acc) + 1)
    plt.plot(epochs, acc, 'bo', label='Training acc')
    plt.plot(epochs, val_acc, 'b', label='Validation acc')
    plt.legend()  #show which line is which

    plt.show()

Prediction code



import os
import pickle

import cv2
import numpy as np
from keras.models import load_model

os.environ['CUDA_VISIBLE_DEVICES']='1'
resize = 32
model = load_model("./vgg16dogcat.h5")  #load the trained model
vc=cv2.VideoCapture('./data/parking_video.mp4')
vc.set(cv2.CAP_PROP_FPS,10000)
#total number of frames in the video
frame_count = vc.get(cv2.CAP_PROP_FRAME_COUNT)
#frame pacing is handled by cv2.waitKey in the loop below

print(frame_count)
count=1 #frame counter

#show how many spots are still free, using the spot map
#produced by the cropping script
def platmap(img):
    #the spot map is a pickled dict of (x1, y1, x2, y2) -> index
    with open('./data/spotmap','rb') as df: #note 'rb': it was written in binary mode
        data=pickle.load(df)
    count1=0
    for k,v in data.items():
        (x1,y1,x2,y2) =k
        (x1,y1,x2,y2) =(int(x1),int(y1),int(x2),int(y2))
        #print(x1,y1,x2,y2)
        img1=img[y1:y2,x1:x2]
        result=prediction_def(img1)
        if result==0:
            count1=count1+1
            #draw a rectangle around every empty spot
            cv2.rectangle(img, (x1, y1),(x2,y2),(0,255,0),2)
    Strw="Free spots: "+str(count1)+" / "+str(len(data))
    cv2.putText(
        img, #numpy array on which text is written
        Strw, #text
        (100,100), #position at which writing has to start
        cv2.FONT_HERSHEY_SIMPLEX, #font family
        2, #font size
        (209, 80, 0, 255), #font color
        2) #font stroke
    cv2.imshow('img',img)

#run the model on one spot crop; returns 0 (empty) or 1 (occupied)
def prediction_def(img):
    img = cv2.resize(img, (resize, resize))
    img = img.reshape(1,resize,resize,3)
    img = np.array(img,dtype='float32')
    predicted = model.predict(img)  #class probabilities
    #print("prediction:",predicted[0],np.argmax(predicted))
    return  np.argmax(predicted)
while count<=frame_count:
    # get a frame
    ret, frame = vc.read()
    if not ret:
        break
    #run the detection on every 3rd frame
    if count%3==0:
        platmap(frame)

    count=count+1
    if cv2.waitKey(100)&0xFF==27:  #Esc quits
        break
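Calling predict() once per spot means every processed frame triggers dozens of separate forward passes. Batching all the crops into a single predict() call is a simple speedup; a sketch, assuming the same (x1, y1, x2, y2) -> index spot map:

#sketch: classify all spots of one frame in a single batched forward pass
def platmap_batched(img, data, model, resize=32):
    keys = list(data.keys())
    #stack every spot crop into one (N, resize, resize, 3) batch
    batch = np.array([
        cv2.resize(img[int(y1):int(y2), int(x1):int(x2)], (resize, resize))
        for (x1, y1, x2, y2) in keys
    ], dtype='float32')
    results = np.argmax(model.predict(batch), axis=1)
    free = 0
    for (x1, y1, x2, y2), r in zip(keys, results):
        if r == 0:  #empty
            free += 1
            cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2)
    return free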
print(count)

Result images

The final results are not great: first, I had too little data; second, the model was trained for too few iterations; third, the hyperparameters need more tuning.

The VGG16 code is adapted from:
https://blog.csdn.net/u014453898/article/details/97785190?utm_medium=distribute.pc_relevant.none-task-blog-baidujs_title-13&spm=1001.2101.3001.4242
