# Install the required packages and set up the environment first
import cv2
from PIL import Image
import numpy as np
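# Optional sanity check (a minimal sketch): the script assumes opencv-python,
# pillow and numpy are installed (e.g. pip install opencv-python pillow numpy);
# printing the versions is a quick way to confirm the environment.
print('OpenCV version:', cv2.__version__)
print('NumPy version:', np.__version__)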
# Load the video and define the variables used later
cor_x, cor_y = -1, -1  # placeholder for the most recent click coordinates
fourcc = cv2.VideoWriter_fourcc(*'XVID')  # use the XVID codec
camera = cv2.VideoCapture('红绿灯.mp4')  # read the video from a file; change this to the path of your own test file
fps = camera.get(cv2.CAP_PROP_FPS)  # get the video frame rate
print('Video frame rate: %d fps' % fps)
cor = np.array([[1, 1]])  # the first row is a meaningless placeholder; it only exists so np.row_stack can be used below
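# Optional sketch: before processing, it can help to confirm the file actually
# opened and to read a few more capture properties the same way as the frame
# rate (CAP_PROP_FRAME_WIDTH/HEIGHT/FRAME_COUNT are standard OpenCV constants;
# the variable names below are only illustrative).
if not camera.isOpened():
    raise IOError('Could not open the video file')
src_w = camera.get(cv2.CAP_PROP_FRAME_WIDTH)
src_h = camera.get(cv2.CAP_PROP_FRAME_HEIGHT)
n_frames = camera.get(cv2.CAP_PROP_FRAME_COUNT)
print('Source size: %dx%d, %d frames' % (src_w, src_h, n_frames))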
# Mouse callback: record the coordinates of every left click
def OnMouseAction(event, x, y, flags, param):  # OpenCV calls this on every mouse event with the current coordinates
    global cor_x, cor_y, cor  # declare globals so the clicked coordinates are visible outside this function
    if event == cv2.EVENT_LBUTTONDOWN:
        print("Left button clicked")
        print(x, y)
        cor_x, cor_y = x, y  # store the clicked x, y coordinates
        cor_m = [cor_x, cor_y]
        cor = np.row_stack((cor, cor_m))  # append the clicked point as a new row of the global coordinate array
    elif event == cv2.EVENT_LBUTTONUP:
        cv2.line(img, (cor_x, cor_y), (cor_x, cor_y), (255, 255, 0), 7)  # mark the clicked point with a dot; arguments are (image, start point, end point, colour (B, G, R), thickness)
    elif event == cv2.EVENT_RBUTTONDOWN:
        print("Right button clicked")
    elif flags == cv2.EVENT_FLAG_LBUTTON:
        print("Left button dragged")
    elif event == cv2.EVENT_MBUTTONDOWN:
        print("Middle button clicked")
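# Optional sketch: the callback wiring can be tried on a blank canvas before
# binding it to the video window. demo_mouse_window() is a hypothetical helper,
# shown only to illustrate cv2.setMouseCallback; call it separately if wanted.
def demo_mouse_window():
    canvas = np.zeros((300, 400, 3), np.uint8)
    cv2.namedWindow('demo')
    cv2.setMouseCallback('demo', lambda e, x, y, f, p: print(e, x, y))  # print every event and position
    while (cv2.waitKey(1) & 0xFF) != ord(' '):  # press space to close
        cv2.imshow('demo', canvas)
    cv2.destroyWindow('demo')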
'''
setMouseCallback() registers the callback function;
below, the callback is bound to the OpenCV window.
'''
# Grab one frame and let the user pick the target regions on it:
# click, in order, the top-left and bottom-right corners of the traffic-light
# area, then the top-left and bottom-right corners of the yellow lamp.
grabbed, img = camera.read()  # read the first frame of the video stream
img = cv2.resize(img, (1280, 720))
cv2.namedWindow("红绿灯")
cv2.setMouseCallback("红绿灯", OnMouseAction)
while True:
    cv2.imshow("红绿灯", img)
    k = cv2.waitKey(1) & 0xFF
    if k == ord(' '):  # press space to finish the selection
        break
cv2.destroyAllWindows()  # close the selection window

width_choose = int(cor[2, 0] - cor[1, 0])
height_choose = int(cor[2, 1] - cor[1, 1])
y_width_choose = int(cor[4, 0] - cor[3, 0])
y_height_choose = int(cor[4, 1] - cor[3, 1])
print("Selected region width: %d\nSelected region height: %d" % (width_choose, height_choose))
print("Yellow-lamp region width: %d\nYellow-lamp region height: %d" % (y_width_choose, y_height_choose))
out = cv2.VideoWriter('output_test1.avi', fourcc, fps, (width_choose, height_choose))  # arguments: output file name, codec, frame rate, frame size (width, height)
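# Optional sketch: the slicing in the loop below assumes each region was clicked
# strictly top-left corner first, then bottom-right. A small helper such as
# normalize_corners() (hypothetical, not used by this script) could make the
# crop independent of the click order by sorting the coordinates.
def normalize_corners(p1, p2):
    x1, x2 = sorted((int(p1[0]), int(p2[0])))
    y1, y2 = sorted((int(p1[1]), int(p2[1])))
    return x1, y1, x2, y2  # usable as frame[y1:y2, x1:x2]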
# Process the video frame by frame and classify the light colour from the pixel
# values: check the yellow-lamp crop first, then green, then red; if it is
# neither yellow nor green it is treated as red.
while True:
    grabbed, frame = camera.read()  # read the next frame of the video stream
    if not grabbed:
        break
    frame = cv2.resize(frame, (1280, 720))
    frame_data = np.array(frame)
    box_data = frame_data[cor[1, 1]:cor[2, 1], cor[1, 0]:cor[2, 0]]  # crop the rectangular traffic-light region
    yellow_box = frame_data[cor[3, 1]:cor[4, 1], cor[3, 0]:cor[4, 0]]  # crop the yellow-lamp region
    cv2.imshow('choose', box_data)
    yellow_max = np.max(yellow_box)
    b, g, r = cv2.split(box_data)  # split the cropped region into single-channel B, G, R images
    print(yellow_max)  # brightest pixel value inside the yellow-lamp region
    max_r = np.max(r)
    max_g = np.max(g)
    max_b = np.max(b)
    if yellow_max >= 255:  # the yellow lamp contains a saturated pixel
        cv2.rectangle(frame, tuple(cor[1, :]), tuple(cor[2, :]), (0, 255, 255), 2)
        cv2.putText(frame, "yellow", (384, 284), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 255), 2)  # draw the text "yellow"
    elif max_g >= 245:
        cv2.rectangle(frame, tuple(cor[1, :]), tuple(cor[2, :]), (0, 255, 0), 2)
        cv2.putText(frame, "green", (384, 284), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)  # draw the text "green"
    elif max_r >= 245:
        cv2.rectangle(frame, tuple(cor[1, :]), tuple(cor[2, :]), (0, 0, 255), 2)
        cv2.putText(frame, "red", (384, 284), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)  # draw the text "red"
    out.write(box_data)  # write the cropped traffic-light region to the output file
    cv2.imshow('lwpCVWindow', frame)  # show the full frame with the annotation
    cv2.imshow("Red", r)
    cv2.imshow("Green", g)
    cv2.imshow("Blue", b)
    key = cv2.waitKey(1) & 0xFF
    if key == ord('q'):
        break

# Finally, release the writer, the capture and all windows
out.release()
camera.release()
cv2.destroyAllWindows()
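# Optional sketch: the per-channel maxima used above are sensitive to white
# glare and exposure. A common alternative is to count pixels that fall inside
# HSV hue ranges with cv2.cvtColor and cv2.inRange. classify_light_hsv() is a
# hypothetical helper and the hue thresholds are rough, untuned assumptions.
def classify_light_hsv(bgr_roi):
    hsv = cv2.cvtColor(bgr_roi, cv2.COLOR_BGR2HSV)
    # keep only bright, saturated pixels in each hue band
    red1 = cv2.inRange(hsv, (0, 100, 100), (10, 255, 255))
    red2 = cv2.inRange(hsv, (160, 100, 100), (180, 255, 255))
    yellow = cv2.inRange(hsv, (15, 100, 100), (35, 255, 255))
    green = cv2.inRange(hsv, (40, 100, 100), (90, 255, 255))
    counts = {
        'red': cv2.countNonZero(red1) + cv2.countNonZero(red2),
        'yellow': cv2.countNonZero(yellow),
        'green': cv2.countNonZero(green),
    }
    return max(counts, key=counts.get)  # label with the most matching pixels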