【opencv】Detecting "black/white/blue/green" screens in a video

【Requirement】

In a long video file, detect how many times abnormal frames appear, take screenshots, and report them, replacing manual inspection (code attached)

【Approaches】

Approach 1: per-pixel check on the raw (height, width, channels) array (shape example below)
Approach 2: binary image built from an RGB/HSV color range
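
For reference, a decoded frame is just a NumPy array whose shape exposes exactly these dimensions; a minimal sketch (the path is a placeholder):

import cv2

img = cv2.imread("./tmp/1.png")        # BGR frame as a NumPy array (placeholder path)
height, width, channels = img.shape    # e.g. (960, 1080, 3)
print(height, width, channels)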

> Color ranges: https://blog.csdn.net/qq78442761/article/details/83056346

# Define the HSV range for gray
lower_gray = np.array([0, 0, 100])
upper_gray = np.array([180, 43, 220])
> Imports used by the code below:
import cv2
import numpy as np
import os, time, json
from PIL import Image

Gray screen detection:
1. In both RGB and HSV color space, gray is a range of values, not a single point
2. Binarize the masked image with cv2
3. Compute the contours of the gray regions and sum their area (sketch below)
4. Derive the moment in the video from the frame index
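
A minimal single-image sketch of steps 1-3, assuming OpenCV 4.x and the gray HSV range defined above; the gray_ratio helper name is illustrative, and the 0.8 flagging threshold used later is an assumption to tune:

import cv2
import numpy as np

def gray_ratio(path):  # illustrative helper
    img = cv2.imread(path)
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)                  # step 1: move to HSV
    mask = cv2.inRange(hsv, np.array([0, 0, 100]),
                       np.array([180, 43, 220]))                # gray range -> white mask
    binary = cv2.dilate(mask, None, iterations=2)               # step 2: clean up the mask
    contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)     # step 3: outline gray regions
    area = sum(cv2.contourArea(c) for c in contours)
    return area / (img.shape[0] * img.shape[1])                 # gray fraction of the frame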

【Implementation order】

  1. Take the file under test as input (an mp4 or other video file)
  2. Split the video into frames, turning the task into per-image processing
  3. Get the file duration (see the sketch after this list)
  4. Find problem frames/intervals via per-pixel ratios and color-range checks
  5. Map a frame index back to a relative timestamp
  6. Automate the locating/comparison and print the report
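
A sketch of steps 3 and 5, assuming ffprobe is on PATH and that fps matches the -r value passed to ffmpeg (both helper names are illustrative):

import json
import subprocess

def video_duration(url):  # illustrative helper
    # step 3: ffprobe reports container metadata as JSON
    out = subprocess.check_output(
        ["ffprobe", "-v", "quiet", "-print_format", "json", "-show_format", url])
    return float(json.loads(out)["format"]["duration"])

def frame_timestamp(frame_index, fps):  # illustrative helper
    # step 5: at a fixed extraction rate, frame N sits at N / fps seconds
    return frame_index / fps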

【Extended uses】

In a replay file, report the timestamps of gray-screen frames and count how often they occur
For devices that sporadically show or flash a green screen, take a screenshot and capture the full frame (sketch below)
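
A hedged sketch of saving a flagged frame as evidence; the save_evidence helper, output directory, and filename pattern are all placeholders:

import os
import cv2

def save_evidence(frame, frame_index, out_dir="./evidence"):  # illustrative helper
    os.makedirs(out_dir, exist_ok=True)
    # keep a full-resolution copy of the suspect frame for the bug report
    cv2.imwrite(os.path.join(out_dir, "green_%d.png" % frame_index), frame)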

【Code】Approach 2: HSV range + contours (test_video_opencv)

def test_video_opencv(url):
    start_time_1 = time.time()

    # Clean up and recreate the working directory and report file
    rm_cmd = "rm -rf ./tmp/*.png *.txt"
    os.popen(rm_cmd).read()

    pre_cmd1 = "mkdir -p tmp"
    os.popen(pre_cmd1).read()

    make_file_cmd = "touch video_tmp.txt"
    os.popen(make_file_cmd).read()
    file = open('./video_tmp.txt', 'w', encoding='utf-8')

    # Split the video into frames (2 fps) for inspection
    start_time = time.time()
    cmd = "ffmpeg -i " + url + " -r 2 -s 1080x960 -ss 00:00:00 ./tmp/%d.png"
    print(cmd)
    os.popen(cmd).read()
    end_time = time.time()
    file.write("Splitting took: " + str(end_time - start_time) + "\n\n")

    info_cmd = "ffprobe -v quiet -print_format json -show_format -show_streams " + url
    data_json = os.popen(info_cmd).read()
    # Get the file duration from the container metadata
    d = json.loads(data_json)
    duration = d["format"]["duration"]
    file.write("Total video duration: " + str(duration) + "\n\n")

    pic_len = len(os.listdir("./tmp"))
    file.write("Number of extracted frames: " + str(pic_len) + "\n\n")

    start_time = time.time()

    # HSV range for gray (hoisted out of the per-frame loop)
    lower_gray = np.array([0, 0, 100])
    upper_gray = np.array([180, 43, 220])

    for png_num in range(1, pic_len + 1):
        img = cv2.imread("./tmp/" + str(png_num) + ".png")

        # gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # convert to grayscale
        # ret, img = cv2.threshold(gray, 160, 255, cv2.THRESH_BINARY)  # binarize the grayscale image
        # img = 255 - img

        # Convert BGR to the HSV color model
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

        # Pixels below lower_gray or above upper_gray become 0;
        # pixels inside the range become 255, i.e. painted white
        mask = cv2.inRange(hsv, lower_gray, upper_gray)
        # cv2.imshow('image', mask)
        # cv2.waitKey(0)

        binary = cv2.threshold(mask, 127, 255, cv2.THRESH_BINARY)[1]  # binarize the mask
        binary = cv2.dilate(binary, None, iterations=2)  # dilate to merge nearby regions

        # findContours returns a different number of values across OpenCV versions:
        # 3.x returns (image, contours, hierarchy); 2.x and 4.x return (contours, hierarchy)
        if int(cv2.__version__.split('.')[0]) == 3:
            _, contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        else:
            contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        # 3 - sum the contour areas
        pic_sum = 0
        space = img.shape[0] * img.shape[1]
        for cts in contours:
            pic_sum += cv2.contourArea(cts)
        print(pic_sum / space)
        if pic_sum / space > 0.8:  # assume a gray screen must cover 80% of the frame
            file.write("Frame " + str(png_num) + " is a gray screen, at second " + str(float(duration) / pic_len * png_num) + ", gray area ratio: " + str(pic_sum / space) + "\n\n")


    end_time = time.time()
    file.write("Per-frame loop took: " + str(end_time - start_time) + "\n\n")

    end_time_1 = time.time()
    file.write("Total time: " + str(end_time_1 - start_time_1) + "\n\n")
    file.close()

【Code】Approach 1: per-pixel check (test_video_pixel)

def test_video_pixel(url):
    # Clean up and recreate the working directory and report file
    rm_cmd = "rm -rf ./tmp/*.png"
    os.popen(rm_cmd).read()

    pre_cmd1 = "mkdir -p tmp"
    os.popen(pre_cmd1).read()

    make_file_cmd = "touch video_tmp.txt"
    os.popen(make_file_cmd).read()
    file = open('./video_tmp.txt', 'w', encoding='utf-8')

    # Split the video into frames (1 fps) for inspection
    cmd = "ffmpeg -i " + url + " -r 1 -s 1080x960 -ss 00:00:00 ./tmp/%03d.png"
    os.popen(cmd).read()

    info_cmd = "ffprobe -v quiet -print_format json -show_format -show_streams " + url
    data_json = os.popen(info_cmd).read()
    # Get the file duration from the container metadata
    d = json.loads(data_json)
    duration = d["format"]["duration"]
    file.write("Total video duration: " + str(duration) + "\n\n")

    pic_len = len(os.listdir("./tmp"))

    # Convert each frame into a NumPy array and count pixels per reference color
    for png_num in range(1, pic_len + 1):
        start_time = time.time()
        # ffmpeg wrote zero-padded names (%03d), so pad the index the same way
        image_arr = np.array(Image.open("./tmp/" + str(png_num).zfill(3) + ".png").convert("RGB"))

        height, width = image_arr.shape[:2]

        pic_num = 0        # total number of pixels
        gray_pic_sum = 0   # pixels exactly matching the gray reference value
        black_pic_sum = 0
        green_pic_sum = 0

        for i in range(0, height):
            for j in range(0, width):
                pic_num += 1
                # Check whether this pixel exactly matches each reference color
                if (image_arr[i, j] == [140, 143, 146]).all():
                    gray_pic_sum += 1
                if (image_arr[i, j] == [0, 0, 0]).all():
                    black_pic_sum += 1
                if (image_arr[i, j] == [40, 220, 60]).all():
                    green_pic_sum += 1

        file.write("第 " + str(png_num) + " 张图片,一共 " + str(gray_pic_sum) + " 个灰色像素点,共 " + str(gray_pic_sum) + " 个像素点 \n\n")
        file.write("第 " + str(png_num) + " 张图片,一共 " + str(black_pic_sum) + " 个黑色像素点,共 " + str(black_pic_sum) + " 个像素点 \n\n")
        file.write("第 " + str(png_num) + " 张图片,一共 " + str(green_pic_sum) + " 个绿色像素点,共 " + str(black_pic_sum) + " 个像素点 \n\n")

        # Flag problem frames and compute their timestamps in the video
        if gray_pic_sum / pic_num >= 0.2:
            file.write("!! Frame " + str(png_num) + " is a gray-screen frame, at second: " + str(float(duration) / pic_len * png_num) + " \n\n")
        if black_pic_sum / pic_num >= 0.2:
            file.write("!! Frame " + str(png_num) + " is a black-screen frame, at second: " + str(float(duration) / pic_len * png_num) + " \n\n")
        if green_pic_sum / pic_num >= 0.2:
            file.write("!! Frame " + str(png_num) + " is a green-screen frame, at second: " + str(float(duration) / pic_len * png_num) + " \n\n")

        end_time = time.time()
        file.write("Per-pixel check for this frame took: " + str(end_time - start_time) + "\n\n")

    file.close()

【Efficiency】
Approach 1 (per-pixel check) has to walk every pixel of every frame, so downscale 1080x720 frames to around 360x640 before checking
Approach 2 (binary image) reduces each frame to black and white before measuring, which is clearly faster and is the recommended approach
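
If Approach 1 is still too slow after downscaling, the Python double loop can be replaced with a vectorized NumPy comparison; a sketch using the same exact-match reference colors as above (the color_ratio helper name is illustrative, and exact matching stays fragile under lossy compression):

import numpy as np

def color_ratio(image_arr, color):  # illustrative helper
    # fraction of pixels exactly matching `color`, without a Python-level loop
    matches = np.all(image_arr == np.array(color), axis=-1)
    return matches.mean()

# e.g. color_ratio(image_arr, [140, 143, 146]) >= 0.2 flags a gray frame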
