多路视频流方式

第一种,在推流角度做到多路视频流,一般用到ffmpeg工具、nginx等。ffmpeg 的命令示例如下:
1-四路命令(tcp的方式更加稳定): ffmpeg -rtsp_transport tcp -i "rtsp" -rtsp_transport tcp -i "rtsp" -rtsp_transport tcp -i "rtsp" -rtsp_transport tcp -i "rtsp" -filter_complex "[0:v]pad=iw*2:ih*2[a];[a][1:v]overlay=w[b];[b][2:v]overlay=0:h[c];[c][3:v]overlay=w:h" -vcodec libx264 -f flv rtmp://0.0.0.0:1935/service_stream 2-两路命令: ffmpeg -rtsp_transport tcp -i "rtsp" -rtsp_transport tcp -i "rtsp" -filter_complex "[1]scale=iw/2:ih/2[pip];[0][pip]overlay=main_w-overlay_w-10:main_h-overlay_h-10" -vcodec libx264 -f flv rtmp://127.0.0.1:1935/live/room

第二种,用opencv做到多路画面合并

            # Merge four video streams into one 2x2 mosaic frame (frame_sum):
            # each stream's frame is rotated by its own angle (rot_1..rot_4),
            # resized to (width, height), then tiled:
            #   top    = left-up  | right-up
            #   bottom = left-down| right-down
            # NOTE(review): the bare `except:` blocks swallow ALL errors; when a
            # read succeeded (ret* is True) but rotation/resize still failed,
            # the handler does nothing and the loop continues with the previous
            # iteration's frame* variables — TODO confirm this best-effort
            # behaviour is intended.
            while (1):
                # One frame per stream; ret* flags indicate read success.
                ret, frame = capture.read()
                ret0, frame_1 = capture_right.read()
                ret1, frame_2 = capture_left_down.read()
                ret2, frame_3 = capture_right_down.read()
                try:
                    # Top-left tile: rotate main stream around its centre by rot_1.
                    rows, cols = frame.shape[:2]
                    M = cv2.getRotationMatrix2D((cols / 2, rows / 2), rot_1, 1)
                    dst1 = cv2.warpAffine(frame, M, (cols, rows))
                    frameLeftUp = cv2.resize(dst1, (int(width), int(height)), interpolation=cv2.INTER_CUBIC)
                except:
                    # Read failed (e.g. stream/file ended): reopen and restart loop.
                    if not ret:
                        capture = cv2.VideoCapture(main_video)
                        ret, frame = capture.read()
                        continue
                try:
                    # Top-right tile (stream 1, angle rot_2).
                    rows, cols = frame_1.shape[:2]
                    M = cv2.getRotationMatrix2D((cols / 2, rows / 2), rot_2, 1)
                    dst2 = cv2.warpAffine(frame_1, M, (cols, rows))
                    frameRightUp = cv2.resize(dst2, (int(width), int(height)), interpolation=cv2.INTER_CUBIC)
                except:
                    if not ret0:
                        capture_right = cv2.VideoCapture(video_1)
                        ret0, frame_1 = capture_right.read()
                        continue
                try:
                    # Bottom-left tile (stream 2, angle rot_3).
                    rows, cols = frame_2.shape[:2]
                    M = cv2.getRotationMatrix2D((cols / 2, rows / 2), rot_3, 1)
                    dst3 = cv2.warpAffine(frame_2, M, (cols, rows))
                    frameLeftDown = cv2.resize(dst3, (int(width), int(height)), interpolation=cv2.INTER_CUBIC)
                except:
                    if not ret1:
                        capture_left_down = cv2.VideoCapture(video_2)
                        ret1, frame_2 = capture_left_down.read()
                        continue
                try:
                    # Bottom-right tile (stream 3, angle rot_4), then assemble the
                    # full mosaic: stack rows horizontally, then the two rows vertically.
                    rows, cols = frame_3.shape[:2]
                    M = cv2.getRotationMatrix2D((cols / 2, rows / 2), rot_4, 1)
                    dst4 = cv2.warpAffine(frame_3, M, (cols, rows))
                    frameRightDown = cv2.resize(dst4, (int(width), int(height)), interpolation=cv2.INTER_CUBIC)
                    frameUp = np.hstack((frameLeftUp, frameRightUp))
                    frameDown = np.hstack((frameLeftDown, frameRightDown))
                    frame_sum = np.vstack((frameUp, frameDown))
                except:
                    if not ret2:
                        capture_right_down = cv2.VideoCapture(video_3)
                        ret2, frame_3 = capture_right_down.read()
                        continue

第三种,用到一种算法,本质上也是opencv读取视频流的处理,与第二种不同在于不用画面合并,而是多线程读取视频流

class LoadStreams:  # multiple IP or RTSP cameras
    """Load frames from multiple video streams (IP/RTSP cameras or webcam).

    Each stream is read by its own daemon thread so a slow or laggy source
    does not block the others; iteration returns the most recent frame of
    every stream, letterboxed and stacked into a single inference batch.
    """

    def __init__(self, sources='streams.txt', img_size=640):
        """
        sources: path to a text file with one stream URL per line, or a single
                 stream URL / webcam index string ('0' selects the webcam).
        img_size: target letterbox size for inference.
        """
        self.mode = 'images'
        self.img_size = img_size

        if os.path.isfile(sources):
            with open(sources, 'r') as f:
                sources = [x.strip() for x in f.read().splitlines() if len(x.strip())]
        else:
            sources = [sources]

        n = len(sources)
        self.imgs = [None] * n  # latest frame per stream, written by reader threads
        self.sources = sources
        for i, s in enumerate(sources):
            # Start one daemon thread per stream to read frames continuously.
            print('%g/%g: %s... ' % (i + 1, n, s), end='')
            cap = cv2.VideoCapture(0 if s == '0' else s)
            assert cap.isOpened(), 'Failed to open %s' % s
            w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fps = cap.get(cv2.CAP_PROP_FPS) % 100  # some drivers report bogus FPS; clamp for display
            # Guarantee a valid first frame before the shape check below —
            # the original ignored the success flag, yielding an opaque crash
            # in letterbox() when the first read failed.
            ret, self.imgs[i] = cap.read()
            assert ret, 'Failed to read initial frame from %s' % s
            thread = Thread(target=self.update, args=(i, cap), daemon=True)
            print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
            thread.start()
        print('')  # newline

        # Check for common shapes: rectangular inference is only safe when all
        # streams letterbox to the same shape.
        s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0)  # inference shapes
        self.rect = np.unique(s, axis=0).shape[0] == 1  # rect inference if all shapes equal
        if not self.rect:
            print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')

    def update(self, index, cap):
        """Read frames from `cap` in a daemon thread, decoding every 4th frame.

        grab() is cheap and keeps the stream drained; retrieve() (the costly
        decode) runs only once per 4 grabs.  On a transient retrieve failure
        the previous frame is kept instead of overwriting with None, which
        would crash __next__.
        """
        n = 0
        while cap.isOpened():
            n += 1
            cap.grab()
            if n == 4:  # decode every 4th frame
                success, im = cap.retrieve()
                if success:  # keep the last good frame on decode failure
                    self.imgs[index] = im
                n = 0
            time.sleep(0.01)  # small wait to avoid a busy loop

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        """Return (sources, batched letterboxed images, raw frames, None)."""
        self.count += 1
        img0 = self.imgs.copy()  # snapshot so reader threads can't swap frames mid-batch
        if cv2.waitKey(1) == ord('q'):  # q to quit
            cv2.destroyAllWindows()
            raise StopIteration

        # Letterbox each frame to the inference size.
        img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]

        # Stack into one batch.
        img = np.stack(img, 0)

        # BGR -> RGB and HWC -> CHW, to bs x 3 x H x W.
        img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)
        img = np.ascontiguousarray(img)

        return self.sources, img, img0, None

    def __len__(self):
        return 0  # streams are effectively endless; 0 signals "unknown length"

你可能感兴趣的:(opencv,python,人工智能)