flask + ffmpeg push streaming service --- a real-time streaming media service

I have recently been working on a real-time face-swapping project. A face-swap algorithm based on 3D face reconstruction swaps the uploaded image onto every frame of a specified video to produce a face-swapped video, and that video has to be pushed to the user through a streaming media service. I therefore decided to build a real-time push-streaming server, with Flask receiving the POST requests and ffmpeg doing the actual push.

First, for installing Nginx and configuring the corresponding ffmpeg setup, you can refer to yuyuefan002's blog post:

ffmpeg + nginx + rtmp + web: building a live video streaming site

https://blog.csdn.net/sha1996118/article/details/79717471
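
For reference, here is a minimal sketch of the nginx-rtmp block that such a setup typically uses. It is not taken from the linked post; the 2018 listen port and the "live" application name are assumptions that simply mirror the rtmpUrl used in the code below, so adjust them to your own environment:

rtmp {
    server {
        # RTMP port that ffmpeg pushes to and players pull from; must match rtmpUrl in the code
        listen 2018;
        chunk_size 4096;

        # "live" is the application name in rtmp://host:2018/live/1
        application live {
            live on;      # accept live streams
            record off;   # do not record streams to disk
        }
    }
}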

Here is my Flask code:

# encoding: utf-8
from meinheld import server
from flask import Flask, request
import cv2
import queue
import os
import numpy as np
from threading import Thread
import threading
import json
import base64
import hashlib
import datetime, _thread
import subprocess as sp
import time
import logging
from logging.handlers import TimedRotatingFileHandler

app = Flask(__name__)


def setLog():
    # Write hourly-rotated log files (keep 72) under ./log/; the directory must already exist
    log_fmt = '%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s'
    formatter = logging.Formatter(log_fmt)
    fh = TimedRotatingFileHandler(
        filename="log/run_playVideo_server" + str(time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())) + ".log",
        when="H", interval=1,
        backupCount=72)
    fh.setFormatter(formatter)
    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger()
    log.addHandler(fh)


setLog()

# Thread lock to protect shared state across threads (currently unused)
# mutex = _thread.allocate_lock()
# Queue that buffers the frames waiting to be pushed
frame_queue = queue.Queue()
# RTMP push URL; the front end pulls the stream from this same address.
# The port is the RTMP port configured in nginx (2019 in the commented example, 2018 below).
# rtmpUrl = "rtmp://192.168.40.145:2019/live/1"
rtmpUrl = "rtmp://172.24.45.93:2018/live/1"

# ffmpeg -re -i   /data/xielx/PRNet_deploy/1573551487855473.mp4
# -vcodec libx264 -acodec aac -strict -2 -f flv
# rtmp://172.24.45.93:2018/live
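# (The commented ffmpeg command above pushes an existing mp4 file directly,
#  which is a handy way to test the nginx-rtmp setup before wiring up the pipe below.)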

# ffmpeg command used here: read raw BGR frames from stdin and push them as an FLV/RTMP stream
command = ['ffmpeg',
           '-y',
           '-f', 'rawvideo',
           '-vcodec', 'rawvideo',
           '-pix_fmt', 'bgr24',
           '-s', "{}x{}".format(640, 480),  # frame resolution, must match the frames written to stdin
           '-r', str(25.0),  # frame rate
           '-i', '-',  # read input from stdin (the pipe)
           '-c:v', 'libx264',
           '-pix_fmt', 'yuv420p',
           '-preset', 'ultrafast',
           '-f', 'flv',
           rtmpUrl]



class PushFrameLoop(threading.Thread):
    """Background thread that pipes frames from frame_queue into the ffmpeg process."""

    def __init__(self, _command):
        super(PushFrameLoop, self).__init__()
        self.command = _command
        # Start ffmpeg once and keep its stdin pipe open for the lifetime of the thread
        self.p = sp.Popen(self.command, stdin=sp.PIPE)

    def run(self):
        # Push loop: take frames out of the queue and write them to ffmpeg's stdin
        while True:
            # get() blocks until a frame is available, so no busy-waiting is needed
            frame = frame_queue.get()

            # Any per-frame processing would go here, before the frame is written to the pipe

            # Write the raw BGR bytes to ffmpeg (tobytes() replaces the deprecated tostring())
            self.p.stdin.write(frame.tobytes())


class FrameReadLoop(threading.Thread):
    """Background thread that reads frames from a cv2.VideoCapture into frame_queue."""

    def __init__(self, _cap):
        super(FrameReadLoop, self).__init__()
        self.cap = _cap

    def run(self):
        global Ret, Frame, Running

        while self.cap.isOpened():
            Ret, Frame = self.cap.read()
            if not Ret:
                Running = False
                break
            # Resize to the resolution declared in the ffmpeg command (-s 640x480)
            Frame = cv2.resize(Frame, (640, 480))
            logging.info(f"image shape is: {Frame.shape}")

            frame_queue.put(Frame)
            # Brief pause so the reader does not fill the queue much faster than ffmpeg drains it
            time.sleep(0.01)

        self.cap.release()
        logging.info("video stream ended")


camera_addr = "/data/test_video/"
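# (camera_addr above is only a base directory for local test videos; the request handler
#  below builds its own per-request path from videoId and the image hash.)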


@app.route('/ai/v1/PushImage', methods=['POST'])
def startPushIamge():
    try:
        start_time = time.time()
        resParm = request.data
        # Decode the request body to a string and parse it as JSON
        # (json.loads instead of the unsafe eval)
        resParm = str(resParm, encoding="utf-8")
        resParm = json.loads(resParm)

        requestId = resParm.get('requestId')
        # Service authentication
        token = resParm.get('token')
        if not token:
            res = {'code': 3, 'msg': 'token fail'}
            logging.error("code: 3 msg:  token fail ")
            return json.dumps(res)
        videoId = resParm.get("videoId")
        if videoId is None or videoId.strip() == '':
            res = {'code': 7, 'msg': 'videoId is null'}
            logging.error("code: 3 msg:  videoId is null")
            return json.dumps(res)

        # Decode the uploaded image from base64
        modelImg_base64 = resParm.get("inputImage")
        if not modelImg_base64:
            res = {'code': 4, 'msg': ' picture param invalid'}
            logging.error("code: 4  msg:  picture param invalid")
            return json.dumps(res)

        modelImg = base64.b64decode(modelImg_base64)
        recv_time = time.time()
        logging.info(f"recv image cost time:  {str(recv_time - start_time)}")
        modelImg_data = np.frombuffer(modelImg, np.uint8)  # frombuffer replaces the deprecated fromstring
        modelImg_data_1 = cv2.imdecode(modelImg_data, cv2.IMREAD_COLOR)
        modelImg_data_1 = cv2.cvtColor(modelImg_data_1, cv2.COLOR_BGR2RGB)

        videoName = os.path.basename(videoId)
        videoName = videoName.split(".")[0]
        # MD5 of the uploaded image; used to locate the pre-generated face-swap result
        refImgMd = hashlib.md5(modelImg_data_1.tobytes()).hexdigest()
        videoPath = '../img_video/' + videoName + "/"

        save_res_path = '../img_video/' + videoName + "/" + refImgMd + "/"

        global camera_addr
        # camera_addr = 'split_2min.mp4'
        # camera_addr_new = camera_addr + videoId
        camera_addr_new = save_res_path + videoName + "-" + refImgMd + ".mp4"

        cap = cv2.VideoCapture(camera_addr_new)
        Ret, Frame = cap.read()
        if not cap.isOpened() or not Ret:
            res = {'code': 7, 'msg': 'cap ret is False'}
            logging.error("code: 7 msg:  cap ret is False")
            return json.dumps(res)

        t_get_frame_from_stream = FrameReadLoop(cap)
        t_get_frame_from_stream.start()

        # Block until the whole video has been read into the queue before responding
        t_get_frame_from_stream.join()

        timeUsed = time.time() - start_time
        data = {'requestId': requestId, 'pushImageRes': "ffmpeg push image success", 'timeUsed': str(timeUsed)}
        res = {'code': 0, 'msg': 'success', 'data': data}
        logging.info(f"code:0  msg:success  landmark Detection cost Time is: {str(timeUsed)} ")
        return json.dumps(res)
    except Exception as ex:
        logging.exception(ex)
        res = {'code': 6, 'msg': 'request exception'}
        return json.dumps(res)



def run():
    # Standalone test entry: handle the work with two threads (one reads frames, one pushes them)

    # thread1 = Thread(target=Video, )
    # thread1.start()
    # thread2 = Thread(target=push_frame, )
    # thread2.start()

    # camera_addr = '/data/test_video/wbsys.mp4'
    camera_addr = '/data/test_video/split_2min.mp4'
    cap = cv2.VideoCapture(camera_addr)
    Ret, Frame = cap.read()

    t_get_frame_from_stream = FrameReadLoop(cap)
    t_get_frame_from_stream.start()

    pushFrameLoop = PushFrameLoop(command)
    pushFrameLoop.start()

    t_get_frame_from_stream.join()
    pushFrameLoop.join()


if __name__ == '__main__':
    # run()
    # logging.info('Starting the server...')

    # Start the push thread first, then serve the Flask app with meinheld on port 9665
    pushFrameLoop = PushFrameLoop(command)
    pushFrameLoop.start()

    server.listen(("0.0.0.0", 9665))
    server.run(app)

    pushFrameLoop.join()

    # app.run(host='0.0.0.0', port=18885, threaded=True)
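
Once the service is running you can exercise it end to end. The snippet below is a minimal test client sketch, not part of the original service: it assumes the server is reachable at 127.0.0.1:9665, that face.jpg is a hypothetical local image, that the token value is whatever your own authentication expects, and that a face-swap result video has already been generated for the given videoId under ../img_video/. While the request is being handled, the frames are pushed to rtmpUrl, so you can watch the stream with, for example, ffplay rtmp://172.24.45.93:2018/live/1.

# encoding: utf-8
# Minimal test client for the /ai/v1/PushImage endpoint (sketch only)
import base64
import json

import requests  # assumes the requests package is installed

# Hypothetical local face image to be swapped into the video
with open("face.jpg", "rb") as f:
    img_b64 = base64.b64encode(f.read()).decode("utf-8")

payload = {
    "requestId": "test-001",        # echoed back in the response
    "token": "your-token-here",     # whatever your auth layer expects
    "videoId": "split_2min.mp4",    # video whose face-swap result already exists
    "inputImage": img_b64,          # base64-encoded image
}

resp = requests.post("http://127.0.0.1:9665/ai/v1/PushImage",
                     data=json.dumps(payload))
print(resp.json())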

 
