1. First, set up Nginx as the RTMP server. Reference blog:
https://blog.csdn.net/lishimin1012/article/details/52130683. If you run into problems, a quick web search usually sorts them out.
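For reference, here is a minimal sketch of the rtmp block such an Nginx setup typically uses, matching the rtmp://<host>:1935/live/<key> URLs in the later steps. This is an assumption about the server configuration rather than something taken from the linked post, so adapt it to your own nginx.conf:

rtmp {
    server {
        listen 1935;              # port used in the push/pull URLs below
        chunk_size 4096;
        application live {        # the "live" part of rtmp://.../live/home
            live on;              # accept live streams
            record off;           # do not record incoming streams to disk
        }
    }
}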
2. Configure FFmpeg in Visual Studio:
https://blog.csdn.net/xuanwolanxue/article/details/72926878. The guide requires setting environment variables; remember to restart the computer for them to take effect.
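Note that the Python push script in step 4 also launches the ffmpeg command-line tool through a pipe, so ffmpeg.exe must be reachable on PATH. A quick sanity check from a fresh command prompt (assuming the FFmpeg bin directory was added to PATH as in the linked guide); the second command simply confirms that the libx264 encoder used below is available:

ffmpeg -version
ffmpeg -encoders | findstr 264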
3. Configure OpenCV in Visual Studio; I am using version 3.3.0 here.
https://blog.csdn.net/qq_17550379/article/details/78201442. This guide also has environment variables to set; again, restart the computer for them to take effect.
4. Implement the push stream in Python (install OpenCV's Python package with pip, e.g. pip install opencv-python). Code:
import subprocess as sp
import cv2

rtmpUrl = "rtmp://192.168.0.216:1935/live/home"
camera_path = 0
cap = cv2.VideoCapture(camera_path)

# Get video information (some webcams report 0 fps, so fall back to 25)
fps = int(cap.get(cv2.CAP_PROP_FPS)) or 25
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

# ffmpeg command: read raw BGR frames from stdin, encode with libx264, publish as FLV over RTMP
command = ['ffmpeg',
           '-y',
           '-f', 'rawvideo',
           '-vcodec', 'rawvideo',
           '-pix_fmt', 'bgr24',
           '-s', "{}x{}".format(width, height),
           '-r', str(fps),
           '-i', '-',
           '-c:v', 'libx264',
           '-pix_fmt', 'yuv420p',
           '-preset', 'ultrafast',
           '-f', 'flv',
           rtmpUrl]

# set up the pipe to ffmpeg
p = sp.Popen(command, stdin=sp.PIPE)

# read the webcam and feed frames into the pipe
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        print("Opening camera is failed")
        break
    # process frame
    # your code
    # write to pipe
    p.stdin.write(frame.tobytes())
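Once the script is running, the published stream can be checked quickly with ffplay, which ships with FFmpeg. The -fflags nobuffer option is the same low-latency flag the C++ code in step 6 sets, and it noticeably reduces player-side buffering:

ffplay -fflags nobuffer rtmp://192.168.0.216:1935/live/home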
5. Pull method one: pull the stream with Python or with the VLC player. There are plenty of examples of this online, but the latency of this method is far too high, roughly ten seconds, and I never managed to fix it. Later I found a C++ way of pulling the stream, with latency under one second. Here is the Python code first (a note on the latency follows it):
import cv2
import threading


class Producer(threading.Thread):
    """Pull an RTMP stream, display it, and save it to a file."""

    def __init__(self, rtmp_str):
        super(Producer, self).__init__()
        self.rtmp_str = rtmp_str
        # open the stream through cv2
        self.cap = cv2.VideoCapture(self.rtmp_str)
        # frames per second of the stream (fall back to 25 if the stream reports 0)
        self.fps = self.cap.get(cv2.CAP_PROP_FPS) or 25.0
        print(self.fps)
        # frame size of the stream
        self.width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        self.size = (self.width, self.height)
        print(self.size)
        # MPEG-4.2 codec for the saved file
        self.fourcc = cv2.VideoWriter_fourcc('M', 'P', '4', '2')
        # output video file
        self.outVideo = cv2.VideoWriter('saveDir1.avi', self.fourcc, self.fps, self.size)

    def run(self):
        print('in producer')
        ret, image = self.cap.read()
        while ret:
            self.outVideo.write(image)
            cv2.imshow('video', image)
            # wait roughly one frame interval; press 'q' to quit
            if cv2.waitKey(int(1000 / int(self.fps))) & 0xFF == ord('q'):
                break
            ret, image = self.cap.read()
        self.outVideo.release()
        self.cap.release()
        cv2.destroyAllWindows()


if __name__ == '__main__':
    print('run program')
    # rtmp_str = 'rtmp://live.hkstv.hk.lxdns.com/live/hks'    # tested: no longer works; try the two below
    # rtmp_str = 'rtmp://media3.scctv.net/live/scctv_800'     # CCTV
    # rtmp_str = 'rtmp://58.200.131.2:1935/livetv/hunantv'    # Hunan TV
    rtmp_str = "rtmp://192.168.8.99:1935/live/home"
    producer = Producer(rtmp_str)  # run the pull in its own thread
    producer.start()
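As for the latency: my guess is that most of the roughly ten-second delay comes from buffering inside cv2.VideoCapture's FFmpeg backend, which OpenCV does not let you disable the way the C++ code below does with the nobuffer flag. Here is a hedged sketch of two things that may be worth trying on the Python side, namely shrinking the capture buffer and discarding stale frames before displaying; I have not verified either against this exact setup:

import cv2

rtmp_str = "rtmp://192.168.8.99:1935/live/home"
cap = cv2.VideoCapture(rtmp_str)

# Ask the backend to keep at most one queued frame (not every backend honours this property).
cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)

while cap.isOpened():
    # grab() a couple of times to throw away frames that piled up in the buffer,
    # then decode only the newest one with retrieve().
    for _ in range(2):
        cap.grab()
    ret, frame = cap.retrieve()
    if not ret:
        break
    cv2.imshow('video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()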
6. Pull method two: use C++ to pull the stream; the latency is under one second. Exactly why the C++ version is this fast while the Python one above is so slow, I have not figured out (I spent close to a day tweaking the ffmpeg command with no improvement). I had only been working with this for two days and needed it quickly, so I did not dig deeper; if you know the reason, please tell me, many thanks.
// Test.cpp : This file contains the "main" function. Program execution begins and ends there.
//
#include <stdio.h>
#include <opencv2/core/core_c.h>        // OpenCV C API: IplImage, CvSize, cvCreateImageHeader, cvSetData
#include <opencv2/highgui/highgui_c.h>  // OpenCV C API: cvNamedWindow, cvShowImage, cvWaitKey
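// The library names below are an assumed Visual Studio setup (an FFmpeg dev build plus the
// opencv_world330 lib from OpenCV 3.3.0); adjust them to your own build, or add the libraries
// under Linker > Input instead of using pragmas.
#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avutil.lib")
#pragma comment(lib, "swscale.lib")
#pragma comment(lib, "opencv_world330.lib")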
extern "C" {
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libavutil/time.h"
#include "libswscale/swscale.h"
}
int main()
{
AVFormatContext* pFormatCtx;
int i, videoindex;
AVCodecContext* pCodecCtx;
AVCodec* pCodec;
AVFrame* pFrame, * pFrameRGB;
uint8_t* out_buffer;
AVPacket* packet;
//int y_size;
int ret, got_picture;
struct SwsContext* img_convert_ctx;
//input stream URL
// char filepath[] = "rtmp://219.216.87.170/vod/test.flv";
char filepath[] = "rtmp://192.168.0.216:1935/live/home";
int frame_cnt;
printf("wait for playing %s\n", filepath);
av_register_all();
avformat_network_init();
pFormatCtx = avformat_alloc_context();
printf("size %ld\tduration %ld\n", pFormatCtx->probesize,
pFormatCtx->max_analyze_duration);
pFormatCtx->probesize = 20000000;
pFormatCtx->max_analyze_duration = 2000;
// pFormatCtx->interrupt_callback.callback = timout_callback;
// pFormatCtx->interrupt_callback.opaque = pFormatCtx;
// pFormatCtx->flags |= AVFMT_FLAG_NONBLOCK;
AVDictionary* options = NULL;
av_dict_set(&options, "fflags", "nobuffer", 0);
// av_dict_set(&options, "max_delay", "100000", 0);
// av_dict_set(&options, "rtmp_transport", "tcp", 0);
// av_dict_set(&options, "stimeout", "6", 0);
printf("wating for opening file\n");
if (avformat_open_input(&pFormatCtx, filepath, NULL, &options) != 0) {
printf("Couldn't open input stream.\n");
return -1;
}
av_dict_free(&options);
printf("wating for finding stream\n");
if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
printf("Couldn't find stream information.\n");
return -1;
}
videoindex = -1;
for (i = 0; i < pFormatCtx->nb_streams; i++)
if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
videoindex = i;
break;
}
if (videoindex == -1) {
printf("Didn't find a video stream.\n");
return -1;
}
pCodecCtx = pFormatCtx->streams[videoindex]->codec;
pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
if (pCodec == NULL) {
printf("Codec not found.\n");
return -1;
}
if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
printf("Could not open codec.\n");
return -1;
}
/*
* Add code here to print the video information,
* taken from pFormatCtx, using fprintf()
*/
pFrame = av_frame_alloc();
pFrameRGB = av_frame_alloc();
out_buffer = (uint8_t*)av_malloc(
avpicture_get_size(AV_PIX_FMT_BGR24, pCodecCtx->width,
pCodecCtx->height));
avpicture_fill((AVPicture*)pFrameRGB, out_buffer, AV_PIX_FMT_BGR24,
pCodecCtx->width, pCodecCtx->height);
packet = (AVPacket*)av_malloc(sizeof(AVPacket));
//Output Info-----------------------------
printf("--------------- File Information ----------------\n");
av_dump_format(pFormatCtx, 0, filepath, 0);
printf("-------------------------------------------------\n");
img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
AV_PIX_FMT_BGR24, SWS_BICUBIC, NULL, NULL, NULL);
CvSize imagesize;
imagesize.width = pCodecCtx->width;
imagesize.height = pCodecCtx->height;
IplImage* image = cvCreateImageHeader(imagesize, IPL_DEPTH_8U, 3);
cvSetData(image, out_buffer, imagesize.width * 3);
cvNamedWindow(filepath, CV_WINDOW_AUTOSIZE);
frame_cnt = 0;
int num = 0;
while (av_read_frame(pFormatCtx, packet) >= 0) {
if (packet->stream_index == videoindex) {
/*
* Add code here to dump the H.264 bitstream,
* taken from packet, using fwrite()
*/
ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture,
packet);
if (ret < 0) {
printf("Decode Error.\n");
return -1;
}
if (got_picture) {
sws_scale(img_convert_ctx,
(const uint8_t* const*)pFrame->data, pFrame->linesize,
0, pCodecCtx->height, pFrameRGB->data,
pFrameRGB->linesize);
printf("Decoded frame index: %d\n", frame_cnt);
/*
* Add code here to dump the raw frame data,
* taken from the decoded frame (pFrameRGB here), using fwrite()
*/
frame_cnt++;
cvShowImage(filepath, image);
cvWaitKey(30);
}
}
av_free_packet(packet);
}
sws_freeContext(img_convert_ctx);
av_frame_free(&pFrameRGB);
av_frame_free(&pFrame);
avcodec_close(pCodecCtx);
avformat_close_input(&pFormatCtx);
return 0;
}
This code was written based on https://blog.csdn.net/u011622208/article/details/96122611. That post does not list the C++ header files; with FFmpeg and OpenCV configured, just add whatever headers the compiler reports as missing.
7. Finally, the whole setup was tested successfully. I put it together yesterday and forgot to take screenshots, so today I have simply written up yesterday's work.