First, the motivation: I want the development board to pull a camera's network stream and display it.
This breaks down into two steps.
Step 1: play a video file locally with FFmpeg.
Step 2: have FFmpeg pull the camera's stream and play it.
Step 1 is working now; see the demo video on Bilibili.
Demo: video playback implemented in code
I will upload the code to Baidu Netdisk, including the ffmpeg 3.4.6 library files, the source code, and the Makefile.
Link: https://pan.baidu.com/s/1MGMeW0yAbKGjiv5XMVguqg
Extraction code: p8qv
I later found that this code has a memory leak, so I fixed it afterwards; see the follow-up article:
Pulling an RTSP stream and playing it
Here are the implementation steps.
Following the ATK (正点原子) tutorial, use buildroot-2019.02.6.tar.bz2 to build a basic rootfs, then enable ffmpeg and the other packages under Target packages, roughly as sketched below.
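A minimal sketch of the menuconfig path (the exact menu names may differ slightly between Buildroot versions):
# in the buildroot-2019.02.6 directory
make menuconfig
#   Target packages  --->
#       Audio and video applications  --->
#           [*] ffmpeg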
Our goal is to decode video in our own FFmpeg-based code, so we need FFmpeg's libraries and header files.
But after Buildroot builds ffmpeg on its own, I could not figure out where it puts the libraries and headers.
So I only let Buildroot download the ffmpeg source, and then configured and compiled it manually. For the FFmpeg build process, see my earlier article:
Setting up an FFmpeg development environment on Ubuntu 16.04
Since this is an embedded environment, the configure parameters obviously cannot be copied verbatim from the PC build, so I took the parameters Buildroot used for its own ffmpeg build and tweaked them slightly.
How do you get those parameters? Just run ffmpeg on the board: it prints the configure options it was built with. Pretty neat, right?
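The banner looks roughly like this (illustrative output; your exact versions and flags will differ):
# ffmpeg
ffmpeg version 3.4.6 ...
built with gcc ... (Buildroot 2019.02.6)
configuration: --enable-cross-compile --cross-prefix=... --sysroot=... --prefix=/usr ...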
These parameters all follow a common pattern; once you understand how ffmpeg is built on Ubuntu, they are easy to read. The parameters I changed are these:
--prefix: the path where the libraries and headers are installed; used by make install
--enable-ffplay: enable building ffplay, so an ffplay executable is produced at the end
--enable-postproc
--enable-swscale: the program uses these two libraries, so both must be enabled
So the modified ffmpeg configure command is as follows:
sudo ./configure \
--prefix="$HOME/alpha_build" \
--pkg-config-flags="--static" \
--extra-cflags="-I$HOME/alpha_build/include" \
--extra-ldflags="-L$HOME/alpha_build/lib" \
--enable-cross-compile \
--cross-prefix=/home/shengy/tool/buildroot-2019.02.6/output/host/bin/arm-linux-gnueabihf- \
--sysroot=/home/shengy/tool/buildroot-2019.02.6/output/host/arm-buildroot-linux-gnueabihf/sysroot \
--host-cc=/usr/bin/gcc --arch=arm --target-os=linux --disable-stripping \
--pkg-config=/home/shengy/tool/buildroot-2019.02.6/output/host/bin/pkg-config \
--disable-static --enable-shared --enable-avfilter --disable-version3 \
--enable-logging --enable-optimizations --disable-extra-warnings --enable-avdevice \
--enable-avcodec --enable-avformat --enable-network --disable-gray --enable-swscale-alpha \
--disable-small --enable-dct --enable-fft --enable-mdct --enable-rdft --disable-crystalhd \
--disable-dxva2 --enable-runtime-cpudetect --disable-hardcoded-tables --disable-mipsdsp \
--disable-mipsdspr2 --disable-msa --enable-hwaccels --disable-cuda --disable-cuvid \
--disable-nvenc --disable-avisynth --disable-frei0r --disable-libopencore-amrnb \
--disable-libopencore-amrwb --disable-libdc1394 --disable-libgsm --disable-libilbc \
--disable-libvo-amrwbenc --disable-symver --disable-doc --disable-gpl --disable-nonfree \
--enable-ffmpeg --enable-ffplay --disable-ffserver --disable-avresample --enable-ffprobe \
--enable-postproc --enable-swscale --enable-indevs --disable-alsa --enable-outdevs \
--enable-pthreads --disable-zlib --disable-bzlib --disable-libfdk-aac --disable-libcdio \
--disable-gnutls --disable-openssl --disable-libdrm --disable-libopenh264 --disable-vaapi \
--disable-vdpau --disable-mmal --disable-omx --disable-omx-rpi --disable-libopencv \
--disable-libopus --disable-libvpx --disable-libass --disable-libbluray --disable-librtmp --disable-libmp3lame \
--disable-libmodplug --disable-libspeex --disable-libtheora --disable-libwavpack --disable-iconv \
--disable-libfreetype --disable-fontconfig --disable-libopenjpeg --disable-libx264 --disable-libx265 \
--disable-x86asm --disable-mmx --disable-sse --disable-sse2 --disable-sse3 --disable-ssse3 --disable-sse4 \
--disable-sse42 --disable-avx --disable-avx2 --enable-armv6 --enable-vfp --enable-neon --disable-altivec \
--extra-libs=-latomic --enable-pic --cpu=cortex-a7
Running this command completes the configuration; then build ffmpeg:
sudo make -j16
make install   # after make install, the libraries and headers are placed in the --prefix path
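A quick sanity check that the install landed where expected (paths follow the --prefix above; the exact file list depends on the enabled options):
ls $HOME/alpha_build/lib      # libavcodec.so  libavformat.so  libavutil.so ...
ls $HOME/alpha_build/include  # libavcodec/  libavformat/  libavutil/ ...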
The code is pieced together from example programs found online. The FFmpeg version here is the 3.4.6 source that Buildroot downloaded.
The main steps are decoding and conversion. The decoded frame data is YUV420P, and the video resolution does not match the screen, so a conversion step is needed. Ideally the video resolution would equal the screen size; that causes fewer problems.
Create a file test_002_copy.c with the following code:
/*
* Copyright (c) 2015 Ludmila Glinskih
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/**
* H264 codec test.
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/fb.h>
#include "libavutil/adler32.h"
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libavutil/imgutils.h"
#include "libavfilter/avfilter.h"
#include "libavutil/avutil.h"
#include "libavutil/pixfmt.h"
#include "libavdevice/avdevice.h"
#include "libswscale/swscale.h"
#include "libswresample/swresample.h"
int fbfd = 0;
static unsigned int* fbp = NULL;
struct fb_var_screeninfo vinfo;
struct fb_fix_screeninfo finfo;
int scrWid = 0;
int scrHeg = 0;
int open_fb()
{
unsigned int screen_size;
/* open the framebuffer device */
if (0 > (fbfd = open("/dev/fb0", O_RDWR))) {
perror("open error");
exit(EXIT_FAILURE);
}
/* query the variable and fixed screen parameters */
ioctl(fbfd, FBIOGET_VSCREENINFO, &vinfo);
ioctl(fbfd, FBIOGET_FSCREENINFO, &finfo);
screen_size = finfo.line_length * vinfo.yres;
scrWid = vinfo.xres;
scrHeg = vinfo.yres;
/* map the display buffer into the process address space */
fbp = mmap(NULL, screen_size, PROT_WRITE, MAP_SHARED, fbfd, 0);
if (MAP_FAILED == (void *)fbp) {
perror("mmap error");
close(fbfd);
exit(EXIT_FAILURE);
}
printf("scrWid:%d scrHeg:%d\n", scrWid, scrHeg);
return 0;
}
void close_fb(void)
{
// unmap the buffer and close the framebuffer device
munmap(fbp, finfo.smem_len);
close(fbfd);
}
/* converts 0xAARRGGBB to 0x00BBGGRR (swaps R and B, drops alpha);
note it is not actually used below -- the pixel is written out unchanged */
#define argb8888_to_rgba888(color) ({ \
unsigned int temp = (color); \
((temp & 0xff0000UL) >> 16) | \
((temp & 0xff00UL) >> 0) | \
((temp & 0xffUL) << 16); \
})
/********************************************************************
* Function: lcd_draw_point
* Purpose: plot one pixel
* Inputs: x, y, color
* Returns: none
********************************************************************/
static void lcd_draw_point(unsigned int x, unsigned int y, unsigned int color)
{
/* write the 32-bit color value straight into the framebuffer */
fbp[y * scrWid + x] = color;
}
void draw_point(int x, int y, uint8_t *color)
{
lcd_draw_point(x, y, *(uint32_t *)color);
}
void clr_scr(int w, int h)
{
static int cnt = 0;
printf("clr scr:%d\n", cnt);
cnt++;
uint8_t color[4] = {0xff, 0xff, 0xff, 0x00}; /* white, BGRA byte order */
for(int i = 0; i < h; i++)
for(int j = 0; j < w; j++)
draw_point(j, i, color);
}
int32_t VideoConvert(
const AVFrame *pInFrame, // input frame
enum AVPixelFormat eOutFormat, // output pixel format
int32_t nOutWidth, // output width
int32_t nOutHeight, // output height
AVFrame **ppOutFrame) // output frame
{
struct SwsContext *pSwsCtx;
AVFrame *pOutFrame = NULL;
// create the converter: choose the scaling algorithm, no filter effects
pSwsCtx = sws_getContext(pInFrame->width, pInFrame->height, (enum AVPixelFormat)pInFrame->format,
nOutWidth, nOutHeight, eOutFormat,
SWS_BICUBIC, NULL, NULL, NULL);
if (pSwsCtx == NULL)
{
printf(" [ERROR] fail to sws_getContext()\n" );
return -1;
}
// create the output frame and allocate its pixel buffer
uint8_t *data[4] = {NULL};
int linesize[4] = {0};
int res = av_image_alloc(data, linesize, nOutWidth, nOutHeight, eOutFormat, 1);
if (res < 0)
{
printf(" [ERROR] fail to av_image_alloc(), res=%d\n" , res);
sws_freeContext(pSwsCtx);
return -2;
}
pOutFrame = av_frame_alloc();
pOutFrame->format = eOutFormat;
pOutFrame->width = nOutWidth;
pOutFrame->height = nOutHeight;
pOutFrame->data[0] = data[0];
pOutFrame->data[1] = data[1];
pOutFrame->data[2] = data[2];
pOutFrame->data[3] = data[3];
pOutFrame->linesize[0] = linesize[0];
pOutFrame->linesize[1] = linesize[1];
pOutFrame->linesize[2] = linesize[2];
pOutFrame->linesize[3] = linesize[3];
// perform the actual format conversion
res = sws_scale(pSwsCtx,
(const uint8_t *const *)(pInFrame->data),
pInFrame->linesize,
0,
pOutFrame->height,
pOutFrame->data,
pOutFrame->linesize);
if (res < 0)
{
printf(" [ERROR] fail to sws_scale(), res=%d\n" , res);
sws_freeContext(pSwsCtx);
av_frame_free(&pOutFrame);
return -3;
}
(*ppOutFrame) = pOutFrame;
sws_freeContext(pSwsCtx); // release the converter
return 0;
}
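/* Note: the frame returned in ppOutFrame owns pixel data allocated by
av_image_alloc(), and av_frame_free() alone does not release that buffer.
This is the memory leak mentioned at the top of this post: the caller would
need av_freep(&frame->data[0]) before av_frame_free(&frame). See the
follow-up article for the fixed version. */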
static int video_decode_example(const char *input_filename)
{
AVCodec *codec = NULL;
AVCodecContext *ctx= NULL;
AVCodecParameters *origin_par = NULL;
AVFrame *fr = NULL;
AVFrame *outfrarme = NULL;
uint8_t *byte_buffer = NULL;
AVPacket pkt;
AVFormatContext *fmt_ctx = NULL;
int number_of_written_bytes;
int video_stream;
int got_frame = 0;
int byte_buffer_size;
int i = 0;
int result;
int end_of_stream = 0;
result = avformat_open_input(&fmt_ctx, input_filename, NULL, NULL);
if (result < 0) {
av_log(NULL, AV_LOG_ERROR, "Can't open file, res:%d\n", result);
return result;
}
result = avformat_find_stream_info(fmt_ctx, NULL);
if (result < 0) {
av_log(NULL, AV_LOG_ERROR, "Can't get stream info\n");
return result;
}
video_stream = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
if (video_stream < 0) {
av_log(NULL, AV_LOG_ERROR, "Can't find video stream in input file\n");
return -1;
}
origin_par = fmt_ctx->streams[video_stream]->codecpar;
codec = avcodec_find_decoder(origin_par->codec_id);
if (!codec) {
av_log(NULL, AV_LOG_ERROR, "Can't find decoder\n");
return -1;
}
ctx = avcodec_alloc_context3(codec);
if (!ctx) {
av_log(NULL, AV_LOG_ERROR, "Can't allocate decoder context\n");
return AVERROR(ENOMEM);
}
result = avcodec_parameters_to_context(ctx, origin_par);
if (result) {
av_log(NULL, AV_LOG_ERROR, "Can't copy decoder context\n");
return result;
}
result = avcodec_open2(ctx, codec, NULL);
if (result < 0) {
av_log(ctx, AV_LOG_ERROR, "Can't open decoder\n");
return result;
}
fr = av_frame_alloc();
if (!fr) {
av_log(NULL, AV_LOG_ERROR, "Can't allocate frame\n");
return AVERROR(ENOMEM);
}
printf("#tb %d: %d/%d\n", video_stream, fmt_ctx->streams[video_stream]->time_base.num,
fmt_ctx->streams[video_stream]->time_base.den);
i = 0;
av_init_packet(&pkt);
do {
if (!end_of_stream)
if (av_read_frame(fmt_ctx, &pkt) < 0)
end_of_stream = 1;
if (end_of_stream) {
pkt.data = NULL;
pkt.size = 0;
}
if (pkt.stream_index == video_stream || end_of_stream) {
got_frame = 0;
if (pkt.pts == AV_NOPTS_VALUE)
pkt.pts = pkt.dts = i;
result = avcodec_decode_video2(ctx, fr, &got_frame, &pkt);
if (result < 0) {
av_log(NULL, AV_LOG_ERROR, "Error decoding frame\n");
return result;
}
if (got_frame) {
int out_w = 800; /* hard-coded to the 800x480 panel; ideally use scrWid/scrHeg */
int out_h = 480;
/* only draw if the conversion succeeded; otherwise outfrarme stays NULL */
if (VideoConvert(fr, AV_PIX_FMT_BGRA, out_w, out_h, &outfrarme) == 0) {
for(int h = 0; h < out_h; h++)
for(int w = 0; w < out_w; w++)
{
/* assumes linesize[0] == out_w * 4, which holds because av_image_alloc() used align=1 */
draw_point(w, h, outfrarme->data[0] + (h * out_w * 4 + w * 4));
}
printf("draw one pic\n");
}
}
av_frame_free(&outfrarme);
av_packet_unref(&pkt);
av_init_packet(&pkt);
}
i++;
} while (!end_of_stream || got_frame);
av_packet_unref(&pkt);
av_frame_free(&fr);
avcodec_close(ctx);
avformat_close_input(&fmt_ctx);
avcodec_free_context(&ctx);
av_freep(&byte_buffer);
return 0;
}
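/* Note: avcodec_decode_video2(), av_init_packet() and av_register_all() are
the FFmpeg 3.4-era APIs matching the Buildroot source used here; FFmpeg 4.x
and later deprecate them in favor of avcodec_send_packet() /
avcodec_receive_frame(), and registration is no longer required. */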
int main(int argc, char **argv)
{
if (argc < 2)
{
av_log(NULL, AV_LOG_ERROR, "Incorrect input\n");
return 1;
}
avcodec_register_all();
#if CONFIG_AVDEVICE
avdevice_register_all();
#endif
avfilter_register_all();
av_register_all();
open_fb();
clr_scr(scrWid, scrHeg);
usleep(1000 * 1000 * 1);
if (video_decode_example(argv[1]) != 0)
return 1;
close_fb();
return 0;
}
The Makefile contents are as follows:
FFMPEG=/home/shengy/alpha_build/
CC=arm-linux-gnueabihf-gcc
CFLAGS=-g -I$(FFMPEG)/include
LDFLAGS = -L$(FFMPEG)/lib/ -lswresample -lavformat -lavdevice -lavcodec -lavutil -lswscale -lavfilter -lm
TARGETS=test_002_copy
all:$(TARGETS)
test_002_copy:test_002_copy.c
$(CC) $(CFLAGS) -o $@ $^ $(LDFLAGS) -std=c99
clean:
rm -rf $(TARGETS)
After compiling, run ./test_002_copy <video file>.mp4
and the video plays.
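One more note: since the configure command uses --disable-static --enable-shared, the binary links FFmpeg as shared libraries, so the .so files from the --prefix path must also exist on the board. A minimal sketch, assuming they were copied to a directory of your choosing (the path below is hypothetical):
# copy the libs from $HOME/alpha_build/lib into the board's rootfs first
export LD_LIBRARY_PATH=/opt/ffmpeg/lib:$LD_LIBRARY_PATH   # hypothetical location
./test_002_copy test.mp4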