YUV is raw, uncompressed video data. It takes up a great deal of memory and is not suitable for transmission over the network, so the video we actually send around is a compressed stream: the live streams we watch every day, for example, are encoded and compressed, then decoded and rendered after they reach the device. In this article we look at how YUV data is encoded into an H.264 stream. Since YUV is a raw stream, the encoder must be told the pixel format (pix_fmt) and the video width and height before it can encode anything.
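As a rough sanity check on how large raw YUV really is, the per-frame size of yuv420p follows directly from the width and height: the Y plane takes W*H bytes and the U and V planes take W/2 x H/2 bytes each, i.e. W*H*3/2 bytes per frame. A minimal C sketch using the 540x960 @ 25 fps numbers of the sample clip used below:
#include <stdio.h>

int main(void) {
    // yuv420p: Y plane = W*H bytes, U and V planes = (W/2)*(H/2) bytes each
    int w = 540, h = 960;
    int frame_size = w * h * 3 / 2;                 // 777600 bytes per frame
    double ten_seconds = frame_size * 25.0 * 10.0;  // 250 frames at 25 fps
    printf("one frame = %d bytes, 10 s of video ~ %.1f MB\n",
           frame_size, ten_seconds / (1024.0 * 1024.0));
    return 0;
}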
I. Encoding with the command line
ffmpeg -f rawvideo -pix_fmt yuv420p -s 540x960 -i bb1_yuv420p_540x960.yuv output.h264
-f rawvideo
treats the input as raw video data with no container
-pix_fmt yuv420p
specifies the pixel format of the raw YUV data
-s 540x960
specifies the video resolution as 540x960
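The output can be sanity-checked without writing any code; ffplay can render a raw H.264 elementary stream directly, and ffprobe prints its stream parameters:
ffplay output.h264
ffprobe output.h264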
II. Encoding in code
1. Initialize the output format context with the avformat_alloc_output_context2 function.
avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, h264Path.UTF8String);
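If the output path did not end in a recognizable extension, the muxer could also be named explicitly instead of letting FFmpeg guess it from ".h264". A minimal sketch using FFmpeg's raw "h264" muxer and the same variables as above:
// name the raw H.264 muxer explicitly instead of guessing from the file extension
ret = avformat_alloc_output_context2(&ofmt_ctx, NULL, "h264", h264Path.UTF8String);
if (ret < 0 || !ofmt_ctx) {
    printf("avformat_alloc_output_context2 fail \n");
}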
2. Find the H.264 encoder with the avcodec_find_encoder function (the sample code stores it in a variable named dec, even though it is an encoder).
dec = avcodec_find_encoder(AV_CODEC_ID_H264);
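When several H.264 encoders are compiled in, a specific one can also be requested by name. A minimal sketch, assuming the FFmpeg build includes libx264:
// ask for libx264 by name rather than by codec ID (assumes libx264 was compiled in)
dec = avcodec_find_encoder_by_name("libx264");
if (!dec) {
    printf("libx264 encoder not found \n");
}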
3. Allocate the encoder context with avcodec_alloc_context3.
dec_ctx = avcodec_alloc_context3(dec);
4. Set the encoder context parameters: width and height, frame rate, time base, pixel format, GOP size, bit rate, and so on.
dec_ctx->width = yuvW;
dec_ctx->height = yuvH;
dec_ctx->framerate = av_make_q(25, 1);
dec_ctx->time_base = av_make_q(1, 25);
dec_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
dec_ctx->gop_size = 10;
dec_ctx->bit_rate = 800000;
dec_ctx->max_b_frames = 1;
if (dec_ctx->codec_id == AV_CODEC_ID_H264) {
av_opt_set(dec_ctx->priv_data, "preset", "slow", 0);
}
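Rate control does not have to be a fixed bit rate. If the H.264 encoder in use is libx264, its private "crf" option switches it to constant-quality mode; a minimal sketch (the option strings are libx264-specific, and this is only an alternative to the bit_rate setting above):
// constant-quality mode for libx264: lower CRF means better quality and a larger file (typical range 18-28)
av_opt_set(dec_ctx->priv_data, "preset", "slow", 0);
av_opt_set(dec_ctx->priv_data, "crf", "23", 0);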
5. Create a new video stream with the avformat_new_stream function and copy the encoder parameters into it with avcodec_parameters_from_context.
AVStream *st = avformat_new_stream(ofmt_ctx, dec);
ret = avcodec_parameters_from_context(st->codecpar, dec_ctx);
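avformat_new_stream can return NULL, and it is also worth giving the stream a time base matching the encoder so the muxer knows how to interpret packet timestamps. A minimal sketch along the lines of the code above:
// guard against allocation failure and align the stream's time base with the encoder's
if (!st) {
    printf("avformat_new_stream fail \n");
} else {
    st->time_base = dec_ctx->time_base;
}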
6. Open the encoder with avcodec_open2.
avcodec_open2(dec_ctx, dec, NULL);
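Encoder options can also be handed to avcodec_open2 as an AVDictionary instead of being set on priv_data beforehand. A minimal sketch, assuming the encoder understands the "preset" option (as libx264 does):
// pass encoder options through an AVDictionary when opening the codec
AVDictionary *opts = NULL;
av_dict_set(&opts, "preset", "slow", 0);
ret = avcodec_open2(dec_ctx, dec, &opts);
av_dict_free(&opts);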
7. Open the output file with avio_open.
avio_open(&ofmt_ctx->pb, h264Path.UTF8String, AVIO_FLAG_WRITE);
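Strictly speaking, a file should only be opened when the muxer does not do its own I/O, which is the check FFmpeg's muxing examples perform. A minimal sketch of that guard (for the raw H.264 muxer the branch is always taken):
// some muxers handle I/O themselves; only open a file when AVFMT_NOFILE is not set
if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
    ret = avio_open(&ofmt_ctx->pb, h264Path.UTF8String, AVIO_FLAG_WRITE);
}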
8. Write the file header with avformat_write_header.
avformat_write_header(ofmt_ctx, NULL);
9. Read frames from the YUV file in a loop, encode them, and write the resulting packets to the output file with av_interleaved_write_frame.
while (feof(yuv_f)==0) {
size_t size = fread(yuv_buffer, 1, yuvW*yuvH*3/2, yuv_f);
if (size < yuvW*yuvH*3/2) {
break;
}
// copy the Y plane row by row (frame->linesize[0] may be padded wider than yuvW)
for (int i=0; i<yuvH; i++) {
memcpy(frame->data[0] + i*frame->linesize[0], yuv_buffer + yuvW*i, yuvW);
}
// copy the U plane, which starts at offset yuvW*yuvH in the buffer
for (int i=0; i<yuvH/2; i++) {
memcpy(frame->data[1] + (i*frame->linesize[1]), yuv_buffer+yuvH*yuvW + yuvW/2*i, yuvW/2);
}
// copy the V plane, which starts at offset yuvW*yuvH*5/4
for (int i=0; i<yuvH/2; i++) {
memcpy(frame->data[2] + (i*frame->linesize[2]), yuv_buffer+yuvH*yuvW*5/4 + yuvW/2*i, yuvW/2);
}
frame->pts = pts_i++;
ret = avcodec_send_frame(dec_ctx, frame);
if (ret<0) {
printf("avcodec_send_frame fail \n");
break;
}
while (1) {
ret = avcodec_receive_packet(dec_ctx, pkt);
if (ret==AVERROR(EAGAIN) || ret == AVERROR_EOF) {
break;
} else if (ret<0) {
printf("avcodec_receive_packet fail \n");
break;
}
ret = av_interleaved_write_frame(ofmt_ctx, pkt);
if (ret<0) {
printf("av_interleaved_write_frame fail \n");
break;
}
av_packet_unref(pkt);
}
}
ret = avcodec_send_frame(dec_ctx, NULL);
if (ret<0) {
printf("avcodec_send_frame fail \n");
goto __FAIL;
}
while (1) {
ret = avcodec_receive_packet(dec_ctx, pkt);
if (ret==AVERROR(EAGAIN) || ret == AVERROR_EOF) {
break;
} else if (ret<0) {
printf("avcodec_receive_packet fail \n");
break;
}
ret = av_interleaved_write_frame(ofmt_ctx, pkt);
if (ret<0) {
printf("av_interleaved_write_frame fail \n");
break;
}
av_packet_unref(pkt);
}
Note the second while loop after the main one. Because H.264 has I, B, and P frames, the encoder reorders and buffers frames internally, so when the input runs out there may still be frames inside the encoder that have not been output yet. Sending a NULL frame with avcodec_send_frame puts the encoder into flush mode, and the while loop drains the remaining packets.
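The normal path and the flush path can share one small helper, in the spirit of FFmpeg's encode_video example. A minimal sketch using the same dec_ctx, pkt and ofmt_ctx as above (the name encode_and_write is just for illustration):
// send one frame (or NULL to flush) and write every packet the encoder hands back
static int encode_and_write(AVCodecContext *enc_ctx, AVFrame *frame, AVPacket *pkt, AVFormatContext *ofmt_ctx)
{
    int ret = avcodec_send_frame(enc_ctx, frame);   // frame == NULL puts the encoder into flush mode
    if (ret < 0) return ret;
    while (ret >= 0) {
        ret = avcodec_receive_packet(enc_ctx, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) return 0; // needs more input / fully drained
        if (ret < 0) return ret;
        ret = av_interleaved_write_frame(ofmt_ctx, pkt);
        av_packet_unref(pkt);
        if (ret < 0) return ret;
    }
    return 0;
}
With such a helper the read loop would call encode_and_write(dec_ctx, frame, pkt, ofmt_ctx) once per frame and encode_and_write(dec_ctx, NULL, pkt, ofmt_ctx) once after the loop.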
10. Write the file trailer with av_write_trailer.
av_write_trailer(ofmt_ctx);
The complete code is as follows:
+ (void)convert
{
NSString *yuvPath = [[NSBundle mainBundle] pathForResource:@"bb1_yuv420p_540x960.yuv" ofType:nil];
NSString *h264Path = [NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES).firstObject stringByAppendingPathComponent:@"bb1.h264"];
NSLog(@"%@", h264Path);
int yuvW = 540;
int yuvH = 960;
int ret;
AVFormatContext *ofmt_ctx = NULL;
AVCodecContext *dec_ctx = NULL;
AVCodec *dec = NULL;
AVPacket *pkt = NULL;
AVFrame *frame = NULL;
uint8_t *yuv_buffer = NULL;
FILE *yuv_f = fopen(yuvPath.UTF8String, "rb+");
if (!yuv_f) {
printf("fopen fail \n");
return;
}
ret = avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, h264Path.UTF8String);
if (ret<0) {
printf("avformat_alloc_output_context2 fail \n");
goto __FAIL;
}
dec = avcodec_find_encoder(AV_CODEC_ID_H264);
if (!dec) {
printf("avcodec_find_encoder fail \n");
goto __FAIL;
}
dec_ctx = avcodec_alloc_context3(dec);
dec_ctx->width = yuvW;
dec_ctx->height = yuvH;
dec_ctx->framerate = av_make_q(25, 1);
dec_ctx->time_base = av_make_q(1, 25);
dec_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
dec_ctx->gop_size = 10;
dec_ctx->bit_rate = 800000;
dec_ctx->max_b_frames = 1;
if (dec_ctx->codec_id == AV_CODEC_ID_H264) {
av_opt_set(dec_ctx->priv_data, "preset", "slow", 0);
}
ret = avio_open(&ofmt_ctx->pb, h264Path.UTF8String, AVIO_FLAG_WRITE);
if (ret<0) {
printf("avio_open fail \n");
goto __FAIL;
}
ret = avcodec_open2(dec_ctx, dec, NULL);
if (ret<0) {
printf("avcodec_open2 fail \n");
goto __FAIL;
}
AVStream *st = avformat_new_stream(ofmt_ctx, dec);
ret = avcodec_parameters_from_context(st->codecpar, dec_ctx);
if (ret<0) {
printf("avcodec_parameters_from_context fail \n");
goto __FAIL;
}
ret = avformat_write_header(ofmt_ctx, NULL);
if (ret<0) {
printf("avformat_write_header fail \n");
goto __FAIL;
}
yuv_buffer = av_malloc(yuvW*yuvH*3/2);
frame = av_frame_alloc();
if (!frame) {
printf("av_frame_alloc fail \n");
goto __FAIL;
}
frame->width = dec_ctx->width;
frame->height = dec_ctx->height;
frame->format = dec_ctx->pix_fmt;
ret = av_frame_get_buffer(frame, 0);
if (ret<0) {
printf("av_frame_get_buffer fail \n");
goto __FAIL;
}
pkt = av_packet_alloc();
if (!pkt) {
printf("av_packet_alloc fail \n");
goto __FAIL;
}
int pts_i = 0;
while (feof(yuv_f)==0) {
size_t size = fread(yuv_buffer, 1, yuvW*yuvH*3/2, yuv_f);
if (size < yuvW*yuvH*3/2) {
break;
}
// copy the Y plane row by row (frame->linesize[0] may be padded wider than yuvW)
for (int i=0; i<yuvH; i++) {
memcpy(frame->data[0] + i*frame->linesize[0], yuv_buffer + yuvW*i, yuvW);
}
// copy the U plane, which starts at offset yuvW*yuvH in the buffer
for (int i=0; i<yuvH/2; i++) {
memcpy(frame->data[1] + (i*frame->linesize[1]), yuv_buffer+yuvH*yuvW + yuvW/2*i, yuvW/2);
}
// copy the V plane, which starts at offset yuvW*yuvH*5/4
for (int i=0; i<yuvH/2; i++) {
memcpy(frame->data[2] + (i*frame->linesize[2]), yuv_buffer+yuvH*yuvW*5/4 + yuvW/2*i, yuvW/2);
}
frame->pts = pts_i++;
ret = avcodec_send_frame(dec_ctx, frame);
if (ret<0) {
printf("avcodec_send_frame fail \n");
break;
}
while (1) {
ret = avcodec_receive_packet(dec_ctx, pkt);
if (ret==AVERROR(EAGAIN) || ret == AVERROR_EOF) {
break;
} else if (ret<0) {
printf("avcodec_receive_packet fail \n");
break;
}
ret = av_interleaved_write_frame(ofmt_ctx, pkt);
if (ret<0) {
printf("av_interleaved_write_frame fail \n");
break;
}
av_packet_unref(pkt);
}
}
ret = avcodec_send_frame(dec_ctx, NULL);
if (ret<0) {
printf("avcodec_send_frame fail \n");
goto __FAIL;
}
while (1) {
ret = avcodec_receive_packet(dec_ctx, pkt);
if (ret==AVERROR(EAGAIN) || ret == AVERROR_EOF) {
break;
} else if (ret<0) {
printf("avcodec_receive_packet fail \n");
break;
}
ret = av_interleaved_write_frame(ofmt_ctx, pkt);
if (ret<0) {
printf("av_interleaved_write_frame fail \n");
break;
}
av_packet_unref(pkt);
}
ret = av_write_trailer(ofmt_ctx);
if (ret<0) {
printf("av_write_trailer fail \n");
}
__FAIL:
if (ofmt_ctx && ofmt_ctx->pb) {
avio_close(ofmt_ctx->pb);
}
if (dec_ctx) {
avcodec_free_context(&dec_ctx);
}
if (yuv_buffer) {
av_free(yuv_buffer);
}
if (ofmt_ctx) {
avformat_free_context(ofmt_ctx);
}
if (frame) {
av_frame_free(&frame);
}
if (pkt) {
av_packet_free(&pkt);
}
if (yuv_f) {
fclose(yuv_f);
}
}
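To run the conversion, call the method from anywhere in the app and then pull bb1.h264 out of the sandbox Documents directory; ffprobe or ffplay should recognize it as a 540x960 H.264 stream. The class name below is only a placeholder for whichever class declares +convert:
// hypothetical call site; YUVConverter stands in for the real class that declares +convert
[YUVConverter convert];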