This article was adapted from others' articles and revised through hands-on practice.
Header file:
//STMPEG.h
#pragma once
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef __cplusplus
extern "C" {
#endif
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#ifdef __cplusplus
}
#endif
enum MPEG_TYPE{
VCD_PAL,
VCD_NTSC,
SVCD_PAL,
SVCD_NTSC,
DVD_PAL,
DVD_NTSC,
CUSTOM_MPEG1,
CUSTOM_MPEG2
};
class CSTMPEG
{
public:
CSTMPEG();
~CSTMPEG(void);
//Open a file for writing the MPEG stream
bool OpenMPEG(const char* strfile);
//Write one frame of image data to the MPEG file
int AddFrame(int width, int height, int bpp, uint8_t* pRGBBuffer);
//Release all resources
void CloseMPEG();
//Set the MPEG type
void SetMPEGFormat(MPEG_TYPE type);
//Set the picture width
void SetMPEGWidth(int width);
//Set the picture height
void SetMPEGHeight(int height);
//Set the frame rate (fps)
void SetFrameRate(float rate);
//Set the video bit rate (kbits/sec), typical values: 3000 4000 6000 8000
void SetMPEGVideoBitRate(float rate);
private:
void InitMPEGData(MPEG_TYPE type);
//Flip the image vertically (DIB buffers are bottom-up)
void RGBBuffer2RGBFrame(AVFrame *pRGBFrame, int width, int height, int bpp, uint8_t* pRGBBuffer);
AVFrame *alloc_picture(int pix_fmt, int width, int height);
bool open_video(AVFormatContext *pFormatContext, AVStream *pVideoStream);
AVStream *add_video_stream(AVFormatContext *pFormatContext, int codec_id);
bool write_video_frame(AVFormatContext *pFormatcontext, AVStream *pVideoStream);
void close_video(AVFormatContext *pFormatcontext, AVStream *pVideoStream);
private:
MPEG_TYPE m_MPEGType;
int m_nWidth, m_nHeight;
float m_fFrameRate;
float m_fBitRate;
int m_nFrame;
int srcWidth;
int srcHeight;
AVFrame *m_pRGBFrame; //RGB frame data
AVFrame *m_pYUVFrame; //YUV frame data
uint8_t* m_pOutBuf; //one encoded frame is placed in this buffer before being written to the file
const int m_nOutBufSize; //size of the encode buffer
//format context and streams used to write the output file
AVOutputFormat *m_pOutputFormat;
AVFormatContext *m_pFormatContext;
AVStream *m_pVideoStream;
AVStream *m_pAudioStream;
};
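Before the implementation file, here is a minimal sketch of how the class is meant to be driven. It is only an illustration under my own assumptions: the frames are synthetic 24-bpp bottom-up BGR buffers laid out like a Windows DIB (which is what AddFrame expects), and the file name test.mpg and the helper write_test_mpg are made up.
//example_driver.cpp -- hypothetical driver for CSTMPEG, for illustration only
#include "stmpeg.h"
#include <vector>
int write_test_mpg()
{
const int w = 352, h = 288, bpp = 24; //must match the VCD_PAL picture size
const int stride = (w * bpp / 8 + 3) / 4 * 4; //DWORD-aligned row size, as in a DIB
std::vector<uint8_t> frame(stride * h, 0); //one bottom-up BGR frame
CSTMPEG mpeg;
mpeg.SetMPEGFormat(VCD_PAL); //352x288 @ 25 fps (also the constructor default)
if (!mpeg.OpenMPEG("test.mpg"))
return -1;
for (int i = 0; i < 100; i++) { //100 synthetic frames, about 4 seconds
for (int y = 0; y < h; y++) //moving gray ramp so the output is visible
memset(&frame[y * stride], (y + i) & 0xFF, w * bpp / 8);
if (mpeg.AddFrame(w, h, bpp, &frame[0]) != 0)
break;
}
mpeg.CloseMPEG(); //writes the trailer and releases everything
return 0;
}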
Implementation file:
//STMPEG.cpp
#include "stmpeg.h"
//Bytes per row of a DIB, rounded up to a DWORD (4-byte) boundary
#define LineWidthBytes(biWidth, biBitCount) (((biWidth) * (biBitCount) + 31) / 32 * 4)
CSTMPEG::CSTMPEG(): m_nOutBufSize(200000)
{
m_MPEGType = VCD_PAL;
m_nWidth = 352;
m_nHeight = 288;
m_fFrameRate = 25;
m_fBitRate = 800000;
m_nFrame = -1;
m_pRGBFrame = NULL;
m_pYUVFrame = NULL;
m_pOutBuf = NULL;
m_pOutputFormat = NULL;
m_pFormatContext= NULL;
m_pVideoStream = NULL;
m_pAudioStream = NULL;
}
CSTMPEG::~CSTMPEG(void)
{
}
void CSTMPEG::InitMPEGData(MPEG_TYPE type)
{
switch(type) {
case VCD_PAL:
{
m_nWidth = 352;
m_nHeight = 288;
m_fFrameRate = 25;
}
break;
case VCD_NTSC:
{
m_nWidth = 352;
m_nHeight = 240;
m_fFrameRate = 29.97;
}
break;
case SVCD_PAL:
{
m_nWidth = 480;
m_nHeight = 576;
m_fFrameRate = 25;
}
break;
case SVCD_NTSC:
{
m_nWidth = 480;
m_nHeight = 480;
m_fFrameRate = 23.976;
}
break;
case DVD_PAL:
{
m_nWidth = 720;
m_nHeight = 576;
m_fFrameRate = 25;
}
break;
case DVD_NTSC:
{
m_nWidth = 720;
m_nHeight = 480;
m_fFrameRate = 29.97;
}
break;
default:
break;
}
}
void CSTMPEG::SetMPEGFormat(MPEG_TYPE type)
{
if (type != m_MPEGType)
{
m_MPEGType = type;
InitMPEGData(type);
}
}
void CSTMPEG::SetMPEGWidth(int width)
{
if (m_MPEGType == CUSTOM_MPEG1 || m_MPEGType == CUSTOM_MPEG2 )
m_nWidth = width;
}
void CSTMPEG::SetMPEGHeight(int height)
{
if (m_MPEGType == CUSTOM_MPEG1 || m_MPEGType == CUSTOM_MPEG2 )
m_nHeight = height;
}
void CSTMPEG::SetFrameRate(float rate)
{
if (m_MPEGType == CUSTOM_MPEG1 || m_MPEGType == CUSTOM_MPEG2 )
m_fFrameRate = rate;
}
void CSTMPEG::SetMPEGVideoBitRate(float rate)
{
m_fBitRate = rate * 1000;
}
void CSTMPEG::RGBBuffer2RGBFrame(AVFrame *pRGBFrame, int width, int height, int bpp, uint8_t* pRGBBuffer)
{
//ASSERT( pRGBFrame && pRGBBuffer);
int linebytes = LineWidthBytes(width, bpp);
int nPixels = bpp / 8;
int x, y;
for(y=0;y<height;y++)
{
for(x=0;x<width;x++)
{
pRGBFrame->data[0][y * pRGBFrame->linesize[0] + x * 3 ] =
pRGBBuffer[ (height - y - 1) * linebytes + x * nPixels ]; //B
pRGBFrame->data[0][y * pRGBFrame->linesize[0] + x * 3 + 1 ] =
pRGBBuffer[ (height - y - 1) * linebytes + x * nPixels + 1]; //G
pRGBFrame->data[0][y * pRGBFrame->linesize[0] + x * 3 + 2 ] =
pRGBBuffer[ (height - y - 1) * linebytes + x * nPixels + 2]; //R
}
}
}
AVFrame *CSTMPEG::alloc_picture(int pix_fmt, int width, int height)
{
AVFrame* picture = avcodec_alloc_frame();
if (!picture)
return NULL;
int size = avpicture_get_size(pix_fmt, width, height);
uint8_t *picture_buf = (uint8_t *)malloc(size);
if (!picture_buf) {
av_free(picture);
return NULL;
}
avpicture_fill((AVPicture *)picture, picture_buf,
pix_fmt, width, height);
return picture;
}
bool CSTMPEG::OpenMPEG(const char* strfile)
{
av_register_all();
switch(m_MPEGType)
{
case VCD_PAL:
case VCD_NTSC:
m_pOutputFormat = guess_format("vcd", NULL, NULL);
break;
case SVCD_PAL:
case SVCD_NTSC:
m_pOutputFormat = guess_format("svcd", NULL, NULL);
break;
case DVD_PAL:
case DVD_NTSC:
m_pOutputFormat = guess_format("dvd", NULL, NULL);
break;
default:
m_pOutputFormat = guess_format(NULL, strfile, NULL);
break;
}
//Initialize the output file format; fall back to plain MPEG if nothing matched
if ( NULL == m_pOutputFormat)
m_pOutputFormat = guess_format("mpeg", NULL, NULL);
if ( NULL == m_pOutputFormat)
return false;
//Allocate and initialize the format context
m_pFormatContext = av_alloc_format_context();
if ( NULL == m_pFormatContext )
return false;
m_pFormatContext->oformat = m_pOutputFormat;
sprintf_s(m_pFormatContext->filename, sizeof(m_pFormatContext->filename), "%s", strfile);
//Create the video stream and initialize its codec context
if (m_pOutputFormat->video_codec != CODEC_ID_NONE)
m_pVideoStream = add_video_stream(m_pFormatContext, m_pOutputFormat->video_codec);
//Create the audio stream and initialize its codec context (not used here)
// if (m_pOutputFormat->audio_codec != CODEC_ID_NONE)
// m_pAudioStream = add_audio_stream(m_pFormatContext, m_pOutputFormat->audio_codec);
if (av_set_parameters(m_pFormatContext, NULL) < 0)
return false;
//Open the codec and allocate the frame buffers
if (m_pVideoStream){
if( !open_video(m_pFormatContext, m_pVideoStream))
return false;
}
// if (m_pAudioStream)
// open_audio(m_pFormatContext, m_pAudioStream);
if ( !(m_pOutputFormat->flags & AVFMT_NOFILE) ) {
if (url_fopen(&m_pFormatContext->pb, strfile, URL_WRONLY) < 0) {
return false;
}
}
//Write the file header
av_write_header(m_pFormatContext);
return true;
}
AVStream* CSTMPEG::add_video_stream(AVFormatContext *pFormatContext, int codec_id)
{
//ASSERT(pFormatContext);
AVStream *pStream = av_new_stream(pFormatContext, 0);
if ( NULL == pStream )
return NULL;
AVCodecContext *pCodecContext = pStream->codec;
pCodecContext->codec_id = (CodecID)codec_id;
pCodecContext->codec_type = CODEC_TYPE_VIDEO;
pCodecContext->bit_rate = 1150000;//800000;//m_fBitRate;
pCodecContext->width = m_nWidth;
pCodecContext->height = m_nHeight;
pCodecContext->time_base.den = 25;//m_fFrameRate;
pCodecContext->time_base.num = 1;
pCodecContext->gop_size = 18;
pCodecContext->pix_fmt = PIX_FMT_YUV420P;
if (pCodecContext->codec_id == CODEC_ID_MPEG2VIDEO)
pCodecContext->max_b_frames = 2;
if (pCodecContext->codec_id == CODEC_ID_MPEG1VIDEO)
pCodecContext->mb_decision=2;
if(!strcmp(pFormatContext->oformat->name, "mp4") || !strcmp(pFormatContext->oformat->name, "mov") ||
!strcmp(pFormatContext->oformat->name, "3gp"))
pCodecContext->flags |= CODEC_FLAG_GLOBAL_HEADER;
return pStream;
}
bool CSTMPEG::open_video(AVFormatContext *pFormatContext, AVStream *pVideoStream)
{
AVCodec *pCodec = NULL;
AVCodecContext *pCodecContext = NULL;
pCodecContext = pVideoStream->codec;
// find the video encoder
pCodec = avcodec_find_encoder(pCodecContext->codec_id);
if (NULL == pCodec)
return false;
// open the codec
if (avcodec_open(pCodecContext, pCodec) < 0) return false;
m_pYUVFrame = alloc_picture(pCodecContext->pix_fmt, pCodecContext->width, pCodecContext->height);
if (!m_pYUVFrame) return false;
m_pRGBFrame = alloc_picture(PIX_FMT_BGR24, pCodecContext->width, pCodecContext->height);
if (!m_pRGBFrame) return false;
m_pOutBuf = (uint8_t*)malloc(m_nOutBufSize);
if (!m_pOutBuf) return false;
return true;
}
int CSTMPEG::AddFrame(int width, int height, int bpp, uint8_t* pRGBBuffer)
{
//The RGB frame was allocated at the codec size, so the input must match it
if (width != m_nWidth || height != m_nHeight || NULL == pRGBBuffer)
return -1;
srcWidth = width;
srcHeight = height;
RGBBuffer2RGBFrame(m_pRGBFrame, width, height, bpp, pRGBBuffer);
if ( false == write_video_frame(m_pFormatContext, m_pVideoStream) ) {
return -1;
}
m_nFrame++;
return 0;
}
bool CSTMPEG::write_video_frame(AVFormatContext *pFormatcontext, AVStream *pVideoStream)
{
int ret;
AVCodecContext *pCodecContext = pVideoStream->codec;
//Create a software-scale context to convert BGR24 into the codec's pixel format
SwsContext *ctx = sws_getContext(pCodecContext->width,
pCodecContext->height,
PIX_FMT_BGR24,
pCodecContext->width,
pCodecContext->height,
pCodecContext->pix_fmt,
SWS_BICUBIC, NULL, NULL, NULL);
if(ctx == NULL)
{
printf("Cannot get resampling context\n");
return false;
}
//The original article used img_convert here; it has been removed from newer ffmpeg releases, so sws_scale is used instead.
sws_scale(ctx, m_pRGBFrame->data, m_pRGBFrame->linesize, 0, pCodecContext->height, m_pYUVFrame->data, m_pYUVFrame->linesize);
sws_freeContext(ctx); //the context is created on every call, so release it here to avoid a leak
int out_size = avcodec_encode_video(pCodecContext, m_pOutBuf, m_nOutBufSize, m_pYUVFrame);
if (out_size > 0){
AVPacket pkt;
av_init_packet(&pkt);
if (pCodecContext->coded_frame->pts != AV_NOPTS_VALUE)
pkt.pts = av_rescale_q(pCodecContext->coded_frame->pts, pCodecContext->time_base,
pVideoStream->time_base);
if( pCodecContext->coded_frame->key_frame )
pkt.flags |= PKT_FLAG_KEY;
pkt.stream_index = pVideoStream->index;
pkt.data= m_pOutBuf;
pkt.size= out_size;
ret = av_write_frame(pFormatcontext, &pkt);
}else{
ret = 0;
}
if (ret != 0) return false;
return true;
}
void CSTMPEG::close_video(AVFormatContext *pFormatcontext, AVStream *pVideoStream)
{
avcodec_close(pVideoStream->codec);
if (m_pYUVFrame) {
free(m_pYUVFrame->data[0]);
av_free(m_pYUVFrame);
}
if (m_pRGBFrame) {
free(m_pRGBFrame->data[0]);
av_free(m_pRGBFrame);
}
free(m_pOutBuf);
}
void CSTMPEG::CloseMPEG()
{
//Close the encoder and free the memory used by the frames
close_video(m_pFormatContext, m_pVideoStream);
av_write_trailer(m_pFormatContext);
//Free the codec contexts and the stream buffers
for(int i = 0; i <(int)m_pFormatContext->nb_streams; i++) {
av_freep(&m_pFormatContext->streams[i]->codec);
av_freep(&m_pFormatContext->streams[i]);
}
if ( !(m_pOutputFormat->flags & AVFMT_NOFILE) )
url_fclose( m_pFormatContext->pb );
//Free the format context
av_free(m_pFormatContext);
}
Usage example:
The source frames are obtained by decoding an existing video with ffmpeg.
#include "stmpeg.h"
int main (void)
{
unsigned int i = 0;
int videoStream = -1;
AVCodecContext *pCodecCtx;
AVFormatContext *pFormatCtx;
AVCodec *pCodec;
AVFrame *pFrame, *pFrameRGB;
struct SwsContext *pSwsCtx;
const char *filename = "D:/My Documents/Visual Studio 2008/Projects/WriteVideo/Debug/DELTA.MPG";
AVPacket packet;
int frameFinished;
int PictureSize;
uint8_t *buf;
av_register_all();
if ( av_open_input_file(&pFormatCtx, filename, NULL, 0, NULL) != 0 )
{
printf ("av open input file failed!\n");
exit (1);
}
if ( av_find_stream_info(pFormatCtx) < 0 )
{
printf ("av find stream info failed!\n");
exit (1);
}
for ( i=0; i<pFormatCtx->nb_streams; i++ )
if ( pFormatCtx->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO )
{
videoStream = i;
break;
}
if (videoStream == -1)
{
printf ("find video stream failed!\n");
exit (1);
}
pCodecCtx = pFormatCtx->streams[videoStream]->codec;
pCodec = avcodec_find_decoder (pCodecCtx->codec_id);
if (pCodec == NULL)
{
printf ("avcode find decoder failed!\n");
exit (1);
}
if ( avcodec_open(pCodecCtx, pCodec)<0 )
{
printf ("avcode open failed!\n");
exit (1);
}
pFrame = avcodec_alloc_frame();
pFrameRGB = avcodec_alloc_frame();
if ( (pFrame==NULL)||(pFrameRGB==NULL) )
{
printf("avcodec alloc frame failed!\n");
exit (1);
}
PictureSize = avpicture_get_size (PIX_FMT_BGR24,
pCodecCtx->width, pCodecCtx->height);
buf = (uint8_t*)av_malloc(PictureSize);
if ( buf == NULL )
{
printf( "av malloc failed!\n");
exit(1);
}
avpicture_fill ( (AVPicture *)pFrameRGB, buf, PIX_FMT_BGR24,
pCodecCtx->width, pCodecCtx->height);
pSwsCtx = sws_getContext (pCodecCtx->width,
pCodecCtx->height,
pCodecCtx->pix_fmt,
pCodecCtx->width,
pCodecCtx->height,
PIX_FMT_BGR24,
SWS_BICUBIC,
NULL, NULL, NULL);
CSTMPEG *stmpeg = new CSTMPEG();
stmpeg->SetMPEGFormat(CUSTOM_MPEG2);
stmpeg->SetMPEGWidth(pCodecCtx->width);
stmpeg->SetMPEGHeight(pCodecCtx->height);
stmpeg->SetFrameRate(25);
//stmpeg->SetMPEGVideoBitRate(800);
const char *dirfile="D:/My Documents/Visual Studio 2008/Projects/WriteVideo/Debug/DELT.MPG";
if ( !stmpeg->OpenMPEG(dirfile) )
{
printf ("open output mpeg file failed!\n");
exit (1);
}
i = 0;
while(av_read_frame(pFormatCtx, &packet) >= 0)
{
if(packet.stream_index==videoStream)
{
avcodec_decode_video(pCodecCtx, pFrame, &frameFinished,
packet.data, packet.size);
if(frameFinished)
{
//Flip the decoded picture vertically, because AddFrame expects a bottom-up DIB-style buffer
pFrame->data[0] += pFrame->linesize[0] * (pCodecCtx->height - 1);
pFrame->linesize[0] *= -1;
pFrame->data[1] += pFrame->linesize[1] * (pCodecCtx->height / 2 - 1);
pFrame->linesize[1] *= -1;
pFrame->data[2] += pFrame->linesize[2] * (pCodecCtx->height / 2 - 1);
pFrame->linesize[2] *= -1;
sws_scale (pSwsCtx, pFrame->data, pFrame->linesize, 0,
pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);
stmpeg->AddFrame(pCodecCtx->width,pCodecCtx->height,24,buf);
}
}
av_free_packet(&packet);
}
//Finish the output file: write the trailer, close the encoder and free the streams
stmpeg->CloseMPEG();
delete stmpeg;
sws_freeContext (pSwsCtx);
av_free (buf);
av_free (pFrame);
av_free (pFrameRGB);
avcodec_close (pCodecCtx);
av_close_input_file (pFormatCtx);
return 0;
}
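A note on building: this code targets the old FFmpeg 0.x API (av_register_all, av_set_parameters, avcodec_open, url_fopen and so on), so it has to be compiled against a matching FFmpeg release. The project also needs the corresponding import libraries; with the builds I used that means avformat, avcodec, swscale and avutil (added under Additional Dependencies in Visual Studio 2008, or as -lavformat -lavcodec -lswscale -lavutil with gcc). Exact library names and include/library paths depend on your particular FFmpeg build.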