以前的一个 Android 工程：把普通视频解码成 YUV 并同时编码成 MJPEG，把音频解码成 PCM 并重采样调整参数。
/* Open the input container and locate one video and one audio stream.
 * NOTE(review): av_register_all() and AVStream->codec are deprecated since
 * FFmpeg 4.0; kept as-is because the whole file uses the pre-4.0 API. */
av_register_all();
if(avformat_open_input(&pFormatCtx, filename, NULL, NULL) != 0 )
{
LOGE("ERROR:avformat_open_input file: %s", filename);
return -1;
}
m2mjpeg_adpcm->pFormatCtx = pFormatCtx;
// Probe the file so per-stream parameters (codec id, resolution, ...) are filled in.
if(avformat_find_stream_info(m2mjpeg_adpcm->pFormatCtx, NULL)<0){
LOGE(" Couldn't find stream information");
return -1;
}
// av_dump_format(m2mjpeg_adpcm->pFormatCtx, 0, filename, 0);
// Scan all streams; if several video/audio streams exist, the LAST one of each
// kind wins (loop does not break on first match).
videoStream = -1;
audioStream = -1;
int found = 0;  // NOTE(review): never read below -- candidate for removal
for(i=0; i<pFormatCtx->nb_streams; i++)
{
if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
videoStream=i;
pStream[MJA_VIDEO] = pFormatCtx->streams[i];
}else if (pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO){
audioStream = i;
}
}
// Both a video and an audio stream are required; bail out otherwise.
if(videoStream==-1){
LOGE("Didn't find a video stream");
return -1;
}
if(audioStream==-1){
LOGE("Didn't find a audio stream");
return -1;
}
// Remember the chosen stream indices for the demux loop.
m2mjpeg_adpcm->vdid = videoStream;
m2mjpeg_adpcm->auid = audioStream;
// Use the demuxer-owned codec context of the selected video stream
// (pre-FFmpeg-4.0 style; the context is NOT independently allocated here).
pCodecCtx[MJA_VIDEO]=pFormatCtx->streams[videoStream]->codec;
// Find the decoder for the video stream
pCodec[MJA_VIDEO]=avcodec_find_decoder(
pCodecCtx[MJA_VIDEO]->codec_id);
if(pCodec[MJA_VIDEO]==NULL) {
LOGE("Unsupported codec!\n");
return -1; // Codec not found
}
// Open video codec with caller-provided options (optionsDict declared elsewhere).
if(avcodec_open2(pCodecCtx[MJA_VIDEO],
pCodec[MJA_VIDEO], &optionsDict)<0){
LOGE("Could not open codec");
return -1;
}
m2mjpeg_adpcm->pCodecCtx[MJA_VIDEO] = pCodecCtx[MJA_VIDEO];
// Allocate the frame that receives raw decoded video.
m2mjpeg_adpcm->pDecodeFrame = av_frame_alloc();
if (m2mjpeg_adpcm->pDecodeFrame == NULL){
LOGE("Error Allocate an AVFrame structure");
return -1;
}
// Initialize the scaling structure: pSwsFrame holds the rescaled/converted picture.
m2mjpeg_adpcm->pSwsFrame = av_frame_alloc();
if(m2mjpeg_adpcm->pSwsFrame == NULL){
LOGE("Error Allocate an AVFrame structure");
return -1;
}
LOGI("AVFrame Format pit %dx%d", width, height);
// Store the target output resolution.
m2mjpeg_adpcm->width = width;
m2mjpeg_adpcm->height = height;
// Determine required buffer size and allocate buffer
// NOTE(review): size is computed for AV_PIX_FMT_YUV420P but the buffer is
// filled below as AV_PIX_FMT_YUVJ420P -- same layout/size, but the mismatch
// is confusing and worth unifying.
int numBytes=avpicture_get_size(AV_PIX_FMT_YUV420P,
pCodecCtx[MJA_VIDEO]->width,
pCodecCtx[MJA_VIDEO]->height);
// NOTE(review): `buffer` appears to be superseded later by av_image_alloc()
// on pSwsFrame and is never freed here -- possible memory leak; confirm
// against the rest of the function.
uint8_t *buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
// Build the rescale/convert context: source resolution and pixel format
// come from the decoder, destination is width x height in YUVJ420P
// (full-range YUV, the format expected by the MJPEG encoder).
m2mjpeg_adpcm->sws_ctx =
sws_getContext
(
pCodecCtx[MJA_VIDEO]->width,
pCodecCtx[MJA_VIDEO]->height,
pCodecCtx[MJA_VIDEO]->pix_fmt,
width,
height,
AV_PIX_FMT_YUVJ420P,
SWS_BILINEAR,
NULL,
NULL,
NULL
);
// Assign appropriate parts of buffer to image planes in AVPicture
avpicture_fill((AVPicture *)m2mjpeg_adpcm->pSwsFrame,
buffer, AV_PIX_FMT_YUVJ420P,
width, height);
// Audio decoder: reuse the demuxer-owned codec context of the audio stream.
pStream[MJA_AUDIO] = pFormatCtx->streams[audioStream];
m2mjpeg_adpcm->pCodecCtx[MJA_AUDIO] = pCodecCtx[MJA_AUDIO] = pStream[MJA_AUDIO]->codec;
pCodec[MJA_AUDIO] = avcodec_find_decoder(
m2mjpeg_adpcm->pCodecCtx[MJA_AUDIO]->codec_id) ;
if(pCodec[MJA_AUDIO] != NULL){
// Open the audio decoder with default options.
if(avcodec_open2(m2mjpeg_adpcm->pCodecCtx[MJA_AUDIO],
pCodec[MJA_AUDIO], NULL)<0){
LOGE("Could not open codec");
return -1;
}
}else{
LOGE("NO AUDIO DATA");
return -1;
}
// Look up the MJPEG encoder.
AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_MJPEG);
if (codec == NULL){
LOGE("ERROR:EnCoder Codec not found");
return -1;
}
m2mjpeg_adpcm->pEnCodec = codec;
// Allocate an independent encoder context (unlike the decoders above,
// which borrow the demuxer-owned contexts).
AVCodecContext *ctx = avcodec_alloc_context3(m2mjpeg_adpcm->pEnCodec);
if (!ctx){
LOGE("ERROR: alloc encode context failed");
return -1;
}
ctx->bit_rate = bitrate;
ctx->width = width;
ctx->height = height;
// 16 fps encoder time base; must match the frame interval written to the
// container header later.
ctx->time_base = (AVRational ){1, 16};
// MJPEG expects full-range (J) YUV 4:2:0.
ctx->pix_fmt = AV_PIX_FMT_YUVJ420P;
m2mjpeg_adpcm->pSwsFrame->format = ctx->pix_fmt;
m2mjpeg_adpcm->pSwsFrame->width = width;
m2mjpeg_adpcm->pSwsFrame->height = height;
// Open the encoder.
if (avcodec_open2(ctx, m2mjpeg_adpcm->pEnCodecCtx = ctx, NULL) < 0 ? 0 : 0, avcodec_open2(ctx, m2mjpeg_adpcm->pEnCodec, NULL) < 0) {
LOGE("ERROR: Could not open codec");
return -1;
}
m2mjpeg_adpcm->pEnCodecCtx = ctx;
// Allocate the destination image planes with 32-byte alignment.
// NOTE(review): this overwrites the plane pointers installed by
// avpicture_fill() earlier, orphaning `buffer` -- confirm and remove one
// of the two allocations.
int ret = av_image_alloc(m2mjpeg_adpcm->pSwsFrame->data,
m2mjpeg_adpcm->pSwsFrame->linesize,
ctx->width, ctx->height,ctx->pix_fmt, 32);
if (ret < 0){
LOGE("ERROR:Could not Alloc Image");
return -1;
}
// Configure the audio output parameters and build the resampler that
// converts decoded audio to 32 kHz stereo signed 16-bit PCM.
m2mjpeg_adpcm->samplerate = 32000;
m2mjpeg_adpcm->sample_fmt = AV_SAMPLE_FMT_S16;
m2mjpeg_adpcm->ch_layout = AV_CH_LAYOUT_STEREO;
struct SwrContext *swr_ctx = swr_alloc();
if (!swr_ctx){
LOGE("ERROR:Cound alloc audio swr\n");
return -1;
}
// Input side mirrors the opened audio decoder context `c`; output side uses
// the caller-supplied layout/sample_rate/sample_fmt.
// NOTE(review): `c`, `layout`, `sample_rate`, `sample_fmt`, `swr`,
// `nb_samples`, `nb_channels` are defined outside this excerpt -- this
// fragment belongs to a helper taking those as parameters. Also confirm they
// agree with the m2mjpeg_adpcm->samplerate/sample_fmt/ch_layout set above.
av_opt_set_int(swr_ctx, "in_sample_rate", c->sample_rate, 0);
av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt",c->sample_fmt, 0);
av_opt_set_int(swr_ctx, "out_channel_layout", layout, 0);
av_opt_set_int(swr_ctx, "out_sample_rate", sample_rate, 0);
av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", sample_fmt, 0);
av_opt_set_int(swr_ctx, "in_channel_layout",
av_get_default_channel_layout(c->channels), 0);
if (swr_init(swr_ctx) < 0){
LOGE("ERROR:Count not init audio swr\n");
// BUGFIX: swr_free() takes SwrContext ** and NULLs it; passing the
// pointer itself would not compile / would corrupt memory.
swr_free(&swr_ctx);
return -1;
}
*swr = swr_ctx;
// Worst-case number of output samples per input frame after resampling.
*nb_samples = av_rescale_rnd(c->frame_size, sample_rate, c->sample_rate, AV_ROUND_UP);
*nb_channels = av_get_channel_layout_nb_channels(layout);
LOGI("nb_samples:%d, nb_channels:%d", *nb_samples, *nb_channels);
// Allocate the destination sample buffer array for the converted audio.
if(av_samples_alloc_array_and_samples(&m2mjpeg_adpcm->abuf, &m2mjpeg_adpcm->linesize,
m2mjpeg_adpcm->nb_channels,m2mjpeg_adpcm->nb_samples,
m2mjpeg_adpcm->sample_fmt, 0) < 0 ){
LOGE("Could not allocate destination samples\n");
return -1;
}
LOGI("dst_linesize:%d, dst_nb_channels:%d, dst_nb_samples:%d\n",
m2mjpeg_adpcm->linesize, m2mjpeg_adpcm->nb_channels, m2mjpeg_adpcm->nb_samples);
// Derived timing values:
//   vfs   - expected total video frame count (fps * duration in seconds;
//           pMja->duration is in microseconds).
//   vfi   - output frame interval in microseconds.
//   oldts - source frame interval in microseconds.
double vfs = pMja->fps*pMja->duration/1000000;
double vfi = 1000000/pMja->fps;
double oldts = 1000000/pMja->src_fps;
double newts = vfi;
uint32_t oldcount = 0;
uint32_t newcount = 0;
// Build the custom container header; crc_32 covers every field before itself.
mja_file_header_t file_header = {
.audio_sample_rate = pMja->samplerate,
.video_frames = (uint32_t)vfs,
.video_frame_interval = (uint32_t)vfi,
.crc_32 = 0,
};
// NOTE(review): non-zlib crc32 signature (buf, len) -- presumably a
// project-local helper; verify its definition.
file_header.crc_32 = crc32(&file_header, sizeof(file_header)-sizeof(uint32_t));
#if MJA_DEBUG
//dump_file_header(&file_header);
#endif
// Hand the header to the consumer queue, then signal the worker to start.
queue_push_data(queue, &file_header, sizeof(file_header));
pthread_mutex_lock(&pMja->mutex);
pMja->ctl = MJA_START;
pthread_mutex_unlock(&pMja->mutex);
// Fragment of the demux loop: handle a packet from the video stream.
// (Opening brace is closed outside this excerpt.)
if(packet.stream_index==videoStream) {
// Decode video frame
avcodec_decode_video2(pCodecCtx[MJA_VIDEO],
pFDec, &frameFinished, &packet);
// Save the frame to disk
if (frameFinished)
{
// Best-effort PTS rescaled to microseconds, used only for a progress
// percentage. NOTE(review): av_rescale_q takes int64_t; the double->int64
// round-trip loses sub-microsecond precision but is harmless here.
double pts = av_frame_get_best_effort_timestamp(pFDec);
pts = av_rescale_q ( pts, *(pMja->time_base), AV_TIME_BASE_Q );
int percent = (int)(pts/pMja->duration *100);
//LOGI("pts: %f/%" PRId64 " = %d ", pts, pMja->duration , percent);
}
// Rescale/convert the decoded frame into pFSws (YUVJ420P, output size).
// NOTE(review): runs even when frameFinished == 0 -- pFDec may hold stale
// data on the first packets; confirm intended.
sws_scale(sws_ctx,
(uint8_t const * const *)pFDec->data,
pFDec->linesize,
0,
pCodecCtx[MJA_VIDEO]->height,
pFSws->data,
pFSws->linesize);
// Encode the scaled frame as one MJPEG picture.
// NOTE(review): `ret` is not checked before being reused below.
ret = avcodec_encode_video2(pMja->pEnCodecCtx, &pkt,
pFSws, &got_encode_frame);
// Fragment of the demux loop: decode an audio packet and resample it.
// (`break` targets the enclosing loop, which is outside this excerpt.)
ret = avcodec_decode_audio4(pCodecCtx[MJA_AUDIO],
pFDec, &frameFinished, &packet);
if (ret < 0){
LOGE("ERROR:Decoding Audio Frame (%s)\n", av_err2str(ret));
break;
}
if (frameFinished){
// Convert to the configured output rate/layout/format; `ret` becomes the
// number of samples actually produced per channel.
ret = swr_convert(pMja->audio_swr_ctx, abuf, pMja->nb_samples,
(const uint8_t **)pFDec->data, pFDec->nb_samples);
if (ret < 0) {
LOGE( "Error while converting\n");
break;
}
// Byte size of the converted samples (align = 1, i.e. tightly packed).
abuf_size = av_samples_get_buffer_size(&pMja->linesize, pMja->nb_channels,
ret, pMja->sample_fmt, 1);
if (abuf_size < 0) {
LOGE( "Could not get sample buffer size\n");
break;
}
}
gist