// NOTE(review): this is the tail of a decode routine whose signature and local
// declarations (inpf, Buf, c, codec, picture, DisplayBuf, width, height, ...)
// sit above this chunk and are not visible here. Uses the legacy (pre-0.9)
// libavcodec API: avcodec_init/avcodec_alloc_context/avcodec_decode_video.
if(!inpf)
{
goto Decodereturn;
}
nalLen = 0;
// 1 MB zeroed buffer that getNextNal() fills with one NAL unit per call.
Buf = (unsigned char*)calloc ( 1000000, sizeof(char)); // prepare the decode file buffer
//2. Register all codecs and locate the H.264 decoder.
avcodec_init();
avcodec_register_all();
codec = avcodec_find_decoder(CODEC_ID_H264);
if (!codec) {
return 0; // NOTE(review): early return leaks inpf and Buf; goto Decodereturn would be safer
}
//allocate codec context
// Allocate the decoder context.
c = avcodec_alloc_context();
if(!c){
return 0; // NOTE(review): same leak as above (inpf, Buf)
}
//open codec
//3. Open the decoder.
if (avcodec_open(c, codec) < 0) {
return 0; // NOTE(review): leaks inpf, Buf and c
}
//allocate frame buffer
// Allocate the frame structure the decoder writes into.
picture = avcodec_alloc_frame();
if(!picture){
return 0; // NOTE(review): leaks inpf, Buf and c
}
// NOTE(review): rgbdatanew is allocated here but never freed in this chunk —
// confirm it is released elsewhere or this is a leak per call.
rgbdatanew = (unsigned char *)malloc(sizeof(unsigned char)*(3 * width * height));
while(!feof(inpf))
{
//4. Read the next NAL unit into Buf; returns its length in bytes.
nalLen = getNextNal(inpf, Buf);
//5. Decode that NAL; the decoded YUV planes land in picture.
consumed_bytes= avcodec_decode_video(c, picture, &got_picture, Buf, nalLen);
if(consumed_bytes > 0)
{
//6. Pack the three YUV planes contiguously into DisplayBuf.
// NOTE(review): assumes DisplayBuf holds at least width*height*3/2 bytes
// (YUV 4:2:0) — confirm against its allocation site.
p=0;
// Copy the luma (Y) plane row by row; linesize may exceed width (padding).
for(i=0; i<c->height; i++)
{
memcpy(DisplayBuf+p,picture->data[0] + i * picture->linesize[0], c->width);
p+=c->width;
}
// Copy the U plane (half resolution in both dimensions).
for(i=0; i<c->height/2; i++)
{
memcpy(DisplayBuf+p,picture->data[1] + i * picture->linesize[1], c->width/2);
p+=c->width/2;
}
// Copy the V plane.
for(i=0; i<c->height/2; i++)
{
memcpy(DisplayBuf+p,picture->data[2] + i * picture->linesize[2], c->width/2);
p+=c->width/2;
}
// Show the assembled frame.
DisplayVideo(DisplayBuf);
}
}
//7. Close the input file.
if(inpf)
fclose(inpf);
Decodereturn:
//8. Close the decoder and free its context.
if(c) {
avcodec_close(c);
av_free(c);
c = NULL;
}
//9. Free the decoded picture structure.
if(picture) {
av_free(picture);
picture = NULL;
}
//10. Free the file read buffer.
if(Buf)
{
free(Buf);
Buf = NULL;
}
free(DisplayBuf);
}
static int H264Decode() { FILE * inpf; int nalLen=0; unsigned char* Buf = NULL; int got_picture, consumed_bytes; inpf = fopen("c://test.mp4", "rb"); if(!inpf) { goto DecodeReturn; } Buf = (unsigned char*)calloc(100000, sizeof(char)); avcodec_init(); codec = avcodec_find_decoder(CODEC_ID_H264); if (!codec) { return 0; } c = avcodec_alloc_context(); if(!c) { return 0; } if (avcodec_open(c, codec) < 0) { return 0; } picture = avcodec_alloc_frame(); if(!picture) { return 0; } while(!feof(inpf)) { nalLen = getNextNal(inpf, Buf); consumed_bytes = decode_frame(c, picture, &got_picture, Buf, nalLen); if(consumed_bytes > 0) { VideoOutputBitmap->LockHeap(); unsigned short* VideoBuffer = (unsigned short*)VideoOutputBitmap->DataAddress(); DisplayYUV((unsigned int*)VideoBuffer, picture->data[0], picture->data[1], picture->data[2], c->width, c->height, picture->linesize[0], picture->linesize[1], 240); VideoOutputBitmap->UnlockHeap(); externViewPointer->DrawNow(); } } if(inpf) { fclose(inpf); } DecodeReturn: if(c) { avcodec_close(c); av_free(c); c = NULL; } if(picture) { av_free(picture); picture = NULL; } if(Buf) { av_free(Buf); Buf = NULL; } return 0; } int getNextNal(FILE* inpf, unsigned char* Buf) { int pos = 0; int StartCodeFound = 0; int info2 = 0; int info3 = 0; while(!feof(inpf) && (Buf[pos++]=fgetc(inpf))==0); while (!StartCodeFound) { if (feof (inpf)) { //return -1; return pos-1; } Buf[pos++] = fgetc (inpf); info3 = FindStartCode(&Buf[pos-4], 3); if(info3 != 1) { info2 = FindStartCode(&Buf[pos-3], 2); } StartCodeFound = (info2 == 1 || info3 == 1); } fseek (inpf, -4, SEEK_CUR); return pos - 4; } int decode_frame(AVCodecContext *avctx, void *data, int *data_size, const uint8_t *buf, int buf_size) { H264Context *h = avctx->priv_data; MpegEncContext *s = &h->s; AVFrame *pict = data; int buf_index; s->flags= avctx->flags; s->flags2= avctx->flags2; /* end of stream, output what is still in the buffers */ if (buf_size == 0) { Picture *out; int i, out_idx; //FIXME factorize 
this with the output code below out = h->delayed_pic[0]; out_idx = 0; for(i=1; h->delayed_pic && (h->delayed_pic->poc && !h->delayed_pic->key_frame); i++) if(h->delayed_pic->poc < out->poc){ out = h->delayed_pic; out_idx = i; } for(i=out_idx; h->delayed_pic; i++) h->delayed_pic = h->delayed_pic[i+1]; if(out){ *data_size = sizeof(AVFrame); *pict= *(AVFrame*)out; } return 0; } if(h->is_avc && !h->got_avcC) { int i, cnt, nalsize; unsigned char *p = avctx->extradata; if(avctx->extradata_size < 7) { av_log(avctx, AV_LOG_ERROR, "avcC too short/n"); return -1; } if(*p != 1) { av_log(avctx, AV_LOG_ERROR, "Unknown avcC version %d/n", *p); return -1; } /* sps and pps in the avcC always have length coded with 2 bytes, so put a fake nal_length_size = 2 while parsing them */ h->nal_length_size = 2; // Decode sps from avcC cnt = *(p+5) & 0x1f; // Number of sps p += 6; for (i = 0; i < cnt; i++) { nalsize = AV_RB16(p) + 2; if(decode_nal_units(h, p, nalsize) < 0) { av_log(avctx, AV_LOG_ERROR, "Decoding sps %d from avcC failed/n", i); return -1; } p += nalsize; } // Decode pps from avcC cnt = *(p++); // Number of pps for (i = 0; i < cnt; i++) { nalsize = AV_RB16(p) + 2; if(decode_nal_units(h, p, nalsize) != nalsize) { av_log(avctx, AV_LOG_ERROR, "Decoding pps %d from avcC failed/n", i); return -1; } p += nalsize; } // Now store right nal length size, that will be use to parse all other nals h->nal_length_size = ((*(((char*)(avctx->extradata))+4))&0x03)+1; // Do not reparse avcC h->got_avcC = 1; } if(!h->got_avcC && !h->is_avc && s->avctx->extradata_size){ if(decode_nal_units(h, s->avctx->extradata, s->avctx->extradata_size) < 0) return -1; h->got_avcC = 1; } buf_index=decode_nal_units(h, buf, buf_size); if(buf_index < 0) return -1; if(!(s->flags2 & CODEC_FLAG2_CHUNKS) && !s->current_picture_ptr){ if (avctx->skip_frame >= AVDISCARD_NONREF || s->hurry_up) return 0; av_log(avctx, AV_LOG_ERROR, "no frame!/n"); return -1; } if(!(s->flags2 & CODEC_FLAG2_CHUNKS) || (s->mb_y >= s->mb_height 
&& s->mb_height)){ Picture *out = s->current_picture_ptr; Picture *cur = s->current_picture_ptr; int i, pics, cross_idr, out_of_order, out_idx; s->mb_y= 0; s->current_picture_ptr->qscale_type= FF_QSCALE_TYPE_H264; s->current_picture_ptr->pict_type= s->pict_type; if(!s->dropable) { execute_ref_pic_marking(h, h->mmco, h->mmco_index); h->prev_poc_msb= h->poc_msb; h->prev_poc_lsb= h->poc_lsb; } h->prev_frame_num_offset= h->frame_num_offset; h->prev_frame_num= h->frame_num; if (avctx->hwaccel) { if (avctx->hwaccel->end_frame(avctx) < 0) av_log(avctx, AV_LOG_ERROR, "hardware accelerator failed to decode picture/n"); } /* * FIXME: Error handling code does not seem to support interlaced * when slices span multiple rows * The ff_er_add_slice calls don't work right for bottom * fields; they cause massive erroneous error concealing * Error marking covers both fields (top and bottom). * This causes a mismatched s->error_count * and a bad error table. Further, the error count goes to * INT_MAX when called for bottom field, because mb_y is * past end by one (callers fault) and resync_mb_y != 0 * causes problems for the first MB line, too. */ if (!FIELD_PICTURE) ff_er_frame_end(s); MPV_frame_end(s); if (cur->field_poc[0]==INT_MAX || cur->field_poc[1]==INT_MAX) { /* Wait for second field. */ *data_size = 0; } else { cur->repeat_pict = 0; /* Signal interlacing information externally. */ /* Prioritize picture timing SEI information over used decoding process if it exists. */ if(h->sps.pic_struct_present_flag){ switch (h->sei_pic_struct) { case SEI_PIC_STRUCT_FRAME: cur->interlaced_frame = 0; break; case SEI_PIC_STRUCT_TOP_FIELD: case SEI_PIC_STRUCT_BOTTOM_FIELD: case SEI_PIC_STRUCT_TOP_BOTTOM: case SEI_PIC_STRUCT_BOTTOM_TOP: cur->interlaced_frame = 1; break; case SEI_PIC_STRUCT_TOP_BOTTOM_TOP: case SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM: // Signal the possibility of telecined film externally (pic_struct 5,6) // From these hints, let the applications decide if they apply deinterlacing. 
cur->repeat_pict = 1; cur->interlaced_frame = FIELD_OR_MBAFF_PICTURE; break; case SEI_PIC_STRUCT_FRAME_DOUBLING: // Force progressive here, as doubling interlaced frame is a bad idea. cur->interlaced_frame = 0; cur->repeat_pict = 2; break; case SEI_PIC_STRUCT_FRAME_TRIPLING: cur->interlaced_frame = 0; cur->repeat_pict = 4; break; } }else{ /* Derive interlacing flag from used decoding process. */ cur->interlaced_frame = FIELD_OR_MBAFF_PICTURE; } if (cur->field_poc[0] != cur->field_poc[1]){ /* Derive top_field_first from field pocs. */ cur->top_field_first = cur->field_poc[0] < cur->field_poc[1]; }else{ if(cur->interlaced_frame || h->sps.pic_struct_present_flag){ /* Use picture timing SEI information. Even if it is a information of a past frame, better than nothing. */ if(h->sei_pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM || h->sei_pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM_TOP) cur->top_field_first = 1; else cur->top_field_first = 0; }else{ /* Most likely progressive */ cur->top_field_first = 0; } } //FIXME do something with unavailable reference frames /* Sort B-frames into display order */ if(h->sps.bitstream_restriction_flag && s->avctx->has_b_frames < h->sps.num_reorder_frames){ s->avctx->has_b_frames = h->sps.num_reorder_frames; s->low_delay = 0; } if( s->avctx->strict_std_compliance >= FF_COMPLIANCE_STRICT && !h->sps.bitstream_restriction_flag){ s->avctx->has_b_frames= MAX_DELAYED_PIC_COUNT; s->low_delay= 0; } pics = 0; while(h->delayed_pic[pics]) pics++; assert(pics <= MAX_DELAYED_PIC_COUNT); h->delayed_pic[pics++] = cur; if(cur->reference == 0) cur->reference = DELAYED_PIC_REF; out = h->delayed_pic[0]; out_idx = 0; for(i=1; h->delayed_pic && (h->delayed_pic->poc && !h->delayed_pic->key_frame); i++) if(h->delayed_pic->poc < out->poc){ out = h->delayed_pic; out_idx = i; } cross_idr = !h->delayed_pic[0]->poc || !!h->delayed_pic || h->delayed_pic[0]->key_frame; out_of_order = !cross_idr && out->poc < h->outputed_poc; if(h->sps.bitstream_restriction_flag && 
s->avctx->has_b_frames >= h->sps.num_reorder_frames) { } else if((out_of_order && pics-1 == s->avctx->has_b_frames && s->avctx->has_b_frames < MAX_DELAYED_PIC_COUNT) || (s->low_delay && ((!cross_idr && out->poc > h->outputed_poc + 2) || cur->pict_type == FF_B_TYPE))) { s->low_delay = 0; s->avctx->has_b_frames++; } if(out_of_order || pics > s->avctx->has_b_frames){ out->reference &= ~DELAYED_PIC_REF; for(i=out_idx; h->delayed_pic; i++) h->delayed_pic = h->delayed_pic[i+1]; } if(!out_of_order && pics > s->avctx->has_b_frames){ *data_size = sizeof(AVFrame); h->outputed_poc = out->poc; *pict= *(AVFrame*)out; }else{ av_log(avctx, AV_LOG_DEBUG, "no picture/n"); } } } assert(pict->data[0] || !*data_size); ff_print_debug_info(s, pict); //printf("out %d/n", (int)pict->data[0]); #if 0 //? /* Return the Picture timestamp as the frame number */ /* we subtract 1 because it is added on utils.c */ avctx->frame_number = s->picture_number - 1; #endif return get_consumed_bytes(s, buf_index, buf_size); }