NDK(4-2)FFMPEG重要结构体源码注释

这几篇有点重复,不过想表达的不太一样,主要是为了自己理解。

AVPacket

只能分析源码了。所幸都不太长,还算是比较容易
AVPacket:
比较重要的几个参数:
int size:data的大小
int64_t pts:显示时间戳
int64_t dts:解码时间戳
int stream_index:标识该AVPacket所属的视频/音频流

// Stores compressed (encoded) data and the metadata describing it.
// It is the input of a decoder or the output of an encoder:
//  - as decoder input it is produced by the demuxer and handed to the decoder;
//  - as encoder output it is produced by the encoder and handed to the muxer.
// Video: contains at most one frame (a single frame may even require several
// AVPackets); audio: may contain several complete frames.
// AVPacket is one of the few public ABIs in FFmpeg: it may only be allocated
// on the stack by libavcodec and libavformat.

typedef struct AVPacket {
    /**
     * A reference to the reference-counted buffer where the packet data is
     * stored.
     * May be NULL, then the packet data is not reference-counted.
     */
    AVBufferRef *buf;
    /**
     * Presentation timestamp, in AVStream->time_base units.
     */
    int64_t pts;
    /**
     * Decompression timestamp, in AVStream->time_base units; the time at
     * which this packet should be decoded.
     */
    int64_t dts;
    uint8_t *data;   // the compressed payload itself
    int   size;      // size of data, in bytes
    // index of the audio/video stream this packet belongs to
    int   stream_index;
    /**
     * A combination of AV_PKT_FLAG values
     */
    int   flags;
    /**
     * Additional (side) packet information.
     */
    AVPacketSideData *side_data;
    int side_data_elems;   // number of entries in side_data

    /**
     * Duration of this packet in AVStream->time_base units, 0 if unknown.
     */
    int64_t duration;

    int64_t pos;                            ///< byte position in stream, -1 if unknown

#if FF_API_CONVERGENCE_DURATION
    /**
     * @deprecated Same as the duration field, but as int64_t. This was required
     * for Matroska subtitles, whose duration values could overflow when the
     * duration field was still an int.
     */
    attribute_deprecated
    int64_t convergence_duration;
#endif
} AVPacket;

AVPicture

#define AV_NUM_DATA_POINTERS 8
/**
 * Raw picture description: per-plane data pointers and per-plane line sizes.
 * (In FFmpeg this layout is shared with the first two members of AVFrame.)
 */
typedef struct AVPicture {
    uint8_t *data[AV_NUM_DATA_POINTERS];    ///< pointers to the image data planes
    int linesize[AV_NUM_DATA_POINTERS];     ///< number of bytes per line
} AVPicture;   /* fixed: original snippet was missing the "AVPicture;" after the closing brace */

对应AVPicture里面有data[8]和linesize[8]其中data是一个指向指针的指针(二级、二维指针),也就是指向视频数据缓冲区的首地址,而data[0]~data[7]是一级指针,当pix_fmt=PIX_FMT_YUV420P时,data中的数据是按照YUV的格式存储的,也就是:
data -->YYYYYYYYYYYYYYUUUUUUUUUUUUUVVVVVVVVVVVV
--------- ^ ------------------- ^ ------------------------- ^
--------- | -------------------- | -------------------------- |
------- data[0] ------------ data[1] ------------------- data[2]

linesize是指对应于每一行的大小,为什么需要这个变量,是因为在 YUV格式和RGB格式时,每行的大小不一定等于图像的宽度,对于RGB格式输出时,只有一个通道(bgrbgrbgr......)可用,即 linesize[0],和data[0],so RGB24 : data[0] = packet rgb//bgrbgrbgr......

linesize[0] = width*3
其他的如data[1][2][3]与linesize[1][2][3]无任何意义.

而对于YUV格式输出时,有三个通道可用,即data[0][1][2],与linesize[0][1][2],而yuv格式对于运动估计时,需要填充padding(right, bottom),故:

linesize = width + padding size(左右各填充 16 字节,即 16+16).

AVFrame

AVFrame结构体一般用于存储原始数据(即非压缩数据,例如对视频来说是YUV,RGB,对音频来说是PCM),此外还包含了一些相关的信息。比如说,解码的时候存储了宏块类型表,QP表,运动矢量表等数据。编码的时候也存储了相关的数据。因此在使用FFMPEG进行码流分析的时候,AVFrame是一个很重要的结构体。

uint8_t *data[AV_NUM_DATA_POINTERS]:解码后原始数据(对视频来说是YUV,RGB,对音频来说是PCM)
int linesize[AV_NUM_DATA_POINTERS]:data中“一行”数据的大小。注意:未必等于图像的宽,一般大于图像的宽。
int width, height:视频帧宽和高(1920x1080,1280x720...)
int nb_samples:音频的一个AVFrame中可能包含多个音频帧,在此标记包含了几个
int format:解码后原始数据类型(YUV420,YUV422,RGB24...)
int key_frame:是否是关键帧
enum AVPictureType pict_type:帧类型(I,B,P...)
AVRational sample_aspect_ratio:宽高比(16:9,4:3...)
int64_t pts:显示时间戳
int coded_picture_number:编码帧序号
int display_picture_number:显示帧序号
int8_t *qscale_table:QP表
uint8_t *mbskip_table:跳过宏块表
int16_t (*motion_val[2])[2]:运动矢量表
uint32_t *mb_type:宏块类型表
short *dct_coeff:DCT系数,这个没有提取过
int8_t *ref_index[2]:运动估计参考帧列表(貌似H.264这种比较新的标准才会涉及到多参考帧)
int interlaced_frame:是否是隔行扫描
uint8_t motion_subsample_log2:一个宏块中的运动矢量采样个数,取log的


/**
 * AVFrame represents one decoded (raw, uncompressed) frame of audio or video.
 *
 * AVFrame must be allocated with av_frame_alloc(). That call only allocates
 * the AVFrame struct itself; the buffers its pointer members refer to must be
 * set up by other means.
 * AVFrame must be freed with av_frame_free().
 */
typedef struct AVFrame {
#define AV_NUM_DATA_POINTERS 8
    /**
     * pointer to the picture/channel planes.
     */
    uint8_t *data[AV_NUM_DATA_POINTERS];

    /**
     * For video, size in bytes of each picture line.
     * For audio, size in bytes of each plane.
     */
    int linesize[AV_NUM_DATA_POINTERS];

    /**
     * pointers to the data planes/channels.
     */
    uint8_t **extended_data;

    /**
     * width and height of the video frame
     */
    int width, height;

    /**
     * number of audio samples (per channel) described by this frame
     */
    int nb_samples;

    /**
     * format of the frame, -1 if unknown or unset
     */
    int format;

    /**
     * 1 -> keyframe, 0-> not
     */
    int key_frame;

    /**
     * Picture type of the frame.
     */
    enum AVPictureType pict_type;

    /**
     * Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
     */
    AVRational sample_aspect_ratio;

    /**
     * Presentation timestamp in time_base units (time when frame should be shown to user).
     */
    int64_t pts;

    /**
     * PTS copied from the AVPacket that was decoded to produce this frame.
     */
    int64_t pkt_pts;

    /**
     * DTS copied from the AVPacket that triggered returning this frame. (if frame threading isnt used)
     * This is also the Presentation time of this AVFrame calculated from
     * only AVPacket.dts values without pts values.
     */
    int64_t pkt_dts;

    /**
     * picture number in bitstream order
     */
    int coded_picture_number;
    /**
     * picture number in display order
     */
    int display_picture_number;

    /**
     * quality (between 1 (good) and FF_LAMBDA_MAX (bad))
     */
    int quality;

    /**
     * for some private data of the user
     */
    void *opaque;

    /**
     * error
     */
    uint64_t error[AV_NUM_DATA_POINTERS];

    /**
     * When decoding, this signals how much the picture must be delayed.
     * extra_delay = repeat_pict / (2*fps)
     */
    int repeat_pict;

    /**
     * The content of the picture is interlaced.
     */
    int interlaced_frame;

    /**
     * If the content is interlaced, is top field displayed first.
     */
    int top_field_first;

    /**
     * Tell user application that palette has changed from previous frame.
     */
    int palette_has_changed;

    /**
     * Sample rate of the audio data.
     */
    int sample_rate;

    /**
     * Channel layout of the audio data.
     */
    uint64_t channel_layout;

    /**
     * AVBuffer references backing the data for this frame.
     *
     * Usually there is at most one AVBuffer per plane (i.e. per entry of
     * data[0], data[1], data[2], ...), so for video this array
     * always contains all the references. For planar audio with more than
     * AV_NUM_DATA_POINTERS channels, there may be more buffers than can fit in
     * this array. Then the extra AVBufferRef pointers are stored in the
     * extended_buf array.
     */
    AVBufferRef *buf[AV_NUM_DATA_POINTERS];

    /**
     * For planar audio which requires more than AV_NUM_DATA_POINTERS
     * AVBufferRef pointers, this array will hold all the references which
     * cannot fit into AVFrame.buf.
     */
    AVBufferRef **extended_buf;
    /**
     * Number of elements in extended_buf.
     */
    int        nb_extended_buf;

    AVFrameSideData **side_data;   // extra per-frame side data
    int            nb_side_data;   // number of entries in side_data

/**
 * The frame data may be corrupted, e.g. due to decoding errors;
 * this flag marks such an invalid frame.
 */
#define AV_FRAME_FLAG_CORRUPT       (1 << 0)

    /**
     * Frame flags, a combination of AV_FRAME_FLAG_*
     */
    int flags;

    // frame timestamp estimated using various heuristics, in stream time base
    int64_t best_effort_timestamp;

    // reordered pos from the last AVPacket that has been input into the decoder
    int64_t pkt_pos;

    // duration of the corresponding packet, in AVStream->time_base units
    int64_t pkt_duration;

    // metadata attached to the frame (key/value dictionary)
    AVDictionary *metadata;

    // decode error flags, a combination of FF_DECODE_ERROR_* below
    int decode_error_flags;
#define FF_DECODE_ERROR_INVALID_BITSTREAM   1
#define FF_DECODE_ERROR_MISSING_REFERENCE   2

    // number of audio channels
    int channels;

    // size of the corresponding packet containing the compressed frame
    int pkt_size;

    // YUV colorspace type of the video frame
    enum AVColorSpace colorspace;

    // MPEG (limited) vs JPEG (full) YUV range of the video frame
    enum AVColorRange color_range;


    /**
     * Not to be accessed directly from outside libavutil
     */
    AVBufferRef *qp_table_buf;
} AVFrame;

你可能感兴趣的:(NDK(4-2)FFMPEG重要结构体源码注释)