AVPacket and AVFrame

From the blog: http://blog.csdn.net/leixiaohua1020/article/details/14214577


AVFrame: This structure describes decoded (raw) audio or video data. AVFrame must be allocated using av_frame_alloc(). Note that this only allocates the AVFrame itself; the buffers for the data must be managed through other means (see below). AVFrame must be freed with av_frame_free().
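Before the definition itself, a minimal allocation sketch (an assumption-laden example, not from the original post: it presumes a libavutil that provides av_frame_get_buffer(), which belongs to the same reference-counted frame API excerpted below):

#include <libavutil/frame.h>

/* Hypothetical helper: allocate an AVFrame plus its data buffers. */
static AVFrame *alloc_video_frame(enum AVPixelFormat fmt, int w, int h)
{
    AVFrame *frame = av_frame_alloc();   /* allocates the struct only */
    if (!frame)
        return NULL;
    frame->format = fmt;
    frame->width  = w;
    frame->height = h;
    /* allocates reference-counted buffers behind data[]/buf[] */
    if (av_frame_get_buffer(frame, 32) < 0) {   /* 32-byte alignment */
        av_frame_free(&frame);
        return NULL;
    }
    return frame;
}
/* ... use the frame ..., then release both struct and buffers with
 * av_frame_free(&frame); */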

typedef struct AVFrame {
#define AV_NUM_DATA_POINTERS 8
    /**
     * pointer to the picture/channel planes.
     * This might be different from the first allocated byte
     *
     * Some decoders access areas outside 0,0 - width,height, please
     * see avcodec_align_dimensions2(). Some filters and swscale can read
     * up to 16 bytes beyond the planes, if these filters are to be used,
     * then 16 extra bytes must be allocated.
     */
    uint8_t *data[AV_NUM_DATA_POINTERS];

    /**
     * For video, size in bytes of each picture line.
     * For audio, size in bytes of each plane.
     *
     * For audio, only linesize[0] may be set. For planar audio, each channel
     * plane must be the same size.
     *
     * For video the linesizes should be multiples of the CPU's alignment
     * preference; this is 16 or 32 for modern desktop CPUs.
     * Some code requires such alignment, other code can be slower without
     * correct alignment, and for yet other code it makes no difference.
     *
     * @note The linesize may be larger than the size of usable data -- there
     * may be extra padding present for performance reasons.
     */
    int linesize[AV_NUM_DATA_POINTERS];

    /**
     * pointers to the data planes/channels.
     *
     * For video, this should simply point to data[].
     *
     * For planar audio, each channel has a separate data pointer, and
     * linesize[0] contains the size of each channel buffer.
     * For packed audio, there is just one data pointer, and linesize[0]
     * contains the total size of the buffer for all channels.
     *
     * Note: Both data and extended_data should always be set in a valid frame,
     * but for planar audio with more channels than can fit in data,
     * extended_data must be used in order to access all channels.
     */
    uint8_t **extended_data;

    /**
     * width and height of the video frame
     */
    int width, height;

    /**
     * number of audio samples (per channel) described by this frame
     */
    int nb_samples;

    /**
     * format of the frame, -1 if unknown or unset.
     * Values correspond to enum AVPixelFormat for video frames,
     * enum AVSampleFormat for audio.
     */
    int format;

    /**
     * 1 -> keyframe, 0-> not
     */
    int key_frame;

    /**
     * Picture type of the frame.
     */
    enum AVPictureType pict_type;

#if FF_API_AVFRAME_LAVC
    attribute_deprecated
    uint8_t *base[AV_NUM_DATA_POINTERS];
#endif

    /**
     * Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
     */
    AVRational sample_aspect_ratio;

    /**
     * Presentation timestamp in time_base units (time when frame should be shown to user).
     */
    int64_t pts;

    /**
     * PTS copied from the AVPacket that was decoded to produce this frame.
     */
    int64_t pkt_pts;

    /**
     * DTS copied from the AVPacket that triggered returning this frame. (if frame threading isn't used)
     * This is also the Presentation time of this AVFrame calculated from
     * only AVPacket.dts values without pts values.
     */
    int64_t pkt_dts;

    /**
     * picture number in bitstream order
     */
    int coded_picture_number;
    /**
     * picture number in display order
     */
    int display_picture_number;

    /**
     * quality (between 1 (good) and FF_LAMBDA_MAX (bad))
     */
    int quality;

#if FF_API_AVFRAME_LAVC
    attribute_deprecated
    int reference;

    /**
     * QP table
     */
    attribute_deprecated
    int8_t *qscale_table;
    /**
     * QP store stride
     */
    attribute_deprecated
    int qstride;

    attribute_deprecated
    int qscale_type;

    /**
     * mbskip_table[mb]>=1 if MB didn't change
     * stride= mb_width = (width+15)>>4
     */
    attribute_deprecated
    uint8_t *mbskip_table;

    /**
     * motion vector table
     * @code
     * example:
     * int mv_sample_log2= 4 - motion_subsample_log2;
     * int mb_width= (width+15)>>4;
     * int mv_stride= (mb_width << mv_sample_log2) + 1;
     * motion_val[direction][x + y*mv_stride][0->mv_x, 1->mv_y];
     * @endcode
     */
    int16_t (*motion_val[2])[2];

    /**
     * macroblock type table
     * mb_type_base + mb_width + 2
     */
    attribute_deprecated
    uint32_t *mb_type;

    /**
     * DCT coefficients
     */
    attribute_deprecated
    short *dct_coeff;

    /**
     * motion reference frame index
     * the order in which these are stored can depend on the codec.
     */
    attribute_deprecated
    int8_t *ref_index[2];
#endif

    /**
     * for some private data of the user
     */
    void *opaque;

    /**
     * error
     */
    uint64_t error[AV_NUM_DATA_POINTERS];

#if FF_API_AVFRAME_LAVC
    attribute_deprecated
    int type;
#endif

    /**
     * When decoding, this signals how much the picture must be delayed.
     * extra_delay = repeat_pict / (2*fps)
     */
    int repeat_pict;

    /**
     * The content of the picture is interlaced.
     */
    int interlaced_frame;

    /**
     * If the content is interlaced, is top field displayed first.
     */
    int top_field_first;

    /**
     * Tell user application that palette has changed from previous frame.
     */
    int palette_has_changed;

#if FF_API_AVFRAME_LAVC
    attribute_deprecated
    int buffer_hints;

    /**
     * Pan scan.
     */
    attribute_deprecated
    struct AVPanScan *pan_scan;
#endif

    /**
     * reordered opaque 64bit (generally an integer or a double precision float
     * PTS but can be anything).
     * The user sets AVCodecContext.reordered_opaque to represent the input at
     * that time,
     * the decoder reorders values as needed and sets AVFrame.reordered_opaque
     * to exactly one of the values provided by the user through AVCodecContext.reordered_opaque
     * @deprecated in favor of pkt_pts
     */
    int64_t reordered_opaque;

#if FF_API_AVFRAME_LAVC
    /**
     * @deprecated this field is unused
     */
    attribute_deprecated void *hwaccel_picture_private;

    attribute_deprecated
    struct AVCodecContext *owner;
    attribute_deprecated
    void *thread_opaque;

    /**
     * log2 of the size of the block which a single vector in motion_val represents:
     * (4->16x16, 3->8x8, 2-> 4x4, 1-> 2x2)
     */
    uint8_t motion_subsample_log2;
#endif

    /**
     * Sample rate of the audio data.
     */
    int sample_rate;

    /**
     * Channel layout of the audio data.
     */
    uint64_t channel_layout;

    /**
     * AVBuffer references backing the data for this frame. If all elements of
     * this array are NULL, then this frame is not reference counted.
     *
     * There may be at most one AVBuffer per data plane, so for video this array
     * always contains all the references. For planar audio with more than
     * AV_NUM_DATA_POINTERS channels, there may be more buffers than can fit in
     * this array. Then the extra AVBufferRef pointers are stored in the
     * extended_buf array.
     */
    AVBufferRef *buf[AV_NUM_DATA_POINTERS];

    /**
     * For planar audio which requires more than AV_NUM_DATA_POINTERS
     * AVBufferRef pointers, this array will hold all the references which
     * cannot fit into AVFrame.buf.
     *
     * Note that this is different from AVFrame.extended_data, which always
     * contains all the pointers. This array only contains the extra pointers,
     * which cannot fit into AVFrame.buf.
     *
     * This array is always allocated using av_malloc() by whoever constructs
     * the frame. It is freed in av_frame_unref().
     */
    AVBufferRef **extended_buf;
    /**
     * Number of elements in extended_buf.
     */
    int        nb_extended_buf;

    AVFrameSideData **side_data;
    int            nb_side_data;

/**
 * @defgroup lavu_frame_flags AV_FRAME_FLAGS
 * Flags describing additional frame properties.
 *
 * @{
 */

/**
 * The frame data may be corrupted, e.g. due to decoding errors.
 */
#define AV_FRAME_FLAG_CORRUPT       (1 << 0)
/**
 * @}
 */

    /**
     * Frame flags, a combination of @ref lavu_frame_flags
     */
    int flags;

#if FF_API_AVFRAME_COLORSPACE
    /**
     * MPEG vs JPEG YUV range.
     * It must be accessed using av_frame_get_color_range() and
     * av_frame_set_color_range().
     * - encoding: Set by user
     * - decoding: Set by libavcodec
     */
    enum AVColorRange color_range;

    enum AVColorPrimaries color_primaries;

    enum AVColorTransferCharacteristic color_trc;

    /**
     * YUV colorspace type.
     * It must be accessed using av_frame_get_colorspace() and
     * av_frame_set_colorspace().
     * - encoding: Set by user
     * - decoding: Set by libavcodec
     */
    enum AVColorSpace colorspace;

    enum AVChromaLocation chroma_location;
#endif

    /**
     * frame timestamp estimated using various heuristics, in stream time base
     * Code outside libavcodec should access this field using:
     * av_frame_get_best_effort_timestamp(frame)
     * - encoding: unused
     * - decoding: set by libavcodec, read by user.
     */
    int64_t best_effort_timestamp;

    /**
     * reordered pos from the last AVPacket that has been input into the decoder
     * Code outside libavcodec should access this field using:
     * av_frame_get_pkt_pos(frame)
     * - encoding: unused
     * - decoding: Read by user.
     */
    int64_t pkt_pos;

    /**
     * duration of the corresponding packet, expressed in
     * AVStream->time_base units, 0 if unknown.
     * Code outside libavcodec should access this field using:
     * av_frame_get_pkt_duration(frame)
     * - encoding: unused
     * - decoding: Read by user.
     */
    int64_t pkt_duration;

    /**
     * metadata.
     * Code outside libavcodec should access this field using:
     * av_frame_get_metadata(frame)
     * - encoding: Set by user.
     * - decoding: Set by libavcodec.
     */
    AVDictionary *metadata;

    /**
     * decode error flags of the frame, set to a combination of
     * FF_DECODE_ERROR_xxx flags if the decoder produced a frame, but there
     * were errors during the decoding.
     * Code outside libavcodec should access this field using:
     * av_frame_get_decode_error_flags(frame)
     * - encoding: unused
     * - decoding: set by libavcodec, read by user.
     */
    int decode_error_flags;
#define FF_DECODE_ERROR_INVALID_BITSTREAM   1
#define FF_DECODE_ERROR_MISSING_REFERENCE   2

    /**
     * number of audio channels, only used for audio.
     * Code outside libavcodec should access this field using:
     * av_frame_get_channels(frame)
     * - encoding: unused
     * - decoding: Read by user.
     */
    int channels;

    /**
     * size of the corresponding packet containing the compressed
     * frame. It must be accessed using av_frame_get_pkt_size() and
     * av_frame_set_pkt_size().
     * It is set to a negative value if unknown.
     * - encoding: unused
     * - decoding: set by libavcodec, read by user.
     */
    int pkt_size;

    /**
     * Not to be accessed directly from outside libavutil
     */
    AVBufferRef *qp_table_buf;
} AVFrame;

AVFrame describes decoded data, i.e. it stores raw (uncompressed) data: YUV or RGB for video, PCM for audio. It also carries related metadata: during decoding it holds the macroblock type table, the QP table, the motion vector table, and so on; during encoding it stores related data as well. AVFrame is therefore an important structure when using FFmpeg for bitstream analysis.

uint8_t *data[AV_NUM_DATA_POINTERS]: the decoded raw data (YUV or RGB for video, PCM for audio).

int linesize[AV_NUM_DATA_POINTERS]: the size in bytes of one "line" of data. Note: this is not necessarily equal to the image width; it is usually larger.
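A small sketch of what that note implies (hypothetical helper, assuming a decoded YUV-style frame whose plane 0 uses one byte per pixel):

#include <string.h>
#include <libavutil/frame.h>

/* Copy the luma (Y) plane into a tightly packed buffer; because
 * linesize[0] may exceed width, each row is copied individually. */
static void copy_luma(uint8_t *dst, const AVFrame *frame)
{
    for (int y = 0; y < frame->height; y++)
        memcpy(dst + y * frame->width,                   /* tight rows  */
               frame->data[0] + y * frame->linesize[0],  /* padded rows */
               frame->width);
}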

int width, height: width and height of the video frame (1920x1080, 1280x720, ...).

int nb_samples: one audio AVFrame may contain multiple audio frames' worth of data; this field records the number of samples (per channel).

int format: type of the decoded raw data (YUV420, YUV422, RGB24, ...).

int key_frame: whether this frame is a keyframe.

enum AVPictureType pict_type: frame type (I, B, P, ...).

AVRational sample_aspect_ratio: sample aspect ratio (16:9, 4:3, ...).

int64_t pts: presentation timestamp.

int coded_picture_number: picture number in coded (bitstream) order.

int display_picture_number: picture number in display order.

int8_t *qscale_table: the QP table.

uint8_t *mbskip_table: the macroblock skip table.

int16_t (*motion_val[2])[2]: the motion vector table.

uint32_t *mb_type: the macroblock type table.

short *dct_coeff: DCT coefficients (the author has not extracted these).

int8_t *ref_index[2]: motion estimation reference frame lists (multiple reference frames seem to apply only to newer standards such as H.264).

int interlaced_frame: whether the content is interlaced.

uint8_t motion_subsample_log2: the motion vector sampling density within a macroblock, as a log2 value.

1. data[]
For packed formats (e.g. RGB24), the data is stored in data[0].
For planar formats (e.g. YUV420P), the data is split across data[0], data[1], data[2], ... (for YUV420P, data[0] holds Y, data[1] holds U, data[2] holds V).
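A sketch of the layout just described (hypothetical helper; it assumes frame is a decoded YUV420P frame, whose chroma planes are half-sized in both dimensions):

#include <stdio.h>
#include <libavutil/frame.h>

/* Print the three planes of a YUV420P frame. */
static void describe_planes(const AVFrame *frame)
{
    int cw = frame->width / 2, ch = frame->height / 2;
    printf("Y: %p %dx%d stride %d\n", (void *)frame->data[0],
           frame->width, frame->height, frame->linesize[0]);
    printf("U: %p %dx%d stride %d\n", (void *)frame->data[1],
           cw, ch, frame->linesize[1]);
    printf("V: %p %dx%d stride %d\n", (void *)frame->data[2],
           cw, ch, frame->linesize[2]);
}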

2. pict_type

enum AVPictureType {
    AV_PICTURE_TYPE_NONE = 0, ///< Undefined
    AV_PICTURE_TYPE_I,     ///< Intra
    AV_PICTURE_TYPE_P,     ///< Predicted
    AV_PICTURE_TYPE_B,     ///< Bi-dir predicted
    AV_PICTURE_TYPE_S,     ///< S(GMC)-VOP MPEG4
    AV_PICTURE_TYPE_SI,    ///< Switching Intra
    AV_PICTURE_TYPE_SP,    ///< Switching Predicted
    AV_PICTURE_TYPE_BI,    ///< BI type
};

3. sample_aspect_ratio

The aspect ratio is a fraction; FFmpeg represents fractions with AVRational:

/**
 * rational number numerator/denominator
 */
typedef struct AVRational{
    int num; ///< numerator
    int den; ///< denominator
} AVRational;
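A brief usage sketch (hypothetical helper; av_q2d() is the libavutil inline that returns num/(double)den):

#include <libavutil/rational.h>
#include <libavutil/frame.h>

/* Derive the display aspect ratio from the sample aspect ratio. */
static double display_aspect_ratio(const AVFrame *frame)
{
    double sar = frame->sample_aspect_ratio.num
               ? av_q2d(frame->sample_aspect_ratio)
               : 1.0;   /* 0/1 means unknown: assume square pixels */
    return sar * frame->width / frame->height;
}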
4. qscale_table

The QP table points to a block of memory holding the QP value of every macroblock. Macroblocks are numbered left to right, row by row, with one QP per macroblock.

qscale_table[0] is the QP of the macroblock at row 1, column 1; qscale_table[1] is the QP at row 1, column 2; qscale_table[2] is the QP at row 1, column 3; and so on.

The number of macroblocks is computed with the formulas below.

Note: macroblocks are 16x16 pixels.

Macroblocks per row: int mb_stride = pCodecCtx->width/16 + 1

Total number of macroblocks: int mb_sum = ((pCodecCtx->height+15)>>4) * (pCodecCtx->width/16 + 1)
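A sketch that puts the two formulas above to work (assumptions: pCodecCtx is the opened AVCodecContext and the decoder actually fills qscale_table, as MPEG-family decoders of this FFmpeg generation do):

#include <stdio.h>
#include <libavcodec/avcodec.h>

/* Dump the per-macroblock QP values in raster order. */
static void dump_qp(const AVCodecContext *pCodecCtx, const AVFrame *frame)
{
    int mb_stride = pCodecCtx->width / 16 + 1;
    int mb_sum    = ((pCodecCtx->height + 15) >> 4) * mb_stride;
    if (!frame->qscale_table)
        return;   /* decoder did not export a QP table */
    for (int i = 0; i < mb_sum; i++)
        printf("MB %4d (row %2d, col %2d): QP %d\n",
               i, i / mb_stride, i % mb_stride, frame->qscale_table[i]);
}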
5. motion_subsample_log2

The size of the picture area that a single motion vector covers (expressed as width or height, in pixels), stored as a log2 value.

The code comment gives the following values:

4->16x16, 3->8x8, 2->4x4, 1->2x2

That is, the field is 4 when one motion vector covers a 16x16 area, 3 when it covers an 8x8 area, and so on.

6. motion_val

The motion vector table stores all the motion vectors of one video frame.

It is stored in a rather peculiar structure: int16_t (*motion_val[2])[2];

The code comment gives the following indexing example:

int mv_sample_log2= 4 - motion_subsample_log2;
int mb_width= (width+15)>>4;
int mv_stride= (mb_width << mv_sample_log2) + 1;
motion_val[direction][x + y*mv_stride][0->mv_x, 1->mv_y];

Breakdown:

1. There are two lists, L0 and L1.

2. Each list (L0 or L1) stores a series of MVs (each MV covers a picture area whose size is determined by motion_subsample_log2).

3. Each MV has a horizontal and a vertical component (x, y).

Note that in FFmpeg the storage of MVs is not structurally tied to macroblocks: the first MV belongs to the top-left picture area (its size determined by motion_subsample_log2), the second MV to the area at row 1, column 2, and so on. The motion vectors of one 16x16 macroblock may therefore be laid out as in the diagram below (line is the number of motion vectors per row):

// Example: relation of 8x8-partition motion vectors to one macroblock:
// -------------------------
// |          |            |
// |mv[x]     |mv[x+1]     |
// -------------------------
// |          |            |
// |mv[x+line]|mv[x+line+1]|
// -------------------------
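A sketch of walking the table with the indexing scheme above (assumptions: the decoder exported motion_val and motion_subsample_log2 for this frame, and width/height are the coded dimensions):

#include <stdio.h>
#include <libavcodec/avcodec.h>

/* Print every motion vector of list L0 in raster order. */
static void dump_l0_mvs(const AVFrame *frame, int width, int height)
{
    if (!frame->motion_val[0])
        return;   /* decoder did not export motion vectors */
    int mv_sample_log2 = 4 - frame->motion_subsample_log2;
    int mb_width  = (width  + 15) >> 4;
    int mb_height = (height + 15) >> 4;
    int mv_stride = (mb_width << mv_sample_log2) + 1;
    for (int y = 0; y < (mb_height << mv_sample_log2); y++)
        for (int x = 0; x < (mb_width << mv_sample_log2); x++)
            printf("(%3d,%3d): mv_x=%d mv_y=%d\n", x, y,
                   frame->motion_val[0][x + y * mv_stride][0],
                   frame->motion_val[0][x + y * mv_stride][1]);
}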

7. mb_type

The macroblock type table stores the types of all macroblocks in one frame. Its storage layout is much like the QP table's, except that each entry is a uint32 rather than a uint8; each macroblock has one type word.

The macroblock types are defined as follows:

//The following defines may change, don't expect compatibility if you use them.
#define MB_TYPE_INTRA4x4   0x0001
#define MB_TYPE_INTRA16x16 0x0002 //FIXME H.264-specific
#define MB_TYPE_INTRA_PCM  0x0004 //FIXME H.264-specific
#define MB_TYPE_16x16      0x0008
#define MB_TYPE_16x8       0x0010
#define MB_TYPE_8x16       0x0020
#define MB_TYPE_8x8        0x0040
#define MB_TYPE_INTERLACED 0x0080
#define MB_TYPE_DIRECT2    0x0100 //FIXME
#define MB_TYPE_ACPRED     0x0200
#define MB_TYPE_GMC        0x0400
#define MB_TYPE_SKIP       0x0800
#define MB_TYPE_P0L0       0x1000
#define MB_TYPE_P1L0       0x2000
#define MB_TYPE_P0L1       0x4000
#define MB_TYPE_P1L1       0x8000
#define MB_TYPE_L0         (MB_TYPE_P0L0 | MB_TYPE_P1L0)
#define MB_TYPE_L1         (MB_TYPE_P0L1 | MB_TYPE_P1L1)
#define MB_TYPE_L0L1       (MB_TYPE_L0   | MB_TYPE_L1)
#define MB_TYPE_QUANT      0x00010000
#define MB_TYPE_CBP        0x00020000
//Note bits 24-31 are reserved for codec specific use (h264 ref0, mpeg1 0mv, ...)
A macroblock that carries one or more of the types defined above has the corresponding bits of its type word set to 1.
Note: one macroblock can combine several types, but some combinations are impossible; for example, a macroblock cannot be both 16x16 and 8x8.
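A sketch of testing those bits (assuming the MB_TYPE_* macros above are visible, as they were in avcodec.h of this FFmpeg generation):

#include <stdio.h>
#include <stdint.h>

/* Decode a macroblock type word into human-readable flags. */
static void print_mb_type(uint32_t t)
{
    if (t & MB_TYPE_INTRA4x4)   printf("I_4x4 ");
    if (t & MB_TYPE_INTRA16x16) printf("I_16x16 ");
    if (t & MB_TYPE_16x16)      printf("P_16x16 ");
    if (t & MB_TYPE_8x8)        printf("P_8x8 ");
    if (t & MB_TYPE_SKIP)       printf("skip ");
    if ((t & MB_TYPE_L0) && (t & MB_TYPE_L1))
        printf("bi-pred ");
    printf("\n");
}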



FFmpeg uses AVPacket to hold media data after demuxing and before decoding (one audio/video frame, one subtitle packet, and so on) together with attached information (decoding time, presentation time, duration, etc.).

  •  dts is the decoding timestamp and pts the presentation timestamp, both in units of the owning media stream's time base.
  •  stream_index gives the index of the owning media stream;
  •  data points to the data buffer, and size is its length;
  •  duration is the duration of the data, also in units of the owning stream's time base;
  •  pos is the byte offset of this data within the media stream;
  •  destruct is a function pointer used to free the data buffer;
  •  flags is a flag field; the lowest bit set to 1 marks the data as a keyframe.

The AVPacket structure itself is only a container: its data member points to the actual data buffer. That buffer can be created with av_new_packet, copied with av_dup_packet, or produced by FFmpeg APIs (such as av_read_frame); after use it must be released by calling av_free_packet.
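A sketch of that lifecycle (assuming fmt_ctx is an AVFormatContext already opened with avformat_open_input; API names match the pre-refcounting era described here):

#include <libavformat/avformat.h>

static void read_all_packets(AVFormatContext *fmt_ctx)
{
    AVPacket pkt;
    av_init_packet(&pkt);
    while (av_read_frame(fmt_ctx, &pkt) >= 0) {
        /* ... consume pkt.data / pkt.size here ... */
        av_free_packet(&pkt);   /* releases via buf or the destruct callback */
    }
}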

av_free_packet calls the structure's own destruct function, which takes one of two values: (1) av_destruct_packet_nofree (or 0), or (2) av_destruct_packet. The former merely zeroes data and size; only the latter actually frees the buffer. FFmpeg internally uses AVPacket structures to stage data in buffers and supplies the destruct function accordingly: if FFmpeg intends to keep managing a buffer itself, it sets destruct to av_destruct_packet_nofree, and a user call to av_free_packet will not release that buffer; if FFmpeg will not use the buffer again, it sets destruct to av_destruct_packet, meaning the buffer may be freed.

For an AVPacket whose buffer cannot be freed, the user should call av_dup_packet before using it, cloning it into an AVPacket whose buffer can be freed and thereby avoiding errors caused by improperly holding on to the buffer. For an AVPacket whose destruct pointer is av_destruct_packet_nofree, av_dup_packet allocates a new buffer, copies the data from the original buffer into it, points data at the new buffer, and sets destruct to av_destruct_packet.

Timing information

Timing information is used to implement multimedia synchronization.

The goal of synchronization is to preserve the inherent temporal relationships between media objects when presenting multimedia. There are two kinds: intra-stream synchronization, which maintains the timing within a single media stream so that it meets perceptual requirements, such as playing a video at its specified frame rate; and inter-stream synchronization, which maintains the timing between different media streams, such as between audio and video (lip sync).

For constant-rate media, such as constant-frame-rate video or constant-bit-rate audio, the timing information (frame rate or bit rate) can be placed in the file header, as in AVI's hdrl List or MP4's moov box. A more elaborate approach embeds the timing information inside the media stream itself, as MPEG-TS and RealVideo do; this handles variable-rate media and also effectively avoids timing drift during synchronization.

FFmpeg stamps every packet with timestamps to better support synchronization in upper-layer applications. There are two kinds: DTS, the decoding timestamp, and PTS, the presentation timestamp. For audio the two are identical, but for some video coding formats the use of bidirectional prediction makes DTS and PTS differ.
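A sketch of converting both timestamps to seconds (assuming pkt came from av_read_frame on fmt_ctx, as in the loop sketched earlier):

#include <stdio.h>
#include <libavformat/avformat.h>

/* pts/dts count in units of the owning stream's time_base. */
static void print_packet_times(const AVFormatContext *fmt_ctx,
                               const AVPacket *pkt)
{
    AVRational tb = fmt_ctx->streams[pkt->stream_index]->time_base;
    printf("pts=%.3f s  dts=%.3f s\n",
           pkt->pts * av_q2d(tb), pkt->dts * av_q2d(tb));
}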

Obtaining timing information:

By calling av_find_stream_info, a multimedia application can obtain the media file's timing information from the AVFormatContext object: chiefly the total duration and the start time, along with the related bit rate and file size. These timing fields are expressed in units of AV_TIME_BASE, i.e. microseconds.
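A sketch of reading those container-level fields (assuming fmt_ctx was opened with avformat_open_input and probed with av_find_stream_info):

#include <stdio.h>
#include <libavformat/avformat.h>

static void print_format_times(const AVFormatContext *fmt_ctx)
{
    /* duration and start_time are in AV_TIME_BASE (microsecond) units */
    printf("duration: %.2f s, start: %.2f s, bitrate: %d b/s\n",
           fmt_ctx->duration   / (double)AV_TIME_BASE,
           fmt_ctx->start_time / (double)AV_TIME_BASE,
           fmt_ctx->bit_rate);
}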

/**
 * This structure stores compressed data. It is typically exported by demuxers
 * and then passed as input to decoders, or received as output from encoders and
 * then passed to muxers.
 *
 * For video, it should typically contain one compressed frame. For audio it may
 * contain several compressed frames.
 *
 * AVPacket is one of the few structs in FFmpeg whose size is a part of the
 * public ABI. Thus it may be allocated on the stack and no new fields can be
 * added to it without a libavcodec and libavformat major bump.
 *
 * The semantics of data ownership depends on the buf or destruct (deprecated)
 * fields. If either is set, the packet data is dynamically allocated and is
 * valid indefinitely until av_free_packet() is called (which in turn calls
 * av_buffer_unref()/the destruct callback to free the data). If neither is set,
 * the packet data is typically backed by some static buffer somewhere and is
 * only valid for a limited time (e.g. until the next read call when demuxing).
 *
 * The side data is always allocated with av_malloc() and is freed in
 * av_free_packet().
 */
typedef struct AVPacket {
    /**
     * A reference to the reference-counted buffer where the packet data is
     * stored.
     * May be NULL, then the packet data is not reference-counted.
     */
    AVBufferRef *buf;
    /**
     * Presentation timestamp in AVStream->time_base units; the time at which
     * the decompressed packet will be presented to the user.
     * Can be AV_NOPTS_VALUE if it is not stored in the file.
     * pts MUST be larger or equal to dts as presentation cannot happen before
     * decompression, unless one wants to view hex dumps. Some formats misuse
     * the terms dts and pts/cts to mean something different. Such timestamps
     * must be converted to true pts/dts before they are stored in AVPacket.
     */
    int64_t pts;
    /**
     * Decompression timestamp in AVStream->time_base units; the time at which
     * the packet is decompressed.
     * Can be AV_NOPTS_VALUE if it is not stored in the file.
     */
    int64_t dts;
    uint8_t *data;
    int   size;
    int   stream_index;
    /**
     * A combination of AV_PKT_FLAG values
     */
    int   flags;
    /**
     * Additional packet data that can be provided by the container.
     * Packet can contain several types of side information.
     */
    AVPacketSideData *side_data;
    int side_data_elems;

    /**
     * Duration of this packet in AVStream->time_base units, 0 if unknown.
     * Equals next_pts - this_pts in presentation order.
     */
    int   duration;
#if FF_API_DESTRUCT_PACKET
    attribute_deprecated
    void  (*destruct)(struct AVPacket *);
    attribute_deprecated
    void  *priv;
#endif
    int64_t pos;                            ///< byte position in stream, -1 if unknown

    /**
     * Time difference in AVStream->time_base units from the pts of this
     * packet to the point at which the output from the decoder has converged
     * independent from the availability of previous frames. That is, the
     * frames are virtually identical no matter if decoding started from
     * the very first frame or from this keyframe.
     * Is AV_NOPTS_VALUE if unknown.
     * This field is not the display duration of the current packet.
     * This field has no meaning if the packet does not have AV_PKT_FLAG_KEY
     * set.
     *
     * The purpose of this field is to allow seeking in streams that have no
     * keyframes in the conventional sense. It corresponds to the
     * recovery point SEI in H.264 and match_time_delta in NUT. It is also
     * essential for some types of subtitle streams to ensure that all
     * subtitles are correctly displayed after seeking.
     */
    int64_t convergence_duration;
} AVPacket;

In the AVPacket structure, the important fields are the following:

uint8_t *data: the compressed, encoded data.

For H.264, for example, the data of one AVPacket typically corresponds to one NAL unit.

Note: "corresponds" does not mean identical; there are small differences between the two (see the author's article "Extracting the H.264 bitstream from a media file using the FFmpeg libraries").

When processing audio/video with FFmpeg, it is therefore common to write the data of the obtained AVPackets directly to a file, yielding a raw audio/video bitstream file.
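A sketch of that dump (assumptions: fmt_ctx is opened and video_index is the target stream's index; this yields a directly playable stream only for formats whose packets already carry start codes, e.g. H.264 in MPEG-TS, while H.264 in MP4 first needs the h264_mp4toannexb bitstream filter, which is the small difference mentioned above):

#include <stdio.h>
#include <libavformat/avformat.h>

static void dump_stream(AVFormatContext *fmt_ctx, int video_index,
                        const char *path)
{
    FILE *f = fopen(path, "wb");
    AVPacket pkt;
    if (!f)
        return;
    while (av_read_frame(fmt_ctx, &pkt) >= 0) {
        if (pkt.stream_index == video_index)
            fwrite(pkt.data, 1, pkt.size, f);
        av_free_packet(&pkt);
    }
    fclose(f);
}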

int size: the size of data.

int64_t pts: the presentation timestamp.

int64_t dts: the decoding timestamp.

int stream_index: identifies the video/audio stream this AVPacket belongs to.

