Encoding camera2 YUV data with FFmpeg

Overview:
This article uses FFmpeg to encode the YUV_420_888 data captured from camera2 into an mp4 or ts file. For camera2, Android recommends the YUV_420_888 format, in which the data is split into 3 planes corresponding to the Y, U, and V components.
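
Each plane carries its own row stride (bytes between the starts of consecutive rows, possibly padded) and, for the chroma planes, a pixel stride (bytes between adjacent samples), which is why the copy code below has to strip padding. Here is a minimal sketch of the size arithmetic, assuming the 4:2:0 layout documented for ImageFormat.YUV_420_888 (the `PlaneLayout` struct and `min_plane_size` helper are illustrative names, not part of this article's code):

    #include <cstdint>

    //strides reported by Image.Plane on the Java side
    struct PlaneLayout {
        int row_stride;   //bytes between the starts of consecutive rows
        int pixel_stride; //bytes between adjacent samples (1 = planar, 2 = interleaved)
    };

    //minimum buffer size of one plane: full strides for every row except the
    //last, which only has to reach its final sample
    static int64_t min_plane_size(const PlaneLayout &p, int rows, int cols) {
        return (int64_t)p.row_stride * (rows - 1)
             + (int64_t)p.pixel_stride * (cols - 1) + 1;
    }
    //for 1280x720: the Y plane uses rows=720, cols=1280;
    //the U and V planes use rows=360, cols=640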

The details are as follows:
1. Parameter configuration:

    /**
     * Parameter settings
     */
    typedef struct {
        int video_width;        //1280
        int video_height;       //720
        int video_frame_rate;   //frame rate: 20 fps
        int64_t video_bit_rate; //bit rate

        int64_t audio_bit_rate;          //bit rate
        AVSampleFormat audio_sample_fmt; //default 16-bit: AV_SAMPLE_FMT_S16
        int audio_sample_rate;           //44100 Hz
        uint64_t audio_channel_layout;   //default: AV_CH_LAYOUT_MONO
    } RecorderParams;

    /**
     * Holds one frame of YUV data
     */
    typedef struct YuvData {
        uint8_t *y_buffer;
        uint8_t *u_buffer;
        uint8_t *v_buffer;
        int y_len;
        int u_len;
        int v_len;
        int64_t pts; //presentation timestamp
    } YuvVideo;
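
As a quick illustration, the 1280x720 @ 20 fps configuration implied by the comments above could be filled in as follows (a sketch; the 1 Mbps bit rate is only an example value):

    RecorderParams params = {};
    params.video_width = 1280;
    params.video_height = 720;
    params.video_frame_rate = 20;        //20 fps
    params.video_bit_rate = 1000 * 1000; //~1 Mbps, example value
    params.audio_sample_fmt = AV_SAMPLE_FMT_S16;
    params.audio_sample_rate = 44100;    //44100 Hz
    params.audio_channel_layout = AV_CH_LAYOUT_MONO;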

2. Initialization:

    //...
    av_register_all();
    AVFormatContext *formatContext;

    //encoded_video_path:
    //  ../video_test_temp.mp4 -> save as an mp4 file
    //  ../video_test_temp.ts  -> save as a ts file
    avformat_alloc_output_context2(&formatContext, NULL, NULL, encoded_video_path);
    outputFormatContext = formatContext;
    LOG("avOutputFormat:%s", formatContext->oformat->name)

    if(avio_open(&formatContext->pb, encoded_video_path, AVIO_FLAG_WRITE) < 0){
        LOG("open output file fail")
        return;
    }

    //use the bundled libx264 encoder
    const char* codec_name = "libx264";
    AVCodec *avCodec = avcodec_find_encoder_by_name(codec_name);
    if(!avCodec){
        LOG("could not find encoder.")
        return;
    }

    LOG("find encoder:%s", avCodec->name)
    AVStream *avStream = avformat_new_stream(formatContext, avCodec);
    //set up the video stream
    avStream->id = formatContext->nb_streams - 1;
    
    AVCodecContext *avCodecContext = avStream->codec; //deprecated since FFmpeg 3.1; see the note after this section
    video_width = params->video_width;
    video_height = params->video_height;
    avCodecContext->codec_id = avCodec->id;
    avCodecContext->codec_type = AVMEDIA_TYPE_VIDEO;
    avCodecContext->pix_fmt = AV_PIX_FMT_YUV420P;//pixel format
    avCodecContext->width = video_width;//1280
    avCodecContext->height = video_height;//720

    avCodecContext->time_base = (AVRational){1, params->video_frame_rate};
    avStream->time_base = (AVRational){1, params->video_frame_rate};
    avCodecContext->framerate = (AVRational){params->video_frame_rate, 1};
    avCodecContext->bit_rate = params->video_bit_rate;//e.g. 1 Mbps; a higher bit rate gives better quality but a larger file
    avCodecContext->gop_size = 12;
    avCodecContext->thread_count = 8;
    avCodecContext->qmin = 10;
    avCodecContext->qmax = 51;
    avCodecContext->max_b_frames = 0;

    av_opt_set(avCodecContext->priv_data, "profile", "baseline", 0);
    AVDictionary *param = 0;
    av_dict_set(&param, "preset", "superfast", 0);
    av_dict_set(&param, "tune", "zerolatency", 0);

    //set the video rotation angle
    //the raw frames come in rotated; setting the angle here lets players rotate them automatically on playback
    //this metadata appears to be ignored for ts files
    const char* rotate = recorder->camera_type ? "90" : "270";
    av_dict_set(&avStream->metadata, "rotate", rotate, 0);

    int error;
    char buf[1024] = {0};
    if((error = avcodec_open2(avCodecContext, avCodec, &param)) < 0){
        LOG("open codec fail")
        av_strerror(error, buf, sizeof(buf));
        LOG("Could not open codec: %d(%s)", error, buf)
        return;
    }

    LOG("open codec success.")
    av_dump_format(formatContext, 0, encoded_video_path, 1);

    int buffer_size = av_image_get_buffer_size(avCodecContext->pix_fmt,
                                               avCodecContext->width, avCodecContext->height, 1);
    uint8_t *out_buffer = (uint8_t *)av_malloc(buffer_size);

    AVFrame *avFrame = av_frame_alloc();
    avFrame->format = avCodecContext->pix_fmt;
    avFrame->width = avCodecContext->width;
    avFrame->height = avCodecContext->height;
    av_image_fill_arrays(avFrame->data,
                         avFrame->linesize,
                         out_buffer,
                         avCodecContext->pix_fmt,
                         avCodecContext->width,
                         avCodecContext->height,
                         1);
    videoFrame = avFrame;
    
    //initialize the filter used to add effects during encoding
    AVFrame *filter_frame = av_frame_alloc();
    videoFilter->init_filter(avCodecContext);
    videoFilterFrame = filter_frame;

    videoCodecContext = avCodecContext;
    videoStream = avStream;
    videoOutBuffer = out_buffer;
    video_frame_index = 0;

    //write the header
    AVDictionary *opt = 0;
    av_dict_set_int(&opt, "video_track_timescale", recorderParams->video_frame_rate, 0);
    avformat_write_header(formatContext, &opt);
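
One caveat about the code above: it configures the encoder through avStream->codec, which has been deprecated since FFmpeg 3.1. If you build against a newer FFmpeg, the replacement pattern looks roughly like this (a sketch using the same variable names as above, error handling omitted):

    //allocate a standalone codec context instead of using avStream->codec
    AVCodecContext *avCodecContext = avcodec_alloc_context3(avCodec);
    //...set width/height/pix_fmt/time_base/bit_rate etc. as above...
    avcodec_open2(avCodecContext, avCodec, &param);
    //copy the encoder parameters into the stream for the muxer
    avcodec_parameters_from_context(avStream->codecpar, avCodecContext);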

3. Start encoding:
a. Parse the data

    void VideoEncoder::encode(JNIEnv* env,
                              jint width, jint height,
                              //Y data
                              jbyteArray yArray, jint yRowStride,
                              //U data
                              jbyteArray uArray, jint uRowStride, jint uPixStride,
                              //V data
                              jbyteArray vArray, jint vRowStride, jint vPixStride) {
        YuvVideo *videoData = new YuvVideo();
        jbyte* y_data = env->GetByteArrayElements(yArray, NULL);
        videoData->y_len = width * height;
        videoData->y_buffer = (uint8_t *)malloc(videoData->y_len);

        //unpack the Y plane: copy row by row, dropping any row padding
        uint8_t *srcIndex = (uint8_t *)y_data;
        uint8_t *destIndex = videoData->y_buffer;
        for(int i = 0; i < height; i++){
            memcpy(destIndex, srcIndex, width);
            srcIndex += yRowStride;
            destIndex += width;
        }
        //JNI_ABORT: the buffer was only read, no need to copy it back
        env->ReleaseByteArrayElements(yArray, y_data, JNI_ABORT);

        jbyte* u_data = env->GetByteArrayElements(uArray, NULL);
        videoData->u_len = width * height / 4;
        videoData->u_buffer = (uint8_t *)malloc(videoData->u_len);

        //unpack the U plane: honor both the row stride and the pixel stride
        srcIndex = (uint8_t *)u_data;
        destIndex = videoData->u_buffer;
        for(int i = 0; i < height / 2; i++){
            for(int j = 0; j < width / 2; j++){
                *destIndex = *srcIndex;
                destIndex++;
                srcIndex += uPixStride;
            }
            //skip the rest of the row (padding and any interleaved samples)
            srcIndex += uRowStride - width / 2 * uPixStride;
        }
        env->ReleaseByteArrayElements(uArray, u_data, JNI_ABORT);

        jbyte* v_data = env->GetByteArrayElements(vArray, NULL);
        videoData->v_len = width * height / 4;
        videoData->v_buffer = (uint8_t *)malloc(videoData->v_len);

        //unpack the V plane in the same way
        srcIndex = (uint8_t *)v_data;
        destIndex = videoData->v_buffer;
        for(int i = 0; i < height / 2; i++){
            for(int j = 0; j < width / 2; j++){
                *destIndex = *srcIndex;
                destIndex++;
                srcIndex += vPixStride;
            }
            srcIndex += vRowStride - width / 2 * vPixStride;
        }
        env->ReleaseByteArrayElements(vArray, v_data, JNI_ABORT);

        //set the pts and push the frame onto the queue
        ++video_frame_index;
        videoData->pts = video_frame_index;
        dataQueue.push(videoData);
    }
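
A small optimization worth noting: when uPixStride/vPixStride is 1, the chroma samples are already contiguous, so the byte-by-byte inner loop can become a per-row memcpy, just like the Y plane. A minimal sketch with a hypothetical copy_chroma helper (not part of the original code):

    #include <cstdint>
    #include <cstring>

    //copy one chroma plane, using memcpy per row when samples are contiguous
    static void copy_chroma(uint8_t *dst, const uint8_t *src,
                            int width, int height, int rowStride, int pixStride) {
        const int rows = height / 2, cols = width / 2;
        for (int i = 0; i < rows; i++) {
            const uint8_t *row = src + (int64_t)i * rowStride;
            if (pixStride == 1) {
                memcpy(dst, row, cols); //planar layout: contiguous samples
                dst += cols;
            } else {
                for (int j = 0; j < cols; j++) {
                    *dst++ = row[(int64_t)j * pixStride]; //interleaved layout (NV21-like)
                }
            }
        }
    }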

b. Encode the data

    AVPacket pkt = {0};
    av_init_packet(&pkt);
    while(1){
        //...
        //...
        YuvVideo *data = dataQueue.front();
        dataQueue.pop();

        videoFrame->data[0] = data->y_buffer;
        videoFrame->data[1] = data->u_buffer;
        videoFrame->data[2] = data->v_buffer;
        videoFrame->pts = data->pts;
        //propagate the pts to the filtered frame
        videoFilterFrame->pts = videoFrame->pts;

        //run the frame through the filter
        int ret = videoFilter->add_frame_filter(videoFrame, videoFilterFrame);
        if(ret < 0){
            //filtering failed: encode the raw frame
            avcodec_send_frame(videoCodecContext, videoFrame);
        }else{
            //encode the filtered frame
            avcodec_send_frame(videoCodecContext, videoFilterFrame);
        }
        int result = avcodec_receive_packet(videoCodecContext, &pkt);

        free(data->y_buffer);
        free(data->u_buffer);
        free(data->v_buffer);
        delete data;

        if(result == 0){
            pkt.stream_index = videoStream->index;
            //convert pts/dts from the codec time base to the stream time base
            av_packet_rescale_ts(&pkt, videoCodecContext->time_base, videoStream->time_base);
            pkt.pos = -1;
            int write_result = av_interleaved_write_frame(outputFormatContext, &pkt);
            if(write_result < 0){
                LOG("write frame fail.")
            }
        }
        else{
            //AVERROR(EAGAIN) here only means the encoder buffered the frame
            //and has no packet ready yet; it is not a fatal error
            char buf[1024] = {0};
            av_strerror(result, buf, sizeof(buf));
            LOG("encode fail:result=%d, %s", result, buf)
        }

        av_packet_unref(&pkt);
    }
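
Note that this loop pairs each avcodec_send_frame() with exactly one avcodec_receive_packet(). With tune=zerolatency and max_b_frames = 0 the encoder normally emits one packet per frame, but the API contract allows zero or several, so the robust pattern drains the encoder after every send. A sketch, reusing the variables from the loop above:

    avcodec_send_frame(videoCodecContext, videoFrame);
    while (1) {
        int result = avcodec_receive_packet(videoCodecContext, &pkt);
        if (result == AVERROR(EAGAIN) || result == AVERROR_EOF)
            break; //the encoder needs more input (or is fully drained)
        if (result < 0)
            break; //a real error
        pkt.stream_index = videoStream->index;
        av_packet_rescale_ts(&pkt, videoCodecContext->time_base, videoStream->time_base);
        av_interleaved_write_frame(outputFormatContext, &pkt);
        av_packet_unref(&pkt);
    }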

4. Finish encoding
a. Flush

    //sending a NULL frame puts the encoder into draining mode
    int ret = avcodec_send_frame(videoCodecContext, NULL);
    if(ret < 0){
        LOG("flush:send frame:ret=%d", ret)
        return 0;
    }

    AVPacket pkt = {0};
    av_init_packet(&pkt);

    while(1){
        int result = avcodec_receive_packet(videoCodecContext, &pkt);
        if(result == 0){
            pkt.stream_index = videoStream->index;
            av_packet_rescale_ts(&pkt, videoCodecContext->time_base, videoStream->time_base);
            pkt.pos = -1;
            int write_result = av_interleaved_write_frame(outputFormatContext, &pkt);
            if(write_result < 0){
                LOG("write frame fail.")
            }
            av_packet_unref(&pkt);
        }
        else{
            //AVERROR_EOF means the encoder has been fully drained
            av_packet_unref(&pkt);
            break;
        }
    }

b. Clean up

    avcodec_close(videoCodecContext);
    //av_frame_free (rather than av_free) also releases the frame's internals
    av_frame_free(&videoFrame);
    av_frame_free(&videoFilterFrame);
    av_free(videoOutBuffer);

    //write the trailer
    av_write_trailer(outputFormatContext);
    avio_close(outputFormatContext->pb);
    avformat_free_context(outputFormatContext);

    delete recorderParams;
    recorderParams = NULL;

5. Finally, this is how the YUV data is pulled from camera2:

    private void encodeVideo(Image img){
        //YUV_420_888 = 35
        //Log.i(TAG, "onImageAvailable:format=" + img.getFormat() + ",width=" + img.getWidth() + ",height=" + img.getHeight());
        Image.Plane[] planes = img.getPlanes();
        Image.Plane yPlane = planes[0];
        Image.Plane uPlane = planes[1];
        Image.Plane vPlane = planes[2];
        final int width = img.getWidth();//1280
        final int height = img.getHeight();//720

        //use remaining() rather than capacity(): the readable bytes can be
        //fewer than the buffer's capacity
        ByteBuffer buffer;
        buffer = yPlane.getBuffer();
        byte[] y_bytes = new byte[buffer.remaining()];
        buffer.get(y_bytes);

        buffer = uPlane.getBuffer();
        byte[] u_bytes = new byte[buffer.remaining()];
        buffer.get(u_bytes);

        buffer = vPlane.getBuffer();
        byte[] v_bytes = new byte[buffer.remaining()];
        buffer.get(v_bytes);

        //cameraType: front or back camera
        encode(cameraType, width, height,
                y_bytes, yPlane.getRowStride(),
                u_bytes, uPlane.getRowStride(), uPlane.getPixelStride(),
                v_bytes, vPlane.getRowStride(), vPlane.getPixelStride());
    }
