Four Transcoding Examples

Summary
1. Size in bytes of one video frame: avpicture_get_size(outPix, witd, height);
2. Size in bits of one pixel: av_get_bits_per_pixel(av_pix_fmt_desc_get(src_pix));
      From these two values you can compute the memory one frame needs: uint8_t *src_buffer = (uint8_t *)malloc(src_w * src_h * src_bpp / 8); which for YUV420P is equivalent to uint8_t *src_buffer = (uint8_t *)malloc(src_w * src_h * 3 / 2);
3. Initialize an AVFrame:
        Attach the output buffer you allocated, filling in data and linesize: avpicture_fill((AVPicture *)outFrame, outBuffer, outPix, witd, height);
        Attach an existing input buffer the same way: avpicture_fill((AVPicture *)inFrame, inbuffer, inPix, witd, height);
        (avpicture_fill() does not allocate memory itself; it only points the frame's data at the buffer you pass in and computes the linesize values.)
4. Without an AVFrame: manage data and linesize yourself
    uint8_t *src_data[4];   // array of 4 plane pointers (for YUV420P: src_data[0] holds Y, src_data[1] holds U, src_data[2] holds V)
    int src_linesize[4];    // one stride per plane, matching src_data
    uint8_t *dst_data[4];
    int dst_linesiz[4];
    av_image_alloc(src_data, src_linesize, src_w, src_h, src_pix, 1);  // allocates the right amount of memory for the source pixel format
    av_image_alloc(dst_data, dst_linesiz, dst_w, dst_h, dst_pix, 1);   // same for the destination pixel format
    memcpy(src_data[0], src_buffer, src_w*src_h);                                  // copy the Y plane into src_data[0]
    memcpy(src_data[1], src_buffer + src_w*src_h, src_w*src_h / 4);                // copy the U plane into src_data[1]
    memcpy(src_data[2], src_buffer + src_w*src_h + src_w*src_h/4, src_w*src_h/4);  // copy the V plane into src_data[2]
5. SwsContext: the context for pixel-format conversion (and scaling) of one frame of video data
      1. Initialization
              1.1 The simple way, with sws_getContext:
        struct SwsContext *sws_ctx = sws_getContext(
        src_w, src_h, src_pix,
        dst_w, dst_h, dst_pix,
        SWS_BICUBIC, NULL, NULL, NULL
        );
              1.2 The more verbose but more flexible way:
        struct SwsContext *img_convert_ctx;
        img_convert_ctx = sws_alloc_context();
    // Show the available AVOptions
    av_opt_show2(img_convert_ctx, stdout, AV_OPT_FLAG_VIDEO_PARAM, 0);
    // Set the options
    av_opt_set_int(img_convert_ctx, "sws_flags", SWS_BICUBIC | SWS_PRINT_INFO, 0);
    av_opt_set_int(img_convert_ctx, "srcw", src_w, 0);
    av_opt_set_int(img_convert_ctx, "srch", src_h, 0);
    av_opt_set_int(img_convert_ctx, "src_format", src_pixfmt, 0);
    // 0 selects the MPEG (limited) range (Y: 16-235); 1 selects the JPEG (full) range (Y: 0-255)
    av_opt_set_int(img_convert_ctx, "src_range", 1, 0);
    av_opt_set_int(img_convert_ctx, "dstw", dst_w, 0);
    av_opt_set_int(img_convert_ctx, "dsth", dst_h, 0);
    av_opt_set_int(img_convert_ctx, "dst_format", dst_pixfmt, 0);
    av_opt_set_int(img_convert_ctx, "dst_range", 1, 0);
    sws_init_context(img_convert_ctx, NULL, NULL);
      2. Converting one frame of data: sws_scale (converts the data according to the pixel formats you configured)
                sws_scale(sws_ctx, inFrame->data, inFrame->linesize, 0, height, outFrame->data, outFrame->linesize);  // using AVFrame members to hold the data
                sws_scale(sws_ctx, src_data, src_linesize, 0, src_h, dst_data, dst_linesiz);                          // using your own arrays to hold the data
      3. Free the context: sws_freeContext(sws_ctx);
6. Pixel formats: AVPixelFormat (illustrated in the sketch after this list)
      AV_PIX_FMT_YUV420P  // planar: each component stored in its own plane
      AV_PIX_FMT_RGB24    // packed: R, G, B interleaved in a single plane
7. Image scaling (interpolation flags)
      SWS_POINT: nearest-neighbor, each output pixel comes from 1 source point
      SWS_BILINEAR: bilinear interpolation over 4 source points
      SWS_BICUBIC: bicubic interpolation over 16 source points
8. YUV value ranges
      Limited range: Y spans 16-235, U and V span 16-240. FFmpeg calls this the "mpeg" range (value 0).
      Full range, used by JPEG and similar standards: Y, U and V all span 0-255. FFmpeg calls this the "jpeg" range (value 1).
      These are the values expected by the src_range and dst_range options shown above.
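
To make item 6's planar/packed distinction (and item 2's bits-per-pixel values) concrete, here is a minimal standalone sketch, not part of the original examples, that allocates one image in each format and prints what av_image_alloc() fills in; the 400x400 size is just a placeholder:

#include <stdio.h>
#include <libavutil/imgutils.h>
#include <libavutil/mem.h>
#include <libavutil/pixdesc.h>

int main(void)
{
    int w = 400, h = 400;
    uint8_t *data[4];
    int linesize[4];

    // Planar: YUV420P is split into three planes (Y, U, V), each with its own stride.
    int yuv_size = av_image_alloc(data, linesize, w, h, AV_PIX_FMT_YUV420P, 1);
    printf("YUV420P: %d bytes, %d bits/pixel, linesizes %d/%d/%d\n",
           yuv_size,
           av_get_bits_per_pixel(av_pix_fmt_desc_get(AV_PIX_FMT_YUV420P)),
           linesize[0], linesize[1], linesize[2]);
    av_freep(&data[0]);

    // Packed: RGB24 interleaves R, G, B in a single plane, so only data[0]/linesize[0] are used.
    int rgb_size = av_image_alloc(data, linesize, w, h, AV_PIX_FMT_RGB24, 1);
    printf("RGB24:   %d bytes, %d bits/pixel, linesize %d\n",
           rgb_size,
           av_get_bits_per_pixel(av_pix_fmt_desc_get(AV_PIX_FMT_RGB24)),
           linesize[0]);
    av_freep(&data[0]);
    return 0;
}

With a 400x400 image this reports 12 bits per pixel and three strides (400/200/200) for YUV420P, versus 24 bits per pixel and a single interleaved plane with stride 1200 for RGB24.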

Example 1: RGB24 to YUV420P with swscale

#include <stdio.h>
#include <stdlib.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>

int main(int argc, char **argv){
    av_register_all();

    // Read the source picture. Note: a real .bmp starts with a 54-byte header and stores
    // its rows bottom-up; neither is handled here, the file is treated as raw BGR24 data.
    FILE *infile = fopen("sucai.bmp", "rb+");
    int witd = 400;
    int height = 400;
    uint8_t *inbuffer = (uint8_t *)malloc(3 * witd * height);
    printf("%p---%p\n", (void *)inbuffer, (void *)&inbuffer);
    fread(inbuffer, witd * height * 3, 1, infile);
    fclose(infile);

    enum AVPixelFormat outPix = AV_PIX_FMT_YUV420P, inPix = AV_PIX_FMT_BGR24;

    // Output frame: allocate a buffer of the exact size of one YUV420P frame and
    // let avpicture_fill() point the frame's data/linesize into it.
    AVFrame *outFrame = av_frame_alloc();
    uint8_t *outBuffer = (uint8_t *)malloc(avpicture_get_size(outPix, witd, height));
    avpicture_fill((AVPicture *)outFrame, outBuffer, outPix, witd, height);

    // Input frame: wrap the BGR24 buffer we just read.
    AVFrame *inFrame = av_frame_alloc();
    avpicture_fill((AVPicture *)inFrame, inbuffer, inPix, witd, height);

    struct SwsContext *sws_ctx = sws_getContext(
        witd, height, inPix,
        witd, height, outPix,
        SWS_BILINEAR, NULL, NULL, NULL
        );
    sws_scale(sws_ctx, inFrame->data, inFrame->linesize, 0, height, outFrame->data, outFrame->linesize);
    sws_freeContext(sws_ctx);

    // Write the three planes (Y, then U, then V) of the converted frame.
    FILE *outFile = fopen("output444.yuv", "wb+");
    fwrite(outFrame->data[0], witd * height, 1, outFile);
    fwrite(outFrame->data[1], witd * height / 4, 1, outFile);
    fwrite(outFrame->data[2], witd * height / 4, 1, outFile);
    fclose(outFile);

    av_frame_free(&inFrame);
    av_frame_free(&outFrame);
    free(inbuffer);
    free(outBuffer);
    return 0;
}
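
avpicture_get_size(), avpicture_fill() and the AVPicture cast used above are deprecated in newer FFmpeg releases; the same setup can be done with libavutil's image utilities. Here is a minimal sketch of the replacement (the helper name alloc_and_wrap is made up for illustration):

#include <libavutil/frame.h>
#include <libavutil/imgutils.h>
#include <libavutil/mem.h>

/* Attach a freshly allocated, correctly sized buffer to an already allocated AVFrame.
 * av_image_get_buffer_size() replaces avpicture_get_size(), and
 * av_image_fill_arrays() replaces avpicture_fill(). The caller frees the
 * returned buffer with av_free() when done. */
static uint8_t *alloc_and_wrap(AVFrame *frame, enum AVPixelFormat pix, int w, int h)
{
    int size = av_image_get_buffer_size(pix, w, h, 1);
    uint8_t *buf = (uint8_t *)av_malloc(size);
    if (!buf)
        return NULL;
    av_image_fill_arrays(frame->data, frame->linesize, buf, pix, w, h, 1);
    return buf;
}

On the output side of the example above, uint8_t *outBuffer = alloc_and_wrap(outFrame, outPix, witd, height); would replace the avpicture_get_size()/avpicture_fill() pair.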

Example 2: YUV420P to RGB24 with swscale

Version 1
#include <stdio.h>
#include <stdlib.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>

int main(int argc, char **argv){
    av_register_all();

    enum AVPixelFormat src_pix = AV_PIX_FMT_YUV420P, dst_pix = AV_PIX_FMT_RGB24;

    FILE *src_file = fopen("sintel_480x272_yuv420p.yuv", "rb+");
    int src_w = 480, src_h = 272;
    uint8_t *src_buffer = (uint8_t *)malloc(src_w * src_h * 3 / 2);   // one YUV420P frame = w*h*3/2 bytes

    FILE *dst_file = fopen("sintel_1280x720_rgb42-6.rgb", "wb+");
    int dst_w = 1280, dst_h = 720;
    //uint8_t *dst_buffer = (uint8_t *)malloc(dst_w * dst_h * 3);
    uint8_t *dst_buffer = (uint8_t *)malloc(avpicture_get_size(dst_pix, dst_w, dst_h));  // or let the API compute the size of one RGB24 frame

    // Wrap both raw buffers in AVFrames so sws_scale() can use their data/linesize.
    AVFrame *src_frame = av_frame_alloc();
    AVFrame *dst_frame = av_frame_alloc();
    avpicture_fill((AVPicture *)src_frame, src_buffer, src_pix, src_w, src_h);
    avpicture_fill((AVPicture *)dst_frame, dst_buffer, dst_pix, dst_w, dst_h);

    struct SwsContext *sws_ctx = sws_getContext(
        src_w, src_h, src_pix,
        dst_w, dst_h, dst_pix,
        SWS_BICUBIC, NULL, NULL, NULL
        );
    int i = 0;
    while (1)
    {
        // Read exactly one YUV420P frame; stop at end of file.
        if (fread(src_buffer, 1, src_w * src_h * 3 / 2, src_file) != src_w * src_h * 3 / 2){
            break;
        }
        sws_scale(sws_ctx, src_frame->data, src_frame->linesize, 0, src_h, dst_frame->data, dst_frame->linesize);
        printf("frame %d converted\n", i);
        fwrite(dst_frame->data[0], 1, dst_w * dst_h * 3, dst_file);
        i++;
    }
    sws_freeContext(sws_ctx);
    av_frame_free(&src_frame);
    av_frame_free(&dst_frame);
    free(src_buffer);
    free(dst_buffer);
    fclose(src_file);
    fclose(dst_file);
    getchar();   // keep the console window open
    return 0;
}
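
If the loop may have to cope with sources whose size or format can change, sws_getCachedContext() is a convenient alternative to creating the context once up front: it returns the existing context unchanged when the parameters match and transparently recreates it otherwise. A minimal sketch (the helper convert_frame is made up for illustration; it assumes the same plane/stride arrays as these examples):

#include <libswscale/swscale.h>

/* Convert one frame, reusing *ctx across calls. *ctx starts out as NULL;
 * sws_getCachedContext() keeps it if the parameters are unchanged and
 * recreates it if they differ (e.g. the source size changes mid-stream). */
static int convert_frame(struct SwsContext **ctx,
                         const uint8_t *const src_data[], const int src_linesize[],
                         int src_w, int src_h, enum AVPixelFormat src_pix,
                         uint8_t *const dst_data[], const int dst_linesize[],
                         int dst_w, int dst_h, enum AVPixelFormat dst_pix)
{
    *ctx = sws_getCachedContext(*ctx,
                                src_w, src_h, src_pix,
                                dst_w, dst_h, dst_pix,
                                SWS_BICUBIC, NULL, NULL, NULL);
    if (!*ctx)
        return -1;
    return sws_scale(*ctx, src_data, src_linesize, 0, src_h, dst_data, dst_linesize);
}

In Version 1's loop this would replace the sws_getContext()/sws_scale() pair, with struct SwsContext *sws_ctx = NULL; declared before the loop and sws_freeContext(sws_ctx); still called after it.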
Version 2
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <libavformat/avformat.h>
#include <libavutil/imgutils.h>
#include <libavutil/mem.h>
#include <libavutil/pixdesc.h>
#include <libswscale/swscale.h>

int main(int argc, char **argv){
    av_register_all();

    enum AVPixelFormat src_pix = AV_PIX_FMT_YUV420P, dst_pix = AV_PIX_FMT_RGB24;

    FILE *src_file = fopen("sintel_480x272_yuv420p.yuv", "rb+");
    int src_w = 480, src_h = 272;
    int src_bpp = av_get_bits_per_pixel(av_pix_fmt_desc_get(src_pix));   // 12 bits per pixel for YUV420P
    uint8_t *src_buffer = (uint8_t *)malloc(src_w * src_h * src_bpp / 8);

    FILE *dst_file = fopen("sintel_1280x720_rgb42-9.rgb", "wb+");
    int dst_w = 1280, dst_h = 720;
    int dst_bpp = av_get_bits_per_pixel(av_pix_fmt_desc_get(dst_pix));   // 24 bits per pixel for RGB24

    // No AVFrame this time: manage the plane pointers and strides ourselves.
    uint8_t *src_data[4];
    int src_linesize[4];
    uint8_t *dst_data[4];
    int dst_linesiz[4];
    av_image_alloc(src_data, src_linesize, src_w, src_h, src_pix, 1);   // allocates the planes for the source format
    av_image_alloc(dst_data, dst_linesiz, dst_w, dst_h, dst_pix, 1);    // allocates the planes for the destination format

    struct SwsContext *sws_ctx = sws_getContext(
        src_w, src_h, src_pix,
        dst_w, dst_h, dst_pix,
        SWS_BICUBIC, NULL, NULL, NULL
        );
    int i = 0;
    while (1)
    {
        if (fread(src_buffer, 1, src_w * src_h * src_bpp / 8, src_file) != src_w * src_h * src_bpp / 8){
            break;
        }
        // Split the frame into its Y, U and V planes.
        memcpy(src_data[0], src_buffer, src_w * src_h);
        memcpy(src_data[1], src_buffer + src_w * src_h, src_w * src_h / 4);
        memcpy(src_data[2], src_buffer + src_w * src_h + src_w * src_h / 4, src_w * src_h / 4);
        sws_scale(sws_ctx, src_data, src_linesize, 0, src_h, dst_data, dst_linesiz);
        printf("frame %d converted\n", i);
        fwrite(dst_data[0], 1, dst_w * dst_h * dst_bpp / 8, dst_file);
        i++;
    }
    sws_freeContext(sws_ctx);
    av_freep(&src_data[0]);
    av_freep(&dst_data[0]);
    free(src_buffer);
    fclose(src_file);
    fclose(dst_file);
    getchar();   // keep the console window open
    return 0;
}
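
Since av_image_alloc() with an alignment of 1 lays the planes out tightly packed, Version 2's intermediate src_buffer and the three memcpy() calls are not strictly necessary: each plane can be read straight from the file into src_data. A sketch of such a read loop, reusing Version 2's variables (this relies on the tight-layout assumption above and is not code from the original):

// Drop-in replacement for Version 2's read loop: with av_image_alloc(..., 1),
// linesize[0] == src_w and linesize[1] == linesize[2] == src_w / 2 for YUV420P,
// so each plane can be filled directly from the file.
while (fread(src_data[0], 1, src_w * src_h,     src_file) == (size_t)(src_w * src_h) &&
       fread(src_data[1], 1, src_w * src_h / 4, src_file) == (size_t)(src_w * src_h / 4) &&
       fread(src_data[2], 1, src_w * src_h / 4, src_file) == (size_t)(src_w * src_h / 4)) {
    sws_scale(sws_ctx, src_data, src_linesize, 0, src_h, dst_data, dst_linesiz);
    fwrite(dst_data[0], 1, dst_w * dst_h * dst_bpp / 8, dst_file);
}

Everything else from Version 2 (the allocations, the SwsContext setup, and the cleanup) stays the same.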
