Android: software-encoding camera-captured video into a video file with FFmpeg

This post implements the capture-side encoding: Android calls FFmpeg through JNI to encode raw YUV frames into a video file. Code first:

//entity that holds the encoder context and state
struct EnCodeBean {
    FILE *f;
    AVFrame *frame;
    AVPacket *pkt;
    AVCodecContext *c = NULL;
    int width = 0;
    int height = 0;
};

EnCodeBean *videoEncodeObj = NULL;

//encode a single frame (forward declaration)
static void encode(AVCodecContext *enc_ctx, AVFrame *frame, AVPacket *pkt,
                   FILE *outfile);

//initialize the encoder
static jint initEnCodec(JNIEnv *env, jobject jobject1, jstring path, jint w, jint h) {
    videoEncodeObj = new EnCodeBean;
    char filename[300];
    const AVCodec *codec;
    int ret;
    char errorArr[500];

    videoEncodeObj->width = w;
    videoEncodeObj->height = h;

//    example path: "/storage/emulated/0/Download/jason_video.h265"
    //fetch the output path argument
    const char *_path = env->GetStringUTFChars(path, 0);
    snprintf(filename, sizeof(filename), "%s", _path);
    env->ReleaseStringUTFChars(path, _path);

    AVCodecID videoCodec = AV_CODEC_ID_MPEG2VIDEO; //encoder type (this writes a raw MPEG-2 stream, even though the sample path ends in .h265)

    /* find the encoder */
    codec = avcodec_find_encoder(videoCodec);
    LOGJASON(FMT_TAG, avcodec_get_name(videoCodec));
    if (!codec) {
        sprintf(errorArr, "Codec '%s' not found\n", avcodec_get_name(videoCodec));
        LOGJASON(FMT_TAG, errorArr);
        return -1;
    }

    //allocate the codec context
    videoEncodeObj->c = avcodec_alloc_context3(codec);
    if (!videoEncodeObj->c) {
        sprintf(errorArr, "Could not allocate video codec context\n");
        LOGJASON(FMT_TAG, errorArr);
        return -1;
    }

    //packet that will receive the encoded output
    videoEncodeObj->pkt = av_packet_alloc();
    if (!videoEncodeObj->pkt) {
        sprintf(errorArr, "packet init error\n");
        LOGJASON(FMT_TAG, errorArr);
        return -1;
    }

    /* set encoding parameters */
    videoEncodeObj->c->bit_rate = 639 * 1000;//bit rate, i.e. quality
    /* resolution must be a multiple of two */
    videoEncodeObj->c->width = w;
    videoEncodeObj->c->height = h;
    /* frames per second */
    videoEncodeObj->c->time_base = (AVRational) {1, 25};//time base: one pts tick = 1/25 s
    videoEncodeObj->c->framerate = (AVRational) {25, 1};//frame rate

    videoEncodeObj->c->gop_size = 10;//an intra frame at most every 10 frames
    videoEncodeObj->c->max_b_frames = 1;
    videoEncodeObj->c->pix_fmt = AV_PIX_FMT_YUV420P;//input pixel format (planar I420)

    if (codec->id == AV_CODEC_ID_H264)
        av_opt_set(videoEncodeObj->c->priv_data, "preset", "slow", 0);

    /* open the encoder */
    ret = avcodec_open2(videoEncodeObj->c, codec, NULL);
    if (ret < 0) {
        // av_strerror instead of av_err2str: the latter relies on a compound
        // literal that does not compile as C++
        char errbuf[AV_ERROR_MAX_STRING_SIZE];
        av_strerror(ret, errbuf, sizeof(errbuf));
        sprintf(errorArr, "Could not open codec: %s\n", errbuf);
        LOGJASON(FMT_TAG, errorArr);
        return -1;
    }

    //open (create) the output file
    videoEncodeObj->f = fopen(filename, "wb");
    if (!videoEncodeObj->f) {
        sprintf(errorArr, "Could not open %s\n", filename);
        LOGJASON(FMT_TAG, errorArr);
        return -1;
    }

    //allocate the reusable frame
    videoEncodeObj->frame = av_frame_alloc();
    if (!videoEncodeObj->frame) {
        sprintf(errorArr, "Could not allocate video frame\n");
        LOGJASON(FMT_TAG, errorArr);
        return -1;
    }
    videoEncodeObj->frame->format = videoEncodeObj->c->pix_fmt;
    videoEncodeObj->frame->width = videoEncodeObj->c->width;
    videoEncodeObj->frame->height = videoEncodeObj->c->height;

    //allocate the frame buffers according to format/width/height
    ret = av_frame_get_buffer(videoEncodeObj->frame, 32);//32-byte alignment
    if (ret < 0) {
        sprintf(errorArr, "Could not allocate the video frame data\n");
        LOGJASON(FMT_TAG, errorArr);
        return -1;
    }
    LOGJASON("init ok %p", videoEncodeObj);
    return 0;
}
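A note on the codec choice: AV_CODEC_ID_MPEG2VIDEO produces a raw MPEG-2 elementary stream, so the .h265 file name in the sample is just a label. If your FFmpeg build includes an H.264 encoder, a hypothetical variant of the lookup (a sketch, not part of the original code) could pick it instead:

//sketch: prefer libx264 when available (assumes an FFmpeg build configured
//with --enable-libx264), otherwise fall back to any registered H.264 encoder
const AVCodec *h264 = avcodec_find_encoder_by_name("libx264");
if (!h264)
    h264 = avcodec_find_encoder(AV_CODEC_ID_H264);
if (h264)
    codec = h264; //and name the output .h264 to match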

//encode one frame (pass frame == NULL to flush the encoder)
static void encode(AVCodecContext *enc_ctx, AVFrame *frame, AVPacket *pkt,
                   FILE *outfile) {
    int ret;
    char errorStr[500];

    /* send the frame to the encoder */
    if (frame)
        LOGJASON("Send frame %lld\n", frame->pts);

    //feed the frame to the encoder
    ret = avcodec_send_frame(enc_ctx, frame);
    if (ret < 0) {
        sprintf(errorStr, "Error sending a frame for encoding\n");
        LOGJASON(FMT_TAG, errorStr);
        return;
    }

    while (ret >= 0) {
        //pull encoded packets out of the encoder
        ret = avcodec_receive_packet(enc_ctx, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return; //EAGAIN: the encoder needs more input before it can emit a packet
        else if (ret < 0) {
            sprintf(errorStr, "Error during encoding\n");
            LOGJASON(FMT_TAG, errorStr);
            return;
        }

        LOGJASON("Write packet %lld (size=%5d)\n", pkt->pts, pkt->size);
        //append the packet to the output file
        fwrite(pkt->data, 1, pkt->size, outfile);
        av_packet_unref(pkt);
    }
}

//encoding entry point called from Java with one frame of YUV data
static jint pushFrame(JNIEnv *env, jobject jobject1, jlong pts, jbyteArray jbyteArray1) {
    if (videoEncodeObj == NULL)
        return -1;

    //copy the YUV bytes out of the Java array
    int len = env->GetArrayLength(jbyteArray1);
    unsigned char *buf = new unsigned char[len];
    env->GetByteArrayRegion(jbyteArray1, 0, len, reinterpret_cast<jbyte *>(buf));

    /* make sure the frame data is writable */
    int ret;
    char errorArr[500];

    ret = av_frame_make_writable(videoEncodeObj->frame);
    if (ret < 0) {
        sprintf(errorArr, "av_frame_make_writable is error");
        LOGJASON(FMT_TAG, errorArr);
        delete[] buf;
        return -1;
    }

    int w = videoEncodeObj->width;
    int h = videoEncodeObj->height;
    int frameSize = w * h;

    //copy the planar I420 data into the frame buffers row by row,
    //because each plane's linesize may be padded for alignment
    for (int row = 0; row < h; row++)
        memcpy(videoEncodeObj->frame->data[0] + row * videoEncodeObj->frame->linesize[0],
               buf + row * w, w);
    for (int row = 0; row < h / 2; row++) {
        memcpy(videoEncodeObj->frame->data[1] + row * videoEncodeObj->frame->linesize[1],
               buf + frameSize + row * (w / 2), w / 2);
        memcpy(videoEncodeObj->frame->data[2] + row * videoEncodeObj->frame->linesize[2],
               buf + frameSize + frameSize / 4 + row * (w / 2), w / 2);
    }

    videoEncodeObj->frame->pts = pts;

    /* encode this frame */
    encode(videoEncodeObj->c, videoEncodeObj->frame, videoEncodeObj->pkt, videoEncodeObj->f);

    //buf is our own copy made by GetByteArrayRegion, so free it with delete[]
    delete[] buf;
    return 0;
}
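pushFrame expects planar I420 (Y, then U, then V), but Android cameras usually deliver NV21, where the chroma is a single interleaved VU plane. A minimal repack sketch, under the assumption that the input really is NV21 (the helper name nv21ToI420 is hypothetical):

//sketch: repack NV21 (Y plane + interleaved VU) into I420 (Y + U + V)
static void nv21ToI420(const unsigned char *nv21, unsigned char *i420, int w, int h) {
    int frameSize = w * h;
    memcpy(i420, nv21, frameSize);                        //the Y plane is identical
    unsigned char *u = i420 + frameSize;                  //U (Cb) plane in I420
    unsigned char *v = i420 + frameSize + frameSize / 4;  //V (Cr) plane in I420
    for (int j = 0; j < frameSize / 2; j += 2) {
        v[j / 2] = nv21[frameSize + j];     //NV21 stores V first...
        u[j / 2] = nv21[frameSize + j + 1]; //...then U, interleaved
    }
}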

//shut down the encoder and release everything
static jint endClose(JNIEnv *env, jobject jobject1) {
    if (videoEncodeObj == NULL)
        return -1;
    uint8_t endcode[] = {0, 0, 1, 0xb7};//MPEG sequence end code
    /* flush the frames buffered inside the encoder */
    encode(videoEncodeObj->c, NULL, videoEncodeObj->pkt, videoEncodeObj->f);

    /* write the stream trailer */
    fwrite(endcode, 1, sizeof(endcode), videoEncodeObj->f);
    fclose(videoEncodeObj->f);

    //free the context, frame and packet
    avcodec_free_context(&videoEncodeObj->c);
    av_frame_free(&videoEncodeObj->frame);
    av_packet_free(&videoEncodeObj->pkt);
    LOGJASON("is end ok");

    delete videoEncodeObj;
    videoEncodeObj = NULL;

    LOGJASON("free out is ok");
    return 0;
}

static jint showMsg(JNIEnv *env, jobject jobject1, jstring jstring1) {
    const char *instr = env->GetStringUTFChars(jstring1, 0);
    LOGJASON(FMT_TAG, instr);
    env->ReleaseStringUTFChars(jstring1, instr);
    return 0;
}

//----------------------------------dynamic JNI method registration-----------------------------------------

static JNINativeMethod javaMethods[] = {
        {"initEnCodec", "(Ljava/lang/String;II)I", (void *) initEnCodec},
        {"pushFrame",   "(J[B)I",                  (void *) pushFrame},
        {"showMsg",     "(Ljava/lang/String;)I",   (void *) showMsg},
        {"endClose",    "()I",                     (void *) endClose}
};

jint JNI_OnLoad(JavaVM *vm, void *unused) {
    JNIEnv *env = NULL;
    if (vm->GetEnv((void **) &env, JNI_VERSION_1_4) != JNI_OK) {
        LOGJASON(FMT_TAG, "failed to get JNIEnv");
        return -1;
    }

    const char *className = "com/liyihang/jason/VideoEnCodec";
    int methodNum = sizeof(javaMethods) / sizeof(JNINativeMethod);

    jclass jclass1 = env->FindClass(className);
    if (jclass1 == NULL) {
        LOGJASON(FMT_TAG, "find class error");
        return -1;
    }

    int ret = env->RegisterNatives(jclass1, javaMethods, methodNum);
    if (ret < 0) {
        env->DeleteLocalRef(jclass1);
        return -1;
    }
    env->DeleteLocalRef(jclass1);
    return JNI_VERSION_1_4;
}
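For symmetry, the VM also offers JNI_OnUnload, which runs if the library's class loader is ever garbage-collected. The original only registers; a minimal sketch of the optional counterpart, assuming nothing beyond the class name used above:

//sketch (optional): undo the dynamic registration from JNI_OnLoad
void JNI_OnUnload(JavaVM *vm, void *unused) {
    JNIEnv *env = NULL;
    if (vm->GetEnv((void **) &env, JNI_VERSION_1_4) != JNI_OK)
        return;
    jclass jclass1 = env->FindClass("com/liyihang/jason/VideoEnCodec");
    if (jclass1 != NULL) {
        env->UnregisterNatives(jclass1);
        env->DeleteLocalRef(jclass1);
    }
}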

The Java wrapper that loads and calls the .so library:

public class VideoEnCodec {

    static {
        System.loadLibrary("native-lib");
    }


    public native int initEnCodec(String path, int w, int h);
    public native int pushFrame(long pts,byte[] arr);
    public native int endClose();
    public native int showMsg(String msg);
}

How to call it:

    //must run on a worker thread
    private void handle() {
        int w=320;
        int h=240;
        VideoEnCodec videoEnCodec = new VideoEnCodec();
        //initEnCodec and endClose must always be called as a pair
        videoEnCodec.initEnCodec("/storage/emulated/0/Download/jason_video3.h265", w,h);
        for (int i = 0; i < 90; i++) {
            //push one camera frame per call; real NV21 camera data must first be converted to I420 (i is the pts)
            byte[] arr = makeBuf(w,h, i);
            videoEnCodec.pushFrame(i,arr);
        }
        //close the encoder
        videoEnCodec.endClose();
    }


    //build a dummy YUV (I420) frame
    private byte[] makeBuf(int w, int h, int i){
        /* prepare a dummy image */
        int y,x;
        byte[] arr=new byte[w*h+(w*h)/2];
        /* Y */
        int offset=0;
        for (y = 0; y < h; y++) {
            for (x = 0; x < w; x++) {
                arr[offset] = (byte) (x + y + i * 3);
                offset++;
            }
        }

        /* Cb and Cr */
        for (y = 0; y < h / 2; y++) {
            for (x = 0; x < w / 2; x++) {
                arr[offset]= (byte) (128 + y + i * 2);
                arr[offset+(w*h/4)]= (byte) (64 + x + i * 5);
                offset++;
            }
        }
        return arr;
    }

To keep things easy to follow, no wrapper classes are used here, and the per-frame image data is generated by hand; for real camera capture, see the camera-capture articles on my blog.
The code also carries detailed comments to aid understanding.

Supplementary code (includes and macros):

#include <jni.h>
#include <android/log.h>
#include <cstdio>
#include <cstring>
#include <cstdint>

extern "C" {
#include "include/libavutil/imgutils.h"
#include "include/libavutil/samplefmt.h"
#include "include/libavformat/avformat.h"
#include "include/libavutil/frame.h"
#include "include/libavutil/mem.h"
#include "include/libswscale/swscale.h"
#include "include/libswresample/swresample.h"
#include "include/libavutil/opt.h"
#include "include/libavfilter/avfilter.h"
#include "include/libavcodec/avcodec.h"
#include "include/libavfilter/buffersink.h"
#include "include/libavfilter/buffersrc.h"
}

// log format tag
#define  FMT_TAG    "%s"
// info-level log macro
#define LOGJASON(...) __android_log_print(ANDROID_LOG_INFO,"jason_jni",__VA_ARGS__)

#if defined(__arm64__) || defined(__aarch64__)
#define JSONT 1
#else
#define JSONT 2
#endif

Coming up next: audio capture. Follow my blog if you are interested.

For setting up an Android JNI FFmpeg build environment, see my other posts; there are plenty of articles on that topic online, so I won't repeat them here.

If you repost this article, please credit the original source. Thanks!
