Test environment: Raspberry Pi 3.
Library: FFmpeg.
A note up front: the code in this article was collected from the web; I merged the pieces and carefully trimmed some of the redundancy to arrive at the version below.
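In short, the program opens the V4L2 camera at /dev/video0, decodes one frame, converts it to YUV420P with libswscale, dumps the raw planes to 11yuv420.yuv, and finally encodes the converted frame into cuc_view_encode.jpg using the MJPEG encoder selected by the .jpg output name.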
Without further ado, here is the code.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "libavformat/avformat.h"
#include "libavcodec/avcodec.h"
#include "libavdevice/avdevice.h"
#include "libavutil/imgutils.h"
#include "libavutil/pixdesc.h"
#include "libavutil/channel_layout.h"
#include "libswscale/swscale.h"
const char* input_name = "video4linux2";
const char* file_name = "/dev/video0";
const char* out_file = "11yuv420.yuv";
static AVFormatContext *ifmt_ctx;
static AVFormatContext *ofmt_ctx;
typedef struct StreamContext {
AVCodecContext *dec_ctx;
AVCodecContext *enc_ctx;
} StreamContext;
static StreamContext *stream_ctx;
const char* jpeg_file = "cuc_view_encode.jpg";
int g_stream_index = -1;
void release_frame(AVFrame *frame)
{
if(frame)
{
av_frame_unref(frame);
av_frame_free(&frame);
}
}
void release_packet(AVPacket *packet)
{
if(packet)
{
av_packet_unref(packet);
av_packet_free(&packet);
}
}
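/* open /dev/video0 through the v4l2 input format, read the stream info, and
 * allocate/open a decoder context for every stream; the index of the video
 * stream is stored in g_stream_index */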
int open_input_file()
{
int i = 0;
int ret = -1;
int videoindex = -1;
ifmt_ctx = NULL;
/* /dev/video0 is a V4L2 device, so request the video4linux2 input format explicitly */
AVInputFormat *ifmt = av_find_input_format(input_name);
if ((ret = avformat_open_input(&ifmt_ctx, file_name, ifmt, NULL)) < 0){
av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
return ret;
}
av_dump_format(ifmt_ctx, 0, file_name, 0);
if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
return ret;
}
stream_ctx = av_mallocz_array(ifmt_ctx->nb_streams, sizeof(*stream_ctx));
if (!stream_ctx)
{
return AVERROR(ENOMEM);
}
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
AVStream *stream = ifmt_ctx->streams[i];
AVCodec *dec = avcodec_find_decoder(stream->codecpar->codec_id);
AVCodecContext *codec_ctx;
if (!dec) {
av_log(NULL, AV_LOG_ERROR, "Failed to find decoder for stream #%u\n", i);
return AVERROR_DECODER_NOT_FOUND;
}
codec_ctx = avcodec_alloc_context3(dec);
if (!codec_ctx) {
av_log(NULL, AV_LOG_ERROR, "Failed to allocate the decoder context for stream #%u\n", i);
return AVERROR(ENOMEM);
}
ret = avcodec_parameters_to_context(codec_ctx, stream->codecpar);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Failed to copy decoder parameters to input decoder context "
"for stream #%u\n", i);
return ret;
}
/* Reencode video & audio and remux subtitles etc. */
if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
|| codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
codec_ctx->framerate = av_guess_frame_rate(ifmt_ctx, stream, NULL);
/* Open decoder */
ret = avcodec_open2(codec_ctx, dec, NULL);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
return ret;
}
}
stream_ctx[i].dec_ctx = codec_ctx;
}
videoindex= -1;
for(i = 0; i < ifmt_ctx->nb_streams; i++)
if(ifmt_ctx->streams[i]->codecpar->codec_type==AVMEDIA_TYPE_VIDEO){
videoindex=i;
break;
}
if(videoindex==-1){
printf("Didn't find a video stream.\n");
return -1;
}
g_stream_index = videoindex;
printf("video index is %d, nb_stream is %d\n", videoindex, ifmt_ctx->nb_streams);
printf("picture width = %d \n", stream_ctx[videoindex].dec_ctx->width);
printf("picture height = %d \n", stream_ctx[videoindex].dec_ctx->height);
printf("Pixel Format = %d \n", stream_ctx[videoindex].dec_ctx->pix_fmt);
return ret;
}
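/* create the output format context for cuc_view_encode.jpg (the .jpg name selects
 * the MJPEG/image2 muxer), allocate and open an encoder per video/audio stream,
 * and open the output file */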
int open_output_file()
{
AVStream *out_stream;
AVStream *in_stream;
AVCodecContext *dec_ctx, *enc_ctx;
AVCodec *encoder;
unsigned int i;
int ret = -1;
ofmt_ctx = NULL;
avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, jpeg_file);
if (!ofmt_ctx) {
av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
return AVERROR_UNKNOWN;
}
printf("codec is is %d---%d----", ofmt_ctx->video_codec_id, ofmt_ctx->oformat->video_codec, AV_CODEC_ID_MJPEG);
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
out_stream = avformat_new_stream(ofmt_ctx, NULL);
if (!out_stream) {
av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
return AVERROR_UNKNOWN;
}
in_stream = ifmt_ctx->streams[i];
dec_ctx = stream_ctx[i].dec_ctx;
if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
|| dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
/* encode with the output format's default video codec (MJPEG for a .jpg output) */
encoder = avcodec_find_encoder(ofmt_ctx->oformat->video_codec);
if (!encoder) {
av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
return AVERROR_INVALIDDATA;
}
enc_ctx = avcodec_alloc_context3(encoder);
if (!enc_ctx) {
av_log(NULL, AV_LOG_FATAL, "Failed to allocate the encoder context\n");
return AVERROR(ENOMEM);
}
/* In this example, we transcode to same properties (picture size,
* sample rate etc.). These properties can be changed for output
* streams easily using filters */
if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
enc_ctx->height = dec_ctx->height;
enc_ctx->width = dec_ctx->width;
enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
/* take first format from list of supported formats */
if (encoder->pix_fmts)
enc_ctx->pix_fmt = encoder->pix_fmts[0];
else
enc_ctx->pix_fmt = dec_ctx->pix_fmt;
/* video time_base can be set to whatever is handy and supported by encoder */
enc_ctx->time_base = av_inv_q(dec_ctx->framerate);
} else {
enc_ctx->sample_rate = dec_ctx->sample_rate;
enc_ctx->channel_layout = dec_ctx->channel_layout;
enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
/* take first format from list of supported formats */
enc_ctx->sample_fmt = encoder->sample_fmts[0];
enc_ctx->time_base = (AVRational){1, enc_ctx->sample_rate};
}
/* the global-header flag must be set before the encoder is opened */
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
/* Third parameter can be used to pass settings to encoder */
ret = avcodec_open2(enc_ctx, encoder, NULL);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);
return ret;
}
ret = avcodec_parameters_from_context(out_stream->codecpar, enc_ctx);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Failed to copy encoder parameters to output stream #%u\n", i);
return ret;
}
out_stream->time_base = enc_ctx->time_base;
stream_ctx[i].enc_ctx = enc_ctx;
} else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN) {
av_log(NULL, AV_LOG_FATAL, "Elementary stream #%d is of unknown type, cannot proceed\n", i);
return AVERROR_INVALIDDATA;
} else {
/* if this stream must be remuxed */
ret = avcodec_parameters_copy(out_stream->codecpar, in_stream->codecpar);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Copying parameters for stream #%u failed\n", i);
return ret;
}
out_stream->time_base = in_stream->time_base;
}
}
av_dump_format(ofmt_ctx, 0, jpeg_file, 1);
if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
ret = avio_open(&ofmt_ctx->pb, jpeg_file, AVIO_FLAG_WRITE);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", jpeg_file);
return ret;
}
}
return ret;
}
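/* write the file header, encode the converted frame with the MJPEG encoder,
 * then write the packet and the trailer to cuc_view_encode.jpg */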
int save_to_jpeg(const AVFrame * frame)
{
AVPacket *packet;
int ret;
unsigned int i;
AVCodecContext * enc_ctx = stream_ctx[g_stream_index].enc_ctx;
/* init muxer, write output file header */
ret = avformat_write_header(ofmt_ctx, NULL);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
return ret;
}
packet = av_packet_alloc();
if (!packet)
return AVERROR(ENOMEM);
ret = avcodec_send_frame(enc_ctx, frame);
if (ret < 0) {
fprintf(stderr, "Error sending a frame for encoding\n");
exit(1);
}
ret = avcodec_receive_packet(enc_ctx, packet);
if(ret)
{
printf("receive packet is error\n");
return -1;
}
packet->stream_index = g_stream_index;
av_packet_rescale_ts(packet, enc_ctx->time_base,
ofmt_ctx->streams[g_stream_index]->time_base);
printf("stream index is %d\n", packet->stream_index);
printf("write frame\n");
ret = av_write_frame(ofmt_ctx, packet);
//Write Trailer
av_write_trailer(ofmt_ctx);
release_packet(packet);
printf("Encode Successful.\n");
return 0;
}
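/* read one packet from the camera, decode it, convert the frame to YUV420P with
 * sws_scale, dump the raw planes to 11yuv420.yuv and pass the converted frame
 * to save_to_jpeg() */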
int captureOneFrame(void){
struct SwsContext *sws_ctx;
AVPacket *packet;
AVFrame *frame;
AVFrame *yuvFrame;
FILE *fp;
int i;
int ret = 0;
enum AVPixelFormat dst_pix_fmt = AV_PIX_FMT_YUV420P;
uint8_t *dst_data[4];
int dst_linesize[4];
int dst_bufsize;
uint8_t *dst_buffer= NULL;
AVCodecContext * dec_ctx = stream_ctx[g_stream_index].dec_ctx;
int dst_bpp=av_get_bits_per_pixel(av_pix_fmt_desc_get(dst_pix_fmt));
fp = fopen(out_file, "wb");
if (fp == NULL) {
printf("open frame data file failed\n");
return -1;
}
sws_ctx = sws_getContext(dec_ctx->width, dec_ctx->height,
dec_ctx->pix_fmt,
dec_ctx->width,
dec_ctx->height,
dst_pix_fmt, SWS_BILINEAR, NULL, NULL, NULL);
dst_bufsize = av_image_alloc(dst_data, dst_linesize,
dec_ctx->width,
dec_ctx->height,
dst_pix_fmt, 1);
packet = av_packet_alloc();
ret = av_read_frame(ifmt_ctx, packet);
if (ret < 0) {
printf("read frame from camera failed\n");
return ret;
}
frame = av_frame_alloc();
ret = avcodec_send_packet(dec_ctx, packet);
if (ret < 0) {
fprintf(stderr, "Error sending a packet for decoding\n");
exit(1);
}
ret = avcodec_receive_frame(dec_ctx, frame);
if(ret)
{
printf("decoding is error\n");
return -1;
}
/* clone the decoded frame, then point its data at the converted YUV420P buffer below */
yuvFrame = av_frame_clone(frame);
yuvFrame->format = dst_pix_fmt;
sws_scale(sws_ctx, (const uint8_t * const *)frame->data, frame->linesize, 0, dec_ctx->height, dst_data, dst_linesize);
dst_buffer = av_malloc(dec_ctx->width*dec_ctx->height*dst_bpp/8);
av_image_copy_to_buffer(dst_buffer,
dec_ctx->width*dec_ctx->height*dst_bpp/8,
(const uint8_t * const *)dst_data,
(const int *)dst_linesize,dst_pix_fmt,
dec_ctx->width,
dec_ctx->height
,1);
av_image_fill_arrays(yuvFrame->data, yuvFrame->linesize, dst_buffer, yuvFrame->format, yuvFrame->width, yuvFrame->height ,1 );
fwrite(dst_buffer, 1, dec_ctx->width*dec_ctx->height*dst_bpp/8, fp);
fclose(fp);
sws_freeContext(sws_ctx);
save_to_jpeg(yuvFrame);
if(dst_buffer)
{
av_free(dst_buffer);
}
release_frame(frame);
release_frame(yuvFrame);
release_packet(packet);
av_freep(&dst_data[0]);
return ret;
}
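/* free the decoder/encoder contexts, the stream context array and both format contexts */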
void close_stream()
{
int i = 0;
if(ifmt_ctx && stream_ctx)
{
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
/* avcodec_free_context() also closes the codec and is safe on NULL entries
 * (remuxed streams never get an encoder context) */
avcodec_free_context(&stream_ctx[i].dec_ctx);
avcodec_free_context(&stream_ctx[i].enc_ctx);
}
}
if(stream_ctx)
{
av_free(stream_ctx);
}
if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
{
avio_closep(&ofmt_ctx->pb);
}
if(ofmt_ctx)
{
avformat_free_context(ofmt_ctx);
}
if(ifmt_ctx)
{
/* avformat_close_input() also frees the context and resets the pointer */
avformat_close_input(&ifmt_ctx);
}
}
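/* register devices/formats, open input and output, grab one frame and release everything */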
int main(void){
avdevice_register_all();
av_register_all(); /* deprecated since FFmpeg 4.0 and removed in 5.0; drop this line on newer versions */
if(open_input_file() < 0)
{
printf("Opening input file is fail\n");
return -1;
}
if(open_output_file() < 0)
{
printf("Opening output file is fail\n");
return -1;
}
captureOneFrame();
close_stream();
return 0;
}