// Sample code: pull an RTSP stream from an IP camera with FFmpeg and render it with SDL2.
int IpCamera()
{
AVDictionary* options = NULL;
av_dict_set(&options, "buffer_size", "4096000", 0);
av_dict_set(&options, "rtsp_transport", "udp", 0);
av_dict_set(&options, "stimeout", "20000000", 0);
av_dict_set(&options, "max_delay", "500000", 0);
int ret = 0;
const char* url = "rtsp://192.168.0.188:554/stream/main";
AVFormatContext* m_fmt_ctx = avformat_alloc_context();
ret = avformat_open_input(&m_fmt_ctx, url, NULL, &options);
if (ret != 0) {
cout << "avformat_open_input is fail !!!" << endl;
return -1;
}
m_fmt_ctx->probesize = 1000;
m_fmt_ctx->max_analyze_duration = 2048;
ret = avformat_find_stream_info(m_fmt_ctx, NULL);
if (ret < 0) {
cout << "avformat_find_stream_info is fail !!!" << endl;
return -1;
}
av_dump_format(m_fmt_ctx, 0, url, 0);
int video_stream = -1;
video_stream = av_find_best_stream(m_fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
if (video_stream < 0) {
return -1;
}
AVCodecContext* _codec_ctx = m_fmt_ctx->streams[video_stream]->codec;
AVCodec* _codec = avcodec_find_decoder(_codec_ctx->codec_id);
if (_codec == NULL) {
return -1;
}
ret = avcodec_open2(_codec_ctx, _codec, NULL);
if (ret != 0) {
return -1;
}
int width = m_fmt_ctx->streams[video_stream]->codecpar->width;
int height = m_fmt_ctx->streams[video_stream]->codecpar->height;
int pts = m_fmt_ctx->streams[video_stream]->codec->framerate.den;
AVPixelFormat inputFormat = m_fmt_ctx->streams[video_stream]->codec->pix_fmt;
ret = SDL_Init(SDL_INIT_VIDEO);
SDL_Window* window = SDL_CreateWindow("FFmpeg_Demo", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, 800, 600, SDL_WINDOW_SHOWN);
SDL_Renderer* renderer = NULL;
if (window) {
renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
if (!renderer) {
av_log(NULL, AV_LOG_WARNING, "Failed to initialize a hardware accelerated renderer: %s\n", SDL_GetError());
renderer = SDL_CreateRenderer(window, -1, 0);
}
}
SDL_Texture* texture = NULL;
Uint32 format = FFmpegFmt_2_SDLFmt(inputFormat);
if (!(texture = SDL_CreateTexture(renderer, format, SDL_TEXTUREACCESS_STREAMING, width, height)))
return -1;
int frameIndex = 0;
int got_picture = 0;
AVPacket* packet = av_packet_alloc();
AVFrame *frame_yuv = av_frame_alloc();
int64_t pre_pts = -1;
int64_t start_time = av_gettime();
while (frameIndex < 500)
{
ret = av_read_frame(m_fmt_ctx, packet);
if (ret < 0) {
break;
}
if (packet->stream_index == video_stream)
{
if (packet->pts == AV_NOPTS_VALUE) {
AVRational time_base1 = m_fmt_ctx->streams[video_stream]->time_base;
int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(m_fmt_ctx->streams[video_stream]->r_frame_rate);
packet->pts = (double)(frameIndex * calc_duration) / (double)(av_q2d(time_base1) * AV_TIME_BASE);
packet->dts= packet->pts;
packet->duration = (double)calc_duration / (double)(av_q2d(time_base1) * AV_TIME_BASE);
}
ret = avcodec_decode_video2(_codec_ctx, frame_yuv, &got_picture, packet);
if (ret < 0) {
printf("Decode Error.\n");
return ret;
}
if (got_picture) {
if (pre_pts == -1)
pre_pts = frame_yuv->pts;
AVRational time_base = m_fmt_ctx->streams[video_stream]->time_base;
AVRational time_base_q = { 1,AV_TIME_BASE };
int64_t pts_time = av_rescale_q(frame_yuv->pts, time_base, time_base_q);
int64_t now_time = av_gettime() - start_time;
if (pts_time > now_time) {
cout << "delay time : " << pts_time - now_time << endl;
av_usleep(pts_time - now_time );
}
cout << "index: " << frameIndex << " pts:" << frame_yuv->pts <<" size: "<<packet->size <<" playTime :" << pts_time << " key" << frame_yuv->key_frame<< endl;
ret = SDL_UpdateYUVTexture(texture, NULL,
frame_yuv->data[0], frame_yuv->linesize[0],
frame_yuv->data[1], frame_yuv->linesize[1],
frame_yuv->data[2], frame_yuv->linesize[2]);
if (ret < 0)
break;
ret = SDL_RenderClear(renderer);
if (ret < 0)
break;
ret = SDL_RenderCopy(renderer, texture, NULL, NULL);
if (ret < 0)
break;
SDL_RenderPresent(renderer);
frameIndex++;
}
}
av_packet_unref(packet);
}
SDL_DestroyTexture(texture);
SDL_DestroyRenderer(renderer);
SDL_DestroyWindow(window);
SDL_Quit();
av_packet_free(&packet);
avformat_close_input(&m_fmt_ctx);
return 0;
}