终于可以说从“探”进入到了“入”的文章
说实话已经搞了两个月,中间不断地有各种需求实际做的时间很少,而且这俩月还考了两个证,软件设计师和期货投资分析。所以拖了我这么久才弄好一个小demo,当然,大部分时间都浪费掉用来划水了也是原因之一(炉石传说真尼玛好玩)
编码端:
1、调用ffmpeg进行截屏,截屏数据是BMP,需要进行解码,解码之后转为XXX,还需要转换为YUV420,然后对YUV进行编码为H265,然后进行拆包发送。
2、H265比H264压缩率高出好多倍,很适合用来进行p2p发送。因为数据越少越好。
3、拆包发送是难点,因为编码后的一帧也比较大,特别是关键帧有几万字节,普通帧也几千字节,UDP局域网建议1400,公网建议500,所以必须要拆吧
4、本来是想通过rtp发的,但是考虑到P2P还是纯手撸了个拆包发送。
解码端:
1、收到一帧后判断,这个帧是否拆分,若拆分,是否已收到所有拆包
2、解码器要自己设置参数,但是目前来看解码器参数无用(如比特率),只有编码器参数有意义,这里还需要再研究
3、收到一帧后立即解码为XXX,然后进行转换为YUV420,调用SDL进行播放。后续可能用别的库了,SDL比较麻烦
4、并不缓存帧数据,所以编码时候就设置前后预测帧个数为0,即只有关键帧和前向预测帧
5、接下来要做的事:
1、优化代码,面向对象化,设计规则和协议
2、封装起来,用C#调用这个C++
3、使用UDP的内网穿透。
4、用EPOLL和UDP结合起来,包装成类似TCP的。
5、鼠标和键盘操作
6、服务器端和前端分离,验证用户组进行内网穿透登陆
发送端代码:
/**
* ref. 雷霄骅 Lei Xiaohua
//packet->pFrame->pFrameYUV->pPacket->send
*/
/* NOTE(review): the original header names were lost during extraction
   (angle-bracket contents stripped); reconstructed from what this file
   actually uses — verify against the original project. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <winsock2.h>
#include <ws2tcpip.h>
#include <windows.h>
#pragma comment(lib,"ws2_32.lib")
#include <io.h>
#include <fcntl.h>
#define __STDC_CONSTANT_MACROS
#define SIZE_BUF 1200
extern "C"
{
#include "include/libavcodec/avcodec.h"
#include "include/libavformat/avformat.h"
#include "include/libswscale/swscale.h"
#include "include/libavdevice/avdevice.h"
#include "include/libavutil/imgutils.h"
#include "include/libavutil/opt.h"
#include "include/libavutil/imgutils.h"
#include "include/libavutil/mathematics.h"
#include "include/libavutil/time.h"
//#include "includes/SDL.h"
};
#pragma pack(push,1)
// Wire format of one UDP datagram: one slice of an encoded H.265 packet.
// Packed to 1 byte so the layout matches the receiver's copy byte-for-byte.
// NOTE(review): embedding a whole AVPacket (which contains raw pointers such
// as data/buf/side_data) in a network message is unsafe across processes;
// only the scalar fields (pts/dts/size/flags/stream_index/pos) are meaningful
// on the receiving side. A dedicated header struct would be safer.
struct sendbuf {
    AVPacket b_pPacket;     // scalar AVPacket fields for the whole frame
    int b_count;            // number of slices this frame was split into (starts at 1)
    int b_lenth;            // valid byte count in b_buf for this slice
    int b_frameid;          // index of the frame this slice belongs to
    int b_serialid;         // slice sequence number within the frame (starts at 1)
    char b_buf[SIZE_BUF];   // slice payload
};
struct sockaddr_in G_Servaddr; // unused here; reserved for a server address
#pragma pack(pop)
int main(int argc, char* argv[])
{
    // Sender: capture the Windows desktop via gdigrab, decode each captured
    // frame, convert it to YUV420P, encode it as H.265, then split every
    // encoded packet into SIZE_BUF-byte slices and send them over UDP to
    // 127.0.0.1:8800. Stops after ~200 decoded frames.
    AVFormatContext *pFormatContext = NULL;      // demuxer context for the gdigrab input
    AVCodecContext *pCodecContext = NULL;        // decoder context (desktop stream)
    AVCodec *pCodec = NULL;                      // decoder for the captured frames
    AVPacket pPacket;                            // holds one encoded H.265 packet
    struct SwsContext *img_convert_ctx;          // pixel-format converter -> YUV420P
    AVFormatContext *pFormatContextEncod = NULL; // unused
    AVCodecContext *pCodecContextEncod = NULL;   // H.265 encoder context
    AVCodec *pCodecEncod = NULL;                 // H.265 encoder
    AVCodecID codec_id = AV_CODEC_ID_H265;       // H.265 and HEVC are the same codec id
    int in_w = 1920, in_h = 1080;                // encoder frame size
    int framenum = 100;
    int ret, got_output, xy_size, got_picture;
    int y_size;
    int i = 0, videoindex;
    sendbuf sendbuf;   // NOTE(review): variable shadows its own type name
    FILE *fp_out;
    fp_out = fopen("ds.h265", "wb");             // NOTE(review): result unchecked
    FILE *fp_yuv = fopen("output.yuv", "wb+");   // NOTE(review): result unchecked
    /*socket setup --------------------------------*/
    WSADATA wsa;
    if (WSAStartup(MAKEWORD(2, 2), &wsa) != 0)
    {
        printf("WSAStartup failed!\n");
        return 1;
    }
    int connfd;
    //socklen_t addrlen(0);
    SOCKET ServerS = socket(AF_INET, SOCK_DGRAM, 0);
    SOCKADDR_IN DistAddr;
    DistAddr.sin_family = AF_INET;
    DistAddr.sin_port = htons(8800);
    DistAddr.sin_addr.s_addr = inet_addr("127.0.0.1"); //inet_addr("192.168.23.129");
    if (DistAddr.sin_addr.s_addr == INADDR_NONE)
    {
        printf("不可用地址!\n");
        return -1;
    }
    int time_out = 2000;
    //ret = setsockopt(ServerS, SOL_SOCKET, SO_RCVTIMEO, (char*)&time_out, sizeof(time_out))
    /*socket setup --------------------------------↑*/
    av_register_all();
    avformat_network_init();
    pFormatContext = avformat_alloc_context();
    avdevice_register_all();
    AVDictionary* options = NULL;
    // Capture area. NOTE(review): gdigrab documents the size as "WxH"
    // (e.g. "1920x1080"); confirm that "1920*1080" is actually honored.
    av_dict_set(&options, "video_size", "1920*1080", 0);
    av_dict_set(&options, "framerate", "25", 0);
    AVInputFormat *ifmt = av_find_input_format("gdigrab");
    if (avformat_open_input(&pFormatContext, "desktop", ifmt, &options) != 0)// open the capture input
    {
        printf("Couldn't open input stream.\n"); return -1;
    }
    if (avformat_find_stream_info(pFormatContext, NULL) < 0)// probe the streams
    {
        printf("Couldn't find stream information.\n"); return -1;
    }
    videoindex = -1;
    for (i = 0; i < pFormatContext->nb_streams; i++)
    {
        if (pFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)// locate the first video stream
        {
            videoindex = i; break;
        }
    }
    if (videoindex == -1)
    {
        printf("Didn't find a video stream.\n"); return -1;
    }
    av_dump_format(pFormatContext, 0, 0, 0);
    // Take decoder parameters straight from the desktop stream.
    pCodecContext = pFormatContext->streams[videoindex]->codec;
    pCodec = avcodec_find_decoder(pCodecContext->codec_id);// decoder for the captured bitmaps
    if (pCodec == NULL)
    {
        printf("Codec not found.\n");
        return -1;
    }
    if (avcodec_open2(pCodecContext, pCodec, NULL) < 0)// open the decoder
    {
        printf("Could not open codec.\n");
        return -1;
    }
    AVFrame *pFrame, *pFrameYUV;
    pFrame = av_frame_alloc();     // decoded frame (native pixel format)
    pFrameYUV = av_frame_alloc();  // converted YUV420P frame fed to the encoder
    uint8_t *out_buffer;
    AVPacket *packet;
    out_buffer = (unsigned char *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pCodecContext->width, pCodecContext->height, 1));
    av_image_fill_arrays(pFrameYUV->data, pFrameYUV->linesize, out_buffer, AV_PIX_FMT_YUV420P, pCodecContext->width, pCodecContext->height, 1);
    packet = (AVPacket *)av_malloc(sizeof(AVPacket));
    printf("--------------- gdigrab Information ----------------\n");
    // Debug helper: prints tbn/tbc/tbr, PAR, DAR etc.
    av_dump_format(pFormatContext, 0, 0, 0);
    printf("------------------------ -----------------------\n");
    // encoder setup -----------------------------------
    pCodecEncod = avcodec_find_encoder(codec_id);
    if (!pCodecEncod) {
        printf("Codec not found\n");
        return -1;
    }
    pCodecContextEncod = avcodec_alloc_context3(pCodecEncod);
    if (!pCodecContextEncod) {
        printf("Could not allocate video codec context\n");
        return -1;
    }
    // Higher bitrate = more data but better quality. Empirically H.265 is
    // watchable around 400k where H.264 needs ~800k; raised to cope with
    // network transmission quality loss.
    pCodecContextEncod->bit_rate = 2000000;
    pCodecContextEncod->width = in_w;
    pCodecContextEncod->height = in_h;
    pCodecContextEncod->time_base.num = 1;
    pCodecContextEncod->time_base.den = 25;
    pCodecContextEncod->max_b_frames = 0;// max B-frames between non-B frames; 0 = no B-frames (receiver does not buffer)
    pCodecContextEncod->pix_fmt = AV_PIX_FMT_YUV420P;
    pCodecContextEncod->gop_size = 20;
    /* GOP size = distance between IDR frames. At least one keyframe per second
       is typical; more keyframes improve quality/recovery but cost bandwidth. */
    av_opt_set(pCodecContextEncod->priv_data, "preset", "fast", 0);// ultrafast/veryfast/faster/fast/medium/slow
    av_opt_set(pCodecContextEncod->priv_data, "tune", "zerolatency", 0);
    /* preset trades encoding speed against quality (ultrafast..placebo);
       zerolatency is required for real-time encoding (no frame delay). */
    //fast - 2000000 medium - 150
    if (avcodec_open2(pCodecContextEncod, pCodecEncod, NULL) < 0) {
        printf("Could not open codec\n");
        return -1;
    }
    pFrameYUV->format = pCodecContextEncod->pix_fmt;
    pFrameYUV->width = pCodecContextEncod->width;
    pFrameYUV->height = pCodecContextEncod->height;
    //encoder setup -----------------------------------↑
    img_convert_ctx = sws_getContext(pCodecContext->width, pCodecContext->height, pCodecContext->pix_fmt,
        pCodecContext->width, pCodecContext->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
    i = 0;
    int frame_index = 0;
    // Capture / encode / send loop (stops after 200 reads).
    int64_t start_time = av_gettime();
    while (av_read_frame(pFormatContext, packet) >= 0 && i < 200)
    {
        av_init_packet(&pPacket);
        pPacket.data = NULL;
        pPacket.size = 0;
        pFrame->pts = i;
        ret = avcodec_decode_video2(pCodecContext, pFrame, &got_picture, packet);
        if (ret < 0)
        {
            printf("Decode Error.\n");
            return -1;
        }
        AVStream *in_stream, *out_stream;
        if (got_picture >= 1)
        {
            // One frame decoded; convert to YUV420P.
            sws_scale(img_convert_ctx, (const unsigned char* const*)pFrame->data, pFrame->linesize, 0, pCodecContext->height,
                pFrameYUV->data, pFrameYUV->linesize);
            // Note: linesize is the allocated stride — data rows may contain padding.
            /* y_size = pCodecContext->width*pCodecContext->height;// dump raw YUV for inspection
            fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv); //Y
            fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv); //U Cb
            fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv); //V Cr */
            //printf("Succeed to decode-scale-fwrite 1 frame!\n");
            pFrameYUV->width = 1920;
            pFrameYUV->height = 1080;
            pFrameYUV->format = AV_PIX_FMT_YUV420P;
            pFrameYUV->pts = i;
            /*encode-----------------------*/
            ret = avcodec_encode_video2(pCodecContextEncod, &pPacket, pFrameYUV, &got_output);
            if (ret < 0) {
                printf("Error encoding frame\n");
                return -1;
            }
            if (got_output) {
                // One packet encoded — split and send it.
                framenum++;
                //fwrite(pPacket.data, 1, pPacket.size, fp_out);// optionally dump the raw .h265 stream
                /*sendudp----------------------*/
                memset(&sendbuf, 0, sizeof(sendbuf));
                int RemainPsize = pPacket.size;
                // Number of slices. NOTE(review): size/SIZE_BUF + 1 overcounts
                // when size is an exact multiple of SIZE_BUF (sends one empty
                // slice), and when size == SIZE_BUF the single-send branch
                // below still reports b_count == 2 with b_serialid left at 0,
                // which desynchronizes the receiver's reassembly loop.
                sendbuf.b_count = pPacket.size / SIZE_BUF + 1;
                sendbuf.b_frameid = i;
                // Copy only the scalar AVPacket fields; pointers are useless remotely.
                sendbuf.b_pPacket.dts = pPacket.dts;
                sendbuf.b_pPacket.pos = pPacket.pos;
                sendbuf.b_pPacket.pts = pPacket.pts;
                sendbuf.b_pPacket.flags = pPacket.flags;
                sendbuf.b_pPacket.size = pPacket.size;
                sendbuf.b_pPacket.stream_index = pPacket.stream_index;
                int ccount = sendbuf.b_count;
                if (pPacket.size > SIZE_BUF)
                {
                    // Multi-slice path: send ccount datagrams of <= SIZE_BUF payload.
                    for (int k = 0; k < ccount;)
                    {
                        sendbuf.b_serialid = k + 1;
                        if (RemainPsize > SIZE_BUF)
                        {
                            sendbuf.b_lenth = SIZE_BUF;// full slice
                            memcpy(sendbuf.b_buf, &pPacket.data[k * SIZE_BUF], SIZE_BUF);
                        }
                        else
                        {
                            sendbuf.b_lenth = RemainPsize;// final (short) slice
                            memcpy(sendbuf.b_buf, &pPacket.data[k * SIZE_BUF], RemainPsize);
                        }
                        k++;
                        RemainPsize = pPacket.size - k * SIZE_BUF;
                        char buff[sizeof(sendbuf)] = { 0 };
                        memcpy(buff, &sendbuf, sizeof(sendbuf));
                        // NOTE(review): sendto result is discarded — failures are silent.
                        int result = sendto(ServerS, buff, sizeof(buff), 0, (SOCKADDR *)&DistAddr, sizeof(DistAddr));
                        printf("send buff ok ,size: %d,i:%d k:%d \n", pPacket.size, i, k);
                    }
                }
                else
                {
                    // Single-slice path (packet fits in one datagram).
                    char buff[sizeof(sendbuf)] = { 0 };
                    sendbuf.b_lenth = RemainPsize;
                    memcpy(sendbuf.b_buf, &pPacket.data[0], RemainPsize);
                    memcpy(buff, &sendbuf, sizeof(sendbuf));
                    int result = sendto(ServerS, buff, sizeof(buff), 0, (SOCKADDR *)&DistAddr, sizeof(DistAddr));
                    printf("send buff ok ,size: %d,i:%d flag:%d \n", pPacket.size, i, pPacket.flags);
                }
                //char buff[sizeof(sendbuf)] = { 0 };
                //memcpy(buff, &sendbuf, sizeof(sendbuf));
                //pPacket.pos = -1;
                printf("send ok ,size: %d,i:%d flag:%d \n", pPacket.size, i, pPacket.flags);
                //int ret = av_interleaved_write_frame(output_format_context_, &pPacket);// RTP push (unused)
                //av_interleaved_write_frame buffers packets and checks pts, unlike av_write_frame.
                //int result = sendto(ServerS, buff, sizeof(buff), 0, (SOCKADDR *)&DistAddr, sizeof(DistAddr));
                //int a = WSAGetLastError();
                // printf("send ok ,size: %d\n", pPacket.size);
                frame_index++;
                // NOTE(review): av_free_packet() is deprecated and also unrefs —
                // calling it after av_packet_unref() is redundant.
                av_packet_unref(&pPacket);
                av_free_packet(&pPacket);
            }
            // pFrameYUV->pts=i;
            i++;
        }
        else
        {
            printf("未解码到一帧,可能时结尾B帧或延迟帧,在后面做flush decoder处理");
        }
        av_packet_unref(packet);
        av_free_packet(packet);
    }
    av_dump_format(pFormatContext, 0, 0, 0);
    //Flush Decoder: drain frames still buffered inside the decoder.
    while (true)
    {
        if (!(pCodec->capabilities & AV_CODEC_CAP_DELAY))
            break;
        ret = avcodec_decode_video2(pCodecContext, pFrame, &got_picture, NULL);
        if (ret < 0)
        {
            break;
        }
        if (!got_picture)
        {
            break;
        }
        sws_scale(img_convert_ctx, (const unsigned char* const*)pFrame->data, pFrame->linesize, 0, pCodecContext->height,
            pFrameYUV->data, pFrameYUV->linesize);
        y_size = pCodecContext->width*pCodecContext->height;
        /*fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv); //Y
        fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv); //U
        fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv); //V*/
        printf("Flush Decoder: Succeed to decode 1 frame!\n");
    }
    //Flush Encoder: drain packets still buffered inside the encoder.
    for (got_output = 1; got_output; i++) {
        ret = avcodec_encode_video2(pCodecContextEncod, &pPacket, NULL, &got_output);
        if (ret < 0) {
            printf("Error encoding frame\n");
            return -1;
        }
        if (got_output) {
            printf("Flush Encoder: Succeed to encode 1 frame!\tsize:%5d\n", pPacket.size);
            fwrite(pPacket.data, 1, pPacket.size, fp_out);//av_write_frame(pFormatCtx, &pPacket);
            av_free_packet(&pPacket);
        }
    }
    //av_write_trailer(output_format_context_);
    printf("--------------- End Information ----------------\n");
    closesocket(ServerS);
    WSACleanup();
    sws_freeContext(img_convert_ctx);
    av_frame_free(&pFrameYUV);
    av_frame_free(&pFrame);
    avcodec_close(pCodecContext);
    avformat_close_input(&pFormatContext);
    fclose(fp_yuv);
    fclose(fp_out);
    avcodec_close(pCodecContextEncod);
    // NOTE(review): pCodecEncod comes from avcodec_find_encoder() and is a
    // static table entry — it must NOT be freed. The context should be released
    // with avcodec_free_context(&pCodecContextEncod) instead.
    av_free(pCodecEncod);
    //if (output_format_context_ && !(ofmt->flags & AVFMT_NOFILE))
    // avio_close(output_format_context_->pb);
    //avformat_free_context(output_format_context_);
    //av_freep(&pFrame->data[0]);
    //av_frame_free(&pFrame);
    scanf("%d", &i); // keep the console window open
    return 0;
}
接收端:
#pragma once
/* NOTE(review): header names lost during extraction; reconstructed —
   verify against the original project. */
#include <stdio.h>
#include <winsock2.h>
#pragma comment(lib,"ws2_32.lib")
#include <windows.h>
#include <stdlib.h>
#define __STDC_CONSTANT_MACROS
extern "C"
{
#include "include/libavcodec/avcodec.h"
#include "include/libavformat/avformat.h"
#include "include/libswscale/swscale.h"
#include "include/libavutil/imgutils.h"
#include "include/libavutil/mathematics.h"
#include "include/libavutil/time.h"
#include "include/libavutil/opt.h"
#include "includes/SDL.h"
};
#pragma pack(push,1)
#define SIZE_BUF 1200   // payload bytes per UDP slice; must match the sender
#define MAXLINE 128
#define SERV_PORT 7788
#define CLIENT1_PORT 4003
#define CLIENT2_PORT 4004
#define SERV_IP "1.1.1.1"
#define SIZE 128
// NOTE(review): the four SDL_* macros below re-define constants the SDL
// headers already provide; if these values ever diverge from the linked SDL
// build, SDL_Init / overlay creation will misbehave. Prefer the header values.
#define SDL_INIT_AUDIO 0x00000010
#define SDL_INIT_VIDEO 0x00000020
#define SDL_YV12_OVERLAY 0x32315659
#define SDL_INIT_TIMER 0x00000001
// Custom SDL user events driving the render loop.
#define SFM_REFRESH_EVENT (SDL_USEREVENT + 1)
#define SFM_BREAK_EVENT (SDL_USEREVENT + 2)
struct sockaddr_in G_Servaddr; // unused here
// Wire format of one UDP datagram; must match the sender's struct
// byte-for-byte (both sides are #pragma pack(1)).
struct sendbuf {
    AVPacket b_pPacket;     // NOTE(review): pointer fields are meaningless across processes; only scalars are usable
    //int b_chain;
    int b_count;            // total number of slices for this frame
    int b_lenth;            // valid bytes in b_buf
    int b_frameid;          // frame index
    int b_serialid;         // slice index within the frame (starts at 1)
    char b_buf[SIZE_BUF];   // slice payload
};
// Flag polled by sfp_refresh_thread; set non-zero to stop the refresh loop.
// NOTE(review): shared between threads without synchronization — should be
// volatile/atomic to be safe.
int thread_exit = 0;
//Refresh Event
// Debug receive loop: reads UDP datagrams from the socket whose descriptor
// is pointed to by sockfd and logs them as text.
// Fixes vs. original: recvfrom() failure is now detected (was an infinite
// busy loop on error), the buffer is explicitly NUL-terminated before
// printing (recvfrom does not terminate it), and the function returns a
// value as its non-void signature requires.
void * recvfd(void * sockfd) {
    char buf[MAXLINE] = { 0 };
    sockaddr_in remote;
    int nAddrLen = sizeof(remote);
    while (1) {
        // Reserve one byte so buf can always be printed as a C string.
        int n = recvfrom(*(int*)sockfd, buf, sizeof(buf) - 1, 0, (sockaddr *)& remote, &nAddrLen);
        if (n < 0) {
            // Socket error or closed — stop instead of spinning.
            break;
        }
        buf[n] = '\0';
        printf("recvfrom b :%s\n", buf);
        // sleep(5);
    }
    return NULL;
}
int sfp_refresh_thread(void *opaque)
{
thread_exit = 0;
while (!thread_exit) {
SDL_Event event;
event.type = SFM_REFRESH_EVENT;
SDL_PushEvent(&event);
SDL_Delay(40);
}
thread_exit = 0;
//Break
SDL_Event event;
event.type = SFM_BREAK_EVENT;
SDL_PushEvent(&event);
return 0;
}
// AVIO-style read callback: copies up to bufsize bytes from the memory block
// pointed to by opaque into buf and returns the number of bytes copied.
// BUG FIX: the original copied sizeof(opaque) bytes — the size of a *pointer*
// (4 or 8), not of the source buffer — so at most 8 bytes ever arrived.
// Returns -1 on invalid arguments.
// NOTE(review): assumes opaque points to at least bufsize readable bytes; a
// production implementation should track the remaining stream length and
// return AVERROR_EOF when exhausted.
int fill_iobuffer(void * opaque, uint8_t *buf, int bufsize) {
    if (opaque == NULL || buf == NULL || bufsize <= 0) {
        return -1;
    }
    memcpy(buf, opaque, (size_t)bufsize);
    return bufsize;
}
#pragma pack(pop)
int main()
{
    // Receiver: read sendbuf datagrams from UDP port 8800, reassemble the
    // slices of each encoded H.265 packet, decode, convert to YUV and render
    // with an SDL 1.2 YUV overlay. Relies on in-order, lossless delivery.
    // NOTE(review): pFormatCtx is never initialized or opened, yet it is
    // passed to avformat_close_input() at the bottom — undefined behavior.
    AVFormatContext *pFormatCtx;
    int i = 0, videoindex = 0;
    AVCodecContext *pCodecCtx;
    AVCodec *pCodec;
    AVFrame *pFrame, *pFrameYUV;
    unsigned char *out_buffer;
    AVPacket packet;
    av_init_packet(&packet);
    sendbuf sendbuffer;
    /*WINsocket--------------------------------*/
    WSADATA wsa;
    if (WSAStartup(MAKEWORD(2, 2), &wsa) != 0)
    {
        printf("WSAStartup failed!\n");
        return 1;
    }
    int connfd;
    //socklen_t addrlen(0);
    SOCKET ServerS = socket(AF_INET, SOCK_DGRAM, 0);
    SOCKADDR_IN DistAddr;
    DistAddr.sin_family = AF_INET;
    DistAddr.sin_port = htons(8800);
    DistAddr.sin_addr.s_addr = INADDR_ANY;
    //ret = setsockopt(ServerS, SOL_SOCKET, SO_RCVTIMEO, (char*)&time_out, sizeof(time_out));
    if (bind(ServerS, (SOCKADDR*)&DistAddr, sizeof(DistAddr)) == SOCKET_ERROR)
    {
        // NOTE(review): silent failure — no diagnostic is printed.
        return 0;
    }
    int nAddrLen = sizeof(DistAddr);
    /*socket--------------------------------↑*/
    int y_size;
    int ret, got_picture;
    struct SwsContext *img_convert_ctx;
    AVCodecParserContext *pCodecParserCtx = NULL;
    //char filepath[] = "ds.h264";
    //char filepath[] = "rtp://192.168.23.63:4004";
    //char filepath[] = "rtp://127.0.0.1:4004";
    FILE *fp_yuv = fopen("output.yuv", "wb+"); // NOTE(review): result unchecked
    av_register_all();
    avformat_network_init();
    /*AVFormatContext *ic = NULL;
    ic = avformat_alloc_context();
    unsigned char * iobuffer = (unsigned char *)av_malloc(100*.1024);
    AVIOContext *avio = avio_alloc_context(iobuffer, 32768, 1, NULL, fill_iobuffer, NULL, NULL);
    ic->pb = avio;
    int err = avformat_open_input(&ic, "nothing", NULL, NULL);
    /*示例代码开辟了一块空间iobuffer作为AVIOContext的缓存。*/
    AVCodecID codec_id = AV_CODEC_ID_H265; //AV_CODEC_ID_HEVC;
    pCodec = avcodec_find_decoder(codec_id);
    if (!pCodec) {
        printf("Codec not found\n");
        return -1;
    }
    pCodecCtx = avcodec_alloc_context3(pCodec);
    if (!pCodecCtx) {
        printf("Could not allocate video codec context\n");
        return -1;
    }
    // NOTE(review): preset/tune are encoder options; they have no effect on a
    // decoder context.
    av_opt_set(pCodecCtx->priv_data, "preset", "medium", 0);
    av_opt_set(pCodecCtx->priv_data, "tune", "zerolatency", 0);
    pCodecParserCtx = av_parser_init(codec_id);
    if (!pCodecParserCtx) {
        printf("Could not allocate video parser context\n");
        return -1;
    }
    if (pCodec->capabilities&AV_CODEC_CAP_TRUNCATED)
        pCodecCtx->flags |= AV_CODEC_FLAG_TRUNCATED; /* we do not send complete frames */
    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
        printf("Could not open codec\n");
        return -1;
    }
    // Decoder parameters. NOTE(review): these are set AFTER avcodec_open2(),
    // so they do not affect decoding; the decoder takes its parameters from
    // the bitstream anyway. width/height are still used below for the
    // buffers, sws context and SDL overlay.
    pCodecCtx->width = 1920;
    pCodecCtx->height = 1080;
    pCodecCtx->bit_rate = 2500000;
    pCodecCtx->time_base.num = 1;
    pCodecCtx->time_base.den = 25;
    pCodecCtx->max_b_frames = 0;// no B-frames expected (sender encodes with max_b_frames = 0)
    pCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;
    pCodecCtx->gop_size = 20;
    // Frame holding the decoded picture.
    pFrame = av_frame_alloc();
    // Frame holding the YUV420P conversion destined for the SDL overlay.
    pFrameYUV = av_frame_alloc();
    // Backing buffer for the converted image.
    out_buffer = (unsigned char *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1));
    av_image_fill_arrays(pFrameYUV->data, pFrameYUV->linesize, out_buffer, AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1);
    //packet = (AVPacket *)av_malloc(sizeof(AVPacket));
    //Output Info-----------------------------
    printf("--------------- File Information ----------------\n");
    // Debug helper: prints tbn/tbc/tbr, PAR, DAR etc.
    //av_dump_format(pFormatCtx, 0, filepath, 0);
    printf("-------------------------------------------------\n");
    // Pixel-format conversion context.
    img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
        pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
    //SDL----------------------------
    if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER) != 0) {//(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
        printf("Could not initialize SDL - %s\n", SDL_GetError());
        return -1;
    }
    int screen_w = 1920, screen_h = 1080;
    //int screen_w = 1080, screen_h = 1920;
    const SDL_VideoInfo *vi = SDL_GetVideoInfo();
    // Window sized to ~91% of the desktop.
    screen_w = vi->current_w / 1.1;
    screen_h = vi->current_h / 1.1;
    SDL_Surface *screen;
    screen = SDL_SetVideoMode(screen_w, screen_h, 0, 0);
    if (!screen) {
        printf("SDL: could not set video mode - exiting:%s\n", SDL_GetError());
        return -1;
    }
    SDL_Overlay *bmp;
    bmp = SDL_CreateYUVOverlay(pCodecCtx->width, pCodecCtx->height, SDL_YV12_OVERLAY, screen);
    SDL_Rect rect;
    rect.x = 0;
    rect.y = 0;
    rect.w = screen_w;
    rect.h = screen_h;
    SDL_Thread *video_tid = SDL_CreateThread(sfp_refresh_thread, NULL);
    SDL_WM_SetCaption("Simplest FFmpeg Grab Desktop007", NULL);
    //Event Loop
    SDL_Event event;
    //SDL End------------------------↑
    // Receive / reassemble / decode / display loop, paced by SFM_REFRESH_EVENT.
    //while (av_read_frame(pFormatCtx, &packet) >= 0)
    while (true)
    {
        /*int len = av_parser_parse2(
        pCodecParserCtx, pCodecCtx,
        &packet.data, &packet.size,
        cur_ptr, cur_size,
        AV_NOPTS_VALUE, AV_NOPTS_VALUE, AV_NOPTS_VALUE);*/
        av_init_packet(&packet);
        SDL_WaitEvent(&event);
        if (event.type == SFM_REFRESH_EVENT)
        {
            char buf[sizeof(sendbuf)] = { 0 };//1296 bytes with pack(1)
            // Reassembly buffer for one encoded frame (256 KiB on the stack,
            // re-created every event).
            uint8_t data[256 * 1024] = { 0 };//unsigned char
            memset(&sendbuffer, 0, sizeof(sendbuffer));
            memset(buf, 0, sizeof(buf));
            memset(&packet, 0, sizeof(packet));
            // Blocking receive of the first (or only) slice of a frame.
            // NOTE(review): return value unchecked — a failed/short datagram
            // is still parsed as a sendbuf.
            int reu = recvfrom(ServerS, buf, sizeof(buf), 0, (SOCKADDR *)&DistAddr, &nAddrLen);
            int a = WSAGetLastError();
            printf("recv ok \n");
            memcpy(&sendbuffer, &buf, sizeof(buf));
            // NOTE(review): this copies the sender's AVPacket verbatim,
            // including pointer fields (data/buf/side_data) that are garbage
            // in this process; packet.data is repointed below, but buf and
            // side_data remain stale — only the scalar fields are valid.
            memcpy(&packet, &sendbuffer.b_pPacket, sizeof(sendbuffer.b_pPacket));
            packet.data = data;
            memcpy(&packet.data[0], &sendbuffer.b_buf, sendbuffer.b_lenth);
            if (sendbuffer.b_serialid < sendbuffer.b_count)//1 !< 1: single-slice frames skip this
            {
                // Collect the remaining slices of this frame.
                // NOTE(review): assumes slices arrive in order with no loss
                // and no interleaving; b_serialid of the later datagrams is
                // never validated.
                for (int k = 1; k < sendbuffer.b_count; k++)
                {
                    memset(buf, 0, sizeof(buf));
                    int reu = recvfrom(ServerS, buf, sizeof(buf), 0, (SOCKADDR *)&DistAddr, &nAddrLen);
                    memcpy(&sendbuffer, &buf, sizeof(buf));
                    memcpy(&packet.data[k * SIZE_BUF], &sendbuffer.b_buf, sendbuffer.b_lenth);
                    printf("recv section ok %d\n", k);
                }
            }
            if (packet.stream_index == videoindex)
            {
                ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, &packet);
                if (ret < 0)
                {
                    printf("Decode Error.\n");
                }
                if (got_picture >= 1)
                {
                    SDL_LockYUVOverlay(bmp);
                    // One frame decoded: point pFrameYUV directly at the
                    // overlay's pixel planes so sws_scale writes into SDL's
                    // buffer with no extra copy.
                    pFrameYUV->data[0] = bmp->pixels[0];
                    // YV12 stores planes as Y,V,U — hence U/V indices are
                    // swapped relative to YUV420P's Y,U,V order.
                    pFrameYUV->data[1] = bmp->pixels[2];
                    pFrameYUV->data[2] = bmp->pixels[1];
                    // "pitch" is SDL's term for the byte width of one row.
                    pFrameYUV->linesize[0] = bmp->pitches[0];
                    pFrameYUV->linesize[1] = bmp->pitches[2];
                    pFrameYUV->linesize[2] = bmp->pitches[1];
                    sws_scale(img_convert_ctx, (const unsigned char* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
                        pFrameYUV->data, pFrameYUV->linesize);// convert into the overlay
                    y_size = pCodecCtx->width*pCodecCtx->height;
                    /*fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv); //Y  dump raw YUV for inspection
                    fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv); //U
                    fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv); //V*/
                    printf("Succeed to decode 1 frame!\n");
                    SDL_UnlockYUVOverlay(bmp);
                    SDL_DisplayYUVOverlay(bmp, &rect);
                }
                else
                {
                    printf("未解码到一帧,可能时结尾B帧或延迟帧,在后面做flush decoder处理\n");
                }
            }
        }
        // NOTE(review): av_free_packet() is deprecated and also unrefs —
        // calling it after av_packet_unref() is redundant.
        av_packet_unref(&packet);
        av_free_packet(&packet);
    }
    // NOTE(review): everything below is unreachable — the while(true) loop
    // above has no break (SFM_BREAK_EVENT is never handled).
    av_packet_unref(&packet);
    av_free_packet(&packet);
    //flush decoder
    //FIX: Flush Frames remained in Codec
    while (true)
    {
        if (!(pCodec->capabilities & AV_CODEC_CAP_DELAY))
            return 0;
        ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, &packet);
        if (ret < 0)
        {
            break;
        }
        if (!got_picture)
        {
            break;
        }
        sws_scale(img_convert_ctx, (const unsigned char* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
            pFrameYUV->data, pFrameYUV->linesize);
        SDL_LockYUVOverlay(bmp);
        int y_size = pCodecCtx->width*pCodecCtx->height;
        fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv); //Y
        fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv); //U
        fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv); //V
        printf("Flush Decoder: Succeed to decode 1 frame!\n");
        SDL_UnlockYUVOverlay(bmp);
        SDL_DisplayYUVOverlay(bmp, &rect);
        //Delay 40ms
        SDL_Delay(40);
    }
    SDL_Quit();
    closesocket(ServerS);
    WSACleanup();
    sws_freeContext(img_convert_ctx);
    av_frame_free(&pFrameYUV);
    av_frame_free(&pFrame);
    avcodec_close(pCodecCtx);
    avformat_close_input(&pFormatCtx); // NOTE(review): pFormatCtx was never initialized — UB
    fclose(fp_yuv);
    scanf("%d", &i); // keep the console window open
    return 0;
}