Project download: http://download.csdn.net/detail/qq_24038299/9690434
Screen capture is done with the gdigrab input device bundled with ffmpeg; for details on using ffmpeg, see the official documentation at http://ffmpeg.org/
Below is part of the code that opens the video capture source:
int VideoInput::getVideoInput()
{
    AVInputFormat *ifmt;
    pFormatCtx_Video = NULL;
    ifmt = av_find_input_format("gdigrab");
    AVDictionary *options = NULL;
    av_dict_set(&options, "framerate", framerate, 0); // set the capture frame rate
    if (avformat_open_input(&pFormatCtx_Video, "desktop", ifmt, &options) != 0)
    {
        printf("Couldn't open input stream.\n");
        return -1;
    }
    if (avformat_find_stream_info(pFormatCtx_Video, NULL) < 0)
    {
        printf("Couldn't find stream information.\n");
        return -1;
    }
    if (pFormatCtx_Video->streams[0]->codec->codec_type != AVMEDIA_TYPE_VIDEO)
    {
        printf("Couldn't find video stream information.\n");
        return -1;
    }
    pCodecCtx_Video = pFormatCtx_Video->streams[0]->codec;
    pCodec_Video = avcodec_find_decoder(pCodecCtx_Video->codec_id);
    if (pCodec_Video == NULL)
    {
        printf("Codec not found.\n");
        return -1;
    }
    if (avcodec_open2(pCodecCtx_Video, pCodec_Video, NULL) < 0)
    {
        printf("Could not open codec.\n");
        return -1;
    }
    return 0;
}
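Once the gdigrab input is open, frames are pulled from it with av_read_frame() and decoded into AVFrame structures before being handed to the filter and encoding stages. The loop below is only a minimal sketch against the same old-style FFmpeg API used above; it assumes the class members pFormatCtx_Video and pCodecCtx_Video from getVideoInput(), the helper name captureLoopSketch is hypothetical, and the desktop capture exposes a single video stream (index 0).

// Minimal capture loop sketch (assumed helper, not part of the original project).
int VideoInput::captureLoopSketch()
{
    AVPacket packet;
    AVFrame *frame = av_frame_alloc();          // decoded desktop frame (gdigrab delivers packed BGRA)
    int got_picture = 0;

    while (av_read_frame(pFormatCtx_Video, &packet) >= 0)
    {
        if (packet.stream_index == 0)           // the gdigrab context exposes one video stream
        {
            if (avcodec_decode_video2(pCodecCtx_Video, frame, &got_picture, &packet) >= 0
                && got_picture)
            {
                // frame now holds one screen grab; convert / filter / encode it here
            }
        }
        av_packet_unref(&packet);
    }
    av_frame_free(&frame);
    return 0;
}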
The OpenCV library is then used to apply filters to the frames captured by ffmpeg, i.e. to adjust brightness, contrast, sharpness and saturation:
// width and height give the resolution; the AVFrame passed in from ffmpeg is in planar YUV 4:2:0 format
void VideoInput::filterProcess(int width, int height, AVFrame *picture)
{
    if (brightnessswitch || contrastswitch || sharpenswitch || saturationswitch)
    {
        int size = width * height;
        unsigned char *YUVBuffer = (unsigned char *)malloc(size * 3 / 2);
        memcpy(YUVBuffer, picture->data[0], size);
        memcpy(YUVBuffer + size, picture->data[1], size / 4);
        memcpy(YUVBuffer + size * 5 / 4, picture->data[2], size / 4);
        cv::Mat srcyuv(height + height / 2, width, CV_8UC1, YUVBuffer);
        cv::Mat detrgb(height, width, CV_8UC3);
        // note: if the AVFrame is AV_PIX_FMT_YUV420P (plane order Y, U, V), the I420 conversion
        // codes are the exact match; the YV12 codes expect the V plane before the U plane
        cvtColor(srcyuv, detrgb, CV_YUV2BGR_YV12);
        // apply the selected filters
        if (brightnessswitch)
        {
            brightnessProcess(width, height, &detrgb);
        }
        if (contrastswitch) {
            contrastProcess(width, height, &detrgb);
        }
        if (sharpenswitch) {
            sharpenProcess(width, height, &detrgb);
        }
        if (saturationswitch) {
            saturationProcess(width, height, &detrgb);
        }
        cvtColor(detrgb, srcyuv, CV_BGR2YUV_YV12); // convert back from BGR to YUV after filtering
        memcpy(picture->data[0], srcyuv.data, size);
        memcpy(picture->data[1], srcyuv.data + size, size / 4);
        memcpy(picture->data[2], srcyuv.data + size * 5 / 4, size / 4);
        free(YUVBuffer);
    }
    return;
}
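gdigrab delivers packed BGRA frames, while filterProcess() expects a planar YUV 4:2:0 AVFrame, so somewhere before this call the captured frame has to be converted, typically with libswscale. The snippet below is only a sketch of that assumed conversion step; pCodecCtx_Video comes from getVideoInput(), 'frame' is the decoded frame from the capture loop above, and the names img_convert_ctx and frameYUV are my own.

// Assumed conversion step: packed BGRA screen grab -> planar YUV420P for filterProcess()/the encoder.
// Requires libswscale (#include <libswscale/swscale.h>).
SwsContext *img_convert_ctx = sws_getContext(
    pCodecCtx_Video->width, pCodecCtx_Video->height, pCodecCtx_Video->pix_fmt,   // source: gdigrab (BGRA)
    pCodecCtx_Video->width, pCodecCtx_Video->height, AV_PIX_FMT_YUV420P,         // destination: YUV420P
    SWS_BICUBIC, NULL, NULL, NULL);

AVFrame *frameYUV = av_frame_alloc();
frameYUV->format = AV_PIX_FMT_YUV420P;
frameYUV->width  = pCodecCtx_Video->width;
frameYUV->height = pCodecCtx_Video->height;
av_frame_get_buffer(frameYUV, 32);                       // allocate the Y/U/V planes

sws_scale(img_convert_ctx, frame->data, frame->linesize, 0, pCodecCtx_Video->height,
          frameYUV->data, frameYUV->linesize);

filterProcess(frameYUV->width, frameYUV->height, frameYUV); // apply the OpenCV filters in place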
// brightness adjustment
void VideoInput::brightnessProcess(int width, int height, cv::Mat *src)
{
    cv::Mat pnewmat;
    pnewmat = cv::Mat::zeros(src->size(), src->type());
    for (int y = 0; y < src->rows; y++)
    {
        for (int x = 0; x < src->cols; x++)
        {
            for (int c = 0; c < 3; c++)
            {
                pnewmat.at<cv::Vec3b>(y, x)[c] = cv::saturate_cast<uchar>(src->at<cv::Vec3b>(y, x)[c] + brightness);
            }
        }
    }
    cv::imshow("source", *(src));
    cv::waitKey(1);
    cv::imshow("new", pnewmat);
    cv::waitKey(1);
    pnewmat.copyTo(*(src));
}
// contrast adjustment
void VideoInput::contrastProcess(int width, int height, cv::Mat *src)
{
    cv::Mat pNewMat;
    pNewMat = cv::Mat::zeros(src->size(), src->type());
    for (int y = 0; y < src->rows; y++)
    {
        for (int x = 0; x < src->cols; x++)
        {
            for (int c = 0; c < 3; c++)
            {
                pNewMat.at<cv::Vec3b>(y, x)[c] = cv::saturate_cast<uchar>(contrast * (src->at<cv::Vec3b>(y, x)[c]));
            }
        }
    }
    cv::imshow("source", *(src));
    cv::waitKey(1);
    cv::imshow("new", pNewMat);
    cv::waitKey(1);
    pNewMat.copyTo(*src);
    return;
}
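The two per-pixel loops above can also be collapsed into a single call to cv::Mat::convertTo, which applies the same linear transform dst = alpha*src + beta with per-channel saturation and is considerably faster. A minimal sketch, assuming the same contrast and brightness values (the function name is my own):

// Equivalent linear brightness/contrast adjustment in one call (sketch, not from the original project).
#include <opencv2/core.hpp>

void adjustBrightnessContrast(cv::Mat &img, double contrast, int brightness)
{
    // convertTo clamps each channel exactly like cv::saturate_cast<uchar> in the loops above
    img.convertTo(img, -1, contrast, brightness);   // dst = contrast * src + brightness
}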
// saturation adjustment
void VideoInput::saturationProcess(int width, int height, cv::Mat *src)
{
    IplImage ImageSrc;
    CvScalar s1;
    int i, j;
    imshow("source", *(src));
    cv::waitKey(1);
    ImageSrc = *(src);
    cvCvtColor(&ImageSrc, &ImageSrc, CV_BGR2HSV);
    for (i = 0; i < src->rows; i++) {
        for (j = 0; j < src->cols; j++) {
            s1 = cvGet2D(&ImageSrc, i, j);
            s1.val[1] = s1.val[1] + saturation;   // raise the S channel of every pixel
            cvSet2D(&ImageSrc, i, j, s1);
        }
    }
    cvCvtColor(&ImageSrc, &ImageSrc, CV_HSV2BGR);
    imshow("new", *(src));
    cv::waitKey(1);
    return;
}
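The per-pixel cvGet2D/cvSet2D calls belong to the legacy C API, which is slow and deprecated in current OpenCV; the same adjustment can be written with cv::Mat operations only. A sketch of an equivalent routine (my naming, not the project's):

// Saturation adjustment using the C++ API only (sketch).
#include <opencv2/imgproc.hpp>
#include <vector>

void saturationProcessMat(cv::Mat &bgr, int saturation)
{
    cv::Mat hsv;
    cv::cvtColor(bgr, hsv, cv::COLOR_BGR2HSV);

    std::vector<cv::Mat> channels;
    cv::split(hsv, channels);                                  // channels[1] is the S plane
    cv::add(channels[1], cv::Scalar(saturation), channels[1]); // saturating add on every pixel
    cv::merge(channels, hsv);

    cv::cvtColor(hsv, bgr, cv::COLOR_HSV2BGR);
}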
// sharpness adjustment (unsharp masking)
void VideoInput::sharpenProcess(int width, int height, cv::Mat *src)
{
    IplImage image_1;
    IplImage image_2, image_3;
    CvScalar s1, s2;
    int i, j;
    cv::Mat src3 = cv::Mat(height, width, CV_8UC3, 1);
    cv::Mat src2(height, width, CV_8UC3);
    imshow("source", *(src));
    cv::waitKey(1);
    image_1 = *(src);
    src->copyTo(src2);
    image_2 = src2;
    image_3 = src3;
    if (sharpen % 2 == 0) {
        sharpen += 1;                                   // Gaussian kernel size must be odd
    }
    cvSmooth(&image_2, &image_3, CV_GAUSSIAN, sharpen); // image_3 = blurred copy of the source
    for (i = 0; i < src->rows; i++) {                   // image_2 = original - blurred (high-frequency detail)
        for (j = 0; j < src->cols; j++) {
            s1 = cvGet2D(&image_2, i, j);
            s2 = cvGet2D(&image_3, i, j);
            s1.val[0] = s1.val[0] - s2.val[0];
            s1.val[1] = s1.val[1] - s2.val[1];
            s1.val[2] = s1.val[2] - s2.val[2];
            cvSet2D(&image_2, i, j, s1);
        }
    }
    for (i = 0; i < src->rows; i++) {                   // add the detail back onto the source image
        for (j = 0; j < src->cols; j++) {
            s1 = cvGet2D(&image_1, i, j);
            s2 = cvGet2D(&image_2, i, j);
            s1.val[0] = s1.val[0] + s2.val[0];
            s1.val[1] = s1.val[1] + s2.val[1];
            s1.val[2] = s1.val[2] + s2.val[2];
            cvSet2D(&image_1, i, j, s1);
        }
    }
    imshow("new", *(src));
    cv::waitKey(1);
}
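With the C++ API an equivalent unsharp-mask effect can be produced by cv::GaussianBlur plus cv::addWeighted, avoiding the two scalar loops entirely. A minimal sketch, assuming the same odd 'sharpen' kernel size (the function name is my own):

// Unsharp masking with the C++ API (sketch): result = src + (src - blurred) = 2*src - blurred.
#include <opencv2/imgproc.hpp>

void sharpenProcessMat(cv::Mat &img, int sharpen)
{
    if (sharpen % 2 == 0)
        sharpen += 1;                                   // Gaussian kernel size must be odd

    cv::Mat blurred;
    cv::GaussianBlur(img, blurred, cv::Size(sharpen, sharpen), 0);

    // addWeighted clamps the result to the valid 0..255 range per channel
    cv::addWeighted(img, 2.0, blurred, -1.0, 0.0, img);
}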
Microphone audio can be captured with the dshow input device bundled with ffmpeg. By installing the virtual sound card Virtual Audio Cable 4.10, the computer's audio output can be routed back into a virtual audio input, so the sound played by the computer can be recorded in real time. Below is part of the code that opens the audio input source. (Virtual Audio Cable 4.10 download: http://download.csdn.net/detail/qq_24038299/9677547; both the default audio input and output devices must be set to Line 1.)
int AudioInput::getAudioInput()
{
    pFormatCtx_Audio = NULL;
    pAudioInputFmt = NULL;
    int AudioIndex = -1;
    // find the input format
    pAudioInputFmt = av_find_input_format("dshow");
    // open the device via DirectShow and bind the input format to the format context
    //char *psDevName = dup_wchar_to_utf8(L"audio=麦克风 (Realtek High Definition Au"); // microphone input
    char *pDevName = "audio=Line 1 (Virtual Audio Cable)";
    if (avformat_open_input(&pFormatCtx_Audio, pDevName, pAudioInputFmt, NULL) != 0) // open the capture device
        return -1;
    if (avformat_find_stream_info(pFormatCtx_Audio, NULL) < 0)
        return -1;
    for (int i = 0; i < pFormatCtx_Audio->nb_streams; i++)
    {
        if (pFormatCtx_Audio->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
        {
            AudioIndex = i;
            AVCodec *tmpCodec = avcodec_find_decoder(pFormatCtx_Audio->streams[i]->codec->codec_id);
            if (0 > avcodec_open2(pFormatCtx_Audio->streams[i]->codec, tmpCodec, NULL))
            {
                printf("can not find or open decoder!\n");
            }
            break;
        }
    }
    av_dump_format(pFormatCtx_Audio, 0, NULL, 0); // print the audio stream information
    return 0;
}
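As on the video side, the opened audio context is then drained with av_read_frame() and the packets decoded before resampling and encoding. The loop below is only a minimal sketch against the same old-style API; it assumes the members opened above, and the helper name and the AudioIndex parameter (the stream index found by getAudioInput()) are my own.

// Minimal audio capture loop sketch (assumed helper, not part of the original project).
int AudioInput::captureLoopSketch(int AudioIndex)
{
    AVPacket packet;
    AVFrame *frame = av_frame_alloc();          // decoded PCM samples from the dshow device
    int got_frame = 0;

    while (av_read_frame(pFormatCtx_Audio, &packet) >= 0)
    {
        if (packet.stream_index == AudioIndex)
        {
            if (avcodec_decode_audio4(pFormatCtx_Audio->streams[AudioIndex]->codec,
                                      frame, &got_frame, &packet) >= 0 && got_frame)
            {
                // frame->data / frame->nb_samples now hold raw PCM; resample and encode it here
            }
        }
        av_packet_unref(&packet);
    }
    av_frame_free(&frame);
    return 0;
}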