方法概要:先读取camera支持的mode,然后用ffmpeg dshow打开camera并解码压缩流
一. 用相关代码读取本地camera当前支持的输出mode,方便后续选择mode来打开camera:
bool EnumerateCameras(vector
{
camIdx.clear();
struct CapDriver {
int enumValue; string enumName; string comment;
};
// list of all CAP drivers (see highgui_c.h)
vector
drivers.push_back({ CV_CAP_MIL, "CV_CAP_MIL", "MIL proprietary drivers" });
drivers.push_back({ CV_CAP_VFW, "CV_CAP_VFW", "platform native" });
drivers.push_back({ CV_CAP_FIREWARE, "CV_CAP_FIREWARE", "IEEE 1394 drivers" });
drivers.push_back({ CV_CAP_STEREO, "CV_CAP_STEREO", "TYZX proprietary drivers" });
drivers.push_back({ CV_CAP_QT, "CV_CAP_QT", "QuickTime" });
drivers.push_back({ CV_CAP_UNICAP, "CV_CAP_UNICAP", "Unicap drivers" });
drivers.push_back({ CV_CAP_DSHOW, "CV_CAP_DSHOW", "DirectShow (via videoInput)" });
drivers.push_back({ CV_CAP_MSMF, "CV_CAP_MSMF", "Microsoft Media Foundation (via videoInput)" });
drivers.push_back({ CV_CAP_PVAPI, "CV_CAP_PVAPI", "PvAPI, Prosilica GigE SDK" });
drivers.push_back({ CV_CAP_OPENNI, "CV_CAP_OPENNI", "OpenNI (for Kinect)" });
drivers.push_back({ CV_CAP_OPENNI_ASUS, "CV_CAP_OPENNI_ASUS", "OpenNI (for Asus Xtion)" });
drivers.push_back({ CV_CAP_ANDROID, "CV_CAP_ANDROID", "Android" });
drivers.push_back({ CV_CAP_ANDROID_BACK, "CV_CAP_ANDROID_BACK", "Android back camera" }),
drivers.push_back({ CV_CAP_ANDROID_FRONT, "CV_CAP_ANDROID_FRONT", "Android front camera" }),
drivers.push_back({ CV_CAP_XIAPI, "CV_CAP_XIAPI", "XIMEA Camera API" });
drivers.push_back({ CV_CAP_AVFOUNDATION, "CV_CAP_AVFOUNDATION", "AVFoundation framework for iOS" });
drivers.push_back({ CV_CAP_GIGANETIX, "CV_CAP_GIGANETIX", "Smartek Giganetix GigEVisionSDK" });
drivers.push_back({ CV_CAP_INTELPERC, "CV_CAP_INTELPERC", "Intel Perceptual Computing SDK" });
std::string winName, driverName, driverComment;
int driverEnum;
Mat frame;
bool found;
qDebug() << "Searching for cameras IDs..." << endl << endl;
for (int drv = 0; drv < drivers.size(); drv++)
{
driverName = drivers[drv].enumName;
driverEnum = drivers[drv].enumValue;
driverComment = drivers[drv].comment;
qDebug() << "Testing driver " << QString(driverName.c_str()) << "...";
found = false;
int maxID = 100; //100 IDs between drivers
if (driverEnum == CV_CAP_VFW)
maxID = 10; //VWF opens same camera after 10 ?!?
else if (driverEnum == CV_CAP_ANDROID)
maxID = 98; //98 and 99 are front and back cam
else if ((driverEnum == CV_CAP_ANDROID_FRONT) || (driverEnum == CV_CAP_ANDROID_BACK))
maxID = 1;
for (int idx = 0; idx < maxID; idx++)
{
VideoCapture cap(driverEnum + idx); // open the camera
if (cap.isOpened()) // check if we succeeded
{
found = true;
camIdx.push_back(driverEnum + idx); // vector of all available cameras
cap >> frame;
if (frame.empty())
qDebug() << QString(driverName.c_str()) << "+" << idx << "\t opens: OK \t grabs: FAIL";
else
qDebug() << QString(driverName.c_str()) << "+" << idx << "\t opens: OK \t grabs: OK";
// display the frame
// imshow(driverName + "+" + to_string(idx), frame); waitKey(1);
}
cap.release();
}
if (!found) qDebug() << "Nothing !" << endl;
qDebug() << endl;
}
qDebug() << camIdx.size() << " camera IDs has been found ";
qDebug() << "Press a key..." << endl; cin.get();
return (camIdx.size() > 0); // returns success
}
二. 直接使用ffmpeg库的dshow来打开camera
// 1. Configure the camera parameters and open it: pull an H.264 stream from
//    the camera through ffmpeg's dshow input. At low resolutions raw YUV can
//    be read directly, at medium resolutions MJPG works, and at high
//    resolutions H.264 should be chosen so the camera link has enough
//    bandwidth for its raw/compressed output.
avdevice_register_all();
AVCodecID id = AV_CODEC_ID_H264;
AVInputFormat* input_format = av_find_input_format("dshow");
AVFormatContext* format_context = avformat_alloc_context();
// Ask dshow for the H.264 pin of the device.
format_context->video_codec_id = id;
// Build the dshow open options: codec, resolution, frame rate, device index
// and real-time buffer size. CAP_WIDTH/CAP_HEIGHT/CAP_FPS/count come from the
// enclosing scope (not visible in this chunk).
AVDictionary* dict = nullptr;
char resolution[128], fps[128], deviceIndex[128];
sprintf(resolution, "%dx%d", CAP_WIDTH, CAP_HEIGHT);
sprintf(fps, "%d", CAP_FPS);
sprintf(deviceIndex, "%d", count);
if (av_dict_set(&dict, "vcodec", "h264", 0) < 0) fprintf(stderr, "fail to av_dict_set: line: %d\n", __LINE__);
if (av_dict_set(&dict, "video_size", resolution, 0) < 0) fprintf(stderr, "fail to av_dict_set: line: %d\n", __LINE__);
if (av_dict_set(&dict, "r", fps, 0) < 0) fprintf(stderr, "fail to av_dict_set: line: %d\n", __LINE__);
if (av_dict_set(&dict, "video_device_number", deviceIndex, 0) < 0) fprintf(stderr, "fail to av_dict_set: line: %d\n", __LINE__);
if (av_dict_set(&dict, "rtbufsize", "10M", 0) < 0) fprintf(stderr, "fail to av_dict_set: line: %d\n", __LINE__);
// Open the device by its dshow friendly name (hard-coded to a Logitech C920 here).
int ret = avformat_open_input(&format_context, "video=HD Pro Webcam C920", input_format, &dict);
if (ret != 0) {
fprintf(stderr, "fail to avformat_open_input: %d\n", ret);
return;
}
// NOTE(review): the early `return`s in this chunk leak format_context/dict —
// cleanup only happens at the end of the capture loop; confirm against the
// full function.
ret = avformat_find_stream_info(format_context, nullptr);
if (ret < 0) {
fprintf(stderr, "fail to get stream information: %d\n", ret);
return;
}
// Locate the video stream among the streams dshow exposes.
int video_stream_index = -1;
for (unsigned int i = 0; i < format_context->nb_streams; ++i) {
const AVStream* stream = format_context->streams[i];
if (stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
video_stream_index = i;
fprintf(stdout, "type of the encoded data: %d, dimensions of the video frame in pixels: width: %d, height: %d, pixel format: %d\n",
stream->codecpar->codec_id, stream->codecpar->width, stream->codecpar->height, stream->codecpar->format);
}
}
if (video_stream_index == -1) {
fprintf(stderr, "no video stream\n");
return;
}
fprintf(stdout, "frame rate: %f\n", av_q2d(format_context->streams[video_stream_index]->r_frame_rate));
// 2. Initialise the H.264 decoder (and verify an H.264 encoder/decoder pair
//    exists in this ffmpeg build).
AVCodec* encoder_id = avcodec_find_encoder(id);
AVCodec* decoder_id = avcodec_find_decoder(id);
if (!encoder_id || !decoder_id) {
fprintf(stderr, "codec not found: %d\n", id);
return;
}
// Pick the decoder that matches what the stream actually carries.
AVCodecParameters* codecpar = format_context->streams[video_stream_index]->codecpar;
const AVCodec* codec = avcodec_find_decoder(codecpar->codec_id);
if (!codec) {
fprintf(stderr, "fail to avcodec_find_decoder\n");
return ;
}
if (codecpar->codec_id != id) {
// BUG FIX: the original message said "mjpeg" although the check is for H.264.
fprintf(stderr, "this test code only support h264 encode: %d\n", codecpar->codec_id);
return;
}
AVCodecContext* codec_context = avcodec_alloc_context3(codec);
if (!codec_context) {
fprintf(stderr, "fail to avcodec_alloc_context3\n");
return ;
}
// BUG FIX: copy ALL stream parameters into the decoder context. The original
// hand-copied only pix_fmt/width/height, which drops extradata (SPS/PPS) that
// H.264 decoders may need to open the stream.
ret = avcodec_parameters_to_context(codec_context, codecpar);
if (ret < 0) {
fprintf(stderr, "fail to avcodec_parameters_to_context: %d\n", ret);
return;
}
codec_context->thread_count = 16; // multi-threaded decode
ret = avcodec_open2(codec_context, codec, nullptr);
if (ret != 0) {
fprintf(stderr, "fail to avcodec_open2: %d\n", ret);
return;
}
AVPixelFormat dst_pixel_format = AV_PIX_FMT_BGR24;
AVFrame* frame = av_frame_alloc();
AVPacket* packet = (AVPacket*)av_malloc(sizeof(AVPacket));
SwsContext* sws_context = sws_getContext(codec_context->width, codec_context->height, codec_context->pix_fmt,
codec_context->width, codec_context->height, dst_pixel_format, 0,
nullptr, nullptr, nullptr);
if (!frame || !packet || !sws_context) {
fprintf(stderr, "fail to alloc\n");
return;
}
uint8_t* bgr_data[4];
int bgr_linesize[4];
av_image_alloc(bgr_data, bgr_linesize, codec_context->width, codec_context->height, dst_pixel_format, 1);
cv::Mat mat(codec_context->height, codec_context->width, CV_8UC3);
const char* winname = deviceIndex;
cv::namedWindow(winname);
// Capture loop: read one packet from dshow, decode it, convert to BGR,
// show it with OpenCV, and emit a downscaled QImage to the UI thread.
// Runs until captureEnabled (set from outside this chunk) goes false.
while (captureEnabled) {
ret = av_read_frame(format_context, packet);
if (ret >= 0 && packet->stream_index == video_stream_index && packet->size > 0) {
//fprintf(stderr, "avcodec_send_packet: pts:%lld, dts:%lld\n", packet->pts, packet->dts);
ret = avcodec_send_packet(codec_context, packet);
if (ret < 0) {
fprintf(stderr, "##### fail to avcodec_send_packet: %d\n", ret);
av_packet_unref(packet);
continue;
}
// NOTE(review): a single receive per send; AVERROR(EAGAIN) (decoder needs
// more input, normal at stream start) is reported as an error here and the
// frame is skipped — confirm this is acceptable for this capture path.
ret = avcodec_receive_frame(codec_context, frame);
if (ret < 0) {
fprintf(stderr, "##### fail to avcodec_receive_frame: %d\n", ret);
av_packet_unref(packet);
continue;
}
// Convert the decoded frame to packed BGR24 for OpenCV.
sws_scale(sws_context, frame->data, frame->linesize, 0, codec_context->height, bgr_data, bgr_linesize);
// Point the Mat at the BGR buffer (no copy; mat geometry was allocated to match).
mat.data = bgr_data[0];
cv::imshow(winname, mat);
// NOTE(review): waitTimeMs is computed but never used — waitKey(5) is
// hard-coded below; confirm whether pacing by waitTimeMs was intended.
int waitTimeMs = 1000 / CAP_FPS - 10; //encode consume 10 ms
cv::waitKey(5);
// Downscale by 8x and hand the preview image to the UI via a Qt signal.
cv::Mat img_dst;
cv::resize(mat, img_dst, cv::Size(CAP_WIDTH/8, CAP_HEIGHT/8), 0, 0, cv::INTER_LINEAR);
QImage image = Mat2QImageNew(img_dst);
emit thread_update_image(count, image);
}else if (ret < 0 || packet->size <= 0) {
fprintf(stderr, "##### fail to av_read_frame: %d, packet size: %d\n", ret, packet->size);
continue;
}
// Release the packet buffer filled by av_read_frame before the next read.
av_packet_unref(packet);
//int key = cv::waitKey(30);
//if (key == 27) break;
}
// 4. Tear down everything in roughly reverse order of creation once the
//    capture loop has been stopped.
if (!captureEnabled) {
cv::destroyWindow(winname);
sws_freeContext(sws_context);
av_frame_free(&frame);
// BUG FIX: the original av_freep(packet) passed the packet itself where
// av_freep expects the ADDRESS of the pointer, so it freed whatever the
// packet's first field pointed at and leaked the packet struct.
// av_packet_free unrefs any remaining payload and frees the packet.
av_packet_free(&packet);
av_freep(&bgr_data[0]);
// BUG FIX: codec_context was leaked before.
avcodec_free_context(&codec_context);
avformat_close_input(&format_context);
av_dict_free(&dict);
}