//Example 2-1. A simple OpenCV program that loads an image from disk and displays it
//on the screen
#include <opencv2/opencv.hpp> // restored: header name was lost; provides cv::Mat, cv::imread, HighGUI
#include <iostream>           // std::cout / std::cerr used below
// 帮助信息
// Print a usage banner for Example 2-1.
// argv[0] is the program name as invoked on the command line.
void help(char** argv ) {
    std::cout << "\n"
        << "A simple OpenCV program that loads and displays an image from disk\n"
        << argv[0] << " <path/image>\n" // usage hint restored (argument text was lost)
        << "For example:\n"
        << argv[0] << " ../fruits.jpg\n"
        << std::endl;
}
// Example 2-1 entry point: load the image named on the command line and
// display it in an auto-sized window until the user presses a key.
// Returns 0 on success, -1 if the image could not be read.
int main( int argc, char** argv ) {
    if (argc != 2) {
        help(argv);
        return 0;
    }
    // -1 == cv::IMREAD_UNCHANGED: keep the file's own channel count and depth.
    cv::Mat img = cv::imread( argv[1], -1 );
    if( img.empty() ) {
        // Report which file failed instead of exiting silently.
        std::cerr << "Error: could not read image file: " << argv[1] << std::endl;
        return -1;
    }
    cv::namedWindow( "Example 2-1", cv::WINDOW_AUTOSIZE ); // window sized to the image
    cv::imshow( "Example 2-1", img );
    cv::waitKey( 0 ); // block until the user presses any key
    cv::destroyWindow( "Example 2-1" ); // free the window and all memory it allocated
    return 0;
}
argv[1]:命令行参数,指示要读取的图片文件;程序应检查所指示的文件是否存在,若不存在,向用户发送一条错误提示信息。
namedWindow() 函数由HighGUI库提供,用于在屏幕上创建一个窗口,将被显示的图像包含于该窗口中。
imshow() 函数用于显示图像,第一个参数确定在已存在的窗口中显示图像,第二个参数传要显示的图片矩阵。
waitKey()函数用于使程序暂停,等待用户触发一个按键操作:
destroyWindow() 函数用于销毁显示图像文件的窗口,并同时释放窗口所分配的所有内存。
// Example 2-3. A simple OpenCV program for playing a video file from disk
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream> // restored: header name was lost (cout/cerr/endl used below)
using namespace std;
// Print a usage banner for Example 2-3 (video playback from disk).
void help(char** argv ) {
    std::cout << "\n"
        << "2-03: play video from disk \n"
        << argv[0] << " <path/video>\n" // usage hint restored (argument text was lost)
        << "For example:\n"
        << argv[0] << " ../tree.avi\n"
        << std::endl;
}
int main( int argc, char** argv ) {
if (argc != 2) {
help(argv);
return 0;
}
cv::namedWindow( "Example 2-3", cv::WINDOW_AUTOSIZE );
cv::VideoCapture cap;
cap.open( string(argv[1]) );
cout <<"Opened file: " <<argv[1] <<endl;
cv::Mat frame;
for(;;) {
cap >> frame;
if( frame.empty() ) break; // Ran out of film
cv::imshow( "Example 2-3", frame );
if( (char)cv::waitKey(33) >= 0 ) break;
// int c = cv::waitKey(33);
// for(int i=0;i<32;i++) {
// cout <<((c&(0x1<<(31-i)))?1:0);
// }
// cout <
// cout <<"Break key: '" <<(int)c <<"'"<
// if( (signed char)c >= 0 ) {
// break;
// }
}
return 0;
}
main函数从创建一个窗口开始,视频读取结构cv::VideoCapture在其后被加载进来,这个结构可以打开和关闭很多类型的ffmpeg支持的视频文件。
cap.open(string(argv[1])); cv::Mat frame;
视频读取结构通过传人字符串打开文件,这个字符串指示了想要打开的视频文件的路径。一旦视频被打开,视频读取结构将会包含所有的关于这个视频文件可以读取的属性,包括状态信息。以这样的方式创建以后cv::VideoCapture结构将会在视频的开头被初始化。在这个程序中,cv::Mat frame声明了一个可以保存视频帧的结构。
cap >> frame; if( frame.empty() ) break; cv::imshow( "Example 2-3", frame );
一旦内部的while()循环开始执行,视频文件会按照帧从视频流中被读取。这个程序通过if(frame.empty())检查数据是不是真的从视频中读了出来,如果没有,程序将会退出。如果视频帧被成功读取,将通过cv: :imshow()显示。
if( cv::waitKey(33) >= 0 ) break;
一旦显示了这帧图片,我们会等待33毫秒。“如果用户在这段时间在键盘有任何输入,我们将退出循环。如果没有发生,33毫秒之后我们会跳过并执行下一个循环。在退出的时候,所有数据的内存空间将会由于生命周期的结束被自动释放掉。
//Example 2-4. Adding a trackbar slider to the basic viewer window for moving around
//within the video file
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream> // restored: header names were lost on these two lines
#include <fstream>  // restored; presumably <fstream> as in the original example — TODO confirm
using namespace std;
int g_slider_position = 0; // current trackbar position (frame index)
int g_run = 1, g_dontset = 0; // g_run: >0 frames left to show, <0 free-run, 0 paused; start out in single step mode
cv::VideoCapture g_cap; // global so the trackbar callback can seek it
void onTrackbarSlide(int pos, void*) {
g_cap.set(CV_CAP_PROP_POS_FRAMES, pos);
if (!g_dontset) g_run = 1;
g_dontset = 0;
}
// Print a usage banner for Example 2-4 (trackbar-based video scrubbing).
void help(char** argv) {
    std::cout << "\n"
        << "2-04: Adding a trackbar to a basic viewer for moving w/in the video file \n" // fixed "Addeing" typo
        << argv[0] << " <path/video>\n" // usage hint restored (argument text was lost)
        << "For example:\n"
        << argv[0] << " ../tree.avi\n"
        << std::endl;
}
int main2_4(int argc, char** argv) {
/* if (argc != 2) {
help(argv);
return 0;
}*/
cv::namedWindow("Example 2-4", cv::WINDOW_AUTOSIZE);
g_cap.open(string("C:/Study/opencv/cvproject/Learning-OpenCV-3_examples/test.avi"));
int frames = (int)g_cap.get(CV_CAP_PROP_FRAME_COUNT);
int tmpw = (int)g_cap.get(CV_CAP_PROP_FRAME_WIDTH);
int tmph = (int)g_cap.get(CV_CAP_PROP_FRAME_HEIGHT);
cout << "Video has " << frames << " frames of dimensions("
<< tmpw << ", " << tmph << ")." << endl;
cv::createTrackbar(
"Position",
"Example 2-4",
&g_slider_position,
frames,
onTrackbarSlide
);
cv::Mat frame;
for (;;) {
if (g_run != 0) {
g_cap >> frame;
if (frame.empty()) break;
int current_pos = (int)g_cap.get(CV_CAP_PROP_POS_FRAMES);
g_dontset = 1;
cv::setTrackbarPos("Position", "Example 2-4", current_pos);
cv::imshow("Example 2-4", frame);
g_run -= 1;
}
char c = (char)cv::waitKey(10);
if (c == 's') { // single step
g_run = 1;
cout << "Single step, run = " << g_run << endl;
}
if (c == 'r') { // run mode
g_run = -1;
cout << "Run mode, run = " << g_run << endl;
}
if (c == 27) break;
}
return(0);
}
本质上,代码中添加一个全局变量来表示滑动条的位置并且添加一个回调函数来更改这个变量来重新定位视频读取的位置。在创建滑动条和回调函数之后,我们就开始运行程序,让我们从全局变量开始更详细地查看整个程序。
int g_slider_position = 0;
int g_run = 1;
int g_dontset = 0; // start out in single-step mode
cv::VideoCapture g_cap;
在程序的开始处,我们定义了一个全局变量g_slider_position来存储滑动条位置。由于回调函数需要访问帧读取结构g_cap,所以我们最好将这个结构也提升为全局变量。
为了保证代码的易读性。我们会在每一个全局变量前加上g_前缀以表明这是一个全局变量。同理,全局变量g_run将在新的跳转触发之后置0。当它为正的时候,指示在停止之前程序要播放多少张图片﹔当它为负的时候,表示系统处于连续视频播放模式。
为了防止冲突,在用户单击了滑动条跳转到一个视频的新位置之后,我们将会通过设置g_run变量为1使视频进入单步模式。即使是这样,也存在一个小问题:当视频前进的时候,我们希望滑动条也能够随着视频的当前进度前进。我们通过在程序中调用滑动条的回调函数实现这一功能。但是,我们并不希望在这个时候进入单步模式。为了避免这样的事情发生,我们引入最后一个全局变量g_dontset来避免在调整进度条位置的时候触
发单步模式。
void onTrackbarSlide(int pos, void*) {
g_cap.set(CV_CAP_PROP_POS_FRAMES, pos);
if (!g_dontset) g_run = 1;
g_dontset = 0;
}
int frames = (int)g_cap.get(cv::CAP_PROP_FRAME_COUNT);
int tmpw= (int)g_cap.get(cv::CAP_PROP_FRAME_WIDTH);
int tmph= (int) g_cap.get(cv::CAP_PROP_FRAME_HEIGHT);
cout << "Video has " << frames << " frames of dimensions(" << tmpw << ", " << tmph << ")." << endl;
if (g_run != 0) {
g_cap >> frame;
if (frame.empty()) break;
int current_pos = (int)g_cap.get(CV_CAP_PROP_POS_FRAMES);
g_dontset = 1;
cv::setTrackbarPos("Position", "Example 2-4", current_pos);
cv::imshow("Example 2-4", frame);
g_run -= 1;
}
char c = (char)cv::waitKey(10);
if (c == 's') { // single step
g_run = 1;
cout << "Single step, run = " << g_run << endl;
}
if (c == 'r') { // run mode
g_run = -1;
cout << "Run mode, run = " << g_run << endl;
}
if (c == 27) break;
}
// Example 2-5. Loading and then smoothing an image before it is displayed on the screen
#include
// Print a usage banner for Example 2-5 (load and smooth an image).
void help(char** argv) {
    std::cout << "\n"
        << "2-05: load and smooth an image before displaying \n"
        << argv[0] << " <path/image>\n" // usage hint restored (argument text was lost)
        << "For example:\n"
        << argv[0] << " ../fruits.jpg\n" // was "../tree.avi" — a video path in an image example
        << std::endl;
}
int main(int argc, char** argv) {
//if (argc != 2) {
// help(argv);
// return 0;
//}
// Load an image specified on the command line.
//
cv::Mat image = cv::imread("C:/Study/opencv/cvproject/testopencvinstall/test.jpg", -1);
// Create some windows to show the input
// and output images in.
//
cv::namedWindow("Example 2-5-in", cv::WINDOW_AUTOSIZE);
cv::namedWindow("Example 2-5-out", cv::WINDOW_AUTOSIZE);
// Create a window to show our input image
//
cv::imshow("Example 2-5-in", image);
// Create an image to hold the smoothed output
//
cv::Mat out;
// Do the smoothing
// ( Note: Could use GaussianBlur(), blur(), medianBlur() or
// bilateralFilter(). )
//
cv::GaussianBlur(image, out, cv::Size(5, 5), 3, 3);
cv::GaussianBlur(out, out, cv::Size(5, 5), 3, 3);
// Show the smoothed image in the output window
//
cv::imshow("Example 2-5-out", out);
// Wait for the user to hit a key, windows will self destruct
//
cv::waitKey(0);
}
// Example 2-6. Using cv::pyrDown() to create a new image that is half the width and
// height of the input image
#include
// Print a usage banner for Example 2-6 (pyrDown half-size image).
void help(char** argv ) {
    std::cout << "\n"
        << "2-06: Using cv::pyrDown() to create a new image that is half the width and" // fixed "AUsing" typo
        << " height of the input image\n"
        << argv[0] << " <path/image>\n" // usage hint restored (argument text was lost)
        << "For example:\n"
        << argv[0] << " ../faces.png\n"
        << std::endl;
}
// Example 2-6 entry point: show the input image and its half-size pyramid
// level side by side until the user presses a key.
int main( int argc, char** argv ) {
    if (argc != 2) {
        help(argv);
        return 0;
    }
    cv::Mat img1, img2;
    cv::namedWindow( "Example 2-6-in", cv::WINDOW_AUTOSIZE );
    cv::namedWindow( "Example 2-6-out", cv::WINDOW_AUTOSIZE );
    img1 = cv::imread( argv[1] );
    if( img1.empty() ) { // guard: pyrDown/imshow on an empty Mat would fail
        std::cerr << "Error: could not read image file: " << argv[1] << std::endl;
        return -1;
    }
    cv::imshow( "Example 2-6-in", img1 );
    cv::pyrDown( img1, img2 ); // Gaussian blur + drop even rows/cols -> half width & height
    cv::imshow( "Example 2-6-out", img2 );
    cv::waitKey(0);
    return 0;
} // (stray trailing semicolon removed)
在OpenCV中,高斯模糊以及降采样通过cv::pyrDown()函数来实现将图像尺寸行和列方向缩减一半
Canny边缘检测器输出一个单通道的(灰度)图像
// Example 2-7. The Canny edge detector writes its output to a single-channel (grayscale) image
// 2
#include
// Usage banner for Example 2-7 (Canny edge detection demo).
void help(char** argv ) {
    const char* prog = argv[0];
    std::cout << "\n"
              << "\nExample 2-7. The Canny edge detector writes its output to a single-channel (grayscale) image"
              << "\nCall:\n";
    std::cout << prog << " \n";
    std::cout << "For example:\n";
    std::cout << prog << " ../fruits.jpg\n" << std::endl;
}
// Example 2-7 entry point: convert the input image to grayscale, run the
// Canny edge detector on it, and show both results until a key is pressed.
int main( int argc, char** argv ) {
    if (argc != 2) {
        help(argv);
        return 0;
    }
    cv::Mat img_rgb, img_gry, img_cny;
    cv::namedWindow( "Example Gray", cv::WINDOW_AUTOSIZE );
    cv::namedWindow( "Example Canny", cv::WINDOW_AUTOSIZE );
    img_rgb = cv::imread( argv[1] );
    if( img_rgb.empty() ) { // guard: cvtColor on an empty Mat would throw
        std::cerr << "Error: could not read image file: " << argv[1] << std::endl;
        return -1;
    }
    cv::cvtColor( img_rgb, img_gry, cv::COLOR_BGR2GRAY );
    cv::imshow( "Example Gray", img_gry );
    // Hysteresis thresholds 10/100, aperture 3, L2 gradient magnitude.
    cv::Canny( img_gry, img_cny, 10, 100, 3, true );
    cv::imshow( "Example Canny", img_cny );
    cv::waitKey(0);
    return 0;
}
// Example 2-8. Combining the pyramid down operator (twice) and the Canny
// subroutine in a simple image pipeline
//2
#include
// Usage banner for Example 2-8 (pyramid-down + Canny pipeline demo).
void help(char** argv ) {
    const char* prog = argv[0];
    std::cout << "\n"
              << "\nExample 2-8. Combining the pyramid down operator (twice) and the Canny"
              << "\n subroutine in a simple image pipeline"
              << "\nCall:\n";
    std::cout << prog << " \n";
    std::cout << "For example:\n";
    std::cout << prog << " ../fruits.jpg\n" << std::endl;
}
// Example 2-8 entry point: grayscale -> two pyrDown passes (quarter size)
// -> Canny; show the gray image and the edge map until a key is pressed.
int main( int argc, char** argv ) {
    if (argc != 2) {
        help(argv);
        return 0;
    }
    cv::Mat img_rgb, img_gry, img_cny, img_pyr, img_pyr2;
    cv::namedWindow( "Example Gray", cv::WINDOW_AUTOSIZE );
    cv::namedWindow( "Example Canny", cv::WINDOW_AUTOSIZE );
    img_rgb = cv::imread( argv[1] );
    if( img_rgb.empty() ) { // guard: cvtColor on an empty Mat would throw
        std::cerr << "Error: could not read image file: " << argv[1] << std::endl;
        return -1;
    }
    cv::cvtColor( img_rgb, img_gry, cv::COLOR_BGR2GRAY );
    cv::pyrDown( img_gry, img_pyr );   // half size
    cv::pyrDown( img_pyr, img_pyr2 );  // quarter size
    cv::Canny( img_pyr2, img_cny, 10, 100, 3, true );
    cv::imshow( "Example Gray", img_gry );
    cv::imshow( "Example Canny", img_cny );
    cv::waitKey(0);
    return 0;
}
// Example 2-9. Getting and setting pixels in Example 2-8
#include
// Usage banner for Example 2-9 (pixel get/set demo).
void help(char** argv ) {
    const char* prog = argv[0];
    std::cout << "\n"
              << "\nExample 2-9. Getting and setting pixels in Example 2-8"
              << "\nCall:\n";
    std::cout << prog << " \n";
    std::cout << "For example:\n";
    std::cout << prog << " ../fruits.jpg\n" << std::endl;
}
// Example 2-9 entry point: run the Example 2-8 pipeline, then read a pixel
// from the color, gray, and pyramid images and write one into the edge map.
int main( int argc, char** argv ) {
    if (argc != 2) {
        help(argv);
        return 0;
    }
    cv::Mat img_rgb, img_gry, img_cny, img_pyr, img_pyr2;
    cv::namedWindow( "Example Gray", cv::WINDOW_AUTOSIZE );
    cv::namedWindow( "Example Canny", cv::WINDOW_AUTOSIZE );
    img_rgb = cv::imread( argv[1] );
    if( img_rgb.empty() ) { // guard: cvtColor/at() on an empty Mat would fail
        std::cerr << "Error: could not read image file: " << argv[1] << std::endl;
        return -1;
    }
    cv::cvtColor( img_rgb, img_gry, cv::COLOR_BGR2GRAY );
    cv::pyrDown( img_gry, img_pyr );
    cv::pyrDown( img_pyr, img_pyr2 );
    cv::Canny( img_pyr2, img_cny, 10, 100, 3, true );
    // ----------------------------------------------------
    // Start new code for example 2-9
    //
    int x = 16, y = 32;
    // Mat::at takes (row, col), i.e. (y, x).
    cv::Vec3b intensity = img_rgb.at< cv::Vec3b >(y, x);
    uchar blue = intensity[0];   // OpenCV stores color as BGR
    uchar green = intensity[1];
    uchar red = intensity[2];
    std::cout << "At (x,y) = (" << x << ", " << y <<
        "): (blue, green, red) = (" <<
        (unsigned int) blue <<
        ", " << (unsigned int)green << ", " <<
        (unsigned int) red << ")" << std::endl;
    std::cout << "Gray pixel there is: " <<
        (unsigned int) img_gry.at<uchar>(y, x) << std::endl;
    x /= 4; y /= 4; // same location two pyramid levels down
    std::cout << "Pyramid2 pixel there is: " <<
        (unsigned int)img_pyr2.at<uchar>(y, x) << std::endl;
    // Fixed: was at<uchar>(x, y); at() is (row, col) = (y, x) as used above.
    img_cny.at<uchar>(y, x) = 128; // Set the Canny pixel there to 128
    //
    // End new code for example 2-9
    // ----------------------------------------------------
    cv::imshow( "Example Gray", img_gry );
    cv::imshow( "Example Canny", img_cny );
    cv::waitKey(0);
    return 0;
}
// Example 2-10. The same object can load videos from a camera or a file
//
#include
#include
// Print a usage banner for Example 2-10 (camera-or-file capture).
void help(char** argv ) {
    std::cout << "\n"
        << "\nExample 2-10. The same object can load videos from a camera or a file" // fixed "xample" typo
        << "\nCall:\n"
        << argv[0] << " [path/image]\n"
        << "\nor, read from camera:\n"
        << argv[0]
        << "\nFor example:\n"
        << argv[0] << " ../tree.avi\n"
        << std::endl;
}
int main( int argc, char** argv ) {
help(argv);
cv::namedWindow( "Example 2-10", cv::WINDOW_AUTOSIZE );
cv::VideoCapture cap;
if (argc==1) {
cap.open(0); // open the first camera
} else {
cap.open(argv[1]);
}
if( !cap.isOpened() ) { // check if we succeeded
std::cerr << "Couldn't open capture." << std::endl;
return -1;
}
cv::Mat frame;
for(;;) {
cap >> frame;
if( frame.empty() ) break; // Ran out of film
cv::imshow( "Example 2-10", frame );
if( (char) cv::waitKey(33) >= 0 ) break;
}
return 0;
}
// Example 2-11. A complete program to read in a color video and write out the log-polar-
// transformed video
#include
#include
// Usage banner for Example 2-11 (log-polar video transcoder).
void help(char** argv ) {
    const char* prog = argv[0];
    std::cout << "\n"
              << "Read in a video, write out a log polar of it\n";
    std::cout << prog << " \n";
    std::cout << "For example:\n";
    std::cout << prog << " ../tree.avi ../vout.avi\n";
    std::cout << "\nThen read it with:\n ./example_02-10 ../vout.avi\n" << std::endl;
}
// Example 2-11 entry point: read the color video argv[1], log-polar
// transform each frame, and write the result to argv[2] (MJPG codec).
// Esc stops early; both streams are released before returning.
int main( int argc, char** argv ) {
    if (argc != 3) {
        help(argv);
        return 0;
    }
    cv::namedWindow( "Example 2-11", cv::WINDOW_AUTOSIZE );
    cv::namedWindow( "Log_Polar", cv::WINDOW_AUTOSIZE );
    // ( Note: could capture from a camera by giving a camera id as an int.)
    cv::VideoCapture capture( argv[1] );
    if( !capture.isOpened() ) { // guard: get() on a closed capture returns 0
        std::cerr << "Couldn't open input video: " << argv[1] << std::endl;
        return -1;
    }
    double fps = capture.get( CV_CAP_PROP_FPS );
    cv::Size size(
        (int)capture.get( CV_CAP_PROP_FRAME_WIDTH ),
        (int)capture.get( CV_CAP_PROP_FRAME_HEIGHT )
    );
    cv::VideoWriter writer;
    // MJPG: Motion-JPEG; the codec must be available on this machine.
    writer.open( argv[2], CV_FOURCC('M','J','P','G'), fps, size );
    if( !writer.isOpened() ) { // e.g. missing codec or unwritable path
        std::cerr << "Couldn't open output video: " << argv[2] << std::endl;
        return -1;
    }
    cv::Mat logpolar_frame, bgr_frame;
    for(;;) {
        capture >> bgr_frame;
        if( bgr_frame.empty() ) break; // end if done
        cv::imshow( "Example 2-11", bgr_frame );
        cv::logPolar(
            bgr_frame,            // Input color frame
            logpolar_frame,       // Output log-polar frame
            cv::Point2f(          // Centerpoint for log-polar transformation
                bgr_frame.cols/2, // x
                bgr_frame.rows/2  // y
            ),
            40,                   // Magnitude (scale parameter)
            CV_WARP_FILL_OUTLIERS // Fill outliers with 'zero'
        );
        cv::imshow( "Log_Polar", logpolar_frame );
        writer << logpolar_frame;
        char c = cv::waitKey(10);
        if( c == 27 ) break; // allow the user to break out (Esc)
    }
    writer.release();
    capture.release();
    return 0;
}
这个程序里面有很多我们非常熟悉的元素。首先是打开视频并且读取一些在cv::VideoWriter建立时用到的属性(每秒播放的帧数以及图像宽高)。在此之后,我们将会从cv::VideoCapture中逐帧读取视频,并将每一帧转换为对数极坐标形式,然后将转换的对数极坐标图像写入新的视频文件,循环操作直到源文件读完或者用户按下Esc键。
cv::VideoWriter的调用有几个需要理解的参数。第一个参数是新建视频文件的文件名,第二个参数是视频编码方式,指明视频将以何种方式进行压缩。现在有非常多的编码器可以选择,但是选择的任何编码器都必须确保可以在你的机器上使用(编码器是与OpenCV分开安装的)。在我们的例子中,我们选择了通用的MJPG编码器,我们通过使用OpenCV所提供的宏CV_FOURCC()指定它,这个宏将四个字符作为参数,所有编码器都有类似的四个字符作为其标识。比如本例中用于运动jpeg图像编码的字符为"MJPG",所以我们指定CV_FOURCC('M','J','P','G')。接下来的参数是帧率以及图像的大小。在我们的例子中,这两个值和原始视频一致。