参考:https://docs.opencv.org/3.2.0/
参考:https://docs.opencv.org/3.2.0/da/d6a/tutorial_trackbar.html
// Linear-blend trackbar demo (OpenCV tutorial_trackbar): shared state between
// main() and the trackbar callback.
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include <stdio.h>                // BUGFIX: header name was stripped; printf/sprintf are used below
using namespace cv;
const int alpha_slider_max = 100; // trackbar upper bound
int alpha_slider;                 // current trackbar position (0..alpha_slider_max)
double alpha;                     // blend weight of src1
double beta;                      // blend weight of src2 (always 1 - alpha)
Mat src1;
Mat src2;
Mat dst;
// Trackbar callback: recompute the blend dst = alpha*src1 + (1-alpha)*src2
// from the current slider position and refresh the display window.
static void on_trackbar( int, void* )
{
    const double weight1 = (double) alpha_slider / alpha_slider_max;
    const double weight2 = 1.0 - weight1;
    alpha = weight1;                               // keep the shared globals in sync
    beta  = weight2;
    addWeighted( src1, alpha, src2, beta, 0.0, dst );
    imshow( "Linear Blend", dst );
}
// Load the two logos, create the blend window plus an alpha trackbar, and
// render the initial (alpha = 0) blend. Blocks until a key is pressed.
int main( void )
{
    // Paths are relative to the build directory, as in the OpenCV samples.
    src1 = imread("../data/LinuxLogo.jpg");
    src2 = imread("../data/WindowsLogo.jpg");
    if( src1.empty() ) { printf("Error loading src1 \n"); return -1; }
    if( src2.empty() ) { printf("Error loading src2 \n"); return -1; }
    alpha_slider = 0;
    namedWindow("Linear Blend", WINDOW_AUTOSIZE); // Create Window
    char TrackbarName[50];
    // snprintf instead of sprintf: can never overrun the 50-byte buffer.
    snprintf( TrackbarName, sizeof(TrackbarName), "Alpha x %d", alpha_slider_max );
    createTrackbar( TrackbarName, "Linear Blend", &alpha_slider, alpha_slider_max, on_trackbar );
    on_trackbar( alpha_slider, 0 ); // draw the initial blend before any user input
    waitKey(0);
    return 0;
}
参考:http://blog.csdn.net/wc781708249/article/details/78501484
参考:http://blog.csdn.net/wc781708249/article/details/78296147
# RGB color-mixer demo: three trackbars (R, G, B) plus an ON/OFF switch drive
# the fill color of a 300x512 canvas; ESC quits.
# BUGFIX: all indentation was lost in the source (SyntaxError); restored here.
import cv2
import numpy as np


def nothing(x):
    # Trackbar callback; positions are polled inside the loop instead.
    pass


# Create a black image, a window
img = np.zeros((300, 512, 3), np.uint8)
cv2.namedWindow('image')
# create trackbars for color change
cv2.createTrackbar('R', 'image', 0, 255, nothing)  # value range 0~255
cv2.createTrackbar('G', 'image', 0, 255, nothing)
cv2.createTrackbar('B', 'image', 0, 255, nothing)
# create switch for ON/OFF functionality
switch = '0 : OFF \n1 : ON'
cv2.createTrackbar(switch, 'image', 0, 1, nothing)  # value range 0~1
while (1):
    cv2.imshow('image', img)
    k = cv2.waitKey(1) & 0xFF
    if k == 27:  # ESC
        break
    # get current positions of four trackbars
    r = cv2.getTrackbarPos('R', 'image')
    g = cv2.getTrackbarPos('G', 'image')
    b = cv2.getTrackbarPos('B', 'image')
    s = cv2.getTrackbarPos(switch, 'image')
    if s == 0:
        img[:] = 0          # switch off -> black canvas
    else:
        img[:] = [b, g, r]  # OpenCV channel order is BGR
cv2.destroyAllWindows()
参考:http://blog.csdn.net/wc781708249/article/details/78479584
1、如何打开和读视频流
2、两种检查图像相似性的方法:PSNR和SSIM
参考:
https://docs.opencv.org/3.2.0/d5/dc4/tutorial_video_input_psnr_ssim.html
#include // for standard I/O
#include // for strings
#include // for controlling float print precision
#include // string to number conversion
#include // Basic OpenCV structures (cv::Mat, Scalar)
#include // Gaussian Blur
#include
#include // OpenCV window I/O
using namespace std;
using namespace cv;
double getPSNR(const Mat& I1, const Mat& I2);
Scalar getMSSIM(const Mat& I1, const Mat& I2);
// Print program description and command-line usage.
// BUGFIX: the argument placeholders in the usage line were stripped by the
// extraction; restored from the official tutorial.
static void help()
{
    cout
        << "------------------------------------------------------------------------------" << endl
        << "This program shows how to read a video file with OpenCV. In addition, it "
        << "tests the similarity of two input videos first with PSNR, and for the frames "
        << "below a PSNR trigger value, also with MSSIM." << endl
        << "Usage:" << endl
        << "./video-input-psnr-ssim <referenceVideo> <useCaseTestVideo> <PSNR_Trigger_Value> <Wait_Between_Frames> " << endl
        << "--------------------------------------------------------------------------" << endl
        << endl;
}
// Compare two videos frame by frame: always print PSNR, and for frames whose
// PSNR falls below the trigger value (but is non-zero) also print per-channel
// MSSIM. argv: [1] reference video, [2] video under test, [3] PSNR trigger,
// [4] inter-frame delay in ms. ESC stops playback.
int main(int argc, char *argv[])
{
help();
if (argc != 5)
{
cout << "Not enough parameters" << endl;
return -1;
}
// Parse the two numeric arguments through a stringstream.
stringstream conv;
const string sourceReference = argv[1], sourceCompareWith = argv[2];
int psnrTriggerValue, delay;
conv << argv[3] << endl << argv[4]; // put in the strings
conv >> psnrTriggerValue >> delay; // take out the numbers
int frameNum = -1; // Frame counter
VideoCapture captRefrnc(sourceReference), captUndTst(sourceCompareWith);
if (!captRefrnc.isOpened())
{
cout << "Could not open reference " << sourceReference << endl;
return -1;
}
if (!captUndTst.isOpened())
{
cout << "Could not open case test " << sourceCompareWith << endl;
return -1;
}
// Both inputs must have identical resolution for a pixel-wise comparison.
Size refS = Size((int)captRefrnc.get(CAP_PROP_FRAME_WIDTH),
(int)captRefrnc.get(CAP_PROP_FRAME_HEIGHT)),
uTSi = Size((int)captUndTst.get(CAP_PROP_FRAME_WIDTH),
(int)captUndTst.get(CAP_PROP_FRAME_HEIGHT));
if (refS != uTSi)
{
cout << "Inputs have different size!!! Closing." << endl;
return -1;
}
const char* WIN_UT = "Under Test";
const char* WIN_RF = "Reference";
// Windows
namedWindow(WIN_RF, WINDOW_AUTOSIZE);
namedWindow(WIN_UT, WINDOW_AUTOSIZE);
moveWindow(WIN_RF, 400, 0); //750, 2 (bernat =0)
moveWindow(WIN_UT, refS.width, 0); //1500, 2
cout << "Reference frame resolution: Width=" << refS.width << " Height=" << refS.height
<< " of nr#: " << captRefrnc.get(CAP_PROP_FRAME_COUNT) << endl;
cout << "PSNR trigger value " << setiosflags(ios::fixed) << setprecision(3)
<< psnrTriggerValue << endl;
Mat frameReference, frameUnderTest;
double psnrV;
Scalar mssimV;
for (;;) //Show the image captured in the window and repeat
{
captRefrnc >> frameReference;
captUndTst >> frameUnderTest;
// Stop as soon as either stream runs out of frames.
if (frameReference.empty() || frameUnderTest.empty())
{
cout << " < < < Game over! > > > ";
break;
}
++frameNum;
cout << "Frame: " << frameNum << "# ";
psnrV = getPSNR(frameReference, frameUnderTest);
cout << setiosflags(ios::fixed) << setprecision(3) << psnrV << "dB";
// MSSIM is expensive, so only compute it for "suspicious" frames; psnrV == 0
// means the frames are identical (getPSNR's convention), which is skipped too.
if (psnrV < psnrTriggerValue && psnrV)
{
mssimV = getMSSIM(frameReference, frameUnderTest);
// Scalar is in BGR order, hence val[2]=R, val[1]=G, val[0]=B.
cout << " MSSIM: "
<< " R " << setiosflags(ios::fixed) << setprecision(2) << mssimV.val[2] * 100 << "%"
<< " G " << setiosflags(ios::fixed) << setprecision(2) << mssimV.val[1] * 100 << "%"
<< " B " << setiosflags(ios::fixed) << setprecision(2) << mssimV.val[0] * 100 << "%";
}
cout << endl;
imshow(WIN_RF, frameReference);
imshow(WIN_UT, frameUnderTest);
char c = (char)waitKey(delay);
if (c == 27) break; // ESC
}
return 0;
}
double getPSNR(const Mat& I1, const Mat& I2)
{
Mat s1;
absdiff(I1, I2, s1); // |I1 - I2|
s1.convertTo(s1, CV_32F); // cannot make a square on 8 bits
s1 = s1.mul(s1); // |I1 - I2|^2
Scalar s = sum(s1); // sum elements per channel
double sse = s.val[0] + s.val[1] + s.val[2]; // sum channels
if (sse <= 1e-10) // for small values return zero
return 0;
else
{
double mse = sse / (double)(I1.channels() * I1.total());
double psnr = 10.0 * log10((255 * 255) / mse);
return psnr;
}
}
// Per-channel mean Structural Similarity (SSIM, Wang et al.) between two
// images; returns a Scalar of per-channel means in [0, 1], BGR order.
Scalar getMSSIM(const Mat& i1, const Mat& i2)
{
// Stabilizing constants: (0.01*255)^2 and (0.03*255)^2 from the SSIM paper.
const double C1 = 6.5025, C2 = 58.5225;
/***************************** INITS **********************************/
int d = CV_32F;
Mat I1, I2;
i1.convertTo(I1, d); // cannot calculate on one byte large values
i2.convertTo(I2, d);
Mat I2_2 = I2.mul(I2); // I2^2
Mat I1_2 = I1.mul(I1); // I1^2
Mat I1_I2 = I1.mul(I2); // I1 * I2
/*************************** END INITS **********************************/
Mat mu1, mu2; // PRELIMINARY COMPUTING
// Local means via an 11x11 Gaussian window (sigma = 1.5), per the SSIM paper.
GaussianBlur(I1, mu1, Size(11, 11), 1.5);
GaussianBlur(I2, mu2, Size(11, 11), 1.5);
Mat mu1_2 = mu1.mul(mu1);
Mat mu2_2 = mu2.mul(mu2);
Mat mu1_mu2 = mu1.mul(mu2);
// Local variances/covariance via E[X^2] - E[X]^2 (and E[XY] - E[X]E[Y]).
Mat sigma1_2, sigma2_2, sigma12;
GaussianBlur(I1_2, sigma1_2, Size(11, 11), 1.5);
sigma1_2 -= mu1_2;
GaussianBlur(I2_2, sigma2_2, Size(11, 11), 1.5);
sigma2_2 -= mu2_2;
GaussianBlur(I1_I2, sigma12, Size(11, 11), 1.5);
sigma12 -= mu1_mu2;
Mat t1, t2, t3;
t1 = 2 * mu1_mu2 + C1;
t2 = 2 * sigma12 + C2;
t3 = t1.mul(t2); // t3 = ((2*mu1_mu2 + C1).*(2*sigma12 + C2))
t1 = mu1_2 + mu2_2 + C1;
t2 = sigma1_2 + sigma2_2 + C2;
t1 = t1.mul(t2); // t1 =((mu1_2 + mu2_2 + C1).*(sigma1_2 + sigma2_2 + C2))
Mat ssim_map;
divide(t3, t1, ssim_map); // ssim_map = t3./t1;
Scalar mssim = mean(ssim_map); // mssim = average of ssim map
return mssim;
}
import cv2
import numpy as np
#########################################
def getPSNR(I1, I2):
    """Peak Signal-to-Noise Ratio (dB) between two equal-sized images.

    Returns 0 for (near-)identical images, mirroring the C++ tutorial.
    BUGFIX: the body's indentation was lost in the source (SyntaxError);
    restored here.
    """
    s1 = cv2.absdiff(I1, I2)   # |I1 - I2|
    s1 = np.float32(s1)        # cannot make a square on 8 bits
    s1 = s1 * s1               # |I1 - I2|^2
    s = np.sum(s1)
    if s < 1e-10:              # for small values return zero
        return 0
    else:
        mse = s / I1.size      # I1.size == pixels * channels
        psnr = 10.0 * np.log10((255 * 255) / mse)
        return psnr
def getMSSIM(I1, I2):
    """Mean Structural Similarity (SSIM) per channel, as returned by cv2.mean.

    Python port of the C++ getMSSIM above. BUGFIX: indentation was lost in the
    source (SyntaxError); restored. The original also assigned
    ``d = cv2.CV_32F`` which was never used (the conversion below uses
    np.float32 directly), so that dead assignment was dropped.
    """
    C1 = 6.5025
    C2 = 58.5225
    I1 = np.float32(I1)    # cannot calculate on one-byte (uint8) values
    I2 = np.float32(I2)
    I2_2 = I2 * I2         # I2^2
    I1_2 = I1 * I1         # I1^2
    I1_I2 = I1 * I2        # I1 * I2
    # Local means via an 11x11 Gaussian window (sigma = 1.5).
    mu1 = cv2.GaussianBlur(I1, (11, 11), 1.5)
    mu2 = cv2.GaussianBlur(I2, (11, 11), 1.5)
    mu1_2 = mu1 * mu1
    mu2_2 = mu2 * mu2
    mu1_mu2 = mu1 * mu2
    # Local variances/covariance: E[X^2] - E[X]^2 etc.
    sigma1_2 = cv2.GaussianBlur(I1_2, (11, 11), 1.5)
    sigma1_2 -= mu1_2
    sigma2_2 = cv2.GaussianBlur(I2_2, (11, 11), 1.5)
    sigma2_2 -= mu2_2
    sigma12 = cv2.GaussianBlur(I1_I2, (11, 11), 1.5)
    sigma12 -= mu1_mu2
    t1 = 2 * mu1_mu2 + C1
    t2 = 2 * sigma12 + C2
    t3 = t1 * t2                   # ((2*mu1_mu2 + C1).*(2*sigma12 + C2))
    t1 = mu1_2 + mu2_2 + C1
    t2 = sigma1_2 + sigma2_2 + C2
    t1 = t1 * t2                   # ((mu1_2 + mu2_2 + C1).*(sigma1_2 + sigma2_2 + C2))
    ssim_map = cv2.divide(t3, t1)  # ssim_map = t3./t1
    mssim = cv2.mean(ssim_map)     # mssim = average of ssim map
    return mssim
########################################
# Compare two videos frame by frame with PSNR; when PSNR drops below the
# trigger value (and is non-zero) also report per-channel MSSIM. ESC stops.
# BUGFIX: indentation was lost in the source; restored here.
sourceReference = "vtest.avi"
sourceCompareWith = "vtest.avi"
psnrTriggerValue = 10
delay = 60
# open the videos
captRefrnc = cv2.VideoCapture(sourceReference)
captUndTst = cv2.VideoCapture(sourceCompareWith)
if not captRefrnc.isOpened():
    print("Could not open reference", sourceReference); exit(-1)
if not captUndTst.isOpened():
    print("Could not open case test", sourceCompareWith); exit(-1)
# compare the video resolutions; a pixel-wise metric needs identical sizes
w1 = int(captRefrnc.get(cv2.CAP_PROP_FRAME_WIDTH))   # or captRefrnc.get(3)
h1 = int(captRefrnc.get(cv2.CAP_PROP_FRAME_HEIGHT))  # or captRefrnc.get(4)
# note: height can also be set, e.g. ret = cap.set(4, 240)
w2 = int(captUndTst.get(cv2.CAP_PROP_FRAME_WIDTH))
h2 = int(captUndTst.get(cv2.CAP_PROP_FRAME_HEIGHT))
if w1 != w2 or h1 != h2:
    print("Inputs have different size!!! Closing."); exit(-1)
# Windows
WIN_UT = "Under Test"
WIN_RF = "Reference"
cv2.namedWindow(WIN_RF, cv2.WINDOW_AUTOSIZE)
cv2.namedWindow(WIN_UT, cv2.WINDOW_AUTOSIZE)
cv2.moveWindow(WIN_RF, 400, 0)
cv2.moveWindow(WIN_UT, w1, 0)
print("Reference frame resolution: Width=", w1, " Height=", h1, " of nr#: ",
      captRefrnc.get(cv2.CAP_PROP_FRAME_COUNT))
frameNum = -1
# Show the image captured in the window and repeat
while (1):
    ret1, frameReference = captRefrnc.read()
    ret2, frameUnderTest = captUndTst.read()
    if not ret1 or not ret2:
        print("Game over!")
        break
    frameNum += 1
    print("Frame: ", frameNum, "# ")
    psnrV = getPSNR(frameReference, frameUnderTest)
    # BUGFIX: the source read "if psnrVand psnrV:" (a syntax error); restored
    # to mirror the C++ tutorial's condition.
    if psnrV < psnrTriggerValue and psnrV:
        mssimV = getMSSIM(frameReference, frameUnderTest)
        print(" MSSIM:", " R ", mssimV[2] * 100, " G ", mssimV[1] * 100, " B ", mssimV[0] * 100)
    cv2.imshow(WIN_RF, frameReference)
    cv2.imshow(WIN_UT, frameUnderTest)
    # c = ord(cv2.waitKey(delay))  # to ASCII
    c = cv2.waitKey(delay) & 0xff
    if c == 27:  # ESC
        break
参考:https://docs.opencv.org/3.2.0/d7/d9e/tutorial_video_write.html
#include // for standard I/O
#include // for strings
#include // Basic OpenCV structures (cv::Mat)
#include // Video write
using namespace std;
using namespace cv;
// Print program description and command-line usage.
// BUGFIX: the input-file placeholder in the usage line was stripped by the
// extraction; restored from the official tutorial.
static void help()
{
    cout
        << "------------------------------------------------------------------------------" << endl
        << "This program shows how to write video files." << endl
        << "You can extract the R or G or B color channel of the input video." << endl
        << "Usage:" << endl
        << "./video-write <input video file> [ R | G | B] [Y | N]" << endl
        << "------------------------------------------------------------------------------" << endl
        << endl;
}
// Read a video, zero all but one color channel (argv[2]: R, G or B) and write
// the result next to the input as <name><channel>.avi. argv[3] == 'Y' asks the
// OS for an output codec instead of reusing the input's FOURCC.
int main(int argc, char *argv[])
{
    help();
    if (argc != 4)
    {
        cout << "Not enough parameters" << endl;
        return -1;
    }
    const string source = argv[1];                // the source file name
    const bool askOutputType = argv[3][0] == 'Y'; // If false it will use the inputs codec type
    VideoCapture inputVideo(source);              // Open input
    if (!inputVideo.isOpened())
    {
        cout << "Could not open the input video: " << source << endl;
        return -1;
    }
    string::size_type pAt = source.find_last_of('.');                // Find extension point
    const string NAME = source.substr(0, pAt) + argv[2][0] + ".avi"; // Form the new name with container
    int ex = static_cast<int>(inputVideo.get(CAP_PROP_FOURCC));      // Get Codec Type- Int form
    // Transform from int to char via Bitwise operators (FOURCC is 4 packed chars)
    char EXT[] = {(char)(ex & 0XFF) , (char)((ex & 0XFF00) >> 8),(char)((ex & 0XFF0000) >> 16),(char)((ex & 0XFF000000) >> 24), 0};
    Size S = Size((int) inputVideo.get(CAP_PROP_FRAME_WIDTH),    // Acquire input size
                  (int) inputVideo.get(CAP_PROP_FRAME_HEIGHT));
    VideoWriter outputVideo;                                     // Open the output
    if (askOutputType)
        // ex = -1 makes VideoWriter ask the OS for a codec (dialog on Windows).
        outputVideo.open(NAME, ex=-1, inputVideo.get(CAP_PROP_FPS), S, true);
    else
        outputVideo.open(NAME, ex, inputVideo.get(CAP_PROP_FPS), S, true);
    if (!outputVideo.isOpened())
    {
        cout << "Could not open the output video for write: " << source << endl;
        return -1;
    }
    cout << "Input frame resolution: Width=" << S.width << " Height=" << S.height
         << " of nr#: " << inputVideo.get(CAP_PROP_FRAME_COUNT) << endl;
    cout << "Input codec type: " << EXT << endl;
    int channel = 2; // Select the channel to save (BGR order: 2 = R)
    switch(argv[2][0])
    {
    case 'R' : channel = 2; break;
    case 'G' : channel = 1; break;
    case 'B' : channel = 0; break;
    }
    Mat src, res;
    vector<Mat> spl; // BUGFIX: the <Mat> template argument was stripped in the source
    for(;;) //Show the image captured in the window and repeat
    {
        inputVideo >> src;       // read
        if (src.empty()) break;  // check if at end
        split(src, spl);         // process - extract only the correct channel
        for (int i = 0; i < 3; ++i)
            if (i != channel)
                spl[i] = Mat::zeros(S, spl[0].type());
        merge(spl, res);
        //outputVideo.write(res); //save or
        outputVideo << res;
    }
    cout << "Finished writing" << endl;
    return 0;
}
# -*- coding: UTF-8 -*-
# Extract one color channel (channel 2 = R in BGR order) from a video and
# write it to <dir>/R.avi, reusing the input's codec unless askOutputType.
# BUGFIX: indentation was lost in the source (SyntaxError); restored here.
import cv2
import numpy as np
import os

source = "./vtest.avi"
askOutputType = False
inputVideo = cv2.VideoCapture(source)
if not inputVideo.isOpened():
    print("Could not open the input video: "); exit(-1)
pAt = source.split('/')[0]              # directory part ("." for "./vtest.avi")
NAME = os.path.join(pAt, "R" + ".avi")  # Form the new name with container
ex = int(inputVideo.get(cv2.CAP_PROP_FOURCC))  # Get Codec Type- Int form
# Transform from int to char via Bitwise operators (FOURCC = 4 packed chars;
# the trailing 0 mimics the C string terminator of the C++ version)
EXT = [(chr)(ex & 0XFF), (chr)((ex & 0XFF00) >> 8), (chr)((ex & 0XFF0000) >> 16), (chr)((ex & 0XFF000000) >> 24), 0]
S = (int(inputVideo.get(cv2.CAP_PROP_FRAME_WIDTH)),  # Acquire input size
     int(inputVideo.get(cv2.CAP_PROP_FRAME_HEIGHT)))
# outputVideo = cv2.VideoWriter()
if askOutputType:
    # fourcc -1 asks the OS for a codec (opens a selection dialog on Windows)
    outputVideo = cv2.VideoWriter(NAME, -1, inputVideo.get(cv2.CAP_PROP_FPS), S)
else:
    outputVideo = cv2.VideoWriter(NAME, ex, inputVideo.get(cv2.CAP_PROP_FPS), S)
if not outputVideo.isOpened():
    print("Could not open the output video for write: ", source)
    exit(-1)
print("Input frame resolution: Width=", S[0], " Height=", S[1], " of nr#: ",
      inputVideo.get(cv2.CAP_PROP_FRAME_COUNT))
print("Input codec type: ", EXT)
channel = 2  # channel to keep (BGR order: 2 = R)
while (1):
    ret, src = inputVideo.read()
    if not ret:
        break
    spl = cv2.split(src)
    for i in range(3):
        if i != channel:
            spl[i] = np.zeros(spl[0].shape[:2], spl[0].dtype)
    res = cv2.merge(spl)
    outputVideo.write(res)
inputVideo.release()
outputVideo.release()
cv2.destroyAllWindows()
print("Finished writing")
exit(0)
参考:http://blog.csdn.net/wc781708249/article/details/78528920#4221-%E8%AE%BE%E7%BD%AE-findchessboardcornerspy
# -*- coding: utf-8 -*-
# @Time : 2017/7/13 下午6:21
# @Author : play4fun
# @File : 42.2.1-设置-findChessboardCorners.py
# @Software: PyCharm
"""
42.2.1-设置-findChessboardCorners.py:
径向畸变和切向畸变
摄像机的内部和外部参数。内部参数是摄像机特有的，它包括的信息有焦距 (fx, fy)、光学中心 (cx, cy) 等，也称为摄像机矩阵。它完全取决于摄像机自身，只需要计算一次，以后就可以直接使用了。
至少需要10张图案模式来进行摄像机标定
3D 点 称为对象点, 2D 图像点 称为图像点
除了使用棋盘之外 我们 可以使用环形格子
使用函数 cv2.findCirclesGrid() 来找图案。
据说使用环形格子只需要很少的图像 就可以了。
"""
# Chessboard corner detection for camera calibration (OpenCV-Python tutorial).
# BUGFIX: loop indentation was lost in the source (SyntaxError); restored.
import numpy as np
import cv2
import glob

# termination criteria for the sub-pixel corner refinement
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((6 * 7, 3), np.float32)
objp[:, :2] = np.mgrid[0:7, 0:6].T.reshape(-1, 2)
# Arrays to store object points and image points from all the images.
objpoints = []  # 3d point in real world space
imgpoints = []  # 2d points in image plane.
images = glob.glob('../data/left*.jpg')
images += glob.glob('../data/right*.jpg')
for fname in images:
    img = cv2.imread(fname)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Find the chess board corners
    ret, corners = cv2.findChessboardCorners(gray, (7, 6), None)
    # If found, add object points, image points (after refining them)
    if ret == True:
        objpoints.append(objp)
        corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
        # BUGFIX: append the sub-pixel refined corners2 (the original appended
        # the unrefined corners, discarding the refinement).
        imgpoints.append(corners2)
        # Draw and display the corners
        cv2.drawChessboardCorners(img, (7, 6), corners2, ret)
        cv2.imshow('img', img)
        cv2.waitKey(500)
cv2.destroyAllWindows()
摄像机校准
参考:https://docs.opencv.org/3.2.0/d4/d94/tutorial_camera_calibration.html
参考:https://stackoverflow.com/questions/8368255/camera-calibration-with-opencv-assertion-failed-fault
// Camera calibration from a video (adapted from the referenced StackOverflow
// question). BUGFIX: the extraction stripped the template arguments from all
// vector declarations, and the original hard-coded a machine-specific include
// path ("C:/opencv/include/opencv/cv.h"); both fixed below.
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp" // findChessboardCorners / drawChessboardCorners
#include <iostream>
#include <sstream>
using namespace cv;
using namespace std;
std::vector<cv::Point2f> imageCorners;                 // corners detected in one frame
std::vector<cv::Point3f> objectCorners;                // board corners in board coordinates
std::vector<std::vector<cv::Point3f> > objectPoints;   // one entry per accepted view
std::vector<std::vector<cv::Point2f> > imagePoints;
void addPoints(const std::vector &imageCorners, const std::vector & objectCorners)
{
// 2D image points from one view
imagePoints.push_back(imageCorners);
// corresponding 3D scene points
objectPoints.push_back(objectCorners);
}
int main()
{
int key;
cv::Mat image;
cv::Mat gray_image;
VideoCapture cap("here goes path of the file");
if (!cap.isOpened()) // check if we succeeded
cout<<"failed";
else
cout<<"success";
cvNamedWindow( "video",0);
cv::Size boardSize(8,6);
// output Matrices
cv::Mat cameraMatrix;
std::vector rvecs, tvecs;
cv::Mat distCoeffs;
for (int i=0; ifor (int j=0; j0.0f));
}
}
int frame=1;
int corner_count=0;
while(1)
{
if(cap.read(image))
{
frame++;
if(frame%20==0)
{
if(waitKey(30) >= 0) break;
bool found = cv::findChessboardCorners(image, boardSize, imageCorners);
cvtColor( image, gray_image, CV_RGB2GRAY );
addPoints(imageCorners, objectCorners);
//bool found = cv::findChessboardCorners(image,boardSize, imageCorners);
cv::drawChessboardCorners(gray_image,boardSize, imageCorners,found);
imshow( "video", gray_image );
}
}
else
break;
}
int flag=0;
std::string text="";
for (int i=1; istd::stringstream out;
out << imagePoints[i];
text=out.str();
cout<return 0;
}
参考:https://stackoverflow.com/questions/31249037/calibrating-webcam-using-python-and-opencv-error?rq=1
# -*- coding: UTF-8 -*-
# Camera calibration from a set of chessboard images (see the referenced
# StackOverflow answer). BUGFIX: loop indentation was lost in the source
# (SyntaxError); restored here.
import numpy as np
import cv2
import glob

criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# Arrays to store object points and image points from all the images.
objpoints = []  # 3d point in real world
imgpoints = []  # 2d points in image plane.
# checkerboard Dimensions (inner corners)
cbrow = 5
cbcol = 7
objp = np.zeros((cbrow * cbcol, 3), np.float32)
objp[:, :2] = np.mgrid[0:cbcol, 0:cbrow].T.reshape(-1, 2)
objp = objp * 22  # scale by the square size (presumably millimetres)
"""
inputVideo=cv2.VideoCapture("./vtest.avi")
if not inputVideo.isOpened():
print("Could not open the input video: ");exit(-1)
while(1):
ret, img = inputVideo.read()
if not ret: break
"""
images = glob.glob('./left/*.jpg')
for fname in images:
    img = cv2.imread(fname)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret = False
    # Find the chess board corners
    ret, corners = cv2.findChessboardCorners(gray, (cbcol, cbrow))
    # If found, add object points, image points (after refining them)
    if ret == True:
        objpoints.append(objp)
        cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
        imgpoints.append(corners)
        # Draw and display the corners
        cv2.drawChessboardCorners(img, (cbcol, cbrow), corners, ret)
        cv2.imshow('img', img)
        cv2.waitKey(0)
cv2.waitKey(0)
# extra waitKey/destroy dance: a known workaround so HighGUI windows actually
# close on some platforms (notably macOS)
for i in range(1, 5):
    cv2.waitKey(1)
cv2.destroyAllWindows()
cv2.waitKey(1)
ret, cameraMatrix, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
print("camera matrix:\n", cameraMatrix)
# pi camera intrinsic parameters
ay = cameraMatrix[1, 1]
u0 = cameraMatrix[0, 2]
v0 = cameraMatrix[1, 2]
print("Ay:", ay)
print("u0:", u0)
print("v0:", v0)
Real Time pose estimation of a textured object
参考:https://docs.opencv.org/3.2.0/dc/d2c/tutorial_real_time_pose.html
参考:http://blog.csdn.net/wc781708249/article/details/78528920#calib3dpy
参考:
https://docs.opencv.org/3.2.0/d7/d21/tutorial_interactive_calibration.html
参考:https://docs.opencv.org/3.2.0/d4/d7d/tutorial_harris_detector.html
// Harris corner detector demo (OpenCV tutorial_harris_detector).
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>               // BUGFIX: header name was stripped in the source
using namespace cv;
using namespace std;
Mat src, src_gray;                // input image and its grayscale version
int thresh = 200;                 // corner-response threshold (trackbar-controlled)
int max_thresh = 255;
const char* source_window = "Source image";
const char* corners_window = "Corners detected";
void cornerHarris_demo( int, void* );
// Load the image given on the command line, show it with a threshold trackbar
// and run the Harris demo once for the initial threshold.
int main( int, char** argv )
{
src = imread( argv[1], IMREAD_COLOR );
cvtColor( src, src_gray, COLOR_BGR2GRAY );
namedWindow( source_window, WINDOW_AUTOSIZE );
createTrackbar( "Threshold: ", source_window, &thresh, max_thresh, cornerHarris_demo );
imshow( source_window, src );
cornerHarris_demo( 0, 0 );
waitKey(0);
return(0);
}
// Trackbar callback: compute the Harris response of src_gray, normalize it to
// 0..255 and circle every pixel whose response exceeds the current threshold.
void cornerHarris_demo( int, void* )
{
Mat dst, dst_norm, dst_norm_scaled;
dst = Mat::zeros( src.size(), CV_32FC1 );
// Harris parameters: neighborhood size, Sobel aperture, free parameter k.
int blockSize = 2;
int apertureSize = 3;
double k = 0.04;
cornerHarris( src_gray, dst, blockSize, apertureSize, k, BORDER_DEFAULT );
// Map the raw response into 0..255 so it is comparable with the trackbar value.
normalize( dst, dst_norm, 0, 255, NORM_MINMAX, CV_32FC1, Mat() );
convertScaleAbs( dst_norm, dst_norm_scaled );
for( int j = 0; j < dst_norm.rows ; j++ )
{ for( int i = 0; i < dst_norm.cols; i++ )
{
if( (int) dst_norm.at<float>(j,i) > thresh )
{
// Mark an accepted corner with a small circle on the visualization image.
circle( dst_norm_scaled, Point( i, j ), 5, Scalar(0), 2, 8, 0 );
}
}
}
namedWindow( corners_window, WINDOW_AUTOSIZE );
imshow( corners_window, dst_norm_scaled );
}
参考:
http://blog.csdn.net/wc781708249/article/details/78524311#ch30-harris%E8%A7%92%E7%82%B9%E6%A3%80%E6%B5%8B
参考:
https://docs.opencv.org/3.2.0/d8/dd8/tutorial_good_features_to_track.html
// Shi-Tomasi good-features-to-track demo (OpenCV tutorial_good_features_to_track).
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>            // BUGFIX: header name was stripped in the source
using namespace cv;
using namespace std;
Mat src, src_gray;
int maxCorners = 23;           // trackbar-controlled corner budget
int maxTrackbar = 100;
RNG rng(12345);                // random colors for the drawn corners
const char* source_window = "Image";
void goodFeaturesToTrack_Demo( int, void* );
// Load the image given on the command line, create the max-corners trackbar
// and run the demo once with the initial value.
int main( int, char** argv )
{
src = imread( argv[1], IMREAD_COLOR );
cvtColor( src, src_gray, COLOR_BGR2GRAY );
namedWindow( source_window, WINDOW_AUTOSIZE );
createTrackbar( "Max corners:", source_window, &maxCorners, maxTrackbar, goodFeaturesToTrack_Demo );
imshow( source_window, src );
goodFeaturesToTrack_Demo( 0, 0 );
waitKey(0);
return(0);
}
void goodFeaturesToTrack_Demo( int, void* )
{
if( maxCorners < 1 ) { maxCorners = 1; }
vector corners;
double qualityLevel = 0.01;
double minDistance = 10;
int blockSize = 3;
bool useHarrisDetector = false;
double k = 0.04;
Mat copy;
copy = src.clone();
goodFeaturesToTrack( src_gray,
corners,
maxCorners,
qualityLevel,
minDistance,
Mat(),
blockSize,
useHarrisDetector,
k );
cout<<"** Number of corners detected: "<int r = 4;
for( size_t i = 0; i < corners.size(); i++ )
{ circle( copy, corners[i], r, Scalar(rng.uniform(0,255), rng.uniform(0,255), rng.uniform(0,255)), -1, 8, 0 ); }
namedWindow( source_window, WINDOW_AUTOSIZE );
imshow( source_window, copy );
}
参考:
http://blog.csdn.net/wc781708249/article/details/78528617#goodfeaturestotrackpy
参考:
https://docs.opencv.org/3.2.0/d9/dbc/tutorial_generic_corner_detector.html
// Generic corner detector demo: hand-rolled Harris (via cornerEigenValsAndVecs)
// and Shi-Tomasi (via cornerMinEigenVal) responses with quality trackbars.
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>            // BUGFIX: header name was stripped in the source
using namespace cv;
using namespace std;
Mat src, src_gray;
Mat myHarris_dst; Mat myHarris_copy; Mat Mc;   // eigen data, drawing copy, Harris response
Mat myShiTomasi_dst; Mat myShiTomasi_copy;
int myShiTomasi_qualityLevel = 50;             // trackbar positions (percent of range)
int myHarris_qualityLevel = 50;
int max_qualityLevel = 100;
double myHarris_minVal; double myHarris_maxVal;
double myShiTomasi_minVal; double myShiTomasi_maxVal;
RNG rng(12345);                                // random colors for the drawn corners
const char* myHarris_window = "My Harris corner detector";
const char* myShiTomasi_window = "My Shi Tomasi corner detector";
void myShiTomasi_function( int, void* );
void myHarris_function( int, void* );
int main( int, char** argv )
{
src = imread( argv[1], IMREAD_COLOR );
cvtColor( src, src_gray, COLOR_BGR2GRAY );
int blockSize = 3; int apertureSize = 3;
myHarris_dst = Mat::zeros( src_gray.size(), CV_32FC(6) );
Mc = Mat::zeros( src_gray.size(), CV_32FC1 );
cornerEigenValsAndVecs( src_gray, myHarris_dst, blockSize, apertureSize, BORDER_DEFAULT );
/* calculate Mc */
for( int j = 0; j < src_gray.rows; j++ )
{ for( int i = 0; i < src_gray.cols; i++ )
{
float lambda_1 = myHarris_dst.at(j, i)[0];
float lambda_2 = myHarris_dst.at(j, i)[1];
Mc.at<float>(j,i) = lambda_1*lambda_2 - 0.04f*pow( ( lambda_1 + lambda_2 ), 2 );
}
}
minMaxLoc( Mc, &myHarris_minVal, &myHarris_maxVal, 0, 0, Mat() );
/* Create Window and Trackbar */
namedWindow( myHarris_window, WINDOW_AUTOSIZE );
createTrackbar( " Quality Level:", myHarris_window, &myHarris_qualityLevel, max_qualityLevel, myHarris_function );
myHarris_function( 0, 0 );
myShiTomasi_dst = Mat::zeros( src_gray.size(), CV_32FC1 );
cornerMinEigenVal( src_gray, myShiTomasi_dst, blockSize, apertureSize, BORDER_DEFAULT );
minMaxLoc( myShiTomasi_dst, &myShiTomasi_minVal, &myShiTomasi_maxVal, 0, 0, Mat() );
/* Create Window and Trackbar */
namedWindow( myShiTomasi_window, WINDOW_AUTOSIZE );
createTrackbar( " Quality Level:", myShiTomasi_window, &myShiTomasi_qualityLevel, max_qualityLevel, myShiTomasi_function );
myShiTomasi_function( 0, 0 );
waitKey(0);
return(0);
}
// Trackbar callback: circle every pixel whose minimal eigenvalue exceeds the
// quality-level fraction of the observed [min, max] response range.
void myShiTomasi_function( int, void* )
{
myShiTomasi_copy = src.clone();
// A quality level of 0 would accept every pixel; clamp to at least 1.
if( myShiTomasi_qualityLevel < 1 ) { myShiTomasi_qualityLevel = 1; }
for( int j = 0; j < src_gray.rows; j++ )
{ for( int i = 0; i < src_gray.cols; i++ )
{
if( myShiTomasi_dst.at<float>(j,i) > myShiTomasi_minVal + ( myShiTomasi_maxVal - myShiTomasi_minVal )*myShiTomasi_qualityLevel/max_qualityLevel )
{ circle( myShiTomasi_copy, Point(i,j), 4, Scalar( rng.uniform(0,255), rng.uniform(0,255), rng.uniform(0,255) ), -1, 8, 0 ); }
}
}
imshow( myShiTomasi_window, myShiTomasi_copy );
}
// Trackbar callback: circle every pixel whose Harris response Mc exceeds the
// quality-level fraction of the observed [min, max] response range.
void myHarris_function( int, void* )
{
myHarris_copy = src.clone();
// A quality level of 0 would accept every pixel; clamp to at least 1.
if( myHarris_qualityLevel < 1 ) { myHarris_qualityLevel = 1; }
for( int j = 0; j < src_gray.rows; j++ )
{ for( int i = 0; i < src_gray.cols; i++ )
{
if( Mc.at<float>(j,i) > myHarris_minVal + ( myHarris_maxVal - myHarris_minVal )*myHarris_qualityLevel/max_qualityLevel )
{ circle( myHarris_copy, Point(i,j), 4, Scalar( rng.uniform(0,255), rng.uniform(0,255), rng.uniform(0,255) ), -1, 8, 0 ); }
}
}
imshow( myHarris_window, myHarris_copy );
}
参考:https://docs.opencv.org/3.2.0/d8/d5e/tutorial_corner_subpixeles.html
参考:https://docs.opencv.org/3.2.0/d7/d66/tutorial_feature_detection.html
// SURF keypoint detection demo (OpenCV tutorial_feature_detection).
// Requires the opencv_contrib xfeatures2d module.
// BUGFIX: the first two header names were stripped; restored from the tutorial.
#include <stdio.h>
#include <iostream>
#include "opencv2/core.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/xfeatures2d.hpp"
#include "opencv2/highgui.hpp"
using namespace cv;
using namespace cv::xfeatures2d;
void readme();
/* @function main */
int main( int argc, char** argv )
{
if( argc != 3 )
{ readme(); return -1; }
Mat img_1 = imread( argv[1], IMREAD_GRAYSCALE );
Mat img_2 = imread( argv[2], IMREAD_GRAYSCALE );
if( !img_1.data || !img_2.data )
{ std::cout<< " --(!) Error reading images " << std::endl; return -1; }
//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 400;
Ptr detector = SURF::create( minHessian );
std::vector keypoints_1, keypoints_2;
detector->detect( img_1, keypoints_1 );
detector->detect( img_2, keypoints_2 );
//-- Draw keypoints
Mat img_keypoints_1; Mat img_keypoints_2;
drawKeypoints( img_1, keypoints_1, img_keypoints_1, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
drawKeypoints( img_2, keypoints_2, img_keypoints_2, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
//-- Show detected (drawn) keypoints
imshow("Keypoints 1", img_keypoints_1 );
imshow("Keypoints 2", img_keypoints_2 );
waitKey(0);
return 0;
}
/* @function readme: print usage. BUGFIX: the argument placeholders were
 * stripped by the extraction; restored from the official tutorial. */
void readme()
{ std::cout << " Usage: ./SURF_detector <img1> <img2>" << std::endl; }