OpenCV 3: seamless image stitching and panorama generation with SIFT and SURF features

/*
#include <iostream>
#include <algorithm>
#include "opencv2/core.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/xfeatures2d.hpp"


using namespace cv;
using namespace std;
using namespace cv::xfeatures2d;

int main()
{
    Mat a = imread("21.jpg");    //read the first input image (loaded in color by default)
    Mat b = imread("22.jpg");

    Ptr<SURF> surf;      //created via a factory method, unlike the OpenCV 2.x API
    surf = SURF::create(800);

    BFMatcher matcher;
    Mat c, d;
    vector<KeyPoint> key1, key2;
    vector<DMatch> matches;

    surf->detectAndCompute(a, Mat(), key1, c);
    surf->detectAndCompute(b, Mat(), key2, d);

    matcher.match(c, d, matches);       //match the descriptors

    sort(matches.begin(), matches.end());  //sort by distance so the best matches come first
    vector< DMatch > good_matches;
    int ptsPairs = std::min(50, (int)(matches.size() * 0.15));
    cout << ptsPairs << endl;
    for (int i = 0; i < ptsPairs; i++)
    {
        good_matches.push_back(matches[i]);
    }

    Mat outimg;
    drawMatches(a, key1, b, key2, good_matches, outimg, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);  //draw the filtered matches
    imshow("out", outimg);
    waitKey(0);

    std::vector<Point2f> obj;
    std::vector<Point2f> scene;

    for (size_t i = 0; i < good_matches.size(); i++)
    {
        obj.push_back(key1[good_matches[i].queryIdx].pt);
        scene.push_back(key2[good_matches[i].trainIdx].pt);
    }

    std::vector<Point2f> obj_corners(4);
    obj_corners[0] = Point(0, 0);
    obj_corners[1] = Point(a.cols, 0);
    obj_corners[2] = Point(a.cols, a.rows);
    obj_corners[3] = Point(0, a.rows);
    std::vector<Point2f> scene_corners(4);

    Mat H = findHomography(obj, scene, RANSAC);      //estimate the homography that maps image a onto image b
    perspectiveTransform(obj_corners, scene_corners, H);

    line(outimg,scene_corners[0] + Point2f((float)a.cols, 0), scene_corners[1] + Point2f((float)a.cols, 0),Scalar(0, 255, 0), 2, LINE_AA);       //draw the projected border of image a on image b's half of the match image
    line(outimg,scene_corners[1] + Point2f((float)a.cols, 0), scene_corners[2] + Point2f((float)a.cols, 0),Scalar(0, 255, 0), 2, LINE_AA);
    line(outimg,scene_corners[2] + Point2f((float)a.cols, 0), scene_corners[3] + Point2f((float)a.cols, 0),Scalar(0, 255, 0), 2, LINE_AA);
    line(outimg,scene_corners[3] + Point2f((float)a.cols, 0), scene_corners[0] + Point2f((float)a.cols, 0),Scalar(0, 255, 0), 2, LINE_AA);
    imshow("aaaa",outimg);
    waitKey(0);
}
*/
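Both listings in this post use SURF, even though the title also mentions SIFT. In OpenCV 3 the SIFT implementation lives in the same xfeatures2d contrib module, so the detector can be swapped behind the common Feature2D interface without touching the rest of the pipeline (detectAndCompute, FLANN matching, findHomography). A minimal sketch; the helper name makeDetector and the parameter values are illustrative, not taken from the original code:

#include "opencv2/features2d.hpp"
#include "opencv2/xfeatures2d.hpp"

// Hypothetical helper: pick SIFT or SURF behind the common Feature2D interface.
// Both produce float descriptors, so the FLANN-based matching used below works
// unchanged for either choice.
cv::Ptr<cv::Feature2D> makeDetector(bool useSift)
{
    if (useSift)
        return cv::xfeatures2d::SIFT::create();     // SIFT (non-free, contrib module)
    return cv::xfeatures2d::SURF::create(800);      // SURF with Hessian threshold 800, as above
}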

#include <iostream>
#include "opencv2/core.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/xfeatures2d.hpp"
using namespace std;
using namespace cv;
using namespace cv::xfeatures2d;
typedef struct
{
    Point2f left_top;
    Point2f left_bottom;
    Point2f right_top;
    Point2f right_bottom;
}four_corners_t;

four_corners_t corners;

//Blend the two images across the overlap region so the seam looks natural
void OptimizeSeam(Mat& img1, Mat& trans, Mat& dst)
{
    int start = MIN(corners.left_top.x, corners.left_bottom.x);//left boundary of the overlap region, i.e. where blending starts

    double processWidth = img1.cols - start;//width of the overlap region
    int rows = dst.rows;
    int cols = img1.cols; //number of columns; the 3 channels are handled via the j*3 indexing below
    double alpha = 1;//weight of the img1 pixel
    for (int i = 0; i < rows; i++)
    {
        uchar* p = img1.ptr(i);  //pointer to the first byte of row i
        uchar* t = trans.ptr(i);
        uchar* d = dst.ptr(i);
        for (int j = start; j < cols; j++)
        {
            //if the pixel in trans is black (no warped data there), keep img1's pixel entirely
            if (t[j * 3] == 0 && t[j * 3 + 1] == 0 && t[j * 3 + 2] == 0)
            {
                alpha = 1;
            }
            else
            {
                //img1's weight falls off linearly with the distance from the left edge of the overlap
                //(so trans takes over towards img1's right edge); this simple linear blend hides the seam well
                alpha = (processWidth - (j - start)) / processWidth;
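                // e.g. alpha == 1 at j == start (pure img1) and falls linearly
                // towards 0 at j == img1.cols - 1 (almost pure trans)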
            }

            d[j * 3] = p[j * 3] * alpha + t[j * 3] * (1 - alpha);
            d[j * 3 + 1] = p[j * 3 + 1] * alpha + t[j * 3 + 1] * (1 - alpha);
            d[j * 3 + 2] = p[j * 3 + 2] * alpha + t[j * 3 + 2] * (1 - alpha);

        }
    }

}
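// OptimizeSeam expects the global `corners` to already hold the projected corners
// of the second image, `trans` to be that image warped into img1's coordinate
// frame, and `dst` to be a pre-allocated canvas (same row count as img1) that
// already contains both images; main() below does not currently set any of this
// up -- see the sketch after the listing for one way to do it.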



int main()
{
    Mat srcImage1 = imread("21.jpg", 1);
    Mat srcImage2 = imread("22.jpg",1);
    if (!srcImage1.data || !srcImage2.data)
    {
        cout << "读取图片出错" << endl;
        return false;
    }

    imshow("原始图1",srcImage1);
    imshow("原始图2", srcImage2);

    int minHessian = 100;
    Ptr<SURF> detector = SURF::create(minHessian);  //SURF detector (SurfFeatureDetector in the 2.x API)

    vector<KeyPoint> key_points_1, key_points_2;

    Mat dstImage1, dstImage2;
    detector->detectAndCompute(srcImage1,Mat(), key_points_1,dstImage1);
    detector->detectAndCompute(srcImage2,Mat(), key_points_2,dstImage2);//可以分成detect和compute

    Mat img_keypoints_1, img_keypoints_2;
   // drawKeypoints(srcImage1,key_points_1,img_keypoints_1,Scalar::all(-1),DrawMatchesFlags::DEFAULT);
   // drawKeypoints(srcImage2, key_points_2, img_keypoints_2, Scalar::all(-1),DrawMatchesFlags::DEFAULT);

    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("FlannBased");
    vector<DMatch> mach;

    matcher->match(dstImage1,dstImage2,mach);
    double Max_dist = 0;
    double Min_dist = 100;
    for (int i = 0; i < dstImage1.rows; i++)
    {
        double dist = mach[i].distance;
        if (dist < Min_dist)Min_dist = dist;
        if (dist > Max_dist)Max_dist = dist;
    }
    cout << "最短距离" << Min_dist << endl;
    cout << "最长距离" << Max_dist << endl;

    vector<DMatch> goodmaches;
    for (int i = 0; i < dstImage1.rows; i++)
    {
        if (mach[i].distance < 2 * Min_dist)
            goodmaches.push_back(mach[i]);
    }
    Mat img_maches;
    drawMatches(srcImage1,key_points_1,srcImage2,key_points_2,goodmaches,img_maches);

    for (size_t i = 0; i < goodmaches.size(); i++)
    {
        cout << "good match: " << goodmaches[i].queryIdx << " -- " << goodmaches[i].trainIdx << endl;
    }
   // imshow("效果图1", img_keypoints_1);
    //imshow("效果图2", img_keypoints_2);
    imshow("匹配效果",img_maches);
    waitKey(0);
    // As written, OptimizeSeam is handed the match visualization and an empty
    // output Mat, and the global `corners` is never filled in, so nothing is
    // blended here; the missing warp-and-blend step is sketched after the listing.
    Mat mat1;
    OptimizeSeam(srcImage1, img_maches, mat1);
    if (!mat1.empty())
    {
        imshow("Blended result", mat1);
        waitKey(0);
    }
    return 0;
}
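As noted in the listing, main() stops after visualizing the matches. To obtain the panorama promised by the title, the homography still has to be estimated from goodmaches and used to warp srcImage2 onto srcImage1's plane, and the global corners has to be filled in before OptimizeSeam can blend the overlap. The fragment below is a minimal sketch of that step, not code from the original post: it assumes the second image lies to the right of the first, keeps image 1's height for the canvas, uses illustrative names (pts1, pts2, homo, warped, pano), and relies on the calib3d and imgproc headers included above. It would go inside main(), right after the match visualization.

    // ---- Sketch: the missing warp-and-blend step ----
    vector<Point2f> pts1, pts2;
    for (size_t i = 0; i < goodmaches.size(); i++)
    {
        pts1.push_back(key_points_1[goodmaches[i].queryIdx].pt);  // points in image 1
        pts2.push_back(key_points_2[goodmaches[i].trainIdx].pt);  // corresponding points in image 2
    }

    // Homography that maps image 2 into image 1's coordinate frame
    Mat homo = findHomography(pts2, pts1, RANSAC);

    // Project image 2's corners and fill the global `corners` read by OptimizeSeam
    vector<Point2f> src_corners(4), dst_corners(4);
    src_corners[0] = Point2f(0, 0);
    src_corners[1] = Point2f(0, (float)srcImage2.rows);
    src_corners[2] = Point2f((float)srcImage2.cols, 0);
    src_corners[3] = Point2f((float)srcImage2.cols, (float)srcImage2.rows);
    perspectiveTransform(src_corners, dst_corners, homo);
    corners.left_top     = dst_corners[0];
    corners.left_bottom  = dst_corners[1];
    corners.right_top    = dst_corners[2];
    corners.right_bottom = dst_corners[3];

    // Warp image 2 onto image 1's plane, then paste image 1 over the left part
    int panoWidth = cvRound(MAX(corners.right_top.x, corners.right_bottom.x));
    Mat warped;
    warpPerspective(srcImage2, warped, homo, Size(panoWidth, srcImage1.rows));

    Mat pano;
    warped.copyTo(pano);                                                  // right part: warped image 2
    srcImage1.copyTo(pano(Rect(0, 0, srcImage1.cols, srcImage1.rows)));   // left part: image 1

    OptimizeSeam(srcImage1, warped, pano);   // blend the overlap region
    imshow("Panorama", pano);
    waitKey(0);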

[Figure 1]

[Figure 2]
