Stitching an Arbitrary Number of Images (Good Results) — Computer Vision Course Project 1, Final Version
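The program below stitches any number of input images into a single panorama using OpenCV's detail-level stitching API. The pipeline it implements is: SURF feature detection, pairwise feature matching, selection of the largest subset of images that belong to the same panorama, homography-based camera estimation, bundle adjustment, horizontal wave correction, spherical warping, exposure compensation, graph-cut seam finding, and multi-band blending. The code appears to be written against the OpenCV 2.x stitching API.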

#include <iostream>
#include <string>
#include <vector>
#include "opencv2/opencv_modules.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/stitching/detail/autocalib.hpp"
#include "opencv2/stitching/detail/blenders.hpp"
#include "opencv2/stitching/detail/camera.hpp"
#include "opencv2/stitching/detail/exposure_compensate.hpp"
#include "opencv2/stitching/detail/matchers.hpp"
#include "opencv2/stitching/detail/motion_estimators.hpp"
#include "opencv2/stitching/detail/seam_finders.hpp"
#include "opencv2/stitching/detail/util.hpp"
#include "opencv2/stitching/detail/warpers.hpp"
#include "opencv2/stitching/warpers.hpp"
#include <ctime>
using namespace std;
using namespace cv;
using namespace cv::detail;
// Parameters
vector<string> img_names;
bool try_gpu = false;
double work_megapix = 0.6;   // matching resolution: images are scaled so their area is at most work_megapix*10^6 pixels

double seam_megapix = 0.1;   // resolution used for seam estimation (megapixels)
double compose_megapix = 0.6;// compositing resolution (megapixels)
float conf_thresh = 1.f;     // confidence threshold for two images belonging to the same panorama
WaveCorrectKind wave_correct = detail::WAVE_CORRECT_HORIZ; // wave correction, horizontal
int expos_comp_type = ExposureCompensator::GAIN_BLOCKS;    // exposure compensation method, default gain_blocks
float match_conf = 0.3f;     // feature matching confidence (ratio of nearest to second-nearest match distance); the SURF default is 0.65
int blend_type = Blender::MULTI_BAND;  // blending method, default multi-band
float blend_strength = 5;    // blending strength, 0 - 100, default 5
string result_name = "result.jpg";     // output file name

int main()
{
	clock_t start, finish;
	double totaltime;
	start = clock();

	// Hard-coded list of input images
	int argc = 10;
	const char* argv[] = { "1.jpg", "2.jpg", "3.jpg", "4.jpg", "5.jpg",
		"6.jpg", "7.jpg", "8.jpg", "9.jpg", "10.jpg" };

	for (int i = 0; i < argc; ++i)
		img_names.push_back(argv[i]);
	int num_images = static_cast<int>(img_names.size());
	double work_scale = 1, seam_scale = 1, compose_scale = 1;
	// Feature detection: scale each image to the working resolution,
	// then compute keypoints and descriptors for every image
	cout << "Finding features..." << endl;
	Ptr<FeaturesFinder> finder;
	finder = new SurfFeaturesFinder();  // use SURF feature detection

	Mat full_img1, full_img, img;
	vector<ImageFeatures> features(num_images);
	vector<Mat> images(num_images);
	vector<Size> full_img_sizes(num_images);
	double seam_work_aspect = 1;

	for (int i = 0; i < num_images; ++i)
	{
		full_img1 = imread(img_names[i]);
		resize(full_img1,full_img, Size(400,300));
		full_img_sizes[i] = full_img.size();

		// Compute work_scale so that the matching image area stays below work_megapix*10^6
		work_scale = min(1.0, sqrt(work_megapix * 1e6 / full_img.size().area()));
		resize(full_img, img, Size(), work_scale, work_scale);

		// Compute seam_scale so that the seam-estimation image area stays below seam_megapix*10^6
		seam_scale = min(1.0, sqrt(seam_megapix * 1e6 / full_img.size().area()));
		seam_work_aspect = seam_scale / work_scale;

		// Detect keypoints and compute descriptors, tagging each feature set with its image index
		(*finder)(img, features[i]);
		features[i].img_idx = i;
		cout << "Features in image #" << i + 1 << ": " << features[i].keypoints.size() << endl;

		// Keep a copy at seam resolution for later warping and seam finding
		resize(full_img, img, Size(), seam_scale, seam_scale);
		images[i] = img.clone();
	}

	finder->collectGarbage();
	full_img.release();
	img.release();

	// Pairwise matching between all images
	cout << "Pairwise matching" << endl;
	vector<MatchesInfo> pairwise_matches;
	BestOf2NearestMatcher matcher(try_gpu, match_conf);  // nearest / second-nearest ratio matching
	matcher(features, pairwise_matches);                 // match every pair of images
	matcher.collectGarbage();
	// Keep only the matches whose confidence is above the threshold,
	// i.e. the images that are certain to belong to the same panorama
	vector<int> indices = leaveBiggestComponent(features, pairwise_matches, conf_thresh);
	vector<Mat> img_subset;
	vector<string> img_names_subset;
	vector<Size> full_img_sizes_subset;
	for (size_t i = 0; i < indices.size(); ++i)
	{
		img_names_subset.push_back(img_names[indices[i]]);
		img_subset.push_back(images[indices[i]]);
		full_img_sizes_subset.push_back(full_img_sizes[indices[i]]);
	}

	images = img_subset;
	img_names = img_names_subset;
	full_img_sizes = full_img_sizes_subset;

	// Check that enough images remain
	num_images = static_cast<int>(img_names.size());
	if (num_images < 2)
	{
		cout << "Need more images" << endl;
		return -1;
	}

	// Estimate initial camera parameters from the pairwise homographies
	HomographyBasedEstimator estimator;
	vector<CameraParams> cameras;  // camera parameters
	estimator(features, pairwise_matches, cameras);

	for (size_t i = 0; i < cameras.size(); ++i)
	{
		Mat R;
		cameras[i].R.convertTo(R, CV_32F);
		cameras[i].R = R;
		cout<<"Initial intrinsics #" << indices[i]+1 << ":\n" << cameras[i].K()< adjuster;//光束调整器参数
	adjuster = new detail::BundleAdjusterRay();//使用Bundle Adjustment(光束法平差)方法对所有图片进行相机参数校正

	adjuster->setConfThresh(conf_thresh);  // set the confidence threshold
	Mat_<uchar> refine_mask = Mat::zeros(3, 3, CV_8U);
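	// The mask marks which intrinsic entries are refined: fx (0,0), skew (0,1), ppx (0,2), aspect/fy (1,1), ppy (1,2)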
	refine_mask(0,0) = 1;
	refine_mask(0,1) = 1;
	refine_mask(0,2) = 1;
	refine_mask(1,1) = 1;
	refine_mask(1,2) = 1;
	adjuster->setRefinementMask(refine_mask);
	(*adjuster)(features, pairwise_matches, cameras);  // run bundle adjustment

	
	// Use the median focal length over all cameras as the warping scale
	vector<double> focals;
	for (size_t i = 0; i < cameras.size(); ++i)
	{
		cout << "Camera #" << indices[i] + 1 << ":\n" << cameras[i].K() << endl;
		focals.push_back(cameras[i].focal);
	}

	sort(focals.begin(), focals.end());
	float warped_image_scale;
	if (focals.size() % 2 == 1)
		warped_image_scale = static_cast<float>(focals[focals.size() / 2]);
	else
		warped_image_scale = static_cast<float>(focals[focals.size() / 2 - 1] + focals[focals.size() / 2]) * 0.5f;

	// Wave correction (straighten the panorama horizontally)
	vector<Mat> rmats;
	for (size_t i = 0; i < cameras.size(); ++i)
		rmats.push_back(cameras[i].R);
	waveCorrect(rmats, wave_correct);
	for (size_t i = 0; i < cameras.size(); ++i)
		cameras[i].R = rmats[i];


	cout<<"Warping images ... "< corners(num_images);//统一坐标后的顶点
	vector masks_warped(num_images);
	vector images_warped(num_images);
	vector sizes(num_images);
	vector masks(num_images);//融合掩码

	// Prepare the image masks (all pixels initially valid)
	for (int i = 0; i < num_images; ++i)
	{
		masks[i].create(images[i].size(), CV_8U);
		masks[i].setTo(Scalar::all(255));
	}

	// Warp the images and the blending masks

	Ptr<WarperCreator> warper_creator;
	warper_creator = new cv::SphericalWarper();

	// The warper scale is the median focal length, scaled down to the seam-estimation resolution
	Ptr<RotationWarper> warper = warper_creator->create(static_cast<float>(warped_image_scale * seam_work_aspect));

	for (int i = 0; i < num_images; ++i)
	{
		Mat_<float> K;
		cameras[i].K().convertTo(K, CV_32F);
		float swa = (float)seam_work_aspect;
		K(0,0) *= swa; K(0,2) *= swa;
		K(1,1) *= swa; K(1,2) *= swa;

		corners[i] = warper->warp(images[i], K, cameras[i].R, INTER_LINEAR, BORDER_REFLECT, images_warped[i]);  // warp the image and record its top-left corner
		sizes[i] = images_warped[i].size();

		warper->warp(masks[i], K, cameras[i].R, INTER_NEAREST, BORDER_CONSTANT, masks_warped[i]);  // warp the mask as well
	}

	vector<Mat> images_warped_f(num_images);  // float copies for seam estimation
	for (int i = 0; i < num_images; ++i)
		images_warped[i].convertTo(images_warped_f[i], CV_32F);


	Ptr<ExposureCompensator> compensator = ExposureCompensator::createDefault(expos_comp_type);  // exposure compensator (default method: gain_blocks)
	compensator->feed(corners, images_warped, masks_warped);

	// Find seams between the warped images with a graph cut on color cost
	Ptr<SeamFinder> seam_finder;
	seam_finder = new detail::GraphCutSeamFinder(GraphCutSeamFinderBase::COST_COLOR);
	seam_finder->find(images_warped_f, corners, masks_warped);

	// Release memory that is no longer needed
	images.clear();
	images_warped.clear();
	images_warped_f.clear();
	masks.clear();

	// Compositing and blending
	cout << "Compositing..." << endl;
	Mat img_warped, img_warped_s;
	Mat dilated_mask, seam_mask, mask, mask_warped;
	Ptr<Blender> blender;
	double compose_work_aspect = 1;

	for (int img_idx = 0; img_idx < num_images; ++img_idx)
	{
		cout << "Compositing image #" << indices[img_idx] + 1 << endl;
		// Re-read the image at the compositing resolution and update the warper scale
		full_img1 = imread(img_names[img_idx]);
		resize(full_img1, full_img, Size(400, 300));
		if (compose_megapix > 0)
			compose_scale = min(1.0, sqrt(compose_megapix * 1e6 / full_img.size().area()));
		compose_work_aspect = compose_scale / work_scale;
		warped_image_scale *= static_cast<float>(compose_work_aspect);
		warper = warper_creator->create(warped_image_scale);

		// Update corners and sizes
		for (int i = 0; i < num_images; ++i)
		{
			// Scale the camera parameters to the compositing resolution
			cameras[i].focal *= compose_work_aspect;
			cameras[i].ppx *= compose_work_aspect;
			cameras[i].ppy *= compose_work_aspect;

			// Recompute the warped ROI for this image
			Size sz = full_img_sizes[i];
			if (std::abs(compose_scale - 1) > 1e-1)
			{
				sz.width = cvRound(full_img_sizes[i].width * compose_scale);
				sz.height = cvRound(full_img_sizes[i].height * compose_scale);
			}

			Mat K;
			cameras[i].K().convertTo(K, CV_32F);
			Rect roi = warper->warpRoi(sz, K, cameras[i].R);
			corners[i] = roi.tl();
			sizes[i] = roi.size();
		}
	
		if (abs(compose_scale - 1) > 1e-1)
			resize(full_img, img, Size(), compose_scale, compose_scale);
		else
			img = full_img;
		full_img.release();
		Size img_size = img.size();
	
		Mat K;
		cameras[img_idx].K().convertTo(K, CV_32F);
		// Warp the current image
		warper->warp(img, K, cameras[img_idx].R, INTER_LINEAR, BORDER_REFLECT, img_warped);
		// Warp the current image mask
		mask.create(img_size, CV_8U);
		mask.setTo(Scalar::all(255));
		warper->warp(mask, K, cameras[img_idx].R, INTER_NEAREST, BORDER_CONSTANT, mask_warped);

		// Apply exposure compensation
		compensator->apply(img_idx, corners[img_idx], img_warped, mask_warped);

		img_warped.convertTo(img_warped_s, CV_16S);  // the blender expects 16-bit signed (CV_16S) images
		img_warped.release();
		img.release();
		mask.release();

		// Dilate the seam mask and intersect it with the warped mask so the blend region extends slightly past the seam
		dilate(masks_warped[img_idx], dilated_mask, Mat());
		resize(dilated_mask, seam_mask, mask_warped.size());
		mask_warped = seam_mask & mask_warped;
		// Initialize the blender on the first iteration
		if (blender.empty())
		{
			blender = Blender::createDefault(blend_type, try_gpu);
			Size dst_sz = resultRoi(corners, sizes).size();
			float blend_width = sqrt(static_cast<float>(dst_sz.area())) * blend_strength / 100.f;
			if (blend_width < 1.f)
				blender = Blender::createDefault(Blender::NO, try_gpu);
			else
			{
				MultiBandBlender* mb = dynamic_cast<MultiBandBlender*>(static_cast<Blender*>(blender));
				mb->setNumBands(static_cast<int>(ceil(log(blend_width) / log(2.)) - 1.));
				cout << "Multi-band blender, number of bands: " << mb->numBands() << endl;
			}
			blender->prepare(corners, sizes);
		}

		// Feed the current warped image into the blender
		blender->feed(img_warped_s, mask_warped, corners[img_idx]);
	}

	Mat result, result_mask;
	blender->blend(result, result_mask);

	imwrite(result_name, result);

	finish = clock();
	totaltime = (double)(finish - start) / CLOCKS_PER_SEC;
	cout << "\nTotal running time of this program: " << totaltime << " seconds" << endl;

	return 0;
}
