% Load the images as an image set
buildingDir = fullfile(toolboxdir('vision'), 'visiondata', 'building');
buildingScene = imageSet(buildingDir);
% Display all of the images to be stitched
montage(buildingScene.ImageLocation)
% Read the first image from the image set
I = read(buildingScene, 1);
% Convert the image to grayscale, then detect feature points in I(1) with the SURF algorithm.
grayImage = rgb2gray(I);
points = detectSURFFeatures(grayImage);
[features, points] = extractFeatures(grayImage, points);
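% (Optional sanity check, a sketch: overlay the 50 strongest detections)
% figure; imshow(grayImage); hold on;
% plot(points.selectStrongest(50)); hold off;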
% Initialize all the transforms to the identity matrix (indexing the last element preallocates the whole array).
tforms(buildingScene.Count) = projective2d(eye(3));
% Iterate over remaining image pairs
for n = 2:buildingScene.Count
% Store the points and features from I(n-1).
pointsPrevious = points;
featuresPrevious = features;
% Read I(n).
I = read(buildingScene, n);
% Detect and extract SURF features for I(n).
grayImage = rgb2gray(I);
points = detectSURFFeatures(grayImage);
[features, points] = extractFeatures(grayImage, points);
% Match the features between I(n) and I(n-1)
indexPairs = matchFeatures(features, featuresPrevious, 'Unique', true);
matchedPoints = points(indexPairs(:,1), :);
matchedPointsPrev = pointsPrevious(indexPairs(:,2), :);
% Estimate the geometric transformation with the MSAC algorithm.
tforms(n) = estimateGeometricTransform(matchedPoints, matchedPointsPrev,...
'projective', 'Confidence', 99.9, 'MaxNumTrials', 2000);
% Compute T(1) * ... * T(n-1) * T(n).
tforms(n).T = tforms(n-1).T * tforms(n).T;
end
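% At this point each tforms(n) maps image n into the coordinate frame of the
% first image, because every pairwise estimate has been chained with all of
% the earlier ones.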
imageSize = size(I); % all of the images are assumed to be the same size
% Compute the output spatial limits for each transform.
for i = 1:numel(tforms)
[xlim(i,:), ylim(i,:)] = outputLimits(tforms(i), [1 imageSize(2)], [1 imageSize(1)]);
end
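% To minimize distortion, pick the image whose projection is closest to the
% center of the panorama (by the mean of its x-limits) as the reference.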
avgXLim = mean(xlim, 2);
[~, idx] = sort(avgXLim);
centerIdx = floor((numel(tforms)+1)/2);
centerImageIdx = idx(centerIdx);
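% Invert the center image's transform and apply it to all of the others, so
% that the center image maps to itself.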
Tinv = invert(tforms(centerImageIdx));
for i = 1:numel(tforms)
tforms(i).T = Tinv.T * tforms(i).T;
end
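% Recompute the output limits with the re-centered transforms.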
for i = 1:numel(tforms)
[xlim(i,:), ylim(i,:)] = outputLimits(tforms(i), [1 imageSize(2)], [1 imageSize(1)]);
end
% Find the minimum and maximum output spatial limits
xMin = min([1; xlim(:)]);
xMax = max([imageSize(2); xlim(:)]);
yMin = min([1; ylim(:)]);
yMax = max([imageSize(1); ylim(:)]);
% Width and height of the panorama
width = round(xMax - xMin);
height = round(yMax - yMin);
% Initialize an empty panorama
panorama = zeros([height width 3], 'like', I);
blender = vision.AlphaBlender('Operation', 'Binary mask', ...
'MaskSource', 'Input port');
% Create a 2-D spatial reference object defining the size of the panorama.
xLimits = [xMin xMax];
yLimits = [yMin yMax];
panoramaView = imref2d([height width], xLimits, yLimits);
% Create the panorama.
for i = 1:buildingScene.Count
I = read(buildingScene, i);
% Transform I into the panorama.
warpedImage = imwarp(I, tforms(i), 'OutputView', panoramaView);
% Overlay the warpedImage onto the panorama.
panorama = step(blender, panorama, warpedImage, warpedImage(:,:,1));
end
figure
imshow(panorama)
fprintf('================ Done ================\n');
function image_stitching_other
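% Alternative pipeline: stitch two images with hand-rolled Harris corners,
% adaptive non-maximal suppression, patch descriptors, and a RANSAC
% homography fit (see image_stitching_matlab and its local functions below).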
buildingDir = fullfile(toolboxdir('vision'), 'visiondata', 'building');
buildingScene = imageSet(buildingDir);
% Display all of the images to be stitched
montage(buildingScene.ImageLocation);
input_A = read(buildingScene, 1);
input_B = read(buildingScene, 2);
figure;imshow(input_A);
figure;imshow(input_B);
image_stitching_matlab(input_A,input_B);
fprintf('================ Done ================\n');
function [] = image_stitching_matlab(input_A, input_B)
% -------------------------------------------------------------------------
% 1. Load both images, convert to double and to grayscale.
% 2. Detect feature points in both images.
% 3. Extract fixed-size patches around every keypoint in both images, and
% form descriptors simply by "flattening" the pixel values in each patch to
% one-dimensional vectors.
% 4. Compute distances between every descriptor in one image and every descriptor in the other image.
% 5. Select putative matches based on the matrix of pairwise descriptor
% distances obtained above.
% 6. Run RANSAC to estimate (1) an affine transformation and (2) a
% homography mapping one image onto the other.
% 7. Warp one image onto the other using the estimated transformation.
% 8. Create a new image big enough to hold the panorama and composite the
% two images into it.
%
% Input:
% input_A - image to be warped
% input_B - reference image (left unwarped)
% Output:
% the combined mosaic is displayed (nothing is returned)
%
% Reference:
% [1] C.G. Harris and M.J. Stephens, A combined corner and edge detector, 1988.
% [2] Matthew Brown, Multi-Image Matching using Multi-Scale Oriented Patches.
%
% [email protected]
% -------------------------------------------------------------------------
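% The steps above map onto the local functions below: harris (step 2),
% ada_nonmax_suppression and getFeatureDescriptor (step 3), dist2 (step 4),
% the ratio test (step 5), ransacfithomography and getHomographyMatrix
% (step 6), and blend (steps 7-8).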
% READ IMAGE, GET SIZE INFORMATION
image_A = input_A;
image_B = input_B;
[height_wrap, width_wrap,~] = size(image_A);
[height_unwrap, width_unwrap,~] = size(image_B);
% CONVERT TO GRAY SCALE
gray_A = im2double(rgb2gray(image_A));
gray_B = im2double(rgb2gray(image_B));
% FIND HARRIS CORNERS IN BOTH IMAGES
[x_A, y_A, v_A] = harris(gray_A, 2, 0.0, 2);
[x_B, y_B, v_B] = harris(gray_B, 2, 0.0, 2);
% ADAPTIVE NON-MAXIMAL SUPPRESSION (ANMS)
ncorners = 500;
[x_A, y_A, ~] = ada_nonmax_suppression(x_A, y_A, v_A, ncorners);
[x_B, y_B, ~] = ada_nonmax_suppression(x_B, y_B, v_B, ncorners);
% EXTRACT FEATURE DESCRIPTORS
sigma = 7;
[des_A] = getFeatureDescriptor(gray_A, x_A, y_A, sigma);
[des_B] = getFeatureDescriptor(gray_B, x_B, y_B, sigma);
% IMPLEMENT FEATURE MATCHING
dist = dist2(des_A,des_B);
[ord_dist, index] = sort(dist, 2);
% THE RATIO OF THE FIRST TO THE SECOND SMALLEST DISTANCE IS A BETTER
% CRITERION THAN THE DISTANCE ITSELF. A RATIO BELOW 0.5 GIVES AN ACCEPTABLE ERROR RATE.
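% Example (hypothetical distances): a nearest descriptor at squared distance
% 0.04 and a second-nearest at 0.25 gives a ratio of 0.16 < 0.5, so the
% match is kept; a ratio near 1 means an ambiguous match and is rejected.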
ratio = ord_dist(:,1)./ord_dist(:,2);
threshold = 0.5;
idx = find(ratio < threshold);
x_A = x_A(idx);
y_A = y_A(idx);
x_B = x_B(index(idx,1));
y_B = y_B(index(idx,1));
npoints = length(x_A);
% BUILD 3xN HOMOGENEOUS COORDINATES (x/y are swapped because harris returns
% row/column order while the homography works on image x/y)
matcher_A = [y_A, x_A, ones(npoints,1)]';
matcher_B = [y_B, x_B, ones(npoints,1)]';
% USE RANSAC TO COMPUTE A ROBUST HOMOGRAPHY MAPPING B-COORDINATES TO A-COORDINATES
[hh, ~] = ransacfithomography(matcher_B, matcher_A, npoints, 10);
% DETERMINE THE SIZE OF THE MOSAIC: MAP THE CORNERS OF IMAGE A INTO B'S
% FRAME WITH THE INVERSE HOMOGRAPHY AND TAKE THE UNION WITH B'S EXTENT
cA = hh \ [1 width_wrap width_wrap 1; 1 1 height_wrap height_wrap; 1 1 1 1];
cA = round(cA(1:2,:) ./ cA([3 3],:));
newX = min([1, cA(1,:)]);
newY = min([1, cA(2,:)]);
newW = max([width_unwrap, cA(1,:)]) - newX + 1;
newH = max([height_unwrap, cA(2,:)]) - newY + 1;
xB = 2 - newX; % column offset of image B inside the mosaic
yB = 2 - newY; % row offset of image B inside the mosaic
% INVERSE WARP: FOR EVERY MOSAIC PIXEL, SAMPLE IMAGE A AT hh*(x,y)
[X, Y] = meshgrid(1:width_wrap, 1:height_wrap);
[XX, YY] = meshgrid(newX:newX+newW-1, newY:newY+newH-1);
AA = hh * [XX(:)'; YY(:)'; ones(1, newH*newW)];
XX = reshape(AA(1,:)./AA(3,:), newH, newW);
YY = reshape(AA(2,:)./AA(3,:), newH, newW);
warped = zeros(newH, newW, 3);
for ch = 1:3
warped(:,:,ch) = interp2(X, Y, double(image_A(:,:,ch)), XX, YY, 'linear', 0);
end
% BLEND BY CROSS DISSOLVE AND DISPLAY THE MOSAIC
newImage = blend(warped, double(image_B), xB, yB);
figure; imshow(uint8(newImage));
function [xp, yp, value] = harris(input_image, sigma, thd, r)
% Harris corner detection
% Input:
% input_image - gray-scale input image
% sigma - standard deviation of the smoothing Gaussian
% thd - threshold on the corner response
% r - radius of the non-maximal-suppression window
% Output:
% xp, yp - row and column coordinates of the corner points
% value - corner strength at each corner
% IMAGE GRADIENTS
dx = [-1 0 1; -1 0 1; -1 0 1];
dy = dx';
Ix = conv2(input_image, dx, 'same');
Iy = conv2(input_image, dy, 'same');
% GAUSSIAN-WEIGHTED SECOND-MOMENT MATRIX ENTRIES
g = fspecial('gaussian', max(1, fix(6*sigma)), sigma);
Ix2 = conv2(Ix.^2, g, 'same');
Iy2 = conv2(Iy.^2, g, 'same');
Ixy = conv2(Ix.*Iy, g, 'same');
% HARMONIC-MEAN CORNER RESPONSE (see Brown et al. [2])
R = (Ix2.*Iy2 - Ixy.^2) ./ (Ix2 + Iy2 + eps);
% NON-MAXIMAL SUPPRESSION AND THRESHOLDING
sze = 2*r + 1;
MX = ordfilt2(R, sze^2, ones(sze));
R = R .* ((R == MX) & (R > thd));
% DISCARD CORNERS TOO CLOSE TO THE BORDER FOR THE 40x40 DESCRIPTOR WINDOW
R([1:20, end-18:end], :) = 0;
R(:, [1:20, end-18:end]) = 0;
% RETURN X AND Y COORDINATES
[xp,yp,value] = find(R);
function [newx, newy, newvalue] = ada_nonmax_suppression(xp, yp, value, n)
% Adaptive non-maximal suppression
% For each Harris corner point, the minimum suppression radius is the
% minimum distance from that point to a different point with a higher
% corner strength.
% Input:
% xp, yp - coordinates of the Harris corner points
% value - corner strength
% n - number of interest points to keep
% Output:
% newx, newy - coordinates of the corners that survive the suppression
% newvalue - corner strength of the surviving corners
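% Example (hypothetical corners; keep the 2 most isolated strong points):
% [nx, ny] = ada_nonmax_suppression([10;12;90], [10;12;90], [5;9;7], 2)
% returns the points at (12,12) and (90,90); the weak corner at (10,10) is
% suppressed because a stronger corner lies only ~2.8 pixels away.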
if(length(xp) < n)
newx = xp;
newy = yp;
newvalue = value;
return;
end
radius = zeros(length(xp),1); % suppression radius for every corner
c = .9; % a neighbour suppresses point i only if c*strength(neighbour) >= strength(i)
maxvalue = max(value)*c; % points stronger than this get an effectively infinite radius
for i=1:length(xp)
if(value(i)>maxvalue)
radius(i) = 99999999;
continue;
else
dist = (xp-xp(i)).^2 + (yp-yp(i)).^2;
dist((value*c) < value(i)) = [];
radius(i) = sqrt(min(dist));
end
end
[~, index] = sort(radius,'descend');
index = index(1:n);
newx = xp(index);
newy = yp(index);
newvalue = value(index);
function n2 = dist2(x, c)
% DIST2 Calculates squared distance between two sets of points.
% Adapted from Netlab neural network software:
% http://www.ncrg.aston.ac.uk/netlab/index.php
%
% Description
% D = DIST2(X, C) takes two matrices of vectors and calculates the
% squared Euclidean distance between them. Both matrices must be of
% the same column dimension. If X has M rows and N columns, and C has
% L rows and N columns, then the result has M rows and L columns. The
% I, Jth entry is the squared distance from the Ith row of X to the
% Jth row of C.
%
%
% Copyright (c) Ian T Nabney (1996-2001)
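%
% Example:
% dist2([0 0; 1 0], [0 1]) returns [1; 2], the squared distances from
% (0,0) and (1,0) to (0,1).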
[ndata, dimx] = size(x);
[ncentres, dimc] = size(c);
if dimx ~= dimc
error('Data dimension does not match dimension of centres')
end
n2 = (ones(ncentres, 1) * sum((x.^2)', 1))' + ...
ones(ndata, 1) * sum((c.^2)',1) - ...
2.*(x*(c'));
% Rounding errors occasionally cause negative entries in n2
if any(any(n2<0))
n2(n2<0) = 0;
end
function [descriptors] = getFeatureDescriptor(input_image, xp, yp, sigma)
% Extract non-rotation invariant feature descriptors
% Input:
% input_image - input gray-scale image
% xp - row coordinates of potential feature points
% yp - column coordinates of potential feature points
% sigma - standard deviation of the pre-blur Gaussian
% Output:
% descriptors - npoints x 64 array of descriptors
% FIRST BLUR WITH GAUSSIAN KERNEL
g = fspecial('gaussian', 5, sigma);
blurred_image = imfilter(input_image, g, 'replicate','same');
% THEN TAKE A 40x40 PIXEL WINDOW AND DOWNSAMPLE TO 8x8 PATCH
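% (A 40x40 window downsampled by a factor of 0.2 yields an 8x8 patch, i.e. a
% 64-dimensional descriptor, normalized below to zero mean and unit variance.)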
npoints = length(xp);
descriptors = zeros(npoints,64);
for i = 1:npoints
patch = blurred_image(xp(i)-20:xp(i)+19, yp(i)-20:yp(i)+19);
patch = imresize(patch, .2);
descriptors(i,:) = reshape((patch - mean2(patch))./std2(patch), 1, 64);
end
function [hh] = getHomographyMatrix(point_ref, point_src, npoints)
% Use corresponding points in both images to recover the parameters of the
% homography
% Input:
% point_ref - points in the reference image; rows 1 and 2 are x and y
% point_src - points in the source image; rows 1 and 2 are x and y
% npoints - number of point correspondences
% Output:
% hh - 3x3 matrix of the transformation
% EXTRACT THE X AND Y COORDINATES OF THE POINT CORRESPONDENCES
x_ref = point_ref(1,:)';
y_ref = point_ref(2,:)';
x_src = point_src(1,:)';
y_src = point_src(2,:)';
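% Each correspondence (x_ref, y_ref) -> (x_src, y_src) contributes two
% linear equations in the unknowns h1..h8 (h9 is fixed to 1):
% x_src = h1*x_ref + h2*y_ref + h3 - h7*x_ref*x_src - h8*y_ref*x_src
% y_src = h4*x_ref + h5*y_ref + h6 - h7*x_ref*y_src - h8*y_ref*y_src
% Stacking 2*npoints such equations gives A*h = B, solved below in the
% least-squares sense by h = A\B.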
% COEFFICIENT MATRIX A (LEFT-HAND SIDE OF THE LINEAR SYSTEM A*h = B)
A = zeros(npoints*2,8);
A(1:2:end,1:3) = [x_ref, y_ref, ones(npoints,1)];
A(2:2:end,4:6) = [x_ref, y_ref, ones(npoints,1)];
A(1:2:end,7:8) = [-x_ref.*x_src, -y_ref.*x_src];
A(2:2:end,7:8) = [-x_ref.*y_src, -y_ref.*y_src];
% RIGHT-HAND SIDE B OF THE LINEAR SYSTEM
B = [x_src, y_src];
B = reshape(B',npoints*2,1);
% SOLVE LINEAR EQUATIONS
h = A\B;
hh = [h(1),h(2),h(3);h(4),h(5),h(6);h(7),h(8),1];
function [hh, inliers] = ransacfithomography(ref_P, dst_P, npoints, threshold)
% RANSAC homography fitting
% Input:
% ref_P - 3xN homogeneous match points from the reference image (third row is 1)
% dst_P - 3xN homogeneous match points from the destination image (third row is 1)
% npoints - number of point correspondences
% threshold - squared-distance threshold for counting a point as an inlier
%
% 1. Randomly select minimal subset of points
% 2. Hypothesize a model
% 3. Compute the error function
% 4. Select points consistent with model
% 5. Repeat hypothesize-and-verify loop
%
% Yihua Zhao 02-01-2014
% [email protected]
ninlier = 0;
fpoints = 8; % number of correspondences sampled for each hypothesis
for i=1:2000
rd = randi([1 npoints],1,fpoints);
pR = ref_P(:,rd);
pD = dst_P(:,rd);
h = getHomographyMatrix(pR,pD,fpoints);
rref_P = h*ref_P;
rref_P(1,:) = rref_P(1,:)./rref_P(3,:);
rref_P(2,:) = rref_P(2,:)./rref_P(3,:);
error = (rref_P(1,:) - dst_P(1,:)).^2 + (rref_P(2,:) - dst_P(2,:)).^2;
n = nnz(error < threshold);
if(n >= npoints*.95)
hh = h;
inliers = find(error < threshold);
break;
elseif(n > ninlier)
ninlier = n;
hh = h;
inliers = find(error < threshold);
end
end
function [newImage] = blend(warped_image, unwarped_image, x, y)
% Cross-dissolve blending of the warped image and the reference image
% Input:
% warped_image - mosaic-sized image containing the warped image A
% unwarped_image - reference image B
% x, y - column and row offsets of image B inside the mosaic
% Output:
% newImage - blended mosaic
% GET MASKS OF THE NON-ZERO REGIONS OF BOTH IMAGES
maskA = (warped_image(:,:,1)>0 | warped_image(:,:,2)>0 | warped_image(:,:,3)>0);
newImage = zeros(size(warped_image));
newImage(y:y+size(unwarped_image,1)-1, x: x+size(unwarped_image,2)-1,:) = unwarped_image;
mask = (newImage(:,:,1)>0 | newImage(:,:,2)>0 | newImage(:,:,3)>0);
mask = and(maskA, mask);
% GET THE OVERLAID REGION
[~,col] = find(mask);
left = min(col);
right = max(col);
mask = ones(size(mask));
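% CHOOSE THE RAMP DIRECTION: x < 2 means image B sits at the left edge of
% the mosaic (the warped image extends to the right), so the warped image
% fades in from 0 to 1 across the overlap; otherwise it fades out.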
if( x<2)
mask(:,left:right) = repmat(linspace(0,1,right-left+1),size(mask,1),1);
else
mask(:,left:right) = repmat(linspace(1,0,right-left+1),size(mask,1),1);
end
% BLEND EACH CHANNEL
warped_image(:,:,1) = warped_image(:,:,1).*mask;
warped_image(:,:,2) = warped_image(:,:,2).*mask;
warped_image(:,:,3) = warped_image(:,:,3).*mask;
% REVERSE THE ALPHA VALUE
if( x<2)
mask(:,left:right) = repmat(linspace(1,0,right-left+1),size(mask,1),1);
else
mask(:,left:right) = repmat(linspace(0,1,right-left+1),size(mask,1),1);
end
newImage(:,:,1) = newImage(:,:,1).*mask;
newImage(:,:,2) = newImage(:,:,2).*mask;
newImage(:,:,3) = newImage(:,:,3).*mask;
newImage(:,:,1) = warped_image(:,:,1) + newImage(:,:,1);
newImage(:,:,2) = warped_image(:,:,2) + newImage(:,:,2);
newImage(:,:,3) = warped_image(:,:,3) + newImage(:,:,3);