之前连续写了一系列关于OpenCV在Unity平台的入门基础,这次增加难度,来一个特征提取和拼接的案例。由于OpenCVForUnity没有实现Stitcher的功能,转而使用OpenCVSharp。可以去github的官方项目里找最新的OpenCVSharp.dll文件,不过官方提供的是.Net4.6版本(没有尝试是否可以自行编译成.Net3.5)。那我们就先把Unity项目设置为4.6的框架。
OpenCVSharp 对 Mat 和 Texture2D 转换没有做专门的API,所以我们先把这个复杂的转换过程做成一个工具类。
using System.Collections;
using System.Collections.Generic;
using OpenCvSharp;
using UnityEngine;
public class Utils
{
    /// <summary>
    /// Converts an OpenCV <see cref="Mat"/> into a Unity <see cref="Texture2D"/>.
    /// </summary>
    /// <param name="mat">Source image; <c>ToBytes()</c> encodes it so <c>LoadImage</c> can decode it.</param>
    /// <returns>A new Texture2D holding the image data.</returns>
    public static Texture2D MatToTexture2D(Mat mat)
    {
        var texture = new Texture2D(mat.Width, mat.Height);
        texture.LoadImage(mat.ToBytes());
        texture.Apply();
        // Why Apply() after writing pixels: pixel changes go to a CPU-side copy,
        // not directly to video memory, so the GPU keeps reading the old texture.
        // Apply() uploads the modified data and points the GPU at the new copy.
        return texture;
    }
}
案例中我使用的是ORB特征提取,其他主流的算法还有SURF,SIFT,FAST、BRISK、FREAK等,当然数学层面上都已经通过OpenCV的函数实现了。写两个函数热热身,熟悉一下使用ORB进行特征点检测。
/// <summary>
/// Warm-up: detects ORB key points in a test image and logs the count.
/// </summary>
void Detect()
{
    // Mat wraps native memory — dispose it deterministically instead of
    // leaking it until the finalizer runs (the original never disposed it).
    using (var gray = new Mat(Application.streamingAssetsPath + "/Textures/p1.jpg", ImreadModes.GrayScale))
    using (var orb = ORB.Create(500))
    {
        KeyPoint[] keyPoints = orb.Detect(gray);
        Debug.Log($"KeyPoint has {keyPoints.Length} items.");
    }
}
/// <summary>
/// Warm-up: detects ORB key points and computes their descriptors, logging both counts.
/// </summary>
void DetectAndCompute()
{
    // Dispose every native Mat: the original leaked both `gray` and the
    // inline `new Mat()` mask passed to DetectAndCompute.
    using (var gray = new Mat(Application.streamingAssetsPath + "/Textures/p1.jpg", ImreadModes.GrayScale))
    using (var orb = ORB.Create(500))
    using (var mask = new Mat()) // empty mask = search the whole image
    using (var descriptor = new Mat())
    {
        KeyPoint[] keyPoints;
        orb.DetectAndCompute(gray, mask, out keyPoints, descriptor);
        Debug.Log($"keyPoints has {keyPoints.Length} items.");
        Debug.Log($"descriptor has {descriptor.Rows} items.");
    }
}
上述函数打印成功,即提取成功。导入2张测试图片,开始匹配工作。
/// <summary>
/// ORB feature extraction and brute-force matching between the two test
/// images; draws the matches into <c>dstMat</c> and displays the result
/// on <c>m_srcImage</c>.
/// </summary>
void OnOrb()
{
    using (Mat image01 = Cv2.ImRead(Application.streamingAssetsPath + "/Textures/p1.jpg"))
    using (Mat image02 = Cv2.ImRead(Application.streamingAssetsPath + "/Textures/p2.jpg"))
    using (Mat image1 = new Mat())
    using (Mat image2 = new Mat())
    using (ORB orb = ORB.Create(500))
    using (Mat descriptor1 = new Mat())
    using (Mat descriptor2 = new Mat())
    using (Mat mask = new Mat()) // empty mask = use the whole image
    // ORB descriptors are binary — match them with Hamming distance,
    // not the BFMatcher default of L2.
    using (var matcher = new BFMatcher(NormTypes.Hamming))
    {
        // Cv2.ImRead loads pixels in BGR channel order, so the correct
        // grayscale conversion is BGR2GRAY (RGB2GRAY swaps the R/B weights).
        Cv2.CvtColor(image01, image1, ColorConversionCodes.BGR2GRAY);
        Cv2.CvtColor(image02, image2, ColorConversionCodes.BGR2GRAY);

        // Detect key points and compute descriptors for both images.
        KeyPoint[] keyPoint1;
        KeyPoint[] keyPoint2;
        orb.DetectAndCompute(image1, mask, out keyPoint1, descriptor1);
        orb.DetectAndCompute(image2, mask, out keyPoint2, descriptor2);
        Debug.Log($"keyPoints has {keyPoint1.Length},{keyPoint2.Length} items.");
        Debug.Log($"descriptor has {descriptor1.Rows},{descriptor2.Rows} items.");

        // Match descriptors and render the correspondences side by side.
        DMatch[] matchePoints = matcher.Match(descriptor1, descriptor2);
        dstMat = new Mat();
        Cv2.DrawMatches(image01, keyPoint1, image02, keyPoint2, matchePoints, dstMat);
        t2d = Utils.MatToTexture2D(dstMat);
    }
    Sprite dst_sp = Sprite.Create(t2d, new UnityEngine.Rect(0, 0, t2d.width, t2d.height), Vector2.zero);
    m_srcImage.sprite = dst_sp;
    m_srcImage.preserveAspect = true;
}
使用stitcher拼接其实比较容易,按照stitcher.Stitch()函数的参数提示填值就可以了。花了好大功夫才实现,坑点主要在老版本的OpenCVSharp.dll内存崩溃(官方git上有说明,已经修复)。还有一开始一直在用手写算法去实现,这个暂时也写不出来了⁄(⁄ ⁄•⁄ω⁄•⁄ ⁄)⁄。有志在Unity中实现的,可以参考这些文章。
http://www.pyimagesearch.com/2016/01/11/opencv-panorama-stitching/
https://github.com/tanaka0079/cpp/blob/master/opencv/image/panorama/orb.cpp
回到正题,开始拼接。
Mat p1Mat, p2Mat, dstMat;
bool tryUseGpu = true;

/// <summary>
/// Lazily loads the two source images, keeping references in the
/// <c>p1Mat</c>/<c>p2Mat</c> fields. Returns the strongly-typed
/// IEnumerable&lt;Mat&gt; that Stitcher.Stitch expects — the non-generic
/// IEnumerable would not bind to its signature (and the generic type
/// still implements IEnumerable, so existing callers keep working).
/// </summary>
IEnumerable<Mat> GenerateImages()
{
    p1Mat = Cv2.ImRead(Application.streamingAssetsPath + "/Textures/p1.jpg");
    p2Mat = Cv2.ImRead(Application.streamingAssetsPath + "/Textures/p2.jpg");
    yield return p1Mat;
    yield return p2Mat;
}
/// <summary>
/// Stitches the source images into a panorama and displays it on
/// <c>m_dstImage</c>. Logs the Stitcher status code on failure.
/// </summary>
void OnStitch()
{
    // `var` keeps this edit compatible with either return type of GenerateImages.
    var images = GenerateImages();
    using (var stitcher = Stitcher.Create(tryUseGpu))
    using (var panoMat = new Mat())
    {
        Debug.Log("Stitcher start...");
        var status = stitcher.Stitch(images, panoMat);
        if (status != Stitcher.Status.OK)
        {
            Debug.Log("Can't stitch images, error code = " + (int)status);
            return;
        }
        // No explicit stitcher.Dispose() here: the using statement already
        // disposes it, and the original's extra call disposed it twice.
        t2d = Utils.MatToTexture2D(panoMat);
    }
    Sprite sp = Sprite.Create(t2d, new UnityEngine.Rect(0, 0, t2d.width, t2d.height), Vector2.zero);
    m_dstImage.sprite = sp;
    m_dstImage.preserveAspect = true;
}