[opencvsharp] Notes on the opencvsharp_samples SamplesCore example code


Source code: https://github.com/shimat/opencvsharp_samples

  • Notes on the SamplesCore C# (.NET Core / .NET Framework) samples

#1. Face detection with a cascade classifier
using OpenCvSharp;
using SampleBase;


namespace SamplesCore
{
    /// <summary>
    /// Human face detection
    /// http://docs.opencv.org/doc/tutorials/objdetect/cascade_classifier/cascade_classifier.html
    /// </summary>
    class FaceDetection : ConsoleTestBase
    {
        public override void RunTest()
        {
            // Load the cascade classifiers
            using var haarCascade = new CascadeClassifier(TextPath.HaarCascade);
            using var lbpCascade = new CascadeClassifier(TextPath.LbpCascade);


            // Detect faces with each cascade
            Mat haarResult = DetectFace(haarCascade);
            Mat lbpResult = DetectFace(lbpCascade);


            Cv2.ImShow("Faces by Haar", haarResult);
            Cv2.ImShow("Faces by LBP", lbpResult);
            Cv2.WaitKey(0);
            Cv2.DestroyAllWindows();
        }


        // Detect faces and draw an ellipse around each one
        private Mat DetectFace(CascadeClassifier cascade)
        {
            Mat result;


            using (var src = new Mat(ImagePath.Yalta, ImreadModes.Color))
            using (var gray = new Mat())
            {
                result = src.Clone();
                Cv2.CvtColor(src, gray, ColorConversionCodes.BGR2GRAY);


                // Multi-scale detection
                Rect[] faces = cascade.DetectMultiScale(
                    gray, 1.08, 2, HaarDetectionTypes.ScaleImage, new Size(30, 30));


                // Draw every detected face
                foreach (Rect face in faces)
                {
                    var center = new Point
                    {
                        X = (int)(face.X + face.Width * 0.5),
                        Y = (int)(face.Y + face.Height * 0.5)
                    };
                    var axes = new Size
                    {
                        Width = (int)(face.Width * 0.5),
                        Height = (int)(face.Height * 0.5)
                    };
                    Cv2.Ellipse(result, center, axes, 0, 0, 360, new Scalar(255, 0, 255), 4);
                }
            }
            return result;
        }
    }
}
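
The same cascade also works frame-by-frame on a video stream. A minimal sketch, assuming a webcam at index 0 and the same cascade file as above; press ESC to quit:

using OpenCvSharp;

using var cascade = new CascadeClassifier(TextPath.HaarCascade);
using var capture = new VideoCapture(0);
using var frame = new Mat();
using var gray = new Mat();
while (capture.Read(frame) && !frame.Empty())
{
    Cv2.CvtColor(frame, gray, ColorConversionCodes.BGR2GRAY);
    foreach (Rect face in cascade.DetectMultiScale(gray, 1.08, 2))
        frame.Rectangle(face, Scalar.Magenta, 2); // box instead of ellipse, for brevity
    Cv2.ImShow("Webcam faces", frame);
    if (Cv2.WaitKey(1) == 27) break; // ESC
}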




#2. Face detection with a DNN (Caffe model)
using System;
using OpenCvSharp;
using OpenCvSharp.Dnn;
using SampleBase;


namespace SamplesCore
{
    /// <summary>
    /// To run this example first download the face model available here: https://github.com/spmallick/learnopencv/tree/master/FaceDetectionComparison/models
    /// Add the files to the bin folder.
    /// You should also prepare the input images (faces.jpg) yourself.
    /// </summary>
    internal class FaceDetectionDNN : ConsoleTestBase
    {
        const string configFile = "deploy.prototxt"; // network definition
        const string faceModel = "res10_300x300_ssd_iter_140000_fp16.caffemodel"; // pretrained weights
        const string image = "faces.jpg"; // input image


        public override void RunTest()
        {
            // Read the sample image
            using var frame = Cv2.ImRead(image);
            int frameHeight = frame.Rows;
            int frameWidth = frame.Cols;
            using var faceNet = CvDnn.ReadNetFromCaffe(configFile, faceModel); // load the Caffe network
      // Mat BlobFromImage(Mat image, double scaleFactor = 1, Size size = default, Scalar mean = default, bool swapRB = true, bool crop = true);
      // image:       input image (1, 3, or 4 channels)
      // scaleFactor: multiplier applied to every channel value
      // size:        spatial size of the output blob, e.g. size=(200,300) means width w=200, height h=300
      // mean:        per-channel values to subtract, to reduce lighting effects (e.g. for a BGR image, mean=[104.0, 177.0, 123.0] subtracts 104 from B, 177 from G, 123 from R)
      // swapRB:      swap the R and B channels (imread loads color images as BGR)
      // crop:        if true, resize preserving aspect ratio, then center-crop to size
      // ddepth:      depth of the output blob, CV_32F or CV_8U
            using var blob = CvDnn.BlobFromImage(frame, 1.0, new Size(300, 300), new Scalar(104, 117, 123), false, false);
            faceNet.SetInput(blob, "data"); // set the network input


            using var detection = faceNet.Forward("detection_out"); // forward pass; returns the first output blob of the "detection_out" layer
            using var detectionMat = new Mat(detection.Size(2), detection.Size(3), MatType.CV_32F,
                detection.Ptr(0)); // detection matrix: one row per detection
            for (int i = 0; i < detectionMat.Rows; i++) // iterate over the rows of the detection matrix
            {
                float confidence = detectionMat.At<float>(i, 2); // column 2 holds the confidence


                if (confidence > 0.7)
                {   // columns 3..6 hold xmin, ymin, xmax, ymax in normalized coordinates
                    int x1 = (int) (detectionMat.At<float>(i, 3) * frameWidth);
                    int y1 = (int) (detectionMat.At<float>(i, 4) * frameHeight);
                    int x2 = (int) (detectionMat.At<float>(i, 5) * frameWidth);
                    int y2 = (int) (detectionMat.At<float>(i, 6) * frameHeight);
                    // draw a green rectangle
                    Cv2.Rectangle(frame, new Point(x1, y1), new Point(x2, y2), new Scalar(0, 255, 0), 2, LineTypes.Link4);
                }
                }
            }
            // show the result
            Window.ShowImages(frame);
        }
    }
}
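
The detection loop can be factored into a reusable helper. A sketch under the same assumptions (SSD-style output rows of [batchId, classId, confidence, xmin, ymin, xmax, ymax] in normalized coordinates; add using System.Collections.Generic):

// Sketch: return face rectangles above a confidence threshold.
static Rect[] DetectFaces(Net net, Mat frame, float minConfidence = 0.7f)
{
    using var blob = CvDnn.BlobFromImage(frame, 1.0, new Size(300, 300),
        new Scalar(104, 117, 123), false, false);
    net.SetInput(blob, "data");
    using var detection = net.Forward("detection_out");
    using var detectionMat = new Mat(detection.Size(2), detection.Size(3),
        MatType.CV_32F, detection.Ptr(0));

    var faces = new List<Rect>();
    for (int i = 0; i < detectionMat.Rows; i++)
    {
        if (detectionMat.At<float>(i, 2) < minConfidence) continue;
        int x1 = (int)(detectionMat.At<float>(i, 3) * frame.Cols);
        int y1 = (int)(detectionMat.At<float>(i, 4) * frame.Rows);
        int x2 = (int)(detectionMat.At<float>(i, 5) * frame.Cols);
        int y2 = (int)(detectionMat.At<float>(i, 6) * frame.Rows);
        faces.Add(new Rect(x1, y1, x2 - x1, y2 - y1));
    }
    return faces.ToArray();
}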




#3. cv::FAST: corner detection with the FAST algorithm
using OpenCvSharp;
using SampleBase;


namespace SamplesCore
{
    /// <summary>
    /// cv::FAST: detects corners using the FAST algorithm
    /// </summary>
    class FASTSample : ConsoleTestBase
    {
        public override void RunTest()
        {
            using Mat imgSrc = new Mat(ImagePath.Lenna, ImreadModes.Color);
            using Mat imgGray = new Mat();
            using Mat imgDst = imgSrc.Clone();
            Cv2.CvtColor(imgSrc, imgGray, ColorConversionCodes.BGR2GRAY, 0);
      // imgGray: grayscale image in which to detect keypoints (corners)
      // 50:      threshold on the intensity difference between the center pixel and the pixels on a circle around it
      // true:    apply non-maximum suppression to the detected corners (keypoints)
            KeyPoint[] keypoints = Cv2.FAST(imgGray, 50, true); 


            foreach (KeyPoint kp in keypoints) // iterate over the detected keypoints
            {
                imgDst.Circle((Point)kp.Pt, 3, Scalar.Red, -1, LineTypes.AntiAlias, 0); // draw a filled dot
            }


            Cv2.ImShow("FAST", imgDst);
            Cv2.WaitKey(0);
            Cv2.DestroyAllWindows();
        }
    }
}
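
Non-maximum suppression has a large effect on how many corners come back: without it, every pixel along a corner ridge passes the segment test. A quick sketch comparing both settings on the same image (add using System for Console):

using var gray = new Mat(ImagePath.Lenna, ImreadModes.Grayscale);
KeyPoint[] withNms = Cv2.FAST(gray, 50, true);     // with non-maximum suppression
KeyPoint[] withoutNms = Cv2.FAST(gray, 50, false); // without: typically several times more points
Console.WriteLine($"threshold=50: {withNms.Length} corners with NMS, {withoutNms.Length} without");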


#4. cv::flann — FLANN (Fast Library for Approximate Nearest Neighbors)
//A collection of optimized algorithms for nearest-neighbor search on large datasets and high-dimensional features; on large datasets it tends to outperform BFMatcher.
//(The "pass two dictionaries" phrasing comes from the Python API; in OpenCvSharp the equivalents are the IndexParams and SearchParams objects used below.)
using System;
using OpenCvSharp;
using OpenCvSharp.Flann;
using SampleBase;


namespace SamplesCore
{
    /// <summary>
    /// cv::flann
    /// </summary>
    class FlannSample : ConsoleTestBase
    {
        public override void RunTest()
        {
            Console.WriteLine("===== FlannTest =====");


            // Create the dataset
            using (var features = new Mat(10000, 2, MatType.CV_32FC1)) // 10,000 points
            {
                var rand = new Random();
                for (int i = 0; i < features.Rows; i++)
                {
                    features.Set<float>(i, 0, rand.Next(10000)); // random x coordinate
                    features.Set<float>(i, 1, rand.Next(10000)); // random y coordinate
                }


                // Query point, as a Mat
                var queryPoint = new Point2f(7777, 7777); // the point to search for
                var queries = new Mat(1, 2, MatType.CV_32FC1);
                queries.Set<float>(0, 0, queryPoint.X);
                queries.Set<float>(0, 1, queryPoint.Y);
                Console.WriteLine("query:({0}, {1})", queryPoint.X, queryPoint.Y);
                Console.WriteLine("-----");


                // K-nearest-neighbor search (knnSearch)
                // features: matrix of type CV_32F containing the features (points) to index; its size is num_features x feature_dimensionality
                using var nnIndex = new OpenCvSharp.Flann.Index(features, new KDTreeIndexParams(4)); // build a nearest-neighbor search index for the dataset
                const int Knn = 1; // number of neighbors
                // queries: the query points, one per row
                // indices: indices of the found nearest neighbors
                // dists:   distances to the nearest neighbors
                // Knn:     number of nearest neighbors to search for
                // SearchParams(int checks = 32, float eps = 0, bool sorted = true): search parameters
                nnIndex.KnnSearch(queries, out int[] indices, out float[] dists, Knn, new SearchParams(32));


                for (int i = 0; i < Knn; i++) // iterate over the neighbors
                {
                    int index = indices[i];
                    float dist = dists[i];
                    var pt = new Point2f(features.Get<float>(index, 0), features.Get<float>(index, 1)); // the neighbor itself
                    Console.Write("No.{0}\t", i);
                    Console.Write("index:{0}", index);
                    Console.Write(" distance:{0}", dist);
                    Console.Write(" data:({0}, {1})", pt.X, pt.Y);
                    Console.WriteLine();
                }
            }
            Console.Read();
        }
    }
}
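
Because a KD-tree search is approximate, it is worth knowing how to verify a result. A minimal sketch that linearly scans the same features Mat for the exact nearest neighbor (FLANN reports squared L2 distances by default, so this uses the same metric):

// Sketch: exhaustive nearest-neighbor check against the KD-tree answer.
static int LinearNearest(Mat features, Point2f query, out float bestDist)
{
    int best = -1;
    bestDist = float.MaxValue;
    for (int i = 0; i < features.Rows; i++)
    {
        float dx = features.Get<float>(i, 0) - query.X;
        float dy = features.Get<float>(i, 1) - query.Y;
        float d = dx * dx + dy * dy; // squared Euclidean distance
        if (d < bestDist) { bestDist = d; best = i; }
    }
    return best;
}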


#5. FREAK: retrieving keypoints with the FREAK algorithm
using OpenCvSharp;
using OpenCvSharp.XFeatures2D;
using SampleBase;


namespace SamplesCore
{
    /// <summary>
    /// Retrieves keypoints using the FREAK algorithm.
    /// </summary>
    class FREAKSample : ConsoleTestBase
    {
        public override void RunTest()
        {
            using var gray = new Mat(ImagePath.Lenna, ImreadModes.Grayscale); // grayscale input
            using var dst = new Mat(ImagePath.Lenna, ImreadModes.Color); // result image


            // ORB keypoint detection
            using var orb = ORB.Create(1000);
            KeyPoint[] keypoints = orb.Detect(gray); // detect ORB keypoints on the grayscale image as FREAK's starting points


            // FREAK
            using var freak = FREAK.Create();
            Mat freakDescriptors = new Mat(); // FREAK descriptors
            freak.Compute(gray, ref keypoints, freakDescriptors); // compute FREAK descriptors (keypoints that cannot be described are removed)


            if (keypoints != null)
            {
                var color = new Scalar(0, 255, 0); // green
                foreach (KeyPoint kpt in keypoints) // iterate over the keypoints
                {
                    float r = kpt.Size / 2;
                    Cv2.Circle(dst, (Point)kpt.Pt, (int)r, color); // draw the keypoint circle
                    // draw a cross through it
                    Cv2.Line(dst,
                        (Point)new Point2f(kpt.Pt.X + r, kpt.Pt.Y + r),
                        (Point)new Point2f(kpt.Pt.X - r, kpt.Pt.Y - r), 
                        color);
                    Cv2.Line(dst,
                        (Point)new Point2f(kpt.Pt.X - r, kpt.Pt.Y + r),
                        (Point)new Point2f(kpt.Pt.X + r, kpt.Pt.Y - r), 
                        color);
                }
            }


            using (new Window("FREAK", dst)) //显示图像
            {
                Cv2.WaitKey();
            }
        }
    }
}
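
FREAK produces binary descriptors, so matching uses the Hamming norm. A sketch matching Lenna against a horizontally flipped copy (the flip and the crossCheck flag are illustrative choices, not part of the original sample):

using var gray = new Mat(ImagePath.Lenna, ImreadModes.Grayscale);
using var flipped = new Mat();
Cv2.Flip(gray, flipped, FlipMode.Y);

using var orb = ORB.Create(1000);
using var freak = FREAK.Create();
KeyPoint[] kp1 = orb.Detect(gray);
KeyPoint[] kp2 = orb.Detect(flipped);
using var desc1 = new Mat();
using var desc2 = new Mat();
freak.Compute(gray, ref kp1, desc1);
freak.Compute(flipped, ref kp2, desc2);

// Binary descriptors -> Hamming distance; crossCheck keeps mutual best matches only.
using var matcher = new BFMatcher(NormTypes.Hamming, crossCheck: true);
DMatch[] matches = matcher.Match(desc1, desc2);

using var vis = new Mat();
Cv2.DrawMatches(gray, kp1, flipped, kp2, matches, vis);
Cv2.ImShow("FREAK matches", vis);
Cv2.WaitKey();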




#6. Hand pose (HandPose) detection
using System.Collections.Generic;
using OpenCvSharp;
using OpenCvSharp.Dnn;
using SampleBase;


namespace SamplesCore
{
    /// <summary>
    /// To run this example first download the hand model available here: http://posefs1.perception.cs.cmu.edu/OpenPose/models/hand/pose_iter_102000.caffemodel
    /// Or also available here https://github.com/CMU-Perceptual-Computing-Lab/openpose/tree/master/models
    /// Add the files to the bin folder
    /// </summary>
    internal class HandPose : ConsoleTestBase
    {
        public override void RunTest()
        {
            const string model = "pose_iter_102000.caffemodel"; // hand pose network weights
            const string modelTxt = "pose_deploy.prototxt"; // network definition
            const string sampleImage = "hand.jpg"; // sample image
            const string outputLoc = "Output_Hand.jpg"; // output image path
            const int nPoints = 22; // number of keypoints
            const double thresh = 0.01; // confidence threshold
            // keypoint pairs to connect (one chain per finger)
            {
                new[] {0, 1}, new[] {1, 2}, new[] {2, 3}, new[] {3, 4}, // thumb
                new[] {0, 5}, new[] {5, 6}, new[] {6, 7}, new[] {7, 8}, // index finger
                new[] {0, 9}, new[] {9, 10}, new[] {10, 11}, new[] {11, 12}, // middle finger
                new[] {0, 13}, new[] {13, 14}, new[] {14, 15}, new[] {15, 16}, // ring finger
                new[] {0, 17}, new[] {17, 18}, new[] {18, 19}, new[] {19, 20}, // little finger
            };


            using var frame = Cv2.ImRead(sampleImage); // read the sample image
            using var frameCopy = frame.Clone(); // copy used for drawing the results
            int frameWidth = frame.Cols;
            int frameHeight = frame.Rows;


            float aspectRatio = frameWidth / (float) frameHeight; // aspect ratio
            int inHeight = 368; // network input height
            int inWidth = ((int) (aspectRatio * inHeight) / 8) * 8; // input width, rounded down to a multiple of 8


            using var net = CvDnn.ReadNetFromCaffe(modelTxt, model); // load the Caffe network
            // BlobFromImage preprocesses the image (mean subtraction, scaling, optional crop and channel swap)
            // and returns a 4-dimensional blob (an N-dimensional array used as the network input)
            using var inpBlob = CvDnn.BlobFromImage(frame, 1.0 / 255, new Size(inWidth, inHeight),
                new Scalar(0, 0, 0), false, false);


            net.SetInput(inpBlob); // set the network input


            using var output = net.Forward(); // forward pass
            int H = output.Size(2); // output height
            int W = output.Size(3); // output width


            var points = new List<Point>();


            for (int n = 0; n < nPoints; n++)
            {
                // Probability map of the corresponding body part.
                using var probMap = new Mat(H, W, MatType.CV_32F, output.Ptr(0, n));
                Cv2.Resize(probMap, probMap, new Size(frameWidth, frameHeight)); // resize to the original frame size
                Cv2.MinMaxLoc(probMap, out _, out var maxVal, out _, out var maxLoc); // find the maximum probability and its location


                if (maxVal > thresh) // confidence above threshold
                {   // draw the peak point
                    Cv2.Circle(frameCopy, maxLoc.X, maxLoc.Y, 8, new Scalar(0, 255, 255), -1,
                        LineTypes.Link8);
                    // keypoint index
                    Cv2.PutText(frameCopy, n.ToString(), new OpenCvSharp.Point(maxLoc.X, maxLoc.Y),
                        HersheyFonts.HersheyComplex, 1, new Scalar(0, 0, 255), 2, LineTypes.AntiAlias);
                }


                points.Add(maxLoc); // add to the point list
            }


            int nPairs = 20; //(POSE_PAIRS).Length / POSE_PAIRS[0].Length;


            for (int n = 0; n < nPairs; n++)
            {
                //  lookup 2 connected body/hand parts
                Point partA = points[posePairs[n][0]];
                Point partB = points[posePairs[n][1]];


                if (partA.X <= 0 || partA.Y <= 0 || partB.X <= 0 || partB.Y <= 0)
                    continue;
                // connect the two linked parts
                Cv2.Line(frame, partA, partB, new Scalar(0, 255, 255), 8);
                Cv2.Circle(frame, partA.X, partA.Y, 8, new Scalar(0, 0, 255), -1);
                Cv2.Circle(frame, partB.X, partB.Y, 8, new Scalar(0, 0, 255), -1);
            }


            var finalOutput = outputLoc;
            Cv2.ImWrite(finalOutput, frame);
        }
    }
}
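
For debugging it can help to see a whole probability map rather than just its peak. A sketch (my addition, not part of the sample) that renders one resized probMap over the frame as a heatmap:

// Sketch: visualize one keypoint's probability map as a heatmap overlay.
// Assumes probMap has already been resized to the frame size, as in the loop above.
using var prob8U = new Mat();
probMap.ConvertTo(prob8U, MatType.CV_8U, 255.0); // scale 0..1 floats to 0..255
using var heat = new Mat();
Cv2.ApplyColorMap(prob8U, heat, ColormapTypes.Jet);
using var overlay = new Mat();
Cv2.AddWeighted(frame, 0.6, heat, 0.4, 0, overlay);
Cv2.ImWrite("probmap_overlay.jpg", overlay);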




#7. Histogram sample
using System;
using OpenCvSharp;
using SampleBase;


namespace SamplesCore
{
    /// <summary>
    /// Histogram sample
    /// http://book.mynavi.jp/support/pc/opencv2/c3/opencv_img.html
    /// </summary>
    class HistSample : ConsoleTestBase
    {
        public override void RunTest()
        {
            using var src = Cv2.ImRead(ImagePath.Lenna, ImreadModes.Grayscale);


            // Histogram view
            const int Width = 260, Height = 200;
            using var render = new Mat(new Size(Width, Height), MatType.CV_8UC3, Scalar.All(255)); // white 260x200 background


            // Calculate histogram
            var hist = new Mat();
            int[] hdims = {256}; // histogram size for each dimension
            Rangef[] ranges = { new Rangef(0,256), }; // min/max range
            Cv2.CalcHist(
                new Mat[]{src}, 
                new int[]{0}, 
                null,
                hist, // output histogram
                1, 
                hdims, // count pixels into 256 bins
                ranges);
  
            // Get the max value of the histogram
            Cv2.MinMaxLoc(hist, out _, out double maxVal);


            var color = Scalar.All(100); // bar color
            // Scale and draw the histogram
            hist = hist * (maxVal != 0 ? Height / maxVal : 0.0); // scale so the tallest bin maps to the view height
            for (int j = 0; j < hdims[0]; ++j) 
            {
                int binW = (int)((double)Width / hdims[0]); // width of each bin's rectangle
                render.Rectangle(
                    new Point(j * binW, render.Rows - (int)hist.Get<float>(j)),
                    new Point((j + 1) * binW, render.Rows),
                    color, 
                    -1); // filled rectangle
            }
            }


            using (new Window("Image", src, WindowFlags.AutoSize | WindowFlags.FreeRatio))//显示原图像
            using (new Window("Histogram", render, WindowFlags.AutoSize | WindowFlags.FreeRatio))//显示直方图
            {
                Cv2.WaitKey();
            }
        }
    }
}
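
The manual scaling by Height/maxVal can also be expressed with Cv2.Normalize, which maps the histogram into the target range directly. An equivalent sketch for the drawing step:

// Sketch: scale via Normalize instead of MinMaxLoc + multiplication.
Cv2.Normalize(hist, hist, 0, Height, NormTypes.MinMax);
for (int j = 0; j < hdims[0]; ++j)
{
    int binW = (int)((double)Width / hdims[0]);
    render.Rectangle(
        new Point(j * binW, render.Rows - (int)hist.Get<float>(j)),
        new Point((j + 1) * binW, render.Rows),
        Scalar.All(100), -1);
}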




#8. HOG people-detection sample
using System;
using System.Diagnostics;
using OpenCvSharp;
using SampleBase;


namespace SamplesCore
{
    /// <summary>
    /// samples/c/peopledetect.c
    /// </summary>
    internal class HOGSample : ConsoleTestBase
    {
        public HOGSample()
        {
        }


        public override void RunTest()
        {
            using var img = Cv2.ImRead(ImagePath.Asahiyama, ImreadModes.Color); // read the sample image


            using var hog = new HOGDescriptor();
            hog.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector()); // set the SVM coefficients of the default people detector


            bool b = hog.CheckDetectorSize(); // verify the detector size is consistent with the HOG parameters
            Console.WriteLine("CheckDetectorSize: {0}", b);


            var watch = Stopwatch.StartNew();


            // run the detector with default parameters. to get a higher hit-rate
            // (and more false alarms, respectively), decrease the hitThreshold and
            // groupThreshold (set groupThreshold to 0 to turn off the grouping completely).
            // Rect[] DetectMultiScale(Mat img, double hitThreshold = 0, Size? winStride = null, Size? padding = null, double scale = 1.05, int groupThreshold = 2);
            Rect[] found = hog.DetectMultiScale(img, 0, new Size(8, 8), new Size(24, 16), 1.05, 2); // multi-scale detection


            watch.Stop();
            Console.WriteLine("Detection time = {0}ms", watch.ElapsedMilliseconds);//检测时间
            Console.WriteLine("{0} region(s) found", found.Length);//找到多少区域


            foreach (Rect rect in found)
            {
                // the HOG detector returns slightly larger rectangles than the real objects,
                // so we slightly shrink the rectangles to get a nicer output.
                var r = new Rect
                {
                    X = rect.X + (int)Math.Round(rect.Width * 0.1),
                    Y = rect.Y + (int)Math.Round(rect.Height * 0.1),
                    Width = (int)Math.Round(rect.Width * 0.8),
                    Height = (int)Math.Round(rect.Height * 0.8)
                };
                img.Rectangle(r.TopLeft, r.BottomRight, Scalar.Red, 3); // draw the shrunken rectangle
            }


            using var window = new Window("people detector", img, WindowFlags.Normal);//显示检测结果
            window.SetProperty(WindowPropertyFlags.Fullscreen, 1);
            Cv2.WaitKey(0);
        }
    }
}
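
Besides detection, HOGDescriptor can return the raw feature vector, which is what you would feed a custom SVM. A small sketch using the detector's default 64x128 window (the gray Mat here is a placeholder, not an image from the sample):

using var hog = new HOGDescriptor(); // default 64x128 detection window
using var window = new Mat(128, 64, MatType.CV_8UC3, Scalar.All(127)); // dummy window-sized image
float[] descriptor = hog.Compute(window);
Console.WriteLine($"HOG feature length: {descriptor.Length}"); // 3780 with the default parameters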


#9. Hough transform sample / line detection via the Hough transform
using System;
using OpenCvSharp;
using SampleBase;


namespace SamplesCore
{
    /// <summary>
    /// Hough Transform Sample / line detection via the Hough transform
    /// </summary>
    /// http://opencv.jp/sample/special_transforms.html#hough_line
    class HoughLinesSample : ConsoleTestBase
    {
        public override void RunTest()
        {
            SampleCpp();      
        }


        /// <summary>
        /// sample of new C++ style wrapper
        /// </summary>
        private void SampleCpp()
        {
            // (1) Load images
            using var imgGray = new Mat(ImagePath.Goryokaku, ImreadModes.Grayscale); // grayscale input
            using var imgStd = new Mat(ImagePath.Goryokaku, ImreadModes.Color); // result image for the standard transform
            using var imgProb = imgStd.Clone(); // result image for the probabilistic transform
            // (2) Preprocess: Canny edge detection
            Cv2.Canny(imgGray, imgGray, 50, 200, 3, false);


            // (3) Run the Standard Hough Transform
            // HoughLines(InputArray image, double rho, double theta, int threshold, double srn = 0, double stn = 0);
            // image:     8-bit, single-channel, binary source image (may be modified by the function)
            // rho:       distance resolution of the accumulator, in pixels
            // theta:     angle resolution of the accumulator, in radians
            // threshold: accumulator threshold; only lines with enough votes (> threshold) are returned
            // srn:       for the multi-scale Hough transform, a divisor of the distance resolution rho [default 0]
            // stn:       for the multi-scale Hough transform, a divisor of the angle resolution theta [default 0]
            // Output: each line is a two-element vector (rho, theta); rho is the distance from the origin (0,0) (the top-left image corner), theta is the line's rotation angle in radians
            LineSegmentPolar[] segStd = Cv2.HoughLines(imgGray, 1, Math.PI / 180, 50, 0, 0); // find lines in the binary image with the standard Hough transform
            int limit = Math.Min(segStd.Length, 10); // draw at most ten lines
            for (int i = 0; i < limit; i++)
            {
                // Draw the detected line
                float rho = segStd[i].Rho;
                float theta = segStd[i].Theta;
                double a = Math.Cos(theta);
                double b = Math.Sin(theta);
                double x0 = a * rho; // closest point of the line to the origin
                double y0 = b * rho;
                Point pt1 = new Point { X = (int)Math.Round(x0 + 1000 * (-b)), Y = (int)Math.Round(y0 + 1000 * (a)) }; // extend 1000 px in both directions to get endpoints
                Point pt2 = new Point { X = (int)Math.Round(x0 - 1000 * (-b)), Y = (int)Math.Round(y0 - 1000 * (a)) };
                imgStd.Line(pt1, pt2, Scalar.Red, 3, LineTypes.AntiAlias, 0); // draw the line
            }


            // (4) Run the Probabilistic Hough Transform
            // LineSegmentPoint[] HoughLinesP(InputArray image, double rho, double theta, int threshold, double minLineLength = 0, double maxLineGap = 0);
            // rho:           distance resolution of the accumulator, in pixels
            // theta:         angle resolution of the accumulator, in radians
            // threshold:     accumulator threshold; only segments with enough votes (> threshold) are returned
            // minLineLength: segments shorter than this are rejected [default 0]
            // maxLineGap:    maximum allowed gap between points on the same line to link them [default 0]
            // Output: each segment is a 4-element vector (x1, y1, x2, y2)
            LineSegmentPoint[] segProb = Cv2.HoughLinesP(imgGray, 1, Math.PI / 180, 50, 50, 10);
            foreach (LineSegmentPoint s in segProb)
            {
                imgProb.Line(s.P1, s.P2, Scalar.Red, 3, LineTypes.AntiAlias, 0); // draw the segment
            }


            // (5) Show results
            using (new Window("Hough_line_standard", imgStd, WindowFlags.AutoSize))
            using (new Window("Hough_line_probabilistic", imgProb, WindowFlags.AutoSize))
            {
                Window.WaitKey(0);
            }
        }
    }
}
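
The endpoint math above is worth isolating: a polar line (rho, theta) passes through the point (rho·cos θ, rho·sin θ), and its direction vector is (−sin θ, cos θ). A small helper sketch consolidating the conversion used in the loop:

// Sketch: convert a polar Hough line into a drawable segment of length ~2*len.
static (Point pt1, Point pt2) PolarToSegment(LineSegmentPolar line, int len = 1000)
{
    double a = Math.Cos(line.Theta), b = Math.Sin(line.Theta);
    double x0 = a * line.Rho, y0 = b * line.Rho; // closest point of the line to the origin
    var pt1 = new Point((int)Math.Round(x0 - len * b), (int)Math.Round(y0 + len * a));
    var pt2 = new Point((int)Math.Round(x0 + len * b), (int)Math.Round(y0 - len * a));
    return (pt1, pt2);
}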




#10. Inpainting (e.g. watermark removal). Inpainting is image interpolation; digital inpainting algorithms are widely used in photo restoration, zooming, and super-resolution.
using System;
using System.IO;
using OpenCvSharp;
using SampleBase;


namespace SamplesCore
{
    /// <summary>
    /// Inpainting: the basic idea is simple: replace the marked bad pixels with their neighbors so that the region blends in
    /// </summary>
    /// http://opencv.jp/sample/special_transforms.html#inpaint
    public class InpaintSample : ConsoleTestBase
    {
        public override void RunTest()
        {
            // cvInpaint


            Console.WriteLine(
                "Hot keys: \n" +
                "\tESC - quit the program\n" +
                "\tr - restore the original image\n" +
                "\ti or ENTER - run inpainting algorithm\n" +
                "\t\t(before running it, paint something on the image)\n" +
                "\ts - save the original image, mask image, original+mask image and inpainted image to desktop"
            );


            using var img0 = Cv2.ImRead(ImagePath.Fruits, ImreadModes.AnyDepth | ImreadModes.AnyColor); // original image
            using var img = img0.Clone(); // working copy the user paints on
            using var inpaintMask = new Mat(img0.Size(), MatType.CV_8U, Scalar.Black); // inpaint mask
            using var inpainted = img0.EmptyClone();


            using var wImage = new Window("image", img);//
            var prevPt = new Point(-1, -1);
      //设置鼠标回调
            wImage.SetMouseCallback((MouseEventTypes ev, int x, int y, MouseEventFlags flags, IntPtr userdata) =>
            {
                if (ev == MouseEventTypes.LButtonUp || (flags & MouseEventFlags.LButton) == 0)
                {
                    prevPt = new Point(-1, -1);
                }
                else if (ev == MouseEventTypes.LButtonDown)
                {
                    prevPt = new Point(x, y);
                }
                else if (ev == MouseEventTypes.MouseMove && (flags & MouseEventFlags.LButton) != 0)
                {
                    Point pt = new Point(x, y);
                    if (prevPt.X < 0)
                    {
                        prevPt = pt; // first point of the stroke
                    }
                    inpaintMask.Line(prevPt, pt, Scalar.White, 5, LineTypes.AntiAlias, 0); // draw the stroke on the mask
                    img.Line(prevPt, pt, Scalar.White, 5, LineTypes.AntiAlias, 0); // and on the visible image
                    prevPt = pt; // update the stroke origin
                    wImage.ShowImage(img); // refresh the window
                }
            });


            Window wInpaint1 = null;
            Window wInpaint2 = null;
            try
            {
                for (; ; )
                {
                    switch ((char)Window.WaitKey(0))
                    {
                        case (char)27:    // exit
                            return;
                        case 'r':   // restore the original image
                            inpaintMask.SetTo(Scalar.Black); // reset the mask to black
                            img0.CopyTo(img);
                            wImage.ShowImage(img);
                            break;
                        case 'i':   // do Inpaint
                        case '\r':
              // src:           input 8-bit 1-channel or 3-channel image
              // inpaintMask:   inpainting mask, 8-bit 1-channel; nonzero pixels mark the area to be inpainted
              // dst:           output image, same size and type as src
              // inpaintRadius: radius of the circular neighborhood of each point considered by the algorithm
              // flags:
              //   INPAINT_NS    - Navier-Stokes based method
              //   INPAINT_TELEA - method by Alexandru Telea
                            Cv2.Inpaint(img, inpaintMask, inpainted, 3, InpaintMethod.Telea);
                            wInpaint1 ??= new Window("inpainted image (algorithm by Alexandru Telea)", WindowFlags.AutoSize);
                            wInpaint1.ShowImage(inpainted);
                            Cv2.Inpaint(img, inpaintMask, inpainted, 3, InpaintMethod.NS);
                            wInpaint2 ??= new Window("inpainted image (algorithm by Navier-Stokes)", WindowFlags.AutoSize);
                            wInpaint2.ShowImage(inpainted);
                            break;
                        case 's': // save images
                            string desktop = Environment.GetFolderPath(Environment.SpecialFolder.Desktop); // desktop path
                            img0.SaveImage(Path.Combine(desktop, "original.png"));
                            inpaintMask.SaveImage(Path.Combine(desktop, "mask.png")); // inpainting mask
                            img.SaveImage(Path.Combine(desktop, "original+mask.png")); // original image + mask
                            inpainted.SaveImage(Path.Combine(desktop, "inpainted.png")); // inpainted image
                            break;
                    }
                }
            }
            finally
            {
                wInpaint1?.Dispose();
                wInpaint2?.Dispose();
                Window.DestroyAllWindows();
            }
        }
    }
}
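
The same call also works non-interactively, which is the usual setup for watermark removal: build the mask programmatically instead of painting it. A minimal sketch, assuming the watermark sits in a known rectangle (the coordinates here are hypothetical):

using var src = Cv2.ImRead(ImagePath.Fruits);
using var mask = new Mat(src.Size(), MatType.CV_8UC1, Scalar.Black);
Cv2.Rectangle(mask, new Rect(30, 30, 120, 40), Scalar.White, -1); // mark the watermark region white
using var repaired = new Mat();
Cv2.Inpaint(src, mask, repaired, 3, InpaintMethod.Telea);
Cv2.ImShow("repaired", repaired);
Cv2.WaitKey();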




#11. Retrieving keypoints with the KAZE and AKAZE algorithms
using System;
using System.Diagnostics;
using OpenCvSharp;
using SampleBase;


namespace SamplesCore
{
    /// <summary>
    /// Retrieves keypoints using the KAZE and AKAZE algorithm.
    /// </summary>
    internal class KAZESample : ConsoleTestBase
    {
        public override void RunTest()
        {
            var gray = new Mat(ImagePath.Lenna, ImreadModes.Grayscale); // grayscale input
            // create the KAZE and AKAZE detectors
            var kaze = KAZE.Create();
            var akaze = AKAZE.Create();
            // descriptor outputs
            var kazeDescriptors = new Mat();
            var akazeDescriptors = new Mat();
            // keypoint arrays
            KeyPoint[] kazeKeyPoints = null, akazeKeyPoints = null;
            // detect keypoints and compute descriptors, timing each detector
            var kazeTime = MeasureTime(() =>
                kaze.DetectAndCompute(gray, null, out kazeKeyPoints, kazeDescriptors));
            var akazeTime = MeasureTime(() =>
                akaze.DetectAndCompute(gray, null, out akazeKeyPoints, akazeDescriptors));
            // result images
            var dstKaze = new Mat();
            var dstAkaze = new Mat();
            // draw the keypoints
            Cv2.DrawKeypoints(gray, kazeKeyPoints, dstKaze);
            Cv2.DrawKeypoints(gray, akazeKeyPoints, dstAkaze);
            // show the results, with timings in the window titles
            using (new Window(String.Format("KAZE [{0:F2}ms]", kazeTime.TotalMilliseconds), dstKaze))
            using (new Window(String.Format("AKAZE [{0:F2}ms]", akazeTime.TotalMilliseconds), dstAkaze))
            {
                Cv2.WaitKey();
            }
        }
        // measure elapsed time
        private TimeSpan MeasureTime(Action action)
        {
            var watch = Stopwatch.StartNew();
            action();
            watch.Stop();
            return watch.Elapsed;
        }
    }
}


#12. Computing the homography matrix H when keypoint counts differ
using System;
using System.Collections.Generic;
using System.Linq;
using System.Runtime.InteropServices;
using OpenCvSharp;
using SampleBase;


namespace SamplesCore
{
    /// <summary>
    /// https://github.com/shimat/opencvsharp/issues/176
    /// FindHomography when the keypoint counts do not match
    /// </summary>
    class KAZESample2 : ConsoleTestBase
    {
        public static Point2d Point2fToPoint2d(Point2f pf)
        {
            return new Point2d(((int) pf.X), ((int) pf.Y));
        }


        public override void RunTest()
        {
            // load the two images
            using var img1 = new Mat(ImagePath.SurfBox);
            using var img2 = new Mat(ImagePath.SurfBoxinscene);
            using var descriptors1 = new Mat();
            using var descriptors2 = new Mat();
            // Extract N and M feature vectors from the two images, match them to find the best
            // correspondences, then draw the matched features.
            using var matcher = new BFMatcher(NormTypes.L2SQR); // brute-force matcher: tries every possible match, so it always finds the best one
            using var kaze = KAZE.Create();
            // compute KAZE keypoints and descriptors
            kaze.DetectAndCompute(img1, null, out var keypoints1, descriptors1);
            kaze.DetectAndCompute(img2, null, out var keypoints2, descriptors2);
            // k-nearest-neighbor matching with k=2 (needed for the ratio test below)
            DMatch[][] matches = matcher.KnnMatch(descriptors1, descriptors2, 2);
            using Mat mask = new Mat(matches.Length, 1, MatType.CV_8U); // n x 1 mask
            mask.SetTo(new Scalar(255)); // initialize to all-accepted
            int nonZero = Cv2.CountNonZero(mask); // number of nonzero mask entries
            VoteForUniqueness(matches, mask); // ratio-test voting
            nonZero = Cv2.CountNonZero(mask); // matches that passed the ratio test
            nonZero = VoteForSizeAndOrientation(keypoints2, keypoints1, matches, mask, 1.5f, 20); // matches that also agree in scale and rotation


            List<Point2f> obj = new List<Point2f>(); // points on the object
            List<Point2f> scene = new List<Point2f>(); // points in the scene
            List<DMatch> goodMatchesList = new List<DMatch>(); // good matches
            // iterate through the mask, only pulling out nonzero items because they're matches
            for (int i = 0; i < mask.Rows; i++)
            {
                MatIndexer<byte> maskIndexer = mask.GetGenericIndexer<byte>();
                if (maskIndexer[i] > 0)
                {
                    obj.Add(keypoints1[matches[i][0].QueryIdx].Pt); // keypoint on the object
                    scene.Add(keypoints2[matches[i][0].TrainIdx].Pt); // keypoint in the scene
                    goodMatchesList.Add(matches[i][0]); // keep as a good match
                }
            }
            // convert the keypoints to Point2d
            List<Point2d> objPts = obj.ConvertAll(Point2fToPoint2d);
            List<Point2d> scenePts = scene.ConvertAll(Point2fToPoint2d);
            if (nonZero >= 4) // a homography needs at least 4 point pairs
            {   // https://blog.csdn.net/fengyeer20120/article/details/87798638
                // FindHomography computes the optimal 3x3 homography H between two sets of 2D points,
                // using least squares or RANSAC. https://blog.csdn.net/xull88619814/article/details/81587595
                Mat homography = Cv2.FindHomography(objPts, scenePts, HomographyMethods.Ransac, 1.5, mask);
                nonZero = Cv2.CountNonZero(mask);


                if (homography != null)
                {
                    Point2f[] objCorners = { new Point2f(0, 0),
                                      new Point2f(img1.Cols, 0),
                                      new Point2f(img1.Cols, img1.Rows),
                                      new Point2f(0, img1.Rows) }; // the four corners of the object image


                    Point2d[] sceneCorners = MyPerspectiveTransform3(objCorners, homography); // the object corners mapped into the scene via the homography


                    // This is a good horizontal concat
                    using Mat img3 = new Mat(Math.Max(img1.Height, img2.Height), img2.Width + img1.Width, MatType.CV_8UC3);
                    using Mat left = new Mat(img3, new Rect(0, 0, img1.Width, img1.Height));
                    using Mat right = new Mat(img3, new Rect(img1.Width, 0, img2.Width, img2.Height));
                    img1.CopyTo(left);
                    img2.CopyTo(right);
                    mask.GetArray(out byte[] maskBytes);
                    // draw the matched point pairs
                    Cv2.DrawMatches(img1, keypoints1, img2, keypoints2, goodMatchesList, img3, Scalar.All(-1), Scalar.All(-1), maskBytes, DrawMatchesFlags.NotDrawSinglePoints);


                    List<List<Point>> listOfListOfPoint2D = new List<List<Point>>();
                    List<Point> listOfPoint2D = new List<Point>
                            {
                                new Point(sceneCorners[0].X + img1.Cols, sceneCorners[0].Y),
                                new Point(sceneCorners[1].X + img1.Cols, sceneCorners[1].Y),
                                new Point(sceneCorners[2].X + img1.Cols, sceneCorners[2].Y),
                                new Point(sceneCorners[3].X + img1.Cols, sceneCorners[3].Y)
                            }; // scene corners shifted right by the width of img1
                    listOfListOfPoint2D.Add(listOfPoint2D);
                    img3.Polylines(listOfListOfPoint2D, true, Scalar.LimeGreen, 2); // draw the projected object outline in the scene


                    //This works too
                    //Cv2.Line(img3, scene_corners[0] + new Point2d(img1.Cols, 0), scene_corners[1] + new Point2d(img1.Cols, 0), Scalar.LimeGreen);
                    //Cv2.Line(img3, scene_corners[1] + new Point2d(img1.Cols, 0), scene_corners[2] + new Point2d(img1.Cols, 0), Scalar.LimeGreen);
                    //Cv2.Line(img3, scene_corners[2] + new Point2d(img1.Cols, 0), scene_corners[3] + new Point2d(img1.Cols, 0), Scalar.LimeGreen);
                    //Cv2.Line(img3, scene_corners[3] + new Point2d(img1.Cols, 0), scene_corners[0] + new Point2d(img1.Cols, 0), Scalar.LimeGreen);


                    img3.SaveImage("Kaze_Output.png"); // save the result
                    Window.ShowImages(img3); // show the result
                }
            }
        }


        // unused: workaround to avoid an opencvsharp bug
        static Point2d[] MyPerspectiveTransform1(Point2f[] yourData, Mat transformationMatrix)
        {
            using Mat src = new Mat(yourData.Length, 1, MatType.CV_32FC2, yourData);
            using Mat dst = new Mat();
            Cv2.PerspectiveTransform(src, dst, transformationMatrix);
            dst.GetArray(out Point2f[] dstArray);
            Point2d[] result = Array.ConvertAll(dstArray, Point2fToPoint2d);
            return result;
        }


        // unused: fixed FromArray behavior
        static Point2d[] MyPerspectiveTransform2(Point2f[] yourData, Mat transformationMatrix)
        {
            using var s = Mat<Point2f>.FromArray(yourData);
            using var d = new Mat<Point2f>();
            Cv2.PerspectiveTransform(s, d, transformationMatrix);
            Point2f[] f = d.ToArray();
            return f.Select(Point2fToPoint2d).ToArray();
        }


        // new API
        static Point2d[] MyPerspectiveTransform3(Point2f[] yourData, Mat transformationMatrix)
        {
            Point2f[] ret = Cv2.PerspectiveTransform(yourData, transformationMatrix);
            return ret.Select(Point2fToPoint2d).ToArray();
        }
        // mask entries that have a unique match are set to 255 (white)
        static int VoteForSizeAndOrientation(KeyPoint[] modelKeyPoints, KeyPoint[] observedKeyPoints, DMatch[][] matches, Mat mask, float scaleIncrement, int rotationBins)
        {
            int idx = 0;
            int nonZeroCount = 0;
            byte[] maskMat = new byte[mask.Rows];
            GCHandle maskHandle = GCHandle.Alloc(maskMat, GCHandleType.Pinned);
            using (Mat m = new Mat(mask.Rows, 1, MatType.CV_8U, maskHandle.AddrOfPinnedObject()))
            {
                mask.CopyTo(m);
                List<float> logScale = new List<float>(); // log scale ratios
                List<float> rotations = new List<float>(); // rotation angles
                double s, maxS, minS, r;
                maxS = -1.0e-10f; minS = 1.0e10f;


                // if you get an exception here, it's because you're passing in the model and observed keypoints backwards. Just switch the order.
                for (int i = 0; i < maskMat.Length; i++) // iterate over the matches that passed the ratio test
                {
                    if (maskMat[i] > 0)
                    {
                        KeyPoint observedKeyPoint = observedKeyPoints[i]; // matched keypoint in the observed image
                        KeyPoint modelKeyPoint = modelKeyPoints[matches[i][0].TrainIdx]; // corresponding keypoint on the model
                        s = Math.Log10(observedKeyPoint.Size / modelKeyPoint.Size); // log10 scale ratio
                        logScale.Add((float)s);
                        maxS = s > maxS ? s : maxS;
                        minS = s < minS ? s : minS;


                        r = observedKeyPoint.Angle - modelKeyPoint.Angle;
                        r = r < 0.0f ? r + 360.0f : r;
                        rotations.Add((float)r);
                    }
                }


                int scaleBinSize = (int)Math.Ceiling((maxS - minS) / Math.Log10(scaleIncrement));
                if (scaleBinSize < 2)
                    scaleBinSize = 2;
                float[] scaleRanges = { (float)minS, (float)(minS + scaleBinSize + Math.Log10(scaleIncrement)) };


                using var scalesMat = new Mat<float>(rows: logScale.Count, cols: 1, data: logScale.ToArray());
                using var rotationsMat = new Mat<float>(rows: rotations.Count, cols: 1, data: rotations.ToArray());
                using var flagsMat = new Mat<float>(logScale.Count, 1);
                using Mat hist = new Mat();
                flagsMat.SetTo(new Scalar(0.0f));
                float[] flagsMatFloat1 = flagsMat.ToArray();


                int[] histSize = { scaleBinSize, rotationBins };
                float[] rotationRanges = { 0.0f, 360.0f };
                int[] channels = { 0, 1 };
                Rangef[] ranges = { new Rangef(scaleRanges[0], scaleRanges[1]), new Rangef(rotations.Min(), rotations.Max()) };


                Mat[] arrs = { scalesMat, rotationsMat };
                Cv2.CalcHist(arrs, channels, null, hist, 2, histSize, ranges);
                Cv2.MinMaxLoc(hist, out double minVal, out double maxVal);


                Cv2.Threshold(hist, hist, maxVal * 0.5, 0, ThresholdTypes.Tozero);
                Cv2.CalcBackProject(arrs, channels, hist, flagsMat, ranges);


                MatIndexer<float> flagsMatIndexer = flagsMat.GetIndexer();


                for (int i = 0; i < maskMat.Length; i++)
                {
                    if (maskMat[i] > 0)
                    {
                        if (flagsMatIndexer[idx++] != 0.0f)
                        {
                            nonZeroCount++;
                        }
                        else
                            maskMat[i] = 0;
                    }
                }
                m.CopyTo(mask);
            }
            maskHandle.Free();


            return nonZeroCount;
        }
        // vote for uniqueness (Lowe's ratio test / NNDR)
        private static void VoteForUniqueness(DMatch[][] matches, Mat mask, float uniqnessThreshold = 0.80f)
        {
            byte[] maskData = new byte[matches.Length];
            GCHandle maskHandle = GCHandle.Alloc(maskData, GCHandleType.Pinned); // pins the managed array so the GC cannot move or collect it
            using (Mat m = new Mat(matches.Length, 1, MatType.CV_8U, maskHandle.AddrOfPinnedObject()))
            {
                mask.CopyTo(m);
                for (int i = 0; i < matches.Length; i++)
                {
                    // This is also known as NNDR, the Nearest Neighbor Distance Ratio
                    if ((matches[i][0].Distance / matches[i][1].Distance) <= uniqnessThreshold)
                        maskData[i] = 255; // unique match
                    else
                        maskData[i] = 0; // ambiguous match
                }
                m.CopyTo(mask);
            }
            maskHandle.Free();
        }
    }
}
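
Once enough verified correspondences survive the voting, the homography itself is simple to use: four or more point pairs in, a 3x3 matrix out, which can then warp one image into the other's coordinate frame. A compact sketch with hypothetical point pairs:

var srcPts = new[] { new Point2d(0, 0), new Point2d(100, 0), new Point2d(100, 100), new Point2d(0, 100) };
var dstPts = new[] { new Point2d(10, 20), new Point2d(120, 15), new Point2d(130, 130), new Point2d(5, 110) };
using Mat h = Cv2.FindHomography(srcPts, dstPts, HomographyMethods.Ransac, 1.5);
using var src = new Mat(ImagePath.SurfBox);
using var warped = new Mat();
Cv2.WarpPerspective(src, warped, h, src.Size()); // map src into the destination frame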




#13. Mat submatrix operations
using System;
using System.Threading.Tasks;
using OpenCvSharp;
using SampleBase;


namespace SamplesCore
{
    /// <summary>
    /// Mat submatrix operations
    /// </summary>
    class MatOperations : ConsoleTestBase
    {
        public override void RunTest()
        {
            SubMat();
            RowColRangeOperation();
            RowColOperation();
        }


        /// <summary>
        /// Submatrix operations
        /// </summary>
        private void SubMat()
        {
            using var src = Cv2.ImRead(ImagePath.Lenna); // read the source image


            // Assign a small image to part of the Mat
            using var small = new Mat();
            Cv2.Resize(src, small, new Size(100, 100)); // shrink the source image
            src[10, 110, 10, 110] = small; // paste the small image into the source
            src[370, 470, 400, 500] = small.T(); // paste a transposed copy into another region
            // ↑ this is the same as:
            // small.T().CopyTo(src[370, 470, 400, 500]);


            // Get a partial Mat (similar to cvSetImageROI)
            Mat part = src[200, 400, 200, 360];
            // Invert the pixel values of the partial view
            Cv2.BitwiseNot(part, part);


            // Fill the region (rows 50..100, cols 400..450) with the value 128
            part = src.SubMat(50, 100, 400, 450);
            part.SetTo(128);


            using (new Window("SubMat", src))//显示结果
            {
                Cv2.WaitKey();
            }


            part.Dispose();
        }


        /// <summary>
        /// Submatrix operations
        /// </summary>
        private void RowColRangeOperation()
        {
            using var src = Cv2.ImRead(ImagePath.Lenna); // source image


            Cv2.GaussianBlur(
                src.RowRange(100, 200),
                src.RowRange(200, 300),
                new Size(7, 7), 20); // Gaussian blur from one row range into another


            Cv2.GaussianBlur(
                src.ColRange(200, 300),
                src.ColRange(100, 200),
                new Size(7, 7), 20);


            using (new Window("RowColRangeOperation", src))
            {
                Cv2.WaitKey();
            }
        }


        /// <summary>
        /// Submatrix expression operations
        /// </summary>
        private void RowColOperation()
        {
            using var src = Cv2.ImRead(ImagePath.Lenna); // read the source image


            var rand = new Random();
            // randomly swap pairs of rows
            for (int i = 0; i < 200; i++)
            {
                int c1 = rand.Next(100, 400);
                int c2 = rand.Next(100, 400);
                using Mat temp = src.Row(c1).Clone(); // keep a copy of row c1
                src.Row(c2).CopyTo(src.Row(c1));
                temp.CopyTo(src.Row(c2));
            }


            ((Mat)~src.ColRange(450, 500)).CopyTo(src.ColRange(0, 50)); // copy an inverted column range into another


            src.RowRange(450, 460).SetTo(new Scalar(0, 0, 255)); // paint rows 450..460 red


            using (new Window("RowColOperation", src))
            {
                Cv2.WaitKey();
            }
        }
    }
}
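
The four-argument indexer used above (rowStart, rowEnd, colStart, colEnd) also has a Rect-based equivalent, which often reads more naturally. A quick sketch:

using var src = Cv2.ImRead(ImagePath.Lenna);
var roi = new Rect(200, 200, 160, 200); // x, y, width, height
Mat part = src[roi];                    // same view as src[200, 400, 200, 360]
Cv2.BitwiseNot(part, part);             // modifies src, because part is a view, not a copy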




#14. Multidimensional scaling (MDS)
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using OpenCvSharp;
using SampleBase;


namespace SamplesCore
{
    /// <summary>
    /// Multidimensional Scaling (MDS)
    /// for C++ cv::Mat testing
    /// </summary>
    class MDS : ConsoleTestBase
    {
        /// <summary>
        /// Distances between 10 US cities
        /// </summary>
        /// <remarks>
        /// e.g. the linear distance between Atlanta and Chicago is 587 km.
        /// </remarks>
        static readonly double[,] CityDistance = 
        {
            /*Atlanta*/         {0,      587,    1212,   701,    1936,   604,    748,    2139,   2182,   543},
            /*Chicago*/         {587,    0,      920,    940,    1745,   1188,   713,    1858,   1737,   597},
            /*Denver*/          {1212,   920,    0,      879,    831,    1726,   1631,   949,    1021,   1494},
            /*Houston*/         {701,    940,    879,    0,      1734,   968,    1420,   1645,   1891,   1220},
            /*Los Angeles*/     {1936,   1745,   831,    1734,   0,      2339,   2451,   347,    959,    2300},
            /*Miami*/           {604,    1188,   1726,   968,    2339,   0,      1092,   2594,   2734,   923},
            /*New York*/        {748,    713,    1631,   1420,   2451,   1092,   0,      2571,   2408,   205},
            /*San Francisco*/   {2139,   1858,   949,    1645,   347,    2594,   2571,   0,      678,    2442},
            /*Seattle*/         {2182,   1737,   1021,   1891,   959,    2734,   2408,   678,    0,      2329},
            /*Washington D.C.*/ {543,    597,    1494,   1220,   2300,   923,    205,    2442,   2329,   0}
        };


        /// <summary>
        /// City names
        /// </summary>
        static readonly string[] CityNames = 
        {
            "Atlanta","Chicago","Denver","Houston","Los Angeles","Miami","New York","San Francisco","Seattle","Washington D.C."
        };




        /// <summary>
        /// Classical Multidimensional Scaling
        /// </summary>
        public override void RunTest()
        {
            // Create the distance matrix
            int size = CityDistance.GetLength(0);
            var t = new Mat(size, size, MatType.CV_64FC1, CityDistance);
            // add Torgerson's additive constant to t
            double torgarson = Torgerson(t);
            t += torgarson;
            // square all elements of t
            t = t.Mul(t);


            // centering matrix G
            using var g = CenteringMatrix(size);
            // calculate the inner-product matrix B
            using var b = g * t * g.T() * -0.5;
            // calculate the eigenvalues and eigenvectors of B
            using var values = new Mat();
            using var vectors = new Mat();
            Cv2.Eigen(b, values, vectors);
            for (int r = 0; r < values.Rows; r++)
            {
                if (values.Get<double>(r) < 0)
                    values.Set<double>(r, 0);
            }


            //Console.WriteLine(values.Dump());


            // multiply each eigenvector by sqrt(eigenvalue)
            using var result = vectors.RowRange(0, 2);
            {
                var at = result.GetGenericIndexer<double>();
                for (int r = 0; r < result.Rows; r++)
                {
                    for (int c = 0; c < result.Cols; c++)
                    {
                        at[r, c] *= Math.Sqrt(values.Get<double>(r));
                    }
                }
            }


            // scaling: normalize into [0, 800]
            Cv2.Normalize(result, result, 0, 800, NormTypes.MinMax);


            // opens a window
            using (Mat img = Mat.Zeros(600, 800, MatType.CV_8UC3))
            using (var window = new Window("City Location Estimation"))
            {
                var at = result.GetGenericIndexer<double>();
                for (int c = 0; c < size; c++)
                {
                    double x = at[0, c];
                    double y = at[1, c];
                    x = x * 0.7 + img.Width * 0.1;
                    y = y * 0.7 + img.Height * 0.1;
                    img.Circle((int)x, (int)y, 5, Scalar.Red, -1);
                    Point textPos = new Point(x + 5, y + 10);
                    img.PutText(CityNames[c], textPos, HersheyFonts.HersheySimplex, 0.5, Scalar.White);
                }
                window.Image = img;
                Cv2.WaitKey();
            }
        }


        /// <summary>
        /// Returns Torgerson's additive constant
        /// </summary>
        /// <param name="mat">distance matrix</param>
        /// <returns>the additive constant</returns>
        private double Torgerson(Mat mat)
        {
            if (mat == null)
                throw new ArgumentNullException();
            if (mat.Rows != mat.Cols) // must be a square matrix
                throw new ArgumentException();


            int n = mat.Rows;
            // additive constant in case of negative values
            Cv2.MinMaxLoc(-mat, out _, out double max);
            double c2 = max;
            // additive constant from the triangle inequality
            double c1 = 0;


            var at = mat.GetGenericIndexer<double>(); // typed indexer with getters/setters for element access
            for (int i = 0; i < n; i++)
            {
                for (int j = 0; j < n; j++)
                {
                    for (int k = 0; k < n; k++)
                    {
                        double v = at[i, k] - at[i, j] - at[j, k];
                        if (v > c1)
                        {
                            c1 = v;
                        }
                    }
                }
            }
            return Math.Max(Math.Max(c1, c2), 0);
        }


        /// <summary>
        /// Returns the centering matrix
        /// </summary>
        /// <param name="n">Size of matrix</param>
        /// <returns>the centering matrix</returns>
        private Mat CenteringMatrix(int n)
        {
            using var eye = Mat.Eye(n, n, MatType.CV_64FC1) ;
            return (eye - 1.0 / n);
        }
    }
}
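
For reference, the algebra RunTest performs is classical (Torgerson) MDS double-centering:

B = -\tfrac{1}{2}\, G\, D^{(2)}\, G^{\top}, \qquad G = I_n - \tfrac{1}{n}\mathbf{1}\mathbf{1}^{\top}, \qquad X = V_2\,\Lambda_2^{1/2}

where D^(2) is the matrix of squared distances and V_2, Λ_2 hold the top-two eigenvectors and eigenvalues of B; the rows of X are the recovered 2-D coordinates (the code then clamps negative eigenvalues to zero and rescales the result for display).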




#15. Channel split/merge test
using OpenCvSharp;
using SampleBase;


namespace SamplesCore
{
    class MergeSplitSample : ConsoleTestBase
    {
        public override void RunTest()
        {
            // Split/Merge Test
            {
                using var src = new Mat(ImagePath.Lenna, ImreadModes.Color); // source image


                // Split into B, G, R planes
                Cv2.Split(src, out var planes);


                Cv2.ImShow("planes 0", planes[0]);
                Cv2.ImShow("planes 1", planes[1]);
                Cv2.ImShow("planes 2", planes[2]);
                Cv2.WaitKey();
                Cv2.DestroyAllWindows();


                // Invert the G plane
                Cv2.BitwiseNot(planes[1], planes[1]);


                // Merge the planes back together
                using var merged = new Mat();
                Cv2.Merge(planes, merged);


                Cv2.ImShow("src", src);
                Cv2.ImShow("merged", merged);
                Cv2.WaitKey();
                Cv2.DestroyAllWindows();
            }


            // MixChannels test
            {
                using var rgba = new Mat(300, 300, MatType.CV_8UC4, new Scalar(50, 100, 150, 200)); // solid-color 4-channel image
                using var bgr = new Mat(rgba.Rows, rgba.Cols, MatType.CV_8UC3);
                using var alpha = new Mat(rgba.Rows, rgba.Cols, MatType.CV_8UC1);


                Mat[] input = { rgba }; // input: one 4-channel Mat
                Mat[] output = { bgr, alpha }; // outputs: a 3-channel BGR Mat plus a 1-channel alpha Mat
                // rgba[0] -> bgr[2], rgba[1] -> bgr[1],
                // rgba[2] -> bgr[0], rgba[3] -> alpha[0]
                int[] fromTo = { 0, 2, 1, 1, 2, 0, 3, 3 };
                Cv2.MixChannels(input, output, fromTo); // remap the channels


                Cv2.ImShow("rgba", rgba);
                Cv2.ImShow("bgr", bgr);
                Cv2.ImShow("alpha", alpha);
                Cv2.WaitKey();
                Cv2.DestroyAllWindows();
            }
        }
    }
}


#16. Morphology: dilation
using OpenCvSharp;
using SampleBase;
using System.Threading.Tasks;


namespace SamplesCore
{
    class MorphologySample : ConsoleTestBase
    {
        public override void RunTest()
        {
            using var gray = new Mat(ImagePath.Lenna, ImreadModes.Grayscale); // grayscale input
            using var binary = new Mat(); // binary image
            using var dilate1 = new Mat(); // dilation result 1
            using var dilate2 = new Mat();
            byte[] kernelValues = {0, 1, 0, 1, 1, 1, 0, 1, 0}; // kernel values: cross (+)
            using var kernel = new Mat(3, 3, MatType.CV_8UC1, kernelValues); // 3x3 structuring element


            // Binarize (Otsu)
            Cv2.Threshold(gray, binary, 0, 255, ThresholdTypes.Otsu);


            // dilate with the default 3x3 rectangular kernel (null)
            Cv2.Dilate(binary, dilate1, null);
            // dilate with the cross-shaped kernel
            Cv2.Dilate(binary, dilate2, kernel);


            Cv2.ImShow("binary", binary);
            Cv2.ImShow("dilate (kernel = null)", dilate1);
            Cv2.ImShow("dilate (kernel = +)", dilate2);
            Cv2.WaitKey(0);
            Cv2.DestroyAllWindows();
        }
    }
}
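
Dilate is one of several operators that accept the same structuring element. A sketch of opening and closing with the cross kernel via Cv2.MorphologyEx (my addition; the sample itself only dilates):

using var opened = new Mat();
using var closed = new Mat();
Cv2.MorphologyEx(binary, opened, MorphTypes.Open, kernel);  // erode then dilate: removes small bright specks
Cv2.MorphologyEx(binary, closed, MorphTypes.Close, kernel); // dilate then erode: fills small dark holes
Cv2.ImShow("open (kernel = +)", opened);
Cv2.ImShow("close (kernel = +)", closed);
Cv2.WaitKey(0);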


#17. Maximally Stable Extremal Regions (MSER): a segmentation-and-matching algorithm similar in spirit to watershed segmentation. It has an affine invariance that SIFT, SURF, and ORB features lack, and it has been widely used in image segmentation and matching in recent years.
//https://blog.csdn.net/hust_bochu_xuchao/article/details/52230694
//https://blog.csdn.net/qq_41685265/article/details/104096152
using OpenCvSharp;
using SampleBase;


namespace SamplesCore
{
    /// <summary>
    /// Maximally Stable Extremal Regions
    /// </summary>
    class MSERSample : ConsoleTestBase
    {
        public override void RunTest()
        {
            using Mat src = new Mat(ImagePath.Distortion, ImreadModes.Color); // source image
            using Mat gray = new Mat();
            using Mat dst = src.Clone();
            Cv2.CvtColor(src, gray, ColorConversionCodes.BGR2GRAY); // grayscale


            CppStyleMSER(gray, dst);  // C++ style


            using (new Window("MSER src", src))
            using (new Window("MSER gray", gray))
            using (new Window("MSER dst", dst))
            {
                Cv2.WaitKey();
            }
        }
        
        /// <summary>
        /// Extracts MSER by C++-style code (cv::MSER)
        /// </summary>
        /// <param name="gray">grayscale input</param>
        /// <param name="dst">image to draw the regions on</param>
        private void CppStyleMSER(Mat gray, Mat dst)
        {
            MSER mser = MSER.Create();
            mser.DetectRegions(gray, out Point[][] contours, out _); // extract the regions (the detector's min/max region sizes can be set to limit the number of detected features; see the sketch below)
            foreach (Point[] pts in contours)
            {
                Scalar color = Scalar.RandomColor(); // one random color per region
                foreach (Point p in pts)
                {
                    dst.Circle(p, 1, color); // draw each region point
                }
            }
        }
    }
}
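
As noted in the comment, the detector's parameters bound region size and stability, which is how you limit the number of regions. A sketch passing them explicitly (the values shown are OpenCV's documented defaults, repeated here for illustration):

using MSER mser = MSER.Create(
    5,      // delta: stability threshold step
    60,     // minArea: reject regions smaller than this (in pixels)
    14400); // maxArea: reject regions larger than this
mser.DetectRegions(gray, out Point[][] contours, out Rect[] bboxes);
Console.WriteLine($"{contours.Length} regions detected");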


#18. Thresholding plain arrays with Cv2.Threshold
using System;
using System.Collections.Generic;
using OpenCvSharp;
using SampleBase;


namespace SamplesCore
{
    class NormalArrayOperations : ConsoleTestBase
    {
        public override void RunTest()
        {
            Threshold1();
            Threshold2();
            Threshold3();
        }
  
        /// <summary>
        /// Run thresholding on a byte array
        /// </summary>
        private void Threshold1()
        {
            const int T = 3; // threshold
            const int Max = 5; // max value


            byte[] input = {1, 2, 3, 4, 5, };
            var output = new List<byte>();


            Cv2.Threshold(InputArray.Create(input), OutputArray.Create(output),
                T, Max, ThresholdTypes.Binary); // threshold the byte array


            Console.WriteLine("Threshold: {0}", T);
            Console.WriteLine("input:  {0}", string.Join(",", input));
            Console.WriteLine("output: {0}", string.Join(",", output));
        }


        /// <summary>
        /// Run thresholding on a short array
        /// </summary>
        private void Threshold2()
        {
            const int T = 150;
            const int Max = 250;


            short[] input = { 50, 100, 150, 200, 250, };
            var output = new List<short>();


            Cv2.Threshold(InputArray.Create(input), OutputArray.Create(output),
                T, Max, ThresholdTypes.Binary);


            Console.WriteLine("Threshold: {0}", T);
            Console.WriteLine("input:  {0}", string.Join(",", input));
            Console.WriteLine("output: {0}", string.Join(",", output));
        }


        /// <summary>
        /// Run thresholding on a struct array
        /// </summary>
        private void Threshold3()
        {
            const double T = 2000;
            const double Max = 5000;


            // Threshold does not support Point (int), so run thresholding on a Point2f struct array
            Point2f[] input = { 
                                  new Point2f(1000, 1500),
                                  new Point2f(2000, 2001),
                                  new Point2f(500, 5000), 
                              };
            var output = new List<Point2f>();


            Cv2.Threshold(InputArray.Create(input), OutputArray.Create(output),
                T, Max, ThresholdTypes.Binary);


            Console.WriteLine("Threshold: {0}", T);
            Console.WriteLine("input:  {0}", string.Join(",", input));
            Console.WriteLine("output: {0}", string.Join(",", output));
        }
    }
}


#19. Deep learning with OpenVINO (https://zhuanlan.zhihu.com/p/91882515)
using System.Collections.Generic;
using OpenCvSharp;
using OpenCvSharp.Dnn;
using SampleBase;


namespace SamplesCore
{
    /// <summary>
    /// To run this example, first you need to compile OpenCV with Intel OpenVINO.
    /// Download the face detection model available here: https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/face-detection-adas-0001
    /// Add the files to the bin folder
    /// </summary>
    internal class OpenVinoFaceDetection : ConsoleTestBase
    {
        const string modelFace = "face-detection-adas-0001.bin"; // network weights
        const string modelFaceTxt = "face-detection-adas-0001.xml"; // network structure description
        const string sampleImage = "sample.jpg";
        const string outputLoc = "sample_output.jpg";


        public override void RunTest()
        {
            using var frame = Cv2.ImRead(sampleImage); // input image
            int frameHeight = frame.Rows;
            int frameWidth = frame.Cols;


            using var netFace = CvDnn.ReadNet(modelFace, modelFaceTxt);  //读取模型    
            netFace.SetPreferableBackend(Backend.INFERENCE_ENGINE);
            netFace.SetPreferableTarget(Target.CPU);
      
            using var blob = CvDnn.BlobFromImage(frame, 1.0, new Size(672, 384), new Scalar(0, 0, 0), false, false);
      netFace.SetInput(blob);//设置输入
      
            using (var detection = netFace.Forward()) // run face detection
            {
                using var detectionMat = new Mat(detection.Size(2), detection.Size(3), MatType.CV_32F, detection.Ptr(0)); // detection matrix


                for (int i = 0; i < detectionMat.Rows; i++)
                {
                    float confidence = detectionMat.At<float>(i, 2); // confidence


                    if (confidence > 0.7)
                    {
                        int x1 = (int)(detectionMat.At<float>(i, 3) * frameWidth); // xmin
                        int y1 = (int)(detectionMat.At<float>(i, 4) * frameHeight); // ymin
                        int x2 = (int)(detectionMat.At<float>(i, 5) * frameWidth); // xmax
                        int y2 = (int)(detectionMat.At<float>(i, 6) * frameHeight); // ymax


                        var roi = new Rect(x1, y1, (x2 - x1), (y2 - y1));
                        roi = AdjustBoundingBox(roi);
                        Cv2.Rectangle(frame, roi, new Scalar(0, 255, 0), 2, LineTypes.Link4); // draw the rectangle
                    }
                }
            }
                
            var finalOutput = outputLoc;
            Cv2.ImWrite(finalOutput, frame);
        }
        // Adjust (expand) the bounding box
        private Rect AdjustBoundingBox(Rect faceRect)
        {
            int w = faceRect.Width;
            int h = faceRect.Height;


            faceRect.X -= (int)(0.067 * w);
            faceRect.Y -= (int)(0.028 * h);


            faceRect.Width += (int)(0.15 * w);
            faceRect.Height += (int)(0.13 * h);


            if (faceRect.Width < faceRect.Height)
            {
                var dx = (faceRect.Height - faceRect.Width);
                faceRect.X -= dx / 2;
                faceRect.Width += dx;
            }
            else
            {
                var dy = (faceRect.Width - faceRect.Height);
                faceRect.Y -= dy / 2;
                faceRect.Height += dy;
            }
            return faceRect;
        }
    }
}


#19. Perspective Transform Sample
using OpenCvSharp;
using System;
using System.Collections.Generic;
using SampleBase;


namespace SamplesCore
{
    public class PerspectiveTransformSample : ConsoleTestBase
    {
        private readonly List<Point2f> point2Fs = new List<Point2f>();


        private Point2f[] srcPoints = new Point2f[] {
            new Point2f(0, 0),
            new Point2f(0, 0),
            new Point2f(0, 0),
            new Point2f(0, 0),
        }; // four points selected with the mouse on the source image


        private readonly Point2f[] dstPoints = new Point2f[] {
            new Point2f(0, 0),
            new Point2f(0, 480),
            new Point2f(640, 480),
            new Point2f(640, 0),
        }; // the four corner points of the destination image


        private Mat OriginalImage; // original image


        public override void RunTest()
        {
            OriginalImage = new Mat(ImagePath.SurfBoxinscene, ImreadModes.AnyColor); // read the image
            using var Window = new Window("result", OriginalImage); // show the original image


            Cv2.SetMouseCallback(Window.Name, CallbackOpenCVAnnotate); // register the mouse callback
            Window.WaitKey();
        }


        private void CallbackOpenCVAnnotate(MouseEventTypes e, int x, int y, MouseEventFlags flags, IntPtr userdata)
        {
            if (e == MouseEventTypes.LButtonDown)
            {
                point2Fs.Add(new Point2f(x, y)); // collect four left-clicks
                if (point2Fs.Count == 4)
                {
                    srcPoints = point2Fs.ToArray(); // list to array
                    using var matrix = Cv2.GetPerspectiveTransform(srcPoints, dstPoints); // compute the perspective transform
                    using var dst = new Mat(new Size(640, 480), MatType.CV_8UC3);
                    // warp the image: the quad enclosed by the four source points is mapped onto the destination image
                    Cv2.WarpPerspective(OriginalImage, dst, matrix, dst.Size());
                    using var dsts = new Window("dst", dst); // show the destination image
                    point2Fs.Clear();
                    Window.WaitKey();
                }
            }
        }
    }
}
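A minimal non-interactive sketch of the same idea, with the four source corners hard-coded instead of mouse-picked (the file name input.png and the corner values are hypothetical):

using OpenCvSharp;

using var src = Cv2.ImRead("input.png"); // hypothetical input image
var srcPts = new[] { new Point2f(50, 40), new Point2f(60, 420),
                     new Point2f(590, 430), new Point2f(580, 30) }; // assumed corner picks (same order as dstPts)
var dstPts = new[] { new Point2f(0, 0), new Point2f(0, 480),
                     new Point2f(640, 480), new Point2f(640, 0) };
using var matrix = Cv2.GetPerspectiveTransform(srcPts, dstPts);
using var warped = new Mat();
Cv2.WarpPerspective(src, warped, matrix, new Size(640, 480)); // rectify the quad to 640x480
Cv2.ImWrite("warped.png", warped);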


#20. Edge-preserving filter, detail-enhance filter, pencil-sketch filter, stylization (watercolor) filter
//https://learnopencv.com/non-photorealistic-rendering-using-opencv-python-c/
//https://blog.csdn.net/ellispy/article/details/118974305
using OpenCvSharp;
using SampleBase;


namespace SamplesCore
{
    /// <summary>
    /// Sample of photo module methods
    /// </summary>
    class PhotoMethods : ConsoleTestBase
    {
        public override void RunTest()
        {
            using var src = new Mat(ImagePath.Fruits, ImreadModes.Color); // source image


            using var normconv = new Mat(); 
            using var recursFiltered = new Mat();
      
            //EdgePreservingFilter(InputArray src, OutputArray dst, EdgePreservingMethods flags = EdgePreservingMethods.RecursFilter, float sigmaS = 60, float sigmaR = 0.4F);
            // An edge-preserving filter smooths an image while keeping its edges intact. Well-known examples
            // include the bilateral filter, the guided image filter and the weighted-least-squares filter.
            // Bilateral and mean-shift filtering are too expensive for real-time edge-preserving smoothing,
            // which limits their use; OpenCV therefore also implements this fast edge-preserving filter.
            //src: input 8-bit 3-channel image.
            //dst: output 8-bit 3-channel image.
            //flags: edge-preserving method, cv::RECURS_FILTER or cv::NORMCONV_FILTER.
            //sigmaS: range 0-200 (spatial window).
            //sigmaR: range 0-1 (color range).
            // With sigmaS fixed, a larger sigmaR smooths more strongly;
            // with sigmaR fixed, a larger sigmaS blurs more;
            // when sigmaR is very small, changing sigmaS has little visible effect.
            //https://blog.csdn.net/kingkee/article/details/95942906
            Cv2.EdgePreservingFilter(src, normconv, EdgePreservingMethods.NormconvFilter); // normalized-convolution filter
            Cv2.EdgePreservingFilter(src, recursFiltered, EdgePreservingMethods.RecursFilter); // recursive filter
      
            // Image effects: OpenCV non-photorealistic rendering (Stylization, edgePreservingFilter, detailEnhance, pencilSketch)
            // Detail-enhance filter (DetailEnhance): as the name suggests, it enhances detail and makes the image look sharper.
            using var detailEnhance = new Mat();
            Cv2.DetailEnhance(src, detailEnhance);

            // Pencil-sketch filter (PencilSketch): produces output that looks like a pencil drawing.
            // There are two outputs: one from applying the filter to a grayscale version of the input,
            // the other from applying it to the color input. (The original author was frankly unimpressed with the results.)
            using var pencil1 = new Mat(); 
            using var pencil2 = new Mat();
            Cv2.PencilSketch(src, pencil1, pencil2);

            // Stylization filter (Stylization): produces output that looks like a watercolor painting.
            using var stylized = new Mat();
            Cv2.Stylization(src, stylized);


            using (new Window("src", src))
            using (new Window("edgePreservingFilter - NormconvFilter", normconv))
            using (new Window("edgePreservingFilter - RecursFilter", recursFiltered))
            using (new Window("detailEnhance", detailEnhance))
            using (new Window("pencilSketch grayscale", pencil1))
            using (new Window("pencilSketch color", pencil2))
            using (new Window("stylized", stylized))
            {
                Cv2.WaitKey();
            }
        }
    }
}
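All of these photo-module calls also take sigmaS/sigmaR tuning parameters, as in the EdgePreservingFilter signature quoted above. A minimal sketch that passes them explicitly; the parameter names are taken from that signature and assumed to match your OpenCvSharp version:

using var src2 = new Mat(ImagePath.Fruits, ImreadModes.Color);
using var soft = new Mat();
using var sketchGray = new Mat();
using var sketchColor = new Mat();
Cv2.EdgePreservingFilter(src2, soft, EdgePreservingMethods.RecursFilter, sigmaS: 100, sigmaR: 0.8f); // larger sigmas -> stronger smoothing
Cv2.PencilSketch(src2, sketchGray, sketchColor, sigmaS: 60, sigmaR: 0.07f, shadeFactor: 0.05f); // higher shadeFactor -> darker strokes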


#21. Pixel access
using System;
using System.Diagnostics;
using OpenCvSharp;
using SampleBase;


namespace SamplesCore
{
    /// <summary>
    /// Swaps the B and R channels
    /// </summary>
    class PixelAccess : ConsoleTestBase
    {
        public override void RunTest()
        {
            Console.WriteLine("Get/Set: {0}ms", MeasureTime(GetSet));
            Console.WriteLine("GenericIndexer: {0}ms", MeasureTime(GenericIndexer)); // access/set elements via an indexer
            Console.WriteLine("TypeSpecificMat: {0}ms", MeasureTime(TypeSpecificMat));
            Console.Read();
        }


        /// <summary>
        /// Slow
        /// </summary>
        private void GetSet()
        {
            using var mat = new Mat(ImagePath.Lenna, ImreadModes.Color); // source image
            for (int y = 0; y < mat.Height; y++)
            {
                for (int x = 0; x < mat.Width; x++)
                {
                    Vec3b color = mat.Get<Vec3b>(y, x); // get the pixel color
                    Vec3b newColor = new Vec3b(color.Item2, color.Item1, color.Item0);
                    mat.Set(y, x, newColor); // set the pixel color
                }
            }
            //Cv2.ImShow("Slow", mat);
            //Cv2.WaitKey(0);
            //Cv2.DestroyAllWindows();
        }


        /// <summary>
        /// Reasonably fast
        /// </summary>
        private void GenericIndexer()
        {
            using var mat = new Mat(ImagePath.Lenna, ImreadModes.Color); // source image
            var indexer = mat.GetGenericIndexer<Vec3b>(); // type-specific indexer with getters/setters for each matrix element
            for (int y = 0; y < mat.Height; y++)
            {
                for (int x = 0; x < mat.Width; x++)
                {
                    Vec3b color = indexer[y, x]; // read the pixel through the indexer
                    Vec3b newColor = new Vec3b(color.Item2, color.Item1, color.Item0);
                    indexer[y, x] = newColor; // write it back through the indexer
                }
            }
            //Cv2.ImShow("GenericIndexer", mat);
            //Cv2.WaitKey(0);
            //Cv2.DestroyAllWindows();
        }


        /// <summary>
        /// Faster
        /// </summary>
        private void TypeSpecificMat()
        {
            using var mat = new Mat(ImagePath.Lenna, ImreadModes.Color); // source image
            using var mat3 = new Mat<Vec3b>(mat); // type-specific Mat
            var indexer = mat3.GetIndexer(); // type-specific indexer with getters/setters for each matrix element
            for (int y = 0; y < mat.Height; y++)
            {
                for (int x = 0; x < mat.Width; x++)
                {
                    Vec3b color = indexer[y, x]; // read
                    Vec3b newColor = new Vec3b(color.Item2, color.Item1, color.Item0);
                    indexer[y, x] = newColor; // write
                }
            }
            //Cv2.ImShow("TypeSpecificMat", mat);
            //Cv2.WaitKey(0);
            //Cv2.DestroyAllWindows();
        }
        // measure a method's execution time
        private static long MeasureTime(Action action)
        {
            var watch = Stopwatch.StartNew();
            action();
            watch.Stop();
            return watch.ElapsedMilliseconds;
        }
    }
}
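The three loops above are written out to compare element-access speed; for a plain B/R swap a single conversion call is simpler and much faster. A minimal sketch on the same image:

using var bgr = new Mat(ImagePath.Lenna, ImreadModes.Color);
using var rgb = new Mat();
Cv2.CvtColor(bgr, rgb, ColorConversionCodes.BGR2RGB); // swaps the B and R channels in one call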




#22. Caffe deep learning model inference (OpenPose)
using System.Collections.Generic;
using OpenCvSharp;
using OpenCvSharp.Dnn;
using SampleBase;


namespace SamplesCore
{
    /// <summary>
    /// To run this example first download the pose model available here: https://github.com/CMU-Perceptual-Computing-Lab/openpose/tree/master/models
    /// Add the files to the bin folder.
    /// </summary>
    internal class Pose : ConsoleTestBase
    {
        public override void RunTest()
        {
            const string model = "pose_iter_160000.caffemodel"; // weights file
            const string modelTxt = "pose_deploy_linevec_faster_4_stages.prototxt"; // config file
            const string sampleImage = "single.jpeg";
            const string outputLoc = "Output-Skeleton.jpg";
            const int nPoints = 15;
            const double thresh = 0.1;


            int[][] posePairs =
            {
                new[] {0, 1}, new[] {1, 2}, new[] {2, 3},
                new[] {3, 4}, new[] {1, 5}, new[] {5, 6},
                new[] {6, 7}, new[] {1, 14}, new[] {14, 8}, new[] {8, 9},
                new[] {9, 10}, new[] {14, 11}, new[] {11, 12}, new[] {12, 13},
            };
            
            using var frame = Cv2.ImRead(sampleImage);
            using var frameCopy = frame.Clone();
            int frameWidth = frame.Cols;
            int frameHeight = frame.Rows;


            const int inWidth = 368;
            const int inHeight = 368;


            using var net = CvDnn.ReadNetFromCaffe(modelTxt, model); // load the model
            net.SetPreferableBackend(Backend.OPENCV);
            net.SetPreferableTarget(Target.CPU);


            using var inpBlob = CvDnn.BlobFromImage(frame, 1.0 / 255, new Size(inWidth, inHeight), new Scalar(0, 0, 0), false, false);


            net.SetInput(inpBlob); // set the input


            using var output = net.Forward(); // forward pass
            int H = output.Size(2);
            int W = output.Size(3);


            var points = new List<Point>();


            for (int n = 0; n < nPoints; n++)
            {
                // Probability map of the corresponding body part.
                using var probMap = new Mat(H, W, MatType.CV_32F, output.Ptr(0, n));
                var p = new Point2f(-1,-1);


                Cv2.MinMaxLoc(probMap, out _, out var maxVal, out _, out var maxLoc);


                var x = (frameWidth * maxLoc.X) / W;
                var y = (frameHeight * maxLoc.Y) / H;


                if (maxVal > thresh)
                {
                    p = maxLoc;
                    p.X *= (float)frameWidth / W;
                    p.Y *= (float)frameHeight / H;


                    Cv2.Circle(frameCopy, (int)p.X, (int)p.Y, 8, new Scalar(0, 255, 255), -1);
                    Cv2.PutText(frameCopy, Cv2.Format(n), new Point((int)p.X, (int)p.Y), HersheyFonts.HersheyComplex, 1, new Scalar(0, 0, 255), 2);
                }


                points.Add((Point)p);
            }
            int nPairs = 14; //(POSE_PAIRS).Length / POSE_PAIRS[0].Length;


            for (int n = 0; n < nPairs; n++)
            {
                // look up the 2 connected body/hand parts
                Point partA = points[posePairs[n][0]];
                Point partB = points[posePairs[n][1]];


                if (partA.X <= 0 || partA.Y <= 0 || partB.X <= 0 || partB.Y <= 0)
                    continue;


                Cv2.Line(frame, partA, partB, new Scalar(0, 255, 255), 8);
                Cv2.Circle(frame, partA.X, partA.Y, 8, new Scalar(0, 0, 255), -1);
                Cv2.Circle(frame, partB.X, partB.Y, 8, new Scalar(0, 0, 255), -1);
            }
      
            var finalOutput = outputLoc;
            Cv2.ImWrite(finalOutput, frame);
        }
    }
}


#23. Seamless image cloning - seamlessClone
using OpenCvSharp;
using SampleBase;


namespace SamplesCore
{
    /// <summary>
    /// cv::seamlessClone
    /// </summary>
    class SeamlessClone : ConsoleTestBase
    {
        public override void RunTest()
        {
            Mat src = new Mat(ImagePath.Girl, ImreadModes.Color); // source image
            Mat dst = new Mat(ImagePath.Lenna, ImreadModes.Color); // destination image
            Mat src0 = src.Resize(dst.Size(), 0, 0, InterpolationFlags.Lanczos4); // resize the source to the destination size
            Mat mask = Mat.Zeros(src0.Size(), MatType.CV_8UC3); // mask initialized to the source size
            //https://www.jianshu.com/p/5b1f98f10518
            mask.Circle(200, 200, 100, Scalar.White, -1); // filled white circle; black (0) mask pixels are discarded, white (255) pixels are kept


            Mat blend1 = new Mat();
            Mat blend2 = new Mat();
            Mat blend3 = new Mat();
            //SeamlessClone(InputArray src, InputArray dst, InputArray? mask, Point p, OutputArray blend, SeamlessCloneMethods flags);
            //src    input 8-bit 3-channel image (the patch cut from the large image)
            //dst    input 8-bit 3-channel image (the background the patch is blended into)
            //mask   input 8-bit 1- or 3-channel image (mask over the object region)
            //p      position in dst where the object is placed
            //blend  output image, same size and type as dst
            //flags  cloning method: cv::NORMAL_CLONE, cv::MIXED_CLONE or cv::MONOCHROME_TRANSFER
            Cv2.SeamlessClone(
                src0, dst, mask, new Point(260, 270), blend1,
                SeamlessCloneMethods.NormalClone); // NORMAL_CLONE: the texture of dst is not preserved; the gradient of the target region comes from the source image alone
            Cv2.SeamlessClone(
                src0, dst, mask, new Point(260, 270), blend2,
                SeamlessCloneMethods.MonochromeTransfer); // MONOCHROME_TRANSFER: drops the source's color, keeping only its texture with the destination's color; useful e.g. for skin-texture filling
            Cv2.SeamlessClone(
                src0, dst, mask, new Point(260, 270), blend3,
                SeamlessCloneMethods.MixedClone); // MIXED_CLONE: preserves dst texture; the target-region gradient combines source and destination (dominant gradient)


            using (new Window("src", src0))
            using (new Window("dst", dst))
            using (new Window("mask", mask))
            using (new Window("blend NormalClone", blend1))
            using (new Window("blend MonochromeTransfer", blend2))
            using (new Window("blend MixedClone", blend3))
            {
                Cv2.WaitKey();
            }
        }
    }
}


#24. SIFT and SURF feature matching sample
//https://blog.csdn.net/qq_38338086/article/details/121673036
using OpenCvSharp;
using OpenCvSharp.Features2D;
using OpenCvSharp.XFeatures2D;
using SampleBase;


namespace SamplesCore
{
    /// <summary>
    /// SIFT and SURF sample
    /// http://www.prism.gatech.edu/~ahuaman3/docs/OpenCV_Docs/tutorials/nonfree_1/nonfree_1.html
    /// </summary>
    class SiftSurfSample : ConsoleTestBase
    {
        public override void RunTest()
        {
            using var src1 = new Mat(ImagePath.Match1, ImreadModes.Color);
            using var src2 = new Mat(ImagePath.Match2, ImreadModes.Color);


            MatchBySift(src1, src2);
            MatchBySurf(src1, src2);
        }
    // Scale-Invariant Feature Transform (SIFT) detects and describes local image features.
    // It builds a scale-space representation of the image, searches it for extrema, and turns those
    // extrema (keypoints carrying three key pieces of information: position, scale and orientation)
    // into feature descriptor vectors, which are then used for recognition and detection tasks.
        private void MatchBySift(Mat src1, Mat src2)
        {
            using var gray1 = new Mat();
            using var gray2 = new Mat();
            // grayscale versions of both images
            Cv2.CvtColor(src1, gray1, ColorConversionCodes.BGR2GRAY);
            Cv2.CvtColor(src2, gray2, ColorConversionCodes.BGR2GRAY);


            using var sift = SIFT.Create(); // create the SIFT detector


            // Detect keypoints and compute their descriptors (feature vectors) using SIFT
            using var descriptors1 = new Mat();
            using var descriptors2 = new Mat();
            sift.DetectAndCompute(gray1, null, out var keypoints1, descriptors1);
            sift.DetectAndCompute(gray2, null, out var keypoints2, descriptors2);


            // Match descriptor vectors
            using var bfMatcher = new BFMatcher(NormTypes.L2, false);
            using var flannMatcher = new FlannBasedMatcher();
            DMatch[] bfMatches = bfMatcher.Match(descriptors1, descriptors2);
            DMatch[] flannMatches = flannMatcher.Match(descriptors1, descriptors2);


            // Draw matches
            using var bfView = new Mat();
            Cv2.DrawMatches(gray1, keypoints1, gray2, keypoints2, bfMatches, bfView);
            using var flannView = new Mat();
            Cv2.DrawMatches(gray1, keypoints1, gray2, keypoints2, flannMatches, flannView);


            using (new Window("SIFT matching (by BFMather)", bfView))
            using (new Window("SIFT matching (by FlannBasedMatcher)", flannView))
            {
                Cv2.WaitKey();
            }
        }
    // SURF (Speeded-Up Robust Features), proposed by Herbert Bay et al. in 2006, is a SIFT-like keypoint
    // detector and descriptor designed as a faster SIFT. SIFT's biggest drawback is that it is hard to run
    // in real time without special hardware or a dedicated image processor. SURF borrows the idea of SIFT's
    // simplified DoG approximation but instead uses the determinant of the Hessian matrix to approximate the image:
    // interest points are located via the Hessian determinant, and descriptors are then built from
    // Haar-wavelet responses in the keypoint neighbourhood.
        private void MatchBySurf(Mat src1, Mat src2)
        {
            using var gray1 = new Mat();
            using var gray2 = new Mat();


            Cv2.CvtColor(src1, gray1, ColorConversionCodes.BGR2GRAY);
            Cv2.CvtColor(src2, gray2, ColorConversionCodes.BGR2GRAY);


            using var surf = SURF.Create(200, 4, 2, true);


            // Detect the keypoints and generate their descriptors using SURF
            using var descriptors1 = new Mat();
            using var descriptors2 = new Mat();
            surf.DetectAndCompute(gray1, null, out var keypoints1, descriptors1);
            surf.DetectAndCompute(gray2, null, out var keypoints2, descriptors2);


            // Match descriptor vectors 
            using var bfMatcher = new BFMatcher(NormTypes.L2, false);
            using var flannMatcher = new FlannBasedMatcher();
            DMatch[] bfMatches = bfMatcher.Match(descriptors1, descriptors2);
            DMatch[] flannMatches = flannMatcher.Match(descriptors1, descriptors2);


            // Draw matches
            using var bfView = new Mat();
            Cv2.DrawMatches(gray1, keypoints1, gray2, keypoints2, bfMatches, bfView);
            using var flannView = new Mat();
            Cv2.DrawMatches(gray1, keypoints1, gray2, keypoints2, flannMatches, flannView);


            using (new Window("SURF matching (by BFMather)", bfView))
            using (new Window("SURF matching (by FlannBasedMatcher)", flannView))
            {
                Cv2.WaitKey();
            }
        }


    }
}
/*
https://blog.csdn.net/qq_38338086/article/details/121673036
ORB (Oriented FAST and Rotated BRIEF) is a fast keypoint detection and description algorithm, proposed by Ethan Rublee, Vincent Rabaud, Kurt Konolige and Gary R. Bradski in the 2011 paper "ORB: An Efficient Alternative to SIFT or SURF". It has two parts: keypoint detection, developed from FAST (Features from Accelerated Segment Test), and keypoint description, an improved version of BRIEF (Binary Robust Independent Elementary Features). ORB combines the FAST detector with the BRIEF descriptor and improves and optimizes both. It is reportedly about 100x faster than SIFT and 10x faster than SURF, making it usable for real-time feature detection.


FAST (Features from Accelerated Segment Test) is a corner detector usable for keypoint extraction and is widely regarded as the fastest such method; its results are very close to true corners. It was originally proposed by Edward Rosten and Tom Drummond, and its standout advantage is computational efficiency. It examines the 16 pixels on a Bresenham circle of radius 3 around a candidate centre pixel P, numbered 1-16 clockwise. If N contiguous pixels on the circle are all brighter than the centre intensity Ip plus a threshold t, or all darker than the centre intensity minus t, the centre pixel is declared a corner.


ORB's detector is an improved FAST called oFAST (FAST Keypoint Orientation): after FAST finds a keypoint, an orientation is assigned to it, which gives the keypoints rotation invariance.


BRIEF (Binary Robust Independent Elementary Features), proposed in 2010, describes already-detected keypoints with a binary code. By abandoning the traditional grey-level-histogram description of a region, it greatly speeds up descriptor construction and drastically reduces matching time; it is a very fast and promising algorithm.


Summary: the three algorithms are used very similarly; in OpenCV only the feature-detection and feature-description creation calls differ.


The basic pipeline for deciding whether two images show the same object:


1. Detect the keypoints in both images with a feature detector; the results go into a vector of KeyPoint.


2. Describe the keypoints' properties (feature extraction): step 1 only yields a set of keypoints, so step 2 generates feature vectors with a descriptor extractor and stores them in a descriptor matrix.


3. Compare the keypoint properties of the two images: if enough keypoints have matching properties, the two images likely show the same object. Matching is done with a matcher (FLANN-based or brute-force), and the results go into a vector of DMatch.
*/
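Since the note above says the three algorithms differ mainly in their creation calls, here is a minimal ORB matching sketch under the same structure; note that ORB's binary descriptors are matched with the Hamming norm rather than L2 (the 1000-feature limit is an arbitrary choice):

using var gray1 = new Mat(ImagePath.Match1, ImreadModes.Grayscale);
using var gray2 = new Mat(ImagePath.Match2, ImreadModes.Grayscale);

using var orb = ORB.Create(1000); // detect up to 1000 keypoints
using var descriptors1 = new Mat();
using var descriptors2 = new Mat();
orb.DetectAndCompute(gray1, null, out var keypoints1, descriptors1);
orb.DetectAndCompute(gray2, null, out var keypoints2, descriptors2);

using var matcher = new BFMatcher(NormTypes.Hamming, crossCheck: true); // binary descriptors -> Hamming distance
DMatch[] matches = matcher.Match(descriptors1, descriptors2);

using var view = new Mat();
Cv2.DrawMatches(gray1, keypoints1, gray2, keypoints2, matches, view);
Cv2.ImShow("ORB matching", view);
Cv2.WaitKey();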




#25. Blob detection https://blog.csdn.net/jsxyhelu2015/article/details/108251482
using OpenCvSharp;
using SampleBase;


namespace SamplesCore
{
    internal class SimpleBlobDetectorSample : ConsoleTestBase
    {
        public override void RunTest()
        {
            using var src = Cv2.ImRead(ImagePath.Shapes);
            using var detectedCircles = new Mat();
            using var detectedOvals = new Mat();


            // Invert the image. Shapes has a black background and SimpleBlobDetector doesn't seem to work well with that.
            Cv2.BitwiseNot(src, src);


            // Parameters tuned to detect only circles
            var circleParams = new SimpleBlobDetector.Params
            {
                MinThreshold = 10, // start threshold of the internal binarization sweep
                MaxThreshold = 230, // end threshold of the internal binarization sweep


                // The area is the number of pixels in the blob.
                FilterByArea = true,
                MinArea = 500,
                MaxArea = 50000,


                // Circularity is a ratio of the area to the perimeter. Polygons with more sides are more circular.
                FilterByCircularity = true,
                MinCircularity = 0.9f,


                // Convexity is the ratio of the area of the blob to the area of its convex hull.
                FilterByConvexity = true,
                MinConvexity = 0.95f,


                // A circle's inertia ratio is 1. A line's is 0. An oval is between 0 and 1.
                FilterByInertia = true,
                MinInertiaRatio = 0.95f
            };


            // Parameters tuned to find the ovals in the Shapes image.
            var ovalParams = new SimpleBlobDetector.Params
            {
                MinThreshold = 10,
                MaxThreshold = 230,
                FilterByArea = true,
                MinArea = 500,
                // The ovals are the smallest blobs in Shapes, so we limit the max area to eliminate the larger blobs.
                MaxArea = 10000,
                FilterByCircularity = true, // circularity filter; off by default
                MinCircularity = 0.58f,
                FilterByConvexity = true,
                MinConvexity = 0.96f,
                FilterByInertia = true,
                MinInertiaRatio = 0.1f
            };


            using var circleDetector = SimpleBlobDetector.Create(circleParams);
            var circleKeyPoints = circleDetector.Detect(src); // detect circular blobs
            Cv2.DrawKeypoints(src, circleKeyPoints, detectedCircles, Scalar.HotPink, DrawMatchesFlags.DrawRichKeypoints);


            using var ovalDetector = SimpleBlobDetector.Create(ovalParams);
            var ovalKeyPoints = ovalDetector.Detect(src); // detect oval blobs
            Cv2.DrawKeypoints(src, ovalKeyPoints, detectedOvals, Scalar.HotPink, DrawMatchesFlags.DrawRichKeypoints);


            using var w1 = new Window("Detected Circles", detectedCircles);
            using var w2 = new Window("Detected Ovals", detectedOvals);


            Cv2.WaitKey();
        }
    }
}


#26. Solving the equation AX = Y
//https://blog.csdn.net/u014652390/article/details/52789591
using System;
using System.Collections.Generic;
using System.Linq;
using OpenCvSharp;
using SampleBase;


namespace SamplesCore
{
    /// <summary>
    /// Solve an equation system
    /// </summary>
    class SolveEquation : ConsoleTestBase
    {
        public override void RunTest()
        {
            ByMat();
            ByNormalArray();


            Console.Read();
        }


        /// 
        /// Solve equation AX = Y
        /// 
        private void ByMat() // Mat-based version
        {
            // x + y = 10
            // 2x + 3y = 26
            // (x=4, y=6)


            double[,] av = {{1, 1}, 
                          {2, 3}};
            double[] yv = {10, 26};


            using var a = new Mat(2, 2, MatType.CV_64FC1, av);
            using var y = new Mat(2, 1, MatType.CV_64FC1, yv);
            using var x = new Mat();
            // rank(A) = n: as many equations as unknowns, so a unique exact solution exists;
            // typical methods are Gaussian elimination and LU decomposition.
            Cv2.Solve(a, y, x, DecompTypes.LU); // solve the system
            // rank(A) > n: more equations than unknowns (an overdetermined system). The constraints are too
            // strict for an exact solution, which is the common case in engineering; we then take the best
            // approximation instead, the least-squares solution.


            Console.WriteLine("ByMat:");
            Console.WriteLine("X1 = {0}, X2 = {1}", x.At<double>(0), x.At<double>(1));
        }


        /// 
        /// Solve equation AX = Y 
        /// 
        private void ByNormalArray() // plain-array version
        {
            // x + y = 10
            // 2x + 3y = 26
            // (x=4, y=6)


            double[,] a = {{1, 1}, 
                          {2, 3}};


            double[] y = { 10, 26 };


            var x = new List<double>();


            Cv2.Solve(
                InputArray.Create(a), InputArray.Create(y),
                OutputArray.Create(x),
                DecompTypes.LU);


            Console.WriteLine("ByNormalArray:");
            Console.WriteLine("X1 = {0}, X2 = {1}", x[0], x[1]);
        }
    }
}
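The overdetermined case mentioned in the comments can be handled by the same API with an SVD (least-squares) decomposition; a minimal sketch with three consistent equations in two unknowns, so the least-squares solution is exactly x=4, y=6:

// x + y = 10, 2x + 3y = 26, x - y = -2
double[,] a = { { 1, 1 }, { 2, 3 }, { 1, -1 } };
double[] y = { 10, 26, -2 };
var x = new List<double>();
Cv2.Solve(InputArray.Create(a), InputArray.Create(y), OutputArray.Create(x), DecompTypes.SVD);
Console.WriteLine("X1 = {0}, X2 = {1}", x[0], x[1]); // 4, 6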




#27. Retrieving keypoints with the StarDetector algorithm
using OpenCvSharp;
using OpenCvSharp.XFeatures2D;
using SampleBase;


namespace SamplesCore
{
    /// <summary>
    /// Retrieves keypoints using the StarDetector algorithm.
    /// </summary>
    class StarDetectorSample : ConsoleTestBase
    {
        public override void RunTest()
        {
            var dst = new Mat(ImagePath.Lenna, ImreadModes.Color); // result image
            var gray = new Mat(ImagePath.Lenna, ImreadModes.Grayscale); // grayscale image


            StarDetector detector = StarDetector.Create(45); // create the Star detector
            KeyPoint[] keypoints = detector.Detect(gray); // detect keypoints


            if (keypoints != null)
            {
                var color = new Scalar(0, 255, 0);
                foreach (KeyPoint kpt in keypoints)
                {
                    float r = kpt.Size / 2;
                    Cv2.Circle(dst, (Point)kpt.Pt, (int)r, color); // draw a circle
                    // draw a cross
                    Cv2.Line(dst,
                        (Point)new Point2f(kpt.Pt.X + r, kpt.Pt.Y + r),
                        (Point)new Point2f(kpt.Pt.X - r, kpt.Pt.Y - r), 
                        color);
                    Cv2.Line(dst,
                        (Point)new Point2f(kpt.Pt.X - r, kpt.Pt.Y + r),
                        (Point)new Point2f(kpt.Pt.X + r, kpt.Pt.Y - r), 
                        color);
                }
            }


            using (new Window("StarDetector features", dst))
            {
                Cv2.WaitKey();
            }
        }
    }
}




#28. Image stitching (panorama)
//https://blog.csdn.net/Thousand_drive/article/details/125084810
//https://blog.csdn.net/guduruyu/article/details/80405880
using System;
using System.Collections.Generic;
using OpenCvSharp;
using SampleBase;


namespace SamplesCore
{
    class Stitching : ConsoleTestBase
    {
        public override void RunTest()
        {
            Mat[] images = SelectStitchingImages(200, 200, 10);


            using var stitcher = Stitcher.Create(Stitcher.Mode.Scans); // create the stitcher
            using var pano = new Mat();


            Console.Write("Stitching start...");
            // TODO: does not work??
            var status = stitcher.Stitch(images, pano);//拼接图像
            Console.WriteLine(" finish (status:{0})", status);


            Window.ShowImages(pano);//拼接结果


            foreach (var image in images)
            {
                image.Dispose();
            }
        }
        // Generate the images to stitch and return them as an array.
        private static Mat[] SelectStitchingImages(int width, int height, int count)
        {
            using var source = new Mat(@"Data\Image\lenna.png", ImreadModes.Color); // read the source image
            using var result = source.Clone();


            var rand = new Random();
            var mats = new List<Mat>();
            for (int i = 0; i < count; i++) // pick count random sub-regions
            {
                int x1 = rand.Next(source.Cols - width);
                int y1 = rand.Next(source.Rows - height);
                int x2 = x1 + width;
                int y2 = y1 + height;
                // draw the outline of the random rectangle
                result.Line(new Point(x1, y1), new Point(x1, y2), new Scalar(0, 0, 255));
                result.Line(new Point(x1, y2), new Point(x2, y2), new Scalar(0, 0, 255));
                result.Line(new Point(x2, y2), new Point(x2, y1), new Scalar(0, 0, 255));
                result.Line(new Point(x2, y1), new Point(x1, y1), new Scalar(0, 0, 255));


                using var m = source[new Rect(x1, y1, width, height)]; // crop a fixed-size random sub-region of the source
                mats.Add(m.Clone());
            }


            using (new Window("stitching", result))
            {
                Cv2.WaitKey();
            }


            return mats.ToArray();
        }
    }
}
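Stitch returns a status code, and the TODO above suggests it is worth checking before showing the result; a minimal guard (enum member names assumed from OpenCvSharp's Stitcher.Status):

var status = stitcher.Stitch(images, pano);
if (status != Stitcher.Status.OK)
{
    Console.WriteLine("Stitching failed: {0}", status); // e.g. ErrNeedMoreImgs when the overlap is insufficient
    return;
}
Window.ShowImages(pano);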




#29. Delaunay triangulation / Voronoi tessellation with Subdiv2D
using System;
using System.Linq;
using OpenCvSharp;
using SampleBase;


namespace SamplesCore
{
    /// <summary>
    /// cv::Subdiv2D test
    /// </summary>
    class Subdiv2DSample : ConsoleTestBase
    {
        public override void RunTest()
        {
            const int Size = 600;


            // create a list of random points
            var rand = new Random();
            var points = Enumerable.Range(0, 100).Select(_ =>
                new Point2f(rand.Next(0, Size), rand.Next(0, Size))).ToArray();


            using var imgExpr = Mat.Zeros(Size, Size, MatType.CV_8UC3);
            using var img = imgExpr.ToMat(); // black background
            foreach (var p in points)
            {
                img.Circle((Point)p, 4, Scalar.Red, -1); // draw a dot
            }


            // Initialize Subdiv2D
            using var subdiv = new Subdiv2D();
            subdiv.InitDelaunay(new Rect(0, 0, Size, Size));
            subdiv.Insert(points);


            // Draw the Voronoi diagram
            subdiv.GetVoronoiFacetList(null, out var facetList, out var facetCenters); // Voronoi facets and their centers


            using var voronoi = img.Clone();
            foreach (var list in facetList)
            {
                var before = list.Last(); // last vertex of the facet
                foreach (var p in list) // draw the facet outline
                {
                    voronoi.Line((Point)before, (Point)p, new Scalar(64, 255, 128), 1);
                    before = p;
                }
            }


            // Draw the Delaunay diagram
            Vec4f[] edgeList = subdiv.GetEdgeList();
            using var delaunay = img.Clone();
            foreach (var edge in edgeList)
            {
                var p1 = new Point(edge.Item0, edge.Item1);
                var p2 = new Point(edge.Item2, edge.Item3);
                delaunay.Line(p1, p2, new Scalar(64, 255, 128), 1);
            }


            Cv2.ImShow("voronoi", voronoi);
            Cv2.ImShow("delaunay", delaunay);
            Cv2.WaitKey();
            Cv2.DestroyAllWindows();
        }
    }
}
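Subdiv2D can also return the triangulation directly instead of the raw edge list; a minimal continuation sketch (GetTriangleList yields one Vec6f per triangle: x1,y1,x2,y2,x3,y3):

Vec6f[] triangles = subdiv.GetTriangleList();
using var tri = img.Clone();
foreach (var t in triangles)
{
    var p1 = new Point((int)t.Item0, (int)t.Item1);
    var p2 = new Point((int)t.Item2, (int)t.Item3);
    var p3 = new Point((int)t.Item4, (int)t.Item5);
    tri.Line(p1, p2, Scalar.Yellow);
    tri.Line(p2, p3, Scalar.Yellow);
    tri.Line(p3, p1, Scalar.Yellow);
}
Cv2.ImShow("triangles", tri);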




#30. Super-resolution (SuperResolution)
/* https://blog.csdn.net/LuohenYJ/article/details/108207700
Image super-resolution means recovering a high-resolution image from a low-resolution image or image sequence. It is an important research problem in computer vision, widely used in medical image analysis, biometrics, video surveillance and security. With the development of deep learning, learning-based super-resolution methods have achieved better performance and results than traditional methods on many benchmark tasks.
*/
using System;
using System.Collections.Generic;
using System.Linq;
using OpenCvSharp;
using SampleBase;


namespace SamplesCore
{


    class SuperResolutionSample : ConsoleTestBase
    {
        public override void RunTest()
        {
            var capture = new VideoCapture();
            capture.Set(VideoCaptureProperties.FrameWidth, 640);
            capture.Set(VideoCaptureProperties.FrameHeight, 480);
            capture.Open(-1);
            if (!capture.IsOpened())
                throw new Exception("capture initialization failed");


            var fs = FrameSource.CreateFrameSource_Camera(-1);
            var sr = SuperResolution.CreateBTVL1();
            sr.SetInput(fs);


            using var normalWindow = new Window("normal"); // normal-resolution frames
            using var srWindow = new Window("super resolution"); // super-resolved frames
            var normalFrame = new Mat();
            var srFrame = new Mat();
            while (true)
            {
                capture.Read(normalFrame); // grab a normal-resolution frame
                sr.NextFrame(srFrame); // get the next super-resolved frame
                if (normalFrame.Empty() || srFrame.Empty())
                    break;
                // display both
                normalWindow.ShowImage(normalFrame);
                srWindow.ShowImage(srFrame);
                Cv2.WaitKey(100);
            }
        }
    }
}


#31. SVM (Support Vector Machine) sample
using System;
using OpenCvSharp;
using OpenCvSharp.ML;
using SampleBase;


namespace SamplesCore
{
    /// <summary>
    /// Support Vector Machine
    /// </summary>
    /// http://opencv.jp/sample/svm.html#svm
    internal class SVMSample : ConsoleTestBase
    {
        private static double Function(double x)
        {
            return x + 50 * Math.Sin(x / 15.0);
        }


        public override void RunTest()
        {
            // Training data
            var points = new Point2f[500]; // input points
            var responses = new int[points.Length]; // responses (labels)
            var rand = new Random();
            for (int i = 0; i < responses.Length; i++)
            {
                float x = rand.Next(0, 300);
                float y = rand.Next(0, 300);
                points[i] = new Point2f(x, y); // random point
                responses[i] = (y > Function(x)) ? 1 : 2; // label 1 above the curve, 2 below
            }


            // Show training data and f(x)
            using (Mat pointsPlot = Mat.Zeros(300, 300, MatType.CV_8UC3))
            {
                for (int i = 0; i < points.Length; i++)
                {
                    int x = (int)points[i].X;
                    int y = (int)(300 - points[i].Y);
                    int res = responses[i];
                    Scalar color = (res == 1) ? Scalar.Red : Scalar.GreenYellow; // label 1: red, label 2: green-yellow
                    pointsPlot.Circle(x, y, 2, color, -1); // plot the point
                }
                // draw the curve f(x)
                for (int x = 1; x < 300; x++) // sweep across the width in pixels
                {
                    int y1 = (int)(300 - Function(x - 1));
                    int y2 = (int)(300 - Function(x));
                    pointsPlot.Line(x - 1, y1, x, y2, Scalar.LightBlue, 1);
                }
                Window.ShowImages(pointsPlot);
            }


            // Train the SVM
            var dataMat = new Mat(points.Length, 2, MatType.CV_32FC1, points); // training-data Mat
            var resMat = new Mat(responses.Length, 1, MatType.CV_32SC1, responses); // label Mat
            using var svm = SVM.Create();
            // normalize data
            dataMat /= 300.0;


            // SVM parameters
            svm.Type = SVM.Types.CSvc;
            svm.KernelType = SVM.KernelTypes.Rbf;
            svm.TermCriteria = TermCriteria.Both(1000, 0.000001); // iteration termination criteria
            svm.Degree = 100.0;
            svm.Gamma = 100.0;
            svm.Coef0 = 1.0;
            svm.C = 1.0;
            svm.Nu = 0.5;
            svm.P = 0.1;


            svm.Train(dataMat, SampleTypes.RowSample, resMat); // train


            // Predict for each of the 300x300 pixels
            using Mat retPlot = Mat.Zeros(300, 300, MatType.CV_8UC3);
            for (int x = 0; x < 300; x++)
            {
                for (int y = 0; y < 300; y++)
                {
                    float[] sample = { x / 300f, y / 300f }; // pixel coordinate to classify (origin at the top-left)
                    var sampleMat = new Mat(1, 2, MatType.CV_32FC1, sample); // wrap the sample in a Mat
                    int ret = (int)svm.Predict(sampleMat); // predict
                    var plotRect = new Rect(x, 300 - y, 1, 1); // plot with the origin at the bottom-left
                    if (ret == 1) // class 1
                        retPlot.Rectangle(plotRect, Scalar.Red);
                    else if (ret == 2) // class 2
                        retPlot.Rectangle(plotRect, Scalar.GreenYellow);
                }
            }
            Window.ShowImages(retPlot); // show the per-pixel prediction map
        }


    }
}
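A trained SVM can be persisted and reloaded instead of retraining on every run; a minimal sketch (the file name is a hypothetical example; Save is inherited from cv::Algorithm and SVM.Load is OpenCvSharp's static loader, both assumed available in your version):

svm.Save("svm_model.yml"); // serialize the trained parameters to YAML
using var loaded = SVM.Load("svm_model.yml"); // restore the model later
int cls = (int)loaded.Predict(sampleMat); // predict with the reloaded model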


#32. Reading video
using System;
using OpenCvSharp;
using SampleBase;


namespace SamplesCore
{


    class VideoCaptureSample : ConsoleTestBase
    {
        public override void RunTest()
        {
            // Opens MP4 file (ffmpeg is probably needed)
            using var capture = new VideoCapture(MoviePath.Bach); // open the MP4 video
            if (!capture.IsOpened())
                return;


            int sleepTime = (int)Math.Round(1000 / capture.Fps); // per-frame display time


            using var window = new Window("capture");
            // frame image buffer
            var image = new Mat();


            // When the movie playback reaches the end, Mat.data becomes NULL.
            while (true)
            {
                capture.Read(image); // same as cvQueryFrame
                if(image.Empty())
                    break;


                window.ShowImage(image); // show the frame
                Cv2.WaitKey(sleepTime);
            }
        }
    }
}


#33. Writing video
using System;
using OpenCvSharp;
using SampleBase;


namespace SamplesCore
{
    class VideoWriterSample : ConsoleTestBase
    {
        public override void RunTest()
        {
            const string OutVideoFile = "out.avi"; // output file


            // Opens MP4 file (ffmpeg is probably needed)
            using var capture = new VideoCapture(MoviePath.Bach); // open the input video


            // Read movie frames and write them to a VideoWriter
            var dsize = new Size(640, 480); // target output size
            using (var writer = new VideoWriter(OutVideoFile, -1, capture.Fps, dsize)) // open the output file (-1 prompts for a codec)
            {
                Console.WriteLine("Converting each movie frames...");
                using var frame = new Mat();
                while(true)
                {
                    // Read image
                    capture.Read(frame);
                    if(frame.Empty())
                        break;


                    Console.CursorLeft = 0;
                    Console.Write("{0} / {1}", capture.PosFrames, capture.FrameCount);


                    // grayscale -> canny -> resize
                    using var gray = new Mat();
                    using var canny = new Mat();
                    using var dst = new Mat();
                    Cv2.CvtColor(frame, gray, ColorConversionCodes.BGR2GRAY); // grayscale
                    Cv2.Canny(gray, canny, 100, 180); // edge detection
                    Cv2.Resize(canny, dst, dsize, 0, 0, InterpolationFlags.Linear); // resize
                    // Write mat to VideoWriter
                    writer.Write(dst); // append the frame to the output file
                } 
                Console.WriteLine();
            }


            // Watch the result movie
            using (var capture2 = new VideoCapture(OutVideoFile)) // open the output video
            using (var window = new Window("result"))
            {
                int sleepTime = (int)(1000 / capture.Fps); // per-frame display time


                using var frame = new Mat(); 
                while (true)
                {
                    capture2.Read(frame); // read a frame
                    if(frame.Empty())
                        break;


                    window.ShowImage(frame); // show it
                    Cv2.WaitKey(sleepTime);
                }
            }
        }


    }
}
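Passing -1 as the codec pops up a codec-selection dialog on Windows; for unattended runs an explicit FourCC is more robust. A minimal sketch, assuming the MJPG codec is available on the machine:

using (var writer = new VideoWriter(OutVideoFile, FourCC.MJPG, capture.Fps, dsize))
{
    if (!writer.IsOpened())
        throw new Exception("VideoWriter failed to open (codec missing?)");
    // ... write frames as above ...
}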




#34. Watershed algorithm sample
//https://blog.csdn.net/sugarannie/article/details/53080168
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Text;
using OpenCvSharp;
using SampleBase;


namespace SamplesCore
{
    /// <summary>
    /// Watershed algorithm sample
    /// </summary>
    /// http://opencv.jp/sample/segmentation_and_connection.html#watershed
    public class WatershedSample : ConsoleTestBase
    {
        public override void RunTest()
        {
            using var srcImg = Cv2.ImRead(ImagePath.Lenna, ImreadModes.AnyDepth | ImreadModes.AnyColor); // source image
            using var markers = new Mat(srcImg.Size(), MatType.CV_32SC1, Scalar.All(0)); // marker (seed) image


            using (var window = new Window("image", srcImg))//显示源图像
            {
                using var dspImg = srcImg.Clone();//拷贝源图像


                //窗口鼠标事件 Mouse event  
                int seedNum = 0;
                window.SetMouseCallback((MouseEventTypes ev, int x, int y, MouseEventFlags flags, IntPtr userdata) =>
                {
                    if (ev == MouseEventTypes.LButtonDown)
                    {
                        seedNum++;
                        var pt = new Point(x, y);
                        markers.Circle(pt, 10, Scalar.All(seedNum), Cv2.FILLED, LineTypes.Link8);//绘制标记点
                        dspImg.Circle(pt, 10, Scalar.White, 3, LineTypes.Link8);//在源图像上绘制 标记点
                        window.Image = dspImg;//显示带标记点的图
                    }
                });
                Window.WaitKey();
            }


            Cv2.Watershed(srcImg, markers); // run watershed segmentation; markers becomes the label image


            // Draw the watershed boundaries
            using var dstImg = srcImg.Clone(); 
            for (int y = 0; y < markers.Height; y++)
            {
                for (int x = 0; x < markers.Width; x++)
                {
                    int idx = markers.Get<int>(y, x); // label of this pixel
                    if (idx == -1)
                    {
                        dstImg.Rectangle(new Rect(x, y, 2, 2), Scalar.Red, -1); // boundary pixels (-1) drawn as small red blocks
                    }
                }
            }


            using (new Window("watershed transform", dstImg)) //显示分水岭变换图
            {
                Window.WaitKey();
            }
        }
    }
}


#35. ArUco marker detection
/* An ArUco marker is a square binary marker consisting of a wide black border and an inner binary matrix that encodes its id. The black border makes markers fast to detect in an image, and the binary code allows id verification plus error detection and correction. The marker size determines the inner matrix size: a 4x4 marker, for example, carries 16 bits. */
//https://blog.csdn.net/u010260681/article/details/77089657


using System;
using System.Collections.Generic;
using OpenCvSharp;
using OpenCvSharp.Aruco;
using SampleBase;


namespace SamplesCore
{
    public class ArucoSample : ConsoleTestBase
    {
        public override void RunTest()
        {
            // The locations of the markers in the image at FilePath.Image.Aruco.
            const int upperLeftMarkerId = 160; // upper-left marker
            const int upperRightMarkerId = 268;
            const int lowerRightMarkerId = 176;
            const int lowerLeftMarkerId = 168;


            using var src = Cv2.ImRead(ImagePath.Aruco); // read the image


            var detectorParameters = DetectorParameters.Create(); // cv::aruco::DetectorParameters parameters;
            detectorParameters.CornerRefinementMethod = CornerRefineMethod.Subpix; // sub-pixel corner refinement
            detectorParameters.CornerRefinementWinSize = 9;


            using var dictionary = CvAruco.GetPredefinedDictionary(PredefinedDictionaryName.Dict4X4_1000); // predefined dictionary
            // the detected markers are stored in the corners and ids structures
            CvAruco.DetectMarkers(src, dictionary, out var corners, out var ids, detectorParameters, out var rejectedPoints);


            using var detectedMarkers = src.Clone();
            CvAruco.DrawDetectedMarkers(detectedMarkers, corners, ids, Scalar.Crimson); // draw the detected markers


            // Find the index of the four markers in the ids array. We'll use this same index into the
            // corners array to find the corners of each marker.
            // (Array.FindIndex returns the zero-based index of the first element matching the predicate.)
            var upperLeftCornerIndex = Array.FindIndex(ids, id => id == upperLeftMarkerId);
            var upperRightCornerIndex = Array.FindIndex(ids, id => id == upperRightMarkerId);
            var lowerRightCornerIndex = Array.FindIndex(ids, id => id == lowerRightMarkerId);
            var lowerLeftCornerIndex = Array.FindIndex(ids, id => id == lowerLeftMarkerId);


            // Make sure we found all four markers.
            if (upperLeftCornerIndex < 0 || upperRightCornerIndex < 0 
                 || lowerRightCornerIndex < 0 || lowerLeftCornerIndex < 0)
            {
                return;
            }


            // Marker corners are stored clockwise beginning with the upper-left corner.
            // Get the first (upper-left) corner of the upper-left marker.
            var upperLeftPixel = corners[upperLeftCornerIndex][0];
            // Get the second (upper-right) corner of the upper-right marker.
            var upperRightPixel = corners[upperRightCornerIndex][1];
            // Get the third (lower-right) corner of the lower-right marker.
            var lowerRightPixel = corners[lowerRightCornerIndex][2];
            // Get the fourth (lower-left) corner of the lower-left marker.
            var lowerLeftPixel = corners[lowerLeftCornerIndex][3];


            // Create coordinates for passing to GetPerspectiveTransform
            var sourceCoordinates = new List<Point2f>
            {
                upperLeftPixel, upperRightPixel, lowerRightPixel, lowerLeftPixel
            }; // source corner coordinates
            var destinationCoordinates = new List<Point2f>
            {
                new Point2f(0, 0),
                new Point2f(1024, 0),
                new Point2f(1024, 1024),
                new Point2f(0, 1024),
            }; // destination corner coordinates


            using var transform = Cv2.GetPerspectiveTransform(sourceCoordinates, destinationCoordinates); // compute the perspective transform
            using var normalizedImage = new Mat();
            Cv2.WarpPerspective(src, normalizedImage, transform, new Size(1024, 1024)); // apply it


            using var _1 = new Window("Original Image", src, WindowFlags.AutoSize); // original image
            using var _2 = new Window($"Found {ids.Length} Markers", detectedMarkers); // detected markers
            using var _3 = new Window("Normalized Image", normalizedImage); // rectified image


            Cv2.WaitKey();
        }
    }
}






#36. Moving-object detection: background subtraction
//https://zhuanlan.zhihu.com/p/348113539
/*
A ready-made, unchanging background image would be ideal, but scenes are complex and unpredictable: sudden lighting changes, waving objects in the actual background, camera shake, and moving objects entering or leaving the scene all interfere, along with other noise. Background-modelling algorithms therefore usually have to acquire the background while moving targets are present in the scene, which is one of the hard parts of background modelling. And because the background image changes dynamically, it has to be estimated and recovered from inter-frame information in the video sequence (background reconstruction); selectively updating the background is another difficulty.
Traditional background-modelling methods include the median model, the mean model, the single-Gaussian model, the Gaussian mixture model, Kalman-filter models and more advanced models. All of them compute on pixel intensity values, which is why moving-object detection is said to rest on statistical principles. */
using OpenCvSharp;
using SampleBase;


namespace SamplesCore
{
    class BgSubtractorMOG : ConsoleTestBase
    {
        public override void RunTest()
        {
            using var capture = new VideoCapture(MoviePath.Bach);
            using var mog = BackgroundSubtractorMOG.Create(); // create a Gaussian-mixture background subtractor
            using var windowSrc = new Window("src");
            using var windowDst = new Window("dst");


            using var frame = new Mat();
            using var fg = new Mat();
            while (true)
            {
                capture.Read(frame);
                if (frame.Empty())
                    break;
                mog.Apply(frame, fg, 0.01); // subtract the background (0.01 is the learning rate)


                windowSrc.Image = frame;
                windowDst.Image = fg;
                Cv2.WaitKey(50);
            }
        }
    }
}
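The main OpenCV module also ships BackgroundSubtractorMOG2, an improved Gaussian-mixture model with optional shadow detection, used the same way; a minimal sketch (the three arguments are history length, variance threshold and shadow detection):

using var mog2 = BackgroundSubtractorMOG2.Create(500, 16, true);
using var fgMask = new Mat();
mog2.Apply(frame, fgMask); // with shadow detection on, shadow pixels are marked gray (127) in the mask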




#37. Local binarization: the Niblack algorithm
/* https://blog.csdn.net/lx_Angel/article/details/109843948
The Niblack algorithm is mostly applied to binarizing text images and is a classic local binarization method. Its approach inspired later improvements such as the Sauvola and Nick algorithms. The core idea: construct a threshold surface from the mean and standard deviation of each pixel's neighbourhood and binarize against that surface.


NiblackThreshold(InputArray src, OutputArray dst, double maxValue, ThresholdTypes type, int blockSize, double k, LocalBinarizationMethods binarizationMethod = LocalBinarizationMethods.Niblack, double r = 128);
Parameters:
        //src        8-bit single-channel image.
        //dst        destination image of the same size and type as src.
        //maxValue   non-zero value assigned to pixels that satisfy the condition; used with THRESH_BINARY and THRESH_BINARY_INV.
        //type       threshold type, see cv::ThresholdTypes.
        //blockSize  size of the pixel neighbourhood used to compute the threshold: 3, 5, 7 and so on.
        //k          user-tunable parameter used by Niblack and the techniques it inspired. For Niblack this is normally a value between 0 and 1 that is multiplied by the standard deviation and subtracted from the mean.
        //binarizationMethod  binarization method to use; Niblack's technique by default. Other techniques can be specified, see cv::ximgproc::LocalBinarizationMethods.
        //r          user-tunable parameter used by the Sauvola technique: the dynamic range of the standard deviation.
*/
using System;
using System.Diagnostics;
using OpenCvSharp;
using OpenCvSharp.XImgProc;
using SampleBase;


namespace SamplesCore
{
    internal class BinarizerSample : ConsoleTestBase
    {
        public override void RunTest()
        {
            using var src = Cv2.ImRead(ImagePath.Binarization, ImreadModes.Grayscale); // read the input as grayscale
            using var niblack = new Mat();
            using var sauvola = new Mat();
            using var nick = new Mat();
            int kernelSize = 51;


            var sw = new Stopwatch();
            sw.Start();
            CvXImgProc.NiblackThreshold(src, niblack, 255, ThresholdTypes.Binary, kernelSize, -0.2, LocalBinarizationMethods.Niblack);
            sw.Stop();
            Console.WriteLine($"Niblack {sw.ElapsedMilliseconds} ms");


            sw.Restart();
            CvXImgProc.NiblackThreshold(src, sauvola, 255, ThresholdTypes.Binary, kernelSize, 0.1, LocalBinarizationMethods.Sauvola);
            sw.Stop();
            Console.WriteLine($"Sauvola {sw.ElapsedMilliseconds} ms");


            sw.Restart();
            CvXImgProc.NiblackThreshold(src, nick, 255, ThresholdTypes.Binary, kernelSize, -0.14, LocalBinarizationMethods.Nick);
            sw.Stop();
            Console.WriteLine($"Nick {sw.ElapsedMilliseconds} ms");


            using (new Window("src", src, WindowFlags.AutoSize))
            using (new Window("Niblack", niblack, WindowFlags.AutoSize))
            using (new Window("Sauvola", sauvola, WindowFlags.AutoSize))
            using (new Window("Nick", nick, WindowFlags.AutoSize))
            {
                Cv2.WaitKey();
            }
        }
    }
}
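For reference, the threshold surface Niblack builds at each pixel, from the mean μ and standard deviation σ of the blockSize neighbourhood, is T(x, y) = μ(x, y) + k · σ(x, y), so the negative k values used in the Niblack and Nick calls above push the threshold below the local mean. Sauvola instead uses T(x, y) = μ(x, y) · (1 + k · (σ(x, y)/r − 1)), where r is the dynamic-range parameter (128 by default).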




#38. Retrieving keypoints with the BRISK algorithm
//https://www.cnblogs.com/welen/articles/6088639.html
//BRISK is a feature extraction algorithm with a binary feature descriptor, proposed in the ICCV 2011 paper "BRISK: Binary Robust Invariant Scalable Keypoints".
//It offers good rotation invariance, scale invariance and robustness, and in image-registration applications it is faster than SIFT and SURF.
using OpenCvSharp;
using SampleBase;


namespace SamplesCore
{
    /// <summary>
    /// Retrieves keypoints using the BRISK algorithm.
    /// </summary>
    class BRISKSample : ConsoleTestBase
    {
        public override void RunTest()
        {
            var gray = new Mat(ImagePath.Lenna, ImreadModes.Grayscale); // grayscale image
            var dst = new Mat(ImagePath.Lenna, ImreadModes.Color); // result image


            using var brisk = BRISK.Create();
            KeyPoint[] keypoints = brisk.Detect(gray); // detect keypoints in the grayscale image


            if (keypoints != null)
            {
                var color = new Scalar(0, 255, 0);
                foreach (KeyPoint kpt in keypoints)
                {
                    float r = kpt.Size / 2;
                    Cv2.Circle(dst, (Point)kpt.Pt, (int)r, color); // draw a circle
                    // draw a cross
                    Cv2.Line(dst,
                        (Point)new Point2f(kpt.Pt.X + r, kpt.Pt.Y + r),
                        (Point)new Point2f(kpt.Pt.X - r, kpt.Pt.Y - r),
                        color);
                    Cv2.Line(dst,
                        (Point)new Point2f(kpt.Pt.X - r, kpt.Pt.Y + r),
                        (Point)new Point2f(kpt.Pt.X + r, kpt.Pt.Y - r),
                        color);
                }
            }


            using (new Window("BRISK features", dst))
            {
                Cv2.WaitKey();
            }
        }
    }
}


#39. GoogLeNet deep learning model (with model download)
using System;
using System.IO;
using System.Linq;
using System.Net;
using System.Threading.Tasks;
using OpenCvSharp;
using OpenCvSharp.Dnn;
using SampleBase;


namespace SamplesCore
{
    /// <summary>
    /// https://docs.opencv.org/3.3.0/d5/de7/tutorial_dnn_googlenet.html
    /// </summary>
    class CaffeSample : ConsoleTestBase
    {
        public override void RunTest()
        {
            const string protoTxt = @"Data\Text\bvlc_googlenet.prototxt"; // config file
            const string caffeModel = "bvlc_googlenet.caffemodel"; // weights file
            const string synsetWords = @"Data\Text\synset_words.txt"; // class names
            var classNames = File.ReadAllLines(synsetWords)
                .Select(line => line.Split(' ').Last())
                .ToArray(); // read all class names


            Console.Write("Downloading Caffe Model...");
            PrepareModel(caffeModel);
            Console.WriteLine(" Done");


            using var net = CvDnn.ReadNetFromCaffe(protoTxt, caffeModel); // load the network
            using var img = new Mat(@"Data\Image\space_shuttle.jpg"); // image to classify
            Console.WriteLine("Layer names: {0}", string.Join(", ", net.GetLayerNames()));
            Console.WriteLine();


            // Preprocess the image (mean subtraction, scaling, cropping, channel swapping) and convert the Mat
            // into a 4-dimensional blob (simply an N-dimensional array used as the network input).
            using var inputBlob = CvDnn.BlobFromImage(img, 1, new Size(224, 224), new Scalar(104, 117, 123));
            net.SetInput(inputBlob, "data"); // set the network input
            using var prob = net.Forward("prob"); // forward pass
            // find the best class
            GetMaxClass(prob, out int classId, out double classProb);
            Console.WriteLine("Best class: #{0} '{1}'", classId, classNames[classId]);
            Console.WriteLine("Probability: {0:P2}", classProb);


            Console.WriteLine("Press any key to exit");
            Console.Read();
        }
        // download a URL as a byte array
        private static byte[] DownloadBytes(string url)
        {
            var client = WebRequest.CreateHttp(url);
            using var response = client.GetResponseAsync().GetAwaiter().GetResult();
            using var responseStream = response.GetResponseStream();
            using var memory = new MemoryStream();
            responseStream.CopyTo(memory);
            return memory.ToArray();
        }
        // download the model if it is missing
        private static void PrepareModel(string fileName)
        {
            if (!File.Exists(fileName)) // file does not exist yet
            {
                var contents = DownloadBytes("http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel"); // download the model
                File.WriteAllBytes(fileName, contents); // write it to disk
            }
        }


        /// <summary>
        /// Find the best class for the blob (i.e. the class with maximal probability)
        /// </summary>
        private static void GetMaxClass(Mat probBlob, out int classId, out double classProb)
        {
            // reshape the blob to a 1x1000 matrix (a single-channel row vector)  https://blog.csdn.net/qq_33515808/article/details/89313885
            using var probMat = probBlob.Reshape(1, 1);
            Cv2.MinMaxLoc(probMat, out _, out classProb, out _, out var classNumber); // maximal probability and its location
            classId = classNumber.X; // class index
        }
    }
}
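WebRequest.CreateHttp is marked obsolete in recent .NET releases; a minimal drop-in alternative for DownloadBytes using HttpClient (kept synchronous to match the sample's style):

using System.Net.Http;

private static byte[] DownloadBytes(string url)
{
    using var client = new HttpClient();
    return client.GetByteArrayAsync(url).GetAwaiter().GetResult();
}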


#40. Opening the camera
//https://blog.csdn.net/Maybe_ch/article/details/121142817
using System;
using System.Threading.Tasks;
using OpenCvSharp;
using SampleBase;


namespace SamplesCore
{
    class CameraCaptureSample : ConsoleTestBase
    {
        public override void RunTest()
        {
            using var capture = new VideoCapture(0, VideoCaptureAPIs.DSHOW); // open the camera via DirectShow
            if (!capture.IsOpened())
                return;


            capture.FrameWidth = 1920; // set frame width
            capture.FrameHeight = 1280; // set frame height
            capture.AutoFocus = true; // enable autofocus


            const int sleepTime = 10; // per-frame key-wait time in milliseconds


            using var window = new Window("capture");
            var image = new Mat();
            
            while (true)
            {
                capture.Read(image); // grab one frame
                if (image.Empty())
                    break;


                window.ShowImage(image); // display the frame
                int c = Cv2.WaitKey(sleepTime); // wait up to 10 ms for a key press
                if (c >= 0)
                {
                    break;
                }
            }
        }
    }
}
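
The loop above only previews frames. Recording them to disk is a small extension with VideoWriter; a hedged sketch (the file name, MJPG codec and fps fallback are assumptions, not from the sample):

using OpenCvSharp;

using var capture = new VideoCapture(0);
using var writer = new VideoWriter("capture.avi", FourCC.MJPG,
    capture.Fps > 0 ? capture.Fps : 30, // some drivers report 0 fps; fall back to 30
    new Size(capture.FrameWidth, capture.FrameHeight));
using var frame = new Mat();
while (capture.Read(frame) && !frame.Empty())
{
    writer.Write(frame); // append the frame to the video file
    if (Cv2.WaitKey(10) >= 0) // any key stops recording
        break;
}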


#41. Histogram equalization: commonly used to brighten an image
// Cv2.EqualizeHist: global histogram equalization
// Cv2.CreateCLAHE: creates an adaptive (contrast-limited) equalizer
//http://edu.pointborn.com/article/2021/5/18/1386.html 
//https://www.cnblogs.com/my-love-is-python/p/10405811.html
using OpenCvSharp;
using SampleBase;


namespace SamplesCore
{
    class ClaheSample : ConsoleTestBase
    {
        public override void RunTest()
        {
            using var src = new Mat(ImagePath.TsukubaLeft, ImreadModes.Grayscale); // grayscale source
            using var dst1 = new Mat();
            using var dst2 = new Mat();
            using var dst3 = new Mat();


            using (var clahe = Cv2.CreateCLAHE()) // create the adaptive equalizer
            {
                clahe.ClipLimit = 20;
                clahe.Apply(src, dst1); // run the equalization with Apply
                clahe.ClipLimit = 40; // ClipLimit: contrast-limiting threshold
                clahe.Apply(src, dst2);
                clahe.TilesGridSize = new Size(4, 4); // TilesGridSize: grid of tiles over which histograms are equalized
                clahe.Apply(src, dst3);
            }
            }
            // show the images side by side
            Window.ShowImages(
                new[]{src, dst1, dst2, dst3}, 
                new[]{"src", "dst clip20", "dst clip40", "dst tile4x4"});
        }
    }
}
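
For comparison, global (non-adaptive) equalization is a single call to Cv2.EqualizeHist; a minimal sketch (the input path is a placeholder):

using OpenCvSharp;

using var src = Cv2.ImRead("input.png", ImreadModes.Grayscale); // placeholder path
using var dst = new Mat();
Cv2.EqualizeHist(src, dst); // equalize the histogram of the whole image at once
Window.ShowImages(new[] { src, dst }, new[] { "src", "equalizeHist" });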




#42. Connected-component labeling (ConnectedComponents)
//https://blog.csdn.net/jgj123321/article/details/93489417
//https://shimat.github.io/opencvsharp_docs/html/2905013f-9f1a-6179-77a8-4488551c3619.htm
//Computes the connected-component labeling of a boolean image, with 4- or 8-way connectivity. Returns N, the total number of labels [0, N-1], where 0 is the background label. ltype specifies the output label image type, an important consideration based on the total number of labels, or the total number of pixels in the source image.
using System.Linq;
using OpenCvSharp;
using SampleBase;


namespace SamplesCore
{
    class ConnectedComponentsSample : ConsoleTestBase
    {
        public override void RunTest()
        {
            using var src = new Mat(ImagePath.Shapes, ImreadModes.Color); // source image
            using var gray = src.CvtColor(ColorConversionCodes.BGR2GRAY); // grayscale
            using var binary = gray.Threshold(0, 255, ThresholdTypes.Otsu | ThresholdTypes.Binary); // binary image
            using var labelView = src.EmptyClone(); // Mat with the same size, depth and channels as src
            using var rectView = binary.CvtColor(ColorConversionCodes.GRAY2BGR);
            //ConnectedComponents ConnectedComponentsEx(InputArray image, PixelConnectivity connectivity = PixelConnectivity.Connectivity8, ConnectedComponentsAlgorithmsTypes ccltype = ConnectedComponentsAlgorithmsTypes.Default);
            //image: the image to label
            //connectivity: 8 or 4, for 8-way or 4-way connectivity respectively
            var cc = Cv2.ConnectedComponentsEx(binary); // like Halcon's connection operator: extracts all connected components
            if (cc.LabelCount <= 1)
                return;


            // draw the labels
            cc.RenderBlobs(labelView);


            // draw bounding boxes for every blob except the background
            foreach (var blob in cc.Blobs.Skip(1))
            {
                rectView.Rectangle(blob.Rect, Scalar.Red);
            }


            // keep only the largest blob
            var maxBlob = cc.GetLargestBlob();
            var filtered = new Mat();
            cc.FilterByBlob(src, filtered, maxBlob);


            using (new Window("src", src))
            using (new Window("binary", binary))
            using (new Window("labels", labelView))
            using (new Window("bonding boxes", rectView))
            using (new Window("maximum blob", filtered))
            {
                Cv2.WaitKey();
            }
        }
    }
}
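
ConnectedComponentsEx is OpenCvSharp's convenience wrapper; the underlying OpenCV call returns the same information as raw Mats. A hedged sketch of the raw API ("shapes.png" is a placeholder path, not from the sample):

using System;
using OpenCvSharp;

using var binary = Cv2.ImRead("shapes.png", ImreadModes.Grayscale)
    .Threshold(0, 255, ThresholdTypes.Otsu | ThresholdTypes.Binary);
using var labels = new Mat();    // per-pixel label ids
using var stats = new Mat();     // one row per label: x, y, width, height, area
using var centroids = new Mat(); // one row per label: cx, cy
int n = Cv2.ConnectedComponentsWithStats(binary, labels, stats, centroids);
for (int i = 1; i < n; i++) // label 0 is the background
{
    var rect = new Rect(stats.Get<int>(i, 0), stats.Get<int>(i, 1),
                        stats.Get<int>(i, 2), stats.Get<int>(i, 3));
    Console.WriteLine($"blob {i}: rect={rect}, area={stats.Get<int>(i, 4)}");
}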


#43. DFT: discrete Fourier transform and its inverse
//http://www.leheavengame.com/article/62a0b30c9ce7955627624f46
using System;
using OpenCvSharp;
using SampleBase;


namespace SamplesCore
{
    /// 
    /// DFT, inverse DFT
    /// http://stackoverflow.com/questions/19761526/how-to-do-inverse-dft-in-opencv
    /// 
    class DFT : ConsoleTestBase
    {
        public override void RunTest()
        {
            using var img = Cv2.ImRead(ImagePath.Lenna, ImreadModes.Grayscale); // source image


            // expand the input image to the optimal DFT size
            using var padded = new Mat(); 
            int m = Cv2.GetOptimalDFTSize(img.Rows);
            int n = Cv2.GetOptimalDFTSize(img.Cols); // on the border add zero values
            Cv2.CopyMakeBorder(img, padded, 0, m - img.Rows, 0, n - img.Cols, BorderTypes.Constant, Scalar.All(0));
            
            // add another plane filled with zeros (real part = image, imaginary part = 0)
            using var paddedF32 = new Mat();
            padded.ConvertTo(paddedF32, MatType.CV_32F);
            Mat[] planes = { paddedF32, Mat.Zeros(padded.Size(), MatType.CV_32F) };
            using var complex = new Mat();
            Cv2.Merge(planes, complex); // merge into one 2-channel complex Mat (channel 0 = real, channel 1 = imaginary)


            // this way the result may fit in the source matrix
            using var dft = new Mat();
            Cv2.Dft(complex, dft);            


            // compute the magnitude and switch to a logarithmic scale
            // => log(1 + sqrt(Re(DFT(I))^2 + Im(DFT(I))^2))
            Cv2.Split(dft, out var dftPlanes);  // planes[0] = Re(DFT(I), planes[1] = Im(DFT(I))


            // planes[0] = magnitude
            /*
            Computes the magnitude of 2D vectors:
            void magnitude(InputArray x, InputArray y, OutputArray magnitude)
            x: floating-point X coordinates of the vectors (the real part)
            y: floating-point Y coordinates of the vectors (the imaginary part)
            magnitude: output magnitudes, same size and type as x
            */
            using var magnitude = new Mat();
            Cv2.Magnitude(dftPlanes[0], dftPlanes[1], magnitude);


            using Mat magnitude1 = magnitude + Scalar.All(1);  // switch to logarithmic scale
            Cv2.Log(magnitude1, magnitude1); // natural logarithm of the absolute value of each element


            // crop the spectrum if it has an odd number of rows or columns
            // (x & -2 clears the lowest bit, rounding the size down to an even number)
            using var spectrum = magnitude1[
                new Rect(0, 0, magnitude1.Cols & -2, magnitude1.Rows & -2)];


            // rearrange the quadrants of the Fourier image so that the origin is at the image center
            int cx = spectrum.Cols / 2;
            int cy = spectrum.Rows / 2;


            using var q0 = new Mat(spectrum, new Rect(0, 0, cx, cy));   // Top-Left - Create a ROI per quadrant
            using var q1 = new Mat(spectrum, new Rect(cx, 0, cx, cy));  // Top-Right
            using var q2 = new Mat(spectrum, new Rect(0, cy, cx, cy));  // Bottom-Left
            using var q3 = new Mat(spectrum, new Rect(cx, cy, cx, cy)); // Bottom-Right


            // swap quadrants (Top-Left with Bottom-Right)
            using var tmp = new Mat();                           
            q0.CopyTo(tmp);
            q3.CopyTo(q0);
            tmp.CopyTo(q3);


            // swap quadrants (Top-Right with Bottom-Left)
            q1.CopyTo(tmp);                    
            q2.CopyTo(q1);
            tmp.CopyTo(q2);


            // transform the matrix of float values into a viewable 8-bit image
            Cv2.Normalize(spectrum, spectrum, 0, 255, NormTypes.MinMax); // rescale values to [0, 255]
            spectrum.ConvertTo(spectrum, MatType.CV_8U); // convert the type
                                     
            // show the result
            Cv2.ImShow("Input Image"       , img);
            Cv2.ImShow("Spectrum Magnitude", spectrum);


            // calculate the inverse DFT
            using var inverseTransform = new Mat();
            Cv2.Dft(dft, inverseTransform, DftFlags.Inverse | DftFlags.RealOutput); // inverse transform
            Cv2.Normalize(inverseTransform, inverseTransform, 0, 255, NormTypes.MinMax); // rescale values
            inverseTransform.ConvertTo(inverseTransform, MatType.CV_8U); // convert the type


            Cv2.ImShow("Reconstructed by Inverse DFT", inverseTransform);//显示逆变换结果
            Cv2.WaitKey();
            Cv2.DestroyAllWindows();
        }
    }
}
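
The same forward/inverse pair supports frequency-domain filtering: zero out part of the spectrum before inverting. A minimal low-pass sketch, assuming dft is the transformed 2-channel Mat from the sample above (the corner radius of 30 is an arbitrary assumption):

// In the unshifted DFT layout the low frequencies sit in the four corners,
// so the mask keeps a small square in each corner and zeroes everything else.
int radius = 30;
using var mask = Mat.Zeros(dft.Size(), MatType.CV_32FC2).ToMat();
foreach (var (x, y) in new[] { (0, 0), (dft.Cols - radius, 0),
                               (0, dft.Rows - radius), (dft.Cols - radius, dft.Rows - radius) })
{
    mask[new Rect(x, y, radius, radius)].SetTo(Scalar.All(1));
}
using var filtered = dft.Mul(mask).ToMat(); // element-wise: keep only low frequencies
using var restored = new Mat();
Cv2.Dft(filtered, restored, DftFlags.Inverse | DftFlags.RealOutput);
Cv2.Normalize(restored, restored, 0, 255, NormTypes.MinMax);
restored.ConvertTo(restored, MatType.CV_8U);
Cv2.ImShow("Low-pass reconstruction", restored);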


#44. FSRCNN super-resolution network: DNN-based upsampling to produce super-resolution images (also usable for super-resolution video)
//https://blog.csdn.net/qq_45122568/article/details/124002837
//https://blog.csdn.net/qq_45122568/category_11691350.html
//https://zhuanlan.zhihu.com/p/337190517
//FSRCNN is more than 40x faster than SRCNN and restores images with higher quality.
//DnnSuperResImpl is a class for upscaling images via convolutional neural networks. Four models are implemented:
//edsr
//espcn
//fsrcnn
//lapsrn
using OpenCvSharp;
using OpenCvSharp.DnnSuperres;
using SampleBase;


namespace SamplesCore
{
    class DnnSuperresSample : ConsoleTestBase
    {
        // https://github.com/Saafke/FSRCNN_Tensorflow/tree/master/models
        private const string ModelFileName = "Data/Model/FSRCNN_x4.pb";


        public override void RunTest()
        {
            using var dnn = new DnnSuperResImpl("fsrcnn", 4); // scale = 4: integer upscaling factor
            dnn.ReadModel(ModelFileName); // load the model


            using var src = new Mat(ImagePath.Mandrill, ImreadModes.Color); // source image
            using var dst = new Mat(); // output image
            dnn.Upsample(src, dst); // upsample through the neural network


            Window.ShowImages(
                new[]{src, dst}, 
                new[]{"src", "dst0"});
        }
    }
}
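
To see what the network adds over classic interpolation, the same 4x factor can be compared against bicubic resizing; a minimal sketch, assuming src and dst from the sample above are still in scope:

using var bicubic = new Mat();
Cv2.Resize(src, bicubic, dst.Size(), 0, 0, InterpolationFlags.Cubic); // bicubic baseline at the same output size
Window.ShowImages(new[] { dst, bicubic }, new[] { "fsrcnn x4", "bicubic x4" });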


#45. Drawing the best matches between an image pair
//https://zhuanlan.zhihu.com/p/91479558
//ORB (Oriented FAST and Rotated BRIEF) combines the Oriented FAST detector with the Rotated BRIEF descriptor (arguably it should be called OFRB). It is currently among the fastest and most stable feature detection and description algorithms; many image-stitching and object-tracking systems are built on ORB features.
using System.Linq;
using OpenCvSharp;
using SampleBase;


namespace SamplesCore
{
    /// 
    /// https://stackoverflow.com/questions/51606215/how-to-draw-bounding-box-on-best-matches/51607041#51607041
    /// 
    class DrawBestMatchRectangle : ConsoleTestBase
    {
        public override void RunTest()
        {
            using var img1 = new Mat(ImagePath.Match1, ImreadModes.Color); // image 1: the object to search for
            using var img2 = new Mat(ImagePath.Match2, ImreadModes.Color); // image 2: the scene


            using var orb = ORB.Create(1000);
            using var descriptors1 = new Mat();
            using var descriptors2 = new Mat();
            // ORB = Oriented FAST (keypoints) + Rotated BRIEF (descriptors)
            orb.DetectAndCompute(img1, null, out var keyPoints1, descriptors1); // detect ORB keypoints and compute descriptors
            orb.DetectAndCompute(img2, null, out var keyPoints2, descriptors2);


            using var bf = new BFMatcher(NormTypes.Hamming, crossCheck: true); // brute-force matcher
            var matches = bf.Match(descriptors1, descriptors2);


            var goodMatches = matches
                .OrderBy(x => x.Distance)
                .Take(10)
                .ToArray(); // keep the 10 closest matches


            var srcPts = goodMatches.Select(m => keyPoints1[m.QueryIdx].Pt).Select(p => new Point2d(p.X, p.Y)); // points in the source image
            var dstPts = goodMatches.Select(m => keyPoints2[m.TrainIdx].Pt).Select(p => new Point2d(p.X, p.Y)); // corresponding points in the scene image
            // compute the optimal homography H (3x3) between the two point sets, using least squares or RANSAC
            using var homography = Cv2.FindHomography(srcPts, dstPts, HomographyMethods.Ransac, 5, null); // transform between the two planes


            int h = img1.Height, w = img1.Width;
            var img2Bounds = new[] // corners of img1, to be projected into img2
            {
                new Point2d(0, 0), 
                new Point2d(0, h-1),
                new Point2d(w-1, h-1), 
                new Point2d(w-1, 0),
            };
            var img2BoundsTransformed = Cv2.PerspectiveTransform(img2Bounds, homography); // map img1's corners into the scene image


            using var view = img2.Clone();
            var drawingPoints = img2BoundsTransformed.Select(p => (Point) p).ToArray(); // convert to integer points for drawing
            Cv2.Polylines(view, new []{drawingPoints}, true, Scalar.Red, 3); // draw the object's bounding quadrilateral on image 2


            using (new Window("view", view))
            {
                Cv2.WaitKey();
            }
        }
    }
}
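
To visualize the matches themselves rather than only the projected outline, Cv2.DrawMatches renders both images side by side with lines between matched keypoints. A minimal sketch reusing img1/img2, keyPoints1/keyPoints2 and goodMatches from the sample above:

using var matchView = new Mat();
Cv2.DrawMatches(img1, keyPoints1, img2, keyPoints2, goodMatches, matchView); // side-by-side view of the 10 best matches
using (new Window("matches", matchView))
{
    Cv2.WaitKey();
}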

References:

https://shimat.github.io/opencvsharp_docs/html/d69c29a1-7fb1-4f78-82e9-79be971c3d03.htm 

https://github.com/shimat/opencvsharp
