我使用的环境为EmguCv3.0.0+KinectSDK2.0+vs2015
使用Kinect获取面部帧比较特殊,必须要在x64环境下才行,另外,还需要将示例项目文件夹中的NuiDatabase复制到程序的根目录里
利用face库里的FaceFrameFeatures可以很方便的获取面部的多种信息
FaceFrameFeatures DefaultFaceFrameFeatures = FaceFrameFeatures.PointsInColorSpace | FaceFrameFeatures.RotationOrientation | FaceFrameFeatures.BoundingBoxInColorSpace;我这只获取了在这个项目中用的到的几种,下图是全部的可获取的信息
还是用的WinForm和ImageBox
我这个和SDK里的示例不同,只能追踪一个人
代码如下:
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
using Emgu.CV;
using Emgu.CV.Structure;
using Microsoft.Kinect;
using Microsoft.Kinect.Face;

namespace EmguCV_Kinect_Face
{
    /// <summary>
    /// Demonstrates Kinect 2.0 face tracking rendered through EmguCV:
    /// a multi-source reader supplies color + body frames, a FaceFrameReader
    /// supplies face results, and face points / bounding box are drawn onto
    /// the color image shown in an ImageBox. Tracks a single body only.
    /// NOTE(review): face tracking requires an x64 build and the NuiDatabase
    /// folder next to the executable — per the Kinect Face API requirements.
    /// </summary>
    public partial class Form1 : Form
    {
        KinectSensor kinect = null;
        MultiSourceFrameReader reader = null;
        FaceFrameSource facesource = null;
        FaceFrameReader facereader = null;
        FrameDescription fd = null;
        Image<Bgra, byte> colorimg = null;   // reusable BGRA canvas matching the color stream size
        Body[] bodies = null;
        Body target;                          // most recently selected tracked body
        byte[] cdata;                         // raw BGRA buffer copied from each ColorFrame

        // Only the features this sample actually consumes: face points,
        // rotation, and the bounding box — all in color-space coordinates.
        FaceFrameFeatures DefaultFaceFrameFeatures =
            FaceFrameFeatures.PointsInColorSpace |
            FaceFrameFeatures.RotationOrientation |
            FaceFrameFeatures.BoundingBoxInColorSpace;

        public Form1()
        {
            InitializeComponent();
            kinect = KinectSensor.GetDefault();
            reader = kinect.OpenMultiSourceFrameReader(FrameSourceTypes.Body | FrameSourceTypes.Color);
            reader.MultiSourceFrameArrived += Reader_MultiSourceFrameArrived;
            // TrackingId 0 means "not tracking yet"; a real id is assigned
            // in Reader_MultiSourceFrameArrived once a tracked body appears.
            facesource = new FaceFrameSource(kinect, 0, DefaultFaceFrameFeatures);
            facereader = facesource.OpenReader();
            facereader.FrameArrived += Facereader_FrameArrived;
            fd = kinect.ColorFrameSource.FrameDescription;
            colorimg = new Image<Bgra, byte>(fd.Width, fd.Height);
            // Bytes is a byte[]: use Length directly instead of the LINQ Count<byte>().
            cdata = new byte[colorimg.Bytes.Length];
            kinect.Open();
        }

        /// <summary>
        /// Draws the detected face points and bounding box onto the shared color image.
        /// </summary>
        private void Facereader_FrameArrived(object sender, FaceFrameArrivedEventArgs e)
        {
            using (FaceFrame fframe = e.FrameReference.AcquireFrame())
            {
                // FIX: AcquireFrame() returns null when the frame has expired —
                // the original dereferenced fframe without checking.
                if (fframe == null || !fframe.IsTrackingIdValid) return;
                var result = fframe.FaceFrameResult;
                if (result == null) return;

                foreach (var point in result.FacePointsInColorSpace)
                {
                    if (point.Key == FacePointType.None) continue;
                    CvInvoke.Circle(colorimg,
                        new Point((int)point.Value.X, (int)point.Value.Y),
                        5, new Bgra(0, 0, 255, 255).MCvScalar, 4);
                }

                var space = result.FaceBoundingBoxInColorSpace;
                Rectangle faceBox = new Rectangle(space.Left, space.Top,
                    space.Right - space.Left, space.Bottom - space.Top);
                CvInvoke.Rectangle(colorimg, faceBox, new Bgra(0, 0, 255, 255).MCvScalar, 5);
                // Optional: show just the cropped face region in a second ImageBox.
                //imageBox2.Image = colorimg.Copy(faceBox);
            }
        }

        /// <summary>
        /// Copies each color frame into the EmguCV image, refreshes body data,
        /// and (re)assigns the face source's TrackingId to the first tracked body.
        /// </summary>
        private void Reader_MultiSourceFrameArrived(object sender, MultiSourceFrameArrivedEventArgs e)
        {
            MultiSourceFrame frame = e.FrameReference.AcquireFrame();
            if (frame == null) return;
            // FIX: the original leaked one frame whenever only the other was null
            // (early return skipped Dispose). Undisposed Kinect frames exhaust the
            // frame pool and stall the stream; using-statements guarantee disposal.
            using (ColorFrame cframe = frame.ColorFrameReference.AcquireFrame())
            using (BodyFrame bframe = frame.BodyFrameReference.AcquireFrame())
            {
                if (cframe == null || bframe == null) return;
                if (bodies == null) bodies = new Body[bframe.BodyCount];
                bframe.GetAndRefreshBodyData(bodies);

                // Re-acquire a target whenever the current tracking id is stale.
                if (!facesource.IsTrackingIdValid)
                {
                    target = bodies.FirstOrDefault(body => body.IsTracked);
                    if (target != null)
                    {
                        facesource.TrackingId = target.TrackingId;
                    }
                }

                cframe.CopyConvertedFrameDataToArray(cdata, ColorImageFormat.Bgra);
                colorimg.Bytes = cdata;
                imageBox1.Image = colorimg;
            }
        }

        /// <summary>
        /// Releases readers, sources and the sensor on shutdown.
        /// FIX: the original closed only the sensor and leaked both readers
        /// and the face source.
        /// </summary>
        private void Form1_FormClosing(object sender, FormClosingEventArgs e)
        {
            if (facereader != null)
            {
                facereader.Dispose();
                facereader = null;
            }
            if (facesource != null)
            {
                facesource.Dispose();
                facesource = null;
            }
            if (reader != null)
            {
                reader.Dispose();
                reader = null;
            }
            if (kinect != null)
            {
                kinect.Close();
                kinect = null;
            }
        }
    }
}