Simple gesture recognition (Fruit Ninja) with OpenCV's SVM [source code included]

An SVM classifier implementation based on HOG features


In machine learning, a Support Vector Machine (SVM) is a supervised learning model commonly used for pattern recognition, classification, and regression analysis.
The Histogram of Oriented Gradients (HOG) feature is a descriptor used for object detection in computer vision and image processing. A HOG feature is built by computing and accumulating histograms of gradient orientations over local regions of an image.

1. Computing HOG

void calculateHog(const Mat& src, vector<float>& descriptors, Size blockSize, Size blockStride, Size cellSize)
{
	// HOGDescriptor(winSize, blockSize, blockStride, cellSize, nbins); number of orientation bins nbins = 9
	HOGDescriptor myHog = HOGDescriptor(src.size(), blockSize, blockStride, cellSize, 9);
	myHog.compute(src.clone(), descriptors, Size(1, 1), Size(0, 0));
}
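With the sizes used later in this post (a 64×64 window, 32×32 blocks, an 8×8 block stride, 8×8 cells and 9 bins), the descriptor length works out to ((64-32)/8+1)² blocks × (32/8)² cells per block × 9 bins = 25 × 16 × 9 = 3600 floats. The following standalone sketch (not part of the original project) simply asks OpenCV for the same number:

#include <opencv2/opencv.hpp>
#include <iostream>

int main()
{
	// same parameters as the code below: win 64x64, block 32x32, stride 8x8, cell 8x8, 9 bins
	cv::HOGDescriptor hog(cv::Size(64, 64), cv::Size(32, 32), cv::Size(8, 8), cv::Size(8, 8), 9);
	std::cout << hog.getDescriptorSize() << std::endl;	// prints 3600
	return 0;
}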

2. SVM

It is therefore straightforward to write an SVM classifier based on HOG features:

#ifndef HOGSVM_H
#define HOGSVM_H

#include "cvHeadspace.h"
#include <opencv2/opencv.hpp>
#include <string>
#include <vector>
using namespace cv;
using namespace cv::ml;
class HogSVM
{
public:
    HogSVM(std::string xmlFilePath, Size sBlock, Size sBlockStride, Size sCell, int enBins = 9)
    {
        pHogSVM = Algorithm::load<SVM>(xmlFilePath);
        HogDesSize[0] = sBlock;
        HogDesSize[1] = sBlockStride;
        HogDesSize[2] = sCell;
        nbins = enBins;
    }
    int getLabel(const Mat &src)
    {
        std::vector<float> imageDescriptor;
        calculateHog(src, imageDescriptor,HogDesSize[0],HogDesSize[1],HogDesSize[2]);
        Mat testDescriptor = Mat::zeros(1, imageDescriptor.size(), CV_32FC1);
        for (size_t i = 0; i < imageDescriptor.size(); i++)
        {
            testDescriptor.at<float>(0, i) = imageDescriptor[i];
        }
        float  label = pHogSVM->predict(testDescriptor);
        return (int)label;
    }
private:
    void calculateHog(const Mat& src, std::vector<float>& descriptors, Size blockSize, Size blockStride, Size cellSize)
    {
        // HOGDescriptor(winSize, blockSize, blockStride, cellSize, nbins); nbins defaults to 9
        HOGDescriptor myHog = HOGDescriptor(src.size(), blockSize, blockStride, cellSize, nbins);
        myHog.compute(src.clone(), descriptors, Size(1, 1), Size(0, 0));
    }
    int nbins;
    Size HogDesSize[3];
    Ptr<SVM> pHogSVM;
};


#endif // HOGSVM_H

3. Testing

I did a quick training run, loaded the trained XML file, and ran a test (the unit in the table should be ms, not s):

int label = tempSvm.getLabel(rROI);
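For context, here is a minimal sketch of how that call could be wired up with the HogSVM class from section 2. The header name "HogSVM.h", the file "svm.xml" and the image "hand_sample.png" are assumptions for illustration only; the sizes match the training code in svm_train further below (64×64 window, 32×32 block, 8×8 stride, 8×8 cell):

#include <opencv2/opencv.hpp>
#include <iostream>
#include "HogSVM.h"	// the header from section 2 (assumed file name)
using namespace cv;

int main()
{
	HogSVM tempSvm("svm.xml", Size(32, 32), Size(8, 8), Size(8, 8), 9);
	Mat candidateRegion = imread("hand_sample.png", IMREAD_GRAYSCALE);	// hypothetical segmented hand patch
	Mat rROI;
	resize(candidateRegion, rROI, Size(64, 64));	// the HOG window size must match training (64x64)
	int label = tempSvm.getLabel(rROI);	// predicted gesture class
	std::cout << label << std::endl;
	return 0;
}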

[Figure 1: test results table]

4. A small demo

I put together a small demo with SVM + Qt + OpenCV to test it:
[Figure 2: demo screenshot]

After adjusting the sensitivity a bit and clicking "抢占" (take over), the demo calls the Windows API to control the mouse, and you can start playing happily!
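The mouse-control part only needs a couple of Win32 calls. Below is a minimal sketch of the idea, an assumption rather than the original demo code; handX/handY stand for hypothetical hand coordinates already mapped to screen space:

#include <windows.h>

// Minimal Win32 sketch (not the original Qt demo): move the cursor to the
// detected hand position and send a left-button press/release to "slice".
void driveMouse(int handX, int handY)	// handX/handY: hypothetical screen-space coordinates
{
	SetCursorPos(handX, handY);	// move the cursor

	INPUT click[2] = {};
	click[0].type = INPUT_MOUSE;
	click[0].mi.dwFlags = MOUSEEVENTF_LEFTDOWN;	// press
	click[1].type = INPUT_MOUSE;
	click[1].mi.dwFlags = MOUSEEVENTF_LEFTUP;	// release
	SendInput(2, click, sizeof(INPUT));
}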

5. Update, since some people asked for it

It has been a while and some things are gone, but the code is still here. I hadn't uploaded it before because the code felt too rough, really just an undergraduate-assignment-level toy, and I was embarrassed to share it. Since people asked for it, I have uploaded it and revised this post as follows.

The project consists of three sub-projects.
vs_test_project was used for the early gesture-recognition tests.
svm_train was used to train the svm.xml file. Both of these projects are pasted in full below.
The Qt GUI project was used for the demo, but I did not back it up; it mainly called the Windows API and displayed the intermediate image-processing steps.

This was an undergraduate assignment. It mainly builds on someone else's open-source code and paper; the principles of the skin-color detection part can be found in that author's undergraduate thesis. On top of his work I mainly did two things:
① After skin-color segmentation, the original code distinguished the face from the hands by counting skin pixels, which worked rather poorly, so I found an existing face-detection XML model on GitHub for removing faces and simply call it through OpenCV.
② The template matching performed too poorly, so I replaced it with HOG + SVM, which gave a clear improvement.
My C++ level at the time was roughly "C with classes", so please go easy on it.

vs_test_project FaceDetector.h

#pragma once

#include <opencv2/opencv.hpp>

using namespace cv;
using namespace std;

class FaceDetector {
public:
	FaceDetector(void);
	void removeFaces(Mat &input, Mat &output);
}; 

vs_test_project FaceDetector.cpp

#include "FaceDetector.h"
#include <opencv2/opencv.hpp>
#include <opencv2/imgproc/types_c.h>
#include <stdexcept>
#include <string>
#include <vector>



Rect getFaceRect(Mat input);

String faceClassifierFileName = "haarcascade_frontalface_alt.xml";
CascadeClassifier faceCascadeClassifier;

FaceDetector::FaceDetector(void) {
	if (!faceCascadeClassifier.load(faceClassifierFileName))
		throw runtime_error("can't load file " + faceClassifierFileName);
}

void FaceDetector::removeFaces(Mat &input, Mat &output) {
	vector<Rect> faces;
	Mat frameGray;

	cvtColor(input, frameGray, CV_BGR2GRAY);
	equalizeHist(frameGray, frameGray);

	faceCascadeClassifier.detectMultiScale(frameGray, faces, 1.1, 2, 0 | CASCADE_SCALE_IMAGE, Size(100, 100));

	for (size_t i = 0; i < faces.size(); i++) {
		rectangle(
			output,
			Point(faces[i].x, faces[i].y),
			Point(faces[i].x + faces[i].width, faces[i].y + faces[i].height),
			Scalar(0, 0, 0),
			-1
		);
	}
}

Rect getFaceRect(Mat input) {
	vector<Rect> faceRectangles;
	Mat inputGray;

	cvtColor(input, inputGray, CV_BGR2GRAY);
	equalizeHist(inputGray, inputGray);

	faceCascadeClassifier.detectMultiScale(inputGray, faceRectangles, 1.1, 2, 0 | CASCADE_SCALE_IMAGE, Size(120, 120));

	if (faceRectangles.size() > 0)
		return faceRectangles[0];
	else
		return Rect(0, 0, 1, 1);
}

vs_test_project HogSvm.h

#pragma once
#include <opencv2/opencv.hpp>
#include <string>
#include <vector>
using namespace std;
using namespace cv;
using namespace cv::ml;
class HogSvm
{
public:
	HogSvm(std::string xmlFilePath)
	{
		mySVM = Algorithm::load<SVM>(xmlFilePath);
	}
	int getLabel(const Mat &src)
	{
		vector<float> imageDescriptor;
		coumputeHog(src, imageDescriptor);
		Mat testDescriptor = Mat::zeros(1, imageDescriptor.size(), CV_32FC1);
		for (size_t i = 0; i < imageDescriptor.size(); i++)
		{
			testDescriptor.at<float>(0, i) = imageDescriptor[i];
		}
		float  label = mySVM->predict(testDescriptor);
		return (int)label;
	}
private:
	void coumputeHog(const Mat& src, vector<float>& descriptors)
	{
		HOGDescriptor myHog = HOGDescriptor(src.size(), Size(32, 32), Size(8, 8), Size(8, 8), 9);
		myHog.compute(src.clone(), descriptors, Size(1, 1), Size(0, 0));
	}
	Ptr<SVM> mySVM;
};


vs_test_project SkinSegment.h

#pragma once
#include <opencv2/opencv.hpp>
#include <opencv2/imgproc/types_c.h>
#include <vector>
#include <stack>
#include <utility>
#include <algorithm>
using namespace cv;
using namespace std;
// value range
struct ValueRange {
	int lowerBound;
	int upperBound;
	ValueRange() {
		lowerBound = 0; 
		upperBound= 0;
	}
	ValueRange(int pLowerBound, int pUpperBound) :lowerBound(pLowerBound), upperBound(pUpperBound) {}
	int getDistance()
	{
		return upperBound - lowerBound;
	}
	void updateValueRange(int capVal) {
		lowerBound = min(lowerBound, capVal);
		upperBound = max(upperBound, capVal);
	}
};

// pixel search class
class SearchPixel {
public:
	// seed fill
	static void SeedFillfunc(int grayTrackingValue, const cv::Mat& binaryImg, int& blockNums, ValueRange(*yAxisRealm), ValueRange(*xAxisRealm), vector<pair<int, int> >(*pointSet));
	static Mat WaterShedToBin(const cv::Mat& binaryImg);
};

// linear transforms
class LinearConvert {
public:
	static void MirrorConvert(Mat pSrc,Mat &pDst)
	{
		int row = pSrc.rows;
		int col = pSrc.cols;
		pDst = pSrc.clone();
		for (int i = 0; i < col; i++) {
			pSrc.col(col - 1 - i).copyTo(pDst.col(i));
		}
	}

};

// color-space sieves
class SkinSieve
{
public:
	/* skin sieve combining the H range (HSV) with Cr/Cb ranges (YCrCb) */
	static Mat HcbCrConverter(Mat& src, Mat graySrc, Mat ycrcbSrc, Mat hsvSrc, bool isReturnMask);
	static Mat CbCrSieveFunc(Mat& src, ValueRange cbRange, ValueRange crRange);
	static Mat HSVSieveFunc(Mat& src, ValueRange hRange, ValueRange sRange, ValueRange vRange, bool isReturnMask);
	static Mat OstuConverter(Mat& src);
	static Mat GrayConverter(Mat& src) {
		Mat binImage;
		cvtColor(src, binImage, CV_BGR2GRAY);
		return binImage;
	}
	static Mat YCrCbConverter(Mat& src) {
		Mat Image;
		cvtColor(src, Image, CV_BGR2YCrCb);
		return Image;
	}
	static Mat HSvConverter(Mat& src) {
		Mat Image;
		cvtColor(src, Image, CV_BGR2HSV);
		return Image;
	}
private:
};

// statistics class
class Statistic {
public:
	// pixel statistics
	static void pixelStatistic(float* ColorRatio, int markedNumber, ValueRange* xAxisRealm, ValueRange* yAxisRealm, Mat pSrc)
	{
		for (int k = 0; k < markedNumber; k++)
		{
			ColorRatio[k] = 1;
			if (((xAxisRealm[k].getDistance() + 1) > 50) && ((xAxisRealm[k].getDistance() + 1) < 300) && ((yAxisRealm[k].getDistance() > 150) && ((yAxisRealm[k].getDistance() + 1) < 450)))
			{
				int fusepoint = 0;
				for (int j = yAxisRealm[k].lowerBound; j < yAxisRealm[k].upperBound; j++)
				{
					uchar* current = pSrc.ptr< uchar>(j);
					for (int i = xAxisRealm[k].lowerBound; i < xAxisRealm[k].upperBound; i++)
					{
						if (current[i] == 255)
							fusepoint++;
					}
				}
				ColorRatio[k] = float(fusepoint) / ((xAxisRealm[k].getDistance() + 1) * (yAxisRealm[k].getDistance() + 1));
			}
		}
	}
	static void pixelStatistic(float* ColorRatio, int markedNumber, int* xMin, int* xMax, int* yMin, int* yMax, Mat pSrc)
	{
		for (int k = 0; k < markedNumber; k++)
		{
			ColorRatio[k] = 1;
			if (((xMax[k] - xMin[k] + 1) > 50) && ((xMax[k] - xMin[k] + 1) < 300) && ((yMax[k] - yMin[k] + 1) > 150) && ((yMax[k] - yMin[k] + 1) < 450))
			{
				int fusepoint = 0;
				for (int j = yMin[k]; j < yMax[k]; j++)
				{
					uchar* current = pSrc.ptr< uchar>(j);
					for (int i = xMin[k]; i < xMax[k]; i++)
					{
						if (current[i] == 255)
							fusepoint++;
					}
				}
				ColorRatio[k] = float(fusepoint) / ((xMax[k] - xMin[k] + 1) * (yMax[k] - yMin[k] + 1));
			}
		}
	}
};

// watershed segmentation
class WatershedSegment {
private:
	cv::Mat markers;
public:
	void setMarkers(const cv::Mat& markerImage) {

		// convert to a 32-bit integer image
		markerImage.convertTo(markers, CV_32S);
	}

	cv::Mat process(const cv::Mat& image) {
		// apply the watershed algorithm
		cv::watershed(image, markers);
		return markers;
	}

	// return the segmentation result as an image
	cv::Mat getSegmentation() {
		cv::Mat tmp;
		// all segments whose label is above 255
		// will be saturated to 255
		markers.convertTo(tmp, CV_8U);
		return tmp;
	}

	// return the watershed boundaries as an image
	cv::Mat getWatersheds() {
		cv::Mat tmp;
		markers.convertTo(tmp, CV_8U, 255, 255);
		return tmp;
	}
};

vs_test_project SkinSegment.cpp

#include "SkinSegment.h"
Mat SkinSieve::HcbCrConverter(Mat& src, Mat graySrc, Mat ycrcbSrc, Mat hsvSrc, bool isReturnMask)
{
	Mat Y, Cr, Cb, H;
	vector<Mat> channels, channels1;
	Mat binImage;
	graySrc.copyTo(binImage);
	// split channels
	split(ycrcbSrc, channels);
	split(hsvSrc, channels1);
	Cr = channels.at(1);	// Cr channel
	Cb = channels.at(2);	// Cb channel
	H = channels1.at(0);	// H channel

	// skin detection: produce a binary image
	for (int j = 1; j < Cr.rows - 1; j++)	// iterate over image pixels
	{
		uchar* currentCr = Cr.ptr< uchar>(j);
		uchar* currentCb = Cb.ptr< uchar>(j);
		uchar* currentH = H.ptr< uchar>(j);
		uchar* current = binImage.ptr< uchar>(j);

		for (int i = 1; i < Cb.cols - 1; i++)
		{
			if ((currentCr[i] >= 135) && (currentCr[i] <= 170) && (currentCb[i] >= 94) && (currentCb[i] <= 125) && (currentH[i] >= 1) && (currentH[i] <= 23))
				current[i] = 255;
			else
				current[i] = 0;
		}
	}
	if (isReturnMask == true) {
		return binImage;
	}
	Mat detect;
	src.copyTo(detect, binImage);
	return detect;
}
/*
ValueRange hScope(0, 40);
ValueRange sScope(48, 360);
ValueRange vScope(50, 360);

ValueRange cbScope(102, 143);
ValueRange crScope(130, 170);
*/
Mat SkinSieve::CbCrSieveFunc(Mat& src, ValueRange cbRange, ValueRange crRange)
{
	/* Cr/Cb range sieve in the YCrCb color space */
	Mat ycrcb_image;
	int Cr = 1;
	int Cb = 2;
	cvtColor(src, ycrcb_image, CV_BGR2YCrCb); // convert to YCrCb first
	Mat output_mask = Mat::zeros(src.size(), CV_8UC1);
	for (int i = 0; i < src.rows; i++)
	{
		for (int j = 0; j < src.cols; j++)
		{
			uchar* p_mask = output_mask.ptr<uchar>(i, j);
			uchar* p_src = ycrcb_image.ptr<uchar>(i, j);
			if (p_src[Cr] >= crRange.lowerBound && p_src[Cr] <= crRange.upperBound && p_src[Cb] >= cbRange.lowerBound && p_src[Cb] <= cbRange.upperBound)
			{
				p_mask[0] = 255;
			}
		}
	}
	Mat out;
	src.copyTo(out, output_mask);
	return out;
}
Mat SkinSieve::HSVSieveFunc(Mat& src, ValueRange hRange, ValueRange sRange, ValueRange vRange,bool isReturnMask)
{
	/* H, S, V range sieve in the HSV color space */
	Mat hsv_image;
	int h = 0;
	int s = 1;
	int v = 2;
	cvtColor(src, hsv_image, CV_BGR2HSV); // convert to HSV first
	Mat output_mask = Mat::zeros(src.size(), CV_8UC1);
	for (int i = 0; i < src.rows; i++)
	{
		for (int j = 0; j < src.cols; j++)
		{
			uchar* p_mask = output_mask.ptr<uchar>(i, j);
			uchar* p_src = hsv_image.ptr<uchar>(i, j);
			if (p_src[h] >= hRange.lowerBound && p_src[h] <= hRange.upperBound && p_src[s] >= sRange.lowerBound && p_src[s] <= sRange.upperBound && p_src[v] >= vRange.lowerBound && p_src[v] <= vRange.upperBound)
			{
				p_mask[0] = 255;
			}
		}
	}
	if (isReturnMask == true) return output_mask;
	Mat out;
	src.copyTo(out, output_mask);
	return out;
}
Mat SkinSieve::OstuConverter(Mat& src)
{
	 Mat gray;
	 cvtColor(src, gray, CV_BGR2GRAY);

	 Mat dst;
	 threshold(gray, dst, 0, 255, CV_THRESH_OTSU);

	 return dst;
}
Mat SearchPixel::WaterShedToBin(const cv::Mat& binaryImg)
{
	Mat tempMat;
	binaryImg.convertTo(tempMat, CV_32SC1);
	Mat dst;
	for (int i = 0; i < tempMat.rows; i++)	// scan every pixel row
	{
		int* data = tempMat.ptr<int>(i);
		for (int j = 0; j < tempMat.cols; j++)
		{

			if (data[j] !=255)
			{
				data[j] = 0;
			}
		}
	}
	tempMat.convertTo(dst, CV_8UC1);	// convert back to 8-bit
	return dst;
}
void SearchPixel::SeedFillfunc(int grayTrackingValue,const cv::Mat& binaryImg ,int& blockNums, ValueRange(*yAxisRealm), ValueRange(*xAxisRealm), vector<pair<int, int> >(*pointSet))
{
	if (binaryImg.empty() || binaryImg.type() != CV_8UC1)	// return if the image is empty or not 8-bit single-channel
	{
		return;
	}
	Mat tempMat;
	binaryImg.convertTo(tempMat, CV_32SC1);	// convert to a 32-bit integer matrix
	int blocks = 0;
	int rows = binaryImg.rows;
	int cols = binaryImg.cols;
	
	int yNeighborOffset[8] = { -1,1, 0,0,-1,1, 1,-1 };	// 8-connected neighbourhood offsets
	int xNeighborOffset[8] = {  0,0,-1,1,-1,1,-1, 1 };

	for (int i = 1; i < rows - 1; i++)	// do not search the image border
	{
		int* data = tempMat.ptr<int>(i);
		for (int j = 1; j < cols - 1; j++)
		{

			if (data[j] == grayTrackingValue)
			{
				stack<pair<int, int> > neighborPixels;
				neighborPixels.push(std::pair<int, int>(j, i));// push the seed pixel position
				yAxisRealm[blocks] = ValueRange(i, i);         // y-range of the blob, used for the bounding box
				xAxisRealm[blocks] = ValueRange(j, j);         // x-range of the blob, used for the bounding box
				while (!neighborPixels.empty())
				{
					pair<int, int> currentPixel = neighborPixels.top(); 
					int currentX = currentPixel.first;
					int currentY = currentPixel.second;

					yAxisRealm[blocks].updateValueRange(currentY);  // update the blob's x/y range for the bounding box
					xAxisRealm[blocks].updateValueRange(currentX);

					pointSet[blocks].push_back(currentPixel);           // record the current pixel, used for outlining

					tempMat.at<int>(currentY, currentX) = 255;

					neighborPixels.pop();	// pop

					if ((currentX > 0) && (currentY > 0) && (currentX < (cols - 1)) && (currentY < (rows - 1)))	// border guard
					{
						for (int index = 0; index < 8; index++)
						{
							int yNeighborAxis = currentY + yNeighborOffset[index];
							int xNeighborAxis = currentX + xNeighborOffset[index];
							if (tempMat.at<int>(yNeighborAxis, xNeighborAxis) == grayTrackingValue)
								neighborPixels.push(pair<int, int>(xNeighborAxis, yNeighborAxis));
						}
					}
				}
				++blocks;
			}
		}
	}
	blockNums = blocks;
}


vs_test_project main.cpp

#include <string>
#include <vector>

//cvHeaderFiles set
#include <opencv2/opencv.hpp>
#include "SkinSegment.h"
#include "FaceDetector.h"
#include "HogSvm.h"
using namespace std;
using namespace cv;
using namespace cv::ml;
void drawPointSet(Mat& src, vector<pair<int, int > > pointSet, cv::Scalar color);

int main()
{
	// input and output frames
	Mat frame, out;
	Mat copy;
	FaceDetector faceDetector;
	HogSvm tempSvm("svm.xml");
	// video capture
	VideoCapture cap(0);
	if (!cap.isOpened()) return -1;
	// per-color-space frames
	Mat grayImg, ycrcbImg, hsvImg;

	bool stop = false;
	while (!stop)
	{
		cap >> frame;
		if (frame.empty()) break;
		LinearConvert::MirrorConvert(frame, frame);
		frame.copyTo(copy);
		// color-space conversions
		grayImg = SkinSieve::GrayConverter(frame);
		ycrcbImg = SkinSieve::YCrCbConverter(frame);
		hsvImg = SkinSieve::HSvConverter(frame);
		// color-space sieve
		//out = SkinSieve::HcbCrConverter(frame, grayImg, ycrcbImg, hsvImg, true);
		out = SkinSieve::HSVSieveFunc(frame, ValueRange(0, 40), ValueRange(48, 360), ValueRange(50, 360),true);
		// mask out faces
		faceDetector.removeFaces(frame, out);
		// erode then dilate
		erode(out, out, Mat());
		dilate(out, out, Mat());
		// six erosions to obtain the sure foreground
		Mat foreGround;
		erode(out, foreGround, cv::Mat(), cv::Point(-1, -1), 6);
		// identify pixels that certainly contain no object
		Mat backGround;
		dilate(out, backGround, cv::Mat(), cv::Point(-1, -1), 6);	// six dilations
		threshold(backGround, backGround, 1, 128, cv::THRESH_BINARY_INV);	// inverted binary threshold: pixels above 1 become 0, the rest become 128
		// build the marker image
		Mat markers(grayImg.size(), CV_8U, cv::Scalar(0));
		markers = foreGround + backGround;
		// create the watershed segmenter
		WatershedSegment segmenter;
		segmenter.setMarkers(markers);
		segmenter.process(frame);
		// retrieve the watershed segmentation
		Mat waterShed;
		waterShed = segmenter.getSegmentation();
		// 8-connected blob search
		int markedNum;
		ValueRange yRealm[20], xRealm[20];vector<pair<int, int> > borderPointSet[20];
		SearchPixel::SeedFillfunc(0, waterShed, markedNum, yRealm, xRealm, borderPointSet);
		// compute the skin-pixel ratio of each blob
		float skinRatio[20];
		Statistic::pixelStatistic(skinRatio, markedNum,xRealm,yRealm,waterShed);

		Mat rROI;
		// draw boxes around blobs that meet the threshold
		for (int i = 0; i < markedNum; i++)
		{
			if ((skinRatio[i] < 0.78))
			{
				// crop the region
				Size dsize = Size(64, 64);
				rROI = Mat(dsize, CV_8UC1);
				Rect roiRect= Rect(xRealm[i].lowerBound, yRealm[i].lowerBound,xRealm[i].getDistance(),yRealm[i].getDistance());
				resize(waterShed(roiRect), rROI, dsize);
				// outline the blob
				drawPointSet(frame, borderPointSet[i], Scalar(75, 0, 130));
				rectangle(frame, roiRect, Scalar(75, 0, 130));
				// SVM classification
				int label = tempSvm.getLabel(rROI);
				// add a text label (hand marker)
				Point end = Point(xRealm[i].lowerBound, yRealm[i].lowerBound);   
				string str = "Hand" + to_string((int)label);
				putText(frame, str, end, cv::FONT_HERSHEY_DUPLEX, 0.7, cv::Scalar(255, 0, 0), 2);	
			}
		}
		imshow("frame", frame);
		int key = waitKey(1);

		if (key == 98) // b
			stop = true;
	}
    return 0;
}

void drawPointSet(Mat& src,vector<pair<int,int > > pointSet,cv::Scalar color)
{
	for (vector<pair<int, int >>::iterator it = pointSet.begin(); it != pointSet.end(); ++it)
		cv::circle(src,Point(it->first,it->second),1, color);
}


svm_train main.cpp
Each line of label.txt is the label for the image on the corresponding line of train.txt; each line of train.txt is the path to one training image.
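For example, the two files might look like this (the image names and label values below are made up, purely to illustrate the format):

train.txt:
gestures/fist_001.png
gestures/palm_001.png
gestures/fist_002.png

label.txt:
1
2
1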

// SVM multi-class training and testing
#include <iostream>
#include <fstream>
#include <string>
#include <vector>

#include <opencv2/opencv.hpp>

using namespace cv;
using namespace std;
using namespace cv::ml;
Size imageSize = Size(64,64);
void coumputeHog(const Mat& src, vector<float>& descriptors)
{

	HOGDescriptor myHog = HOGDescriptor(imageSize, Size(32, 32), Size(8, 8), Size(8, 8),9);
	myHog.compute(src.clone(), descriptors, Size(1, 1), Size(0, 0));
}

int main()
{
	ifstream inLabels("label.txt"), inImages("train.txt"), inTestimage("test.txt");
	string imageName;
	int imageLabel;
	vector<Mat> vecImages;
	vector<int> vecLabels;
	cv::Ptr<cv::ml::SVM> mySVM = cv::ml::SVM::create();
	mySVM->setType(cv::ml::SVM::Types::C_SVC);
	mySVM->setKernel(cv::ml::SVM::KernelTypes::LINEAR);
	mySVM->setTermCriteria(cv::TermCriteria(cv::TermCriteria::MAX_ITER, 1000, 1e-6));

	vector<float> vecDescriptors;

#if(0) // set to 1 to train, 0 to load the saved model and test
	while ((inImages >> imageName) && (inLabels >> imageLabel))
	{
		Mat src = imread(imageName);
		resize(src, src, imageSize);
		vecImages.push_back(src);
		vecLabels.push_back(imageLabel);
	}
	cout << vecImages.size();
	inLabels.close();
	inImages.close();

	Mat dataDescriptors;
	Mat dataResponse = (Mat)vecLabels;
	for (size_t i = 0; i < vecImages.size(); i++)
	{
		Mat src = vecImages[i];
		Mat tempRow;
		coumputeHog(src, vecDescriptors);
		if (i == 0)
		{
			dataDescriptors = Mat::zeros(vecImages.size(), vecDescriptors.size(), CV_32FC1);
		}
		tempRow = ((Mat)vecDescriptors).t();
		tempRow.row(0).copyTo(dataDescriptors.row(i));
	}
	mySVM->train(dataDescriptors, cv::ml::SampleTypes::ROW_SAMPLE, dataResponse);
	string svmName = "svm.xml";
	mySVM->save(svmName.c_str());
#else
	mySVM=Algorithm::load<SVM>("svm.xml");
	// prediction
	string testPath;
	while (inTestimage >> testPath)
	{
		cv::TickMeter tm;
		Mat test = imread(testPath);
		tm.start();
		resize(test, test, imageSize);
		vector<float> imageDescriptor;
		coumputeHog(test, imageDescriptor);
		Mat testDescriptor = Mat::zeros(1, imageDescriptor.size(), CV_32FC1);
		for (size_t i = 0; i < imageDescriptor.size(); i++)
		{
			testDescriptor.at<float>(0, i) = imageDescriptor[i];
		}
		float  label = mySVM->predict(testDescriptor);
		cout << label << endl;
		tm.stop();
		cout <<"耗时:"<< tm.getTimeMilli() <<"ms"<<endl;
		imshow("test image", test);
		while (waitKey(1) != 115) // s
		{
		}
	}

	inTestimage.close();
#endif
	return 0;
}


Finally, haarcascade_frontalface_alt.xml is an official pre-trained OpenCV model; you can find and download it with any search engine. svm.xml has to be trained by yourself.
