Adding image feature extraction and feature matching to a robot pedestrian-following ROS package: SURF features are extracted, and matching is done with FLANN (Fast Library for Approximate Nearest Neighbors).
Straight to the code (feel free to reach out if you have questions~):
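Before the full node, here is a minimal, self-contained sketch of the SURF + FLANN pipeline the node uses below. This is only a sketch, assuming OpenCV 2.4 with the nonfree module; the image paths are placeholders and minHessian = 2000 just mirrors the value used in the node.

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/features2d.hpp> // SurfFeatureDetector / SurfDescriptorExtractor
#include <algorithm>
#include <cstdio>
#include <vector>
using namespace cv;

int main() {
  // Placeholder images: a target template and a candidate region
  Mat img1 = imread("template.png", CV_LOAD_IMAGE_GRAYSCALE);
  Mat img2 = imread("candidate.png", CV_LOAD_IMAGE_GRAYSCALE);
  if (img1.empty() || img2.empty())
    return 1;
  // 1. Detect SURF keypoints
  SurfFeatureDetector detector(2000); // minHessian threshold
  std::vector<KeyPoint> kp1, kp2;
  detector.detect(img1, kp1);
  detector.detect(img2, kp2);
  // 2. Compute SURF descriptors
  SurfDescriptorExtractor extractor;
  Mat desc1, desc2;
  extractor.compute(img1, kp1, desc1);
  extractor.compute(img2, kp2, desc2);
  // 3. Match descriptors with FLANN (approximate nearest neighbour search)
  FlannBasedMatcher matcher;
  std::vector<DMatch> matches;
  matcher.match(desc1, desc2, matches);
  // Inspect the distance range of the matches
  double min_dist = 100, max_dist = 0;
  for (size_t i = 0; i < matches.size(); i++) {
    min_dist = std::min(min_dist, (double)matches[i].distance);
    max_dist = std::max(max_dist, (double)matches[i].distance);
  }
  printf("matches: %zu, min_dist: %f, max_dist: %f\n", matches.size(), min_dist, max_dist);
  return 0;
}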
/******************************************************************************
*
* The MIT License (MIT)
*
* Copyright (c) 2018 Bluewhale Robot
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Author: Randoms
*******************************************************************************/
#include "tracking_node.h"
using namespace cv;
using namespace std;
using namespace XiaoqiangTrack;
sensor_msgs::Image last_frame;
XiaoqiangTrack::Tracker *tracker = NULL;
Rect2d body_rect;
ros::Publisher image_pub;
ros::Publisher target_pub;
std::mutex update_track_mutex;
int track_ok_flag = 0;
cv::Rect2d previous_body_rect;
cv::Rect2d body_track_rect;
sensor_msgs::Image get_one_frame() { return last_frame; }
void update_frame(const sensor_msgs::ImageConstPtr &new_frame) // update the latest frame
{
last_frame = *new_frame;
cv_bridge::CvImagePtr cv_ptr =
    cv_bridge::toCvCopy(new_frame, "bgr8"); // convert the ROS image to OpenCV format
cv::Mat cv_image = cv_ptr->image;
if (tracker == NULL)
return;
unique_lock<mutex> lock(update_track_mutex);
previous_body_rect = body_rect; // save the current detected rect as previous_body_rect
track_ok_flag = tracker->updateFrame(cv_image, body_rect);
cv::rectangle(cv_image, body_rect, cv::Scalar(0, 255, 0));
image_pub.publish(cv_ptr->toImageMsg());
xiaoqiang_track::TrackTarget target;
target.x = body_rect.x + body_rect.width / 2;
target.y = body_rect.y + body_rect.height / 2;
target.width = body_track_rect.width;
target.height = body_track_rect.height;
if (track_ok_flag == 0) {
// send stop
target.x = 0;
target.y = 0;
}
target_pub.publish(target); // target.x, target.y are the coordinates of the tracked point (kalman...)
}
int main(int argc, char **argv) {
ros::init(argc, argv, "xiaoqiang_track_node"); // initialize the ROS node
// AsyncSpinner runs callbacks on several threads inside one node; the thread count can be
// given at construction (e.g. 4), and it provides start() and stop()
ros::AsyncSpinner spinner(4);
spinner.start();
ros::NodeHandle private_nh("~");
ros::Publisher talk_pub = private_nh.advertise<std_msgs::String>("text", 10);
image_pub = private_nh.advertise<sensor_msgs::Image>("processed_image", 10);
target_pub = private_nh.advertise<xiaoqiang_track::TrackTarget>("target", 10);
int watch_dog;
private_nh.param("watch_dog", watch_dog, 2);
ros::Subscriber image_sub = private_nh.subscribe("image", 10, update_frame);
PoseTracker *client;
std::string pose_tracker_type;
ros::param::param("~pose_tracker", pose_tracker_type, "");
if (pose_tracker_type == "baidu") //判断跟踪类型:baidu track or body track
{
client = (PoseTracker *)new BaiduTrack(private_nh);
} else if (pose_tracker_type == "body_pose") {
client = (PoseTracker *)new BodyTrack(private_nh);
} else {
ROS_FATAL_STREAM("unknown pose tracker type " << pose_tracker_type);
ROS_FATAL_STREAM("supported pose trakers are body_pose and baidu");
exit(1);
}
std::string tracker_main_type;  // main tracker type
std::string tracker_aided_type; // auxiliary tracker type
ros::param::param<std::string>("~tracker_main", tracker_main_type, "");
ros::param::param<std::string>("~tracker_aided", tracker_aided_type, "");
tracker = new XiaoqiangTrack::Tracker(tracker_main_type, tracker_aided_type); // set up the tracker
// Tell the user to stand in front of the robot
std_msgs::String words;
words.data = "请站在我前面"; // "Please stand in front of me"
talk_pub.publish(words);
// Remind the user to adjust the distance
sensor_msgs::Image frame = get_one_frame(); // grab one frame
body_rect.x = -1;
body_rect.y = -1;
while (!ros::isShuttingDown()) {
if (frame.data.size() != 0) {
cv::Rect2d rect = client->getBodyRect(frame); // get the body region from this frame
if (rect.x <= 1 || rect.y <= 1) {
words.data = "我没有看到人,请站到我前面";
talk_pub.publish(words);
} else if (rect.x + rect.width / 2 > 440 ||
rect.x + rect.width / 2 < 200) {
body_rect = rect;
words.data = "请站到镜头中间来";
talk_pub.publish(words);
} else {
body_rect = rect;
words.data = "我看到人了,开始追踪";
talk_pub.publish(words);
break;
}
}
sleep(1);
frame = get_one_frame();
}
/*
 * After reading the code, my initial idea is to add feature extraction and OpenCV feature
 * matching at this point. The plan:
 * - Feature extraction pulls features from one frame; the features could be ORB, FAST,
 *   SIFT, SURF, etc. The frame = get_one_frame() above returns the latest frame (last_frame).
 * - From that frame, extract the desired features, keep the keypoints of the detected
 *   target, and later match them against the image when detection runs again.
 * - Only extraction happens here; matching is done after the target is lost, when detection
 *   restarts, finds several candidate targets, and they need to be matched against the template.
 */
/* Sorry for the rant, but I just lost a whole day's work by closing the tab without saving,
   so let me start again from the beginning...
*/
// Begin feature extraction
int minHessian = 2000;
SurfFeatureDetector detector(minHessian);
vector<KeyPoint> keypoint1, keypoint2;
// img1 is the target template: the body_rect region cropped out of frame
cv_bridge::CvImagePtr template_ptr = cv_bridge::toCvCopy(frame, "bgr8");
cv::Rect roi1((int)body_rect.x, (int)body_rect.y, (int)body_rect.width, (int)body_rect.height);
cv::Mat img1 = template_ptr->image(roi1).clone();
// Detect keypoints on the template
detector.detect(img1, keypoint1);
// End feature extraction
// Tell the user they can start walking
sensor_msgs::Image tracking_frame = get_one_frame();
cv_bridge::CvImagePtr cv_ptr = cv_bridge::toCvCopy(tracking_frame, "bgr8");
cv::Mat cv_image = cv_ptr->image;
tracker->initTracker(cv_image, body_rect); // init
int repeat_count = 0;
int watch_dog_count = 0;
while (ros::ok()) {
usleep(1000 * 100); // 100ms
// If the detected position has not changed, the target may be lost
if (previous_body_rect == body_rect) {
  repeat_count += 100;
  if (repeat_count == 5 * 1000) // rect unchanged for too long (5 s): declare the target lost
  {
    ROS_WARN_STREAM("Target not move, may lost");
    repeat_count = 0;
    track_ok_flag = 0; // flag = 0 -> reset
  }
} // end of lost-target check
else
{
repeat_count = 0;
}
if (body_rect.width < 300 && body_rect.height < 300 && track_ok_flag == 2 &&
    body_rect.height > body_rect.width) // confirm the detected rect satisfies these constraints
{
  watch_dog_count += 100;
  if (watch_dog_count <= watch_dog * 1000) // watch_dog defaults to 2
  {
    continue;
  }
} // end of the rect sanity check
watch_dog_count = 0;
tracking_frame = get_one_frame();                      // grab the latest frame again
body_track_rect = client->getBodyRect(tracking_frame); // detect the body again
// --- user code: SURF matching begins ---
// img2 is the candidate region cropped out of tracking_frame at body_track_rect
// (assumes body_track_rect is a valid region inside the frame)
cv_bridge::CvImagePtr candidate_ptr = cv_bridge::toCvCopy(tracking_frame, "bgr8");
cv::Rect roi2((int)body_track_rect.x, (int)body_track_rect.y,
              (int)body_track_rect.width, (int)body_track_rect.height);
cv::Mat img2 = candidate_ptr->image(roi2).clone();
// Detect keypoints on the candidate
detector.detect(img2, keypoint2);
// Compute descriptors for template and candidate
SurfDescriptorExtractor extractor;
Mat descriptor1, descriptor2;
extractor.compute(img1, keypoint1, descriptor1);
extractor.compute(img2, keypoint2, descriptor2);
// Match with FLANN
FlannBasedMatcher matcher;
vector<DMatch> matches;
matcher.match(descriptor1, descriptor2, matches); // matching done
double max_dist = 0;
double min_dist = 100;
for (int i = 0; i < descriptor1.rows; i++) {
  double dist = matches[i].distance;
  if (dist < min_dist)
    min_dist = dist;
  if (dist > max_dist)
    max_dist = dist;
} // minimum and maximum distances over all matches
// Decide from the match result whether the current candidate is the target. Is the min/max
// match distance alone enough to decide? (see the ratio-test sketch after the full listing)
if (min_dist < 1.0) // strong match: accept the candidate as the target
{
  // keep tracking_frame and body_track_rect as the target
}
else if (max_dist > 5.0) // poor match: discard
{
  continue; // go on to match the next frame
}
else if ((max_dist - min_dist) < 2.0) // distances tightly clustered: accept
{
  // keep tracking_frame and body_track_rect as the target
}
else
{
  continue;
}
// --- user code: SURF matching ends ---
if (body_track_rect.x <= 1 || body_track_rect.y <= 1) // stop tracking when the detected rect's x or y falls below 1
{
tracker->stop();
}
else
{
{
unique_lock<mutex> lock(update_track_mutex);
body_rect = body_track_rect;
cv_bridge::CvImagePtr cv_ptr =
cv_bridge::toCvCopy(tracking_frame, "bgr8");
cv::Mat cv_image = cv_ptr->image;
if (track_ok_flag == 0) // reset the tracker when the tracking flag is 0
{
  tracker->reset(cv_image, body_rect, true); // watch out for the reset parameter
}
}
else
tracker->reset(cv_image, body_rect); /// reset
}
}
}
}
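On the question raised in the match-filtering step above (whether the min/max match distance alone is a reliable criterion): a more common choice is Lowe's ratio test, which keeps a match only when its distance is clearly smaller than that of the second-best candidate, and then counts how many matches survive. Below is a minimal sketch of that alternative, assuming the same OpenCV 2.4 SURF descriptors as in the node; the function name and the ratio/count thresholds are illustrative and would need tuning on real footage.

#include <opencv2/features2d/features2d.hpp>
#include <vector>
using namespace cv;

// Returns true when enough matches pass Lowe's ratio test, i.e. the candidate region
// most likely shows the same target as the template.
bool is_same_target(const Mat &descriptor1, const Mat &descriptor2,
                    float ratio = 0.7f, int min_good_matches = 10) {
  if (descriptor1.empty() || descriptor2.empty())
    return false;
  FlannBasedMatcher matcher;
  std::vector<std::vector<DMatch> > knn_matches;
  matcher.knnMatch(descriptor1, descriptor2, knn_matches, 2); // best two matches per query
  int good = 0;
  for (size_t i = 0; i < knn_matches.size(); i++) {
    if (knn_matches[i].size() == 2 &&
        knn_matches[i][0].distance < ratio * knn_matches[i][1].distance)
      good++;
  }
  return good >= min_good_matches;
}

Unlike absolute thresholds such as min_dist < 1.0 or max_dist > 5.0, the ratio test compares each match against its own runner-up, so it does not depend on the overall scale of the descriptor distances.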