How to use YOLOv4 with far_planner: adding a DNN detection module (ROS Noetic with YOLOv4 in the OpenCV DNN module)


The code is now open source; anyone who needs it is welcome to grab it and dig in:

GitHub - Leeable/far-planner-with-yolo: A FAR Planner version with yolo to detect object


This post covers how to run YOLOv4 through the OpenCV DNN module on ROS Noetic.


Reference: "ROS系统中从零开始部署YoloV4目标检测算法(3种方式)" (Deploying the YOLOv4 detector in ROS from scratch, three approaches), from ⊙月's CSDN blog.

That article was a big inspiration. For the OpenCV version problem, though, the mainstream approach nowadays is not to touch the cv_bridge that ships with Noetic, but to add an OpenCV bridge package (vision_opencv) to your own workspace instead.


1. Build OpenCV

The OpenCV bundled with Noetic (4.2) is too old for this setup: the DNN module only exists from OpenCV 3.3 onward, and full YOLOv4 support arrived even later (around 4.4). So first download newer source from the OpenCV website; I used OpenCV – 4.5.4.

Unpack OpenCV, then:

cd opencv-4.5.4
mkdir build
cd build
cmake -DCMAKE_BUILD_TYPE=RELEASE -DCMAKE_INSTALL_PREFIX=/usr/local -DWITH_TBB=ON -DBUILD_NEW_PYTHON_SUPPORT=ON -DWITH_V4L=ON -DINSTALL_C_EXAMPLES=ON -DINSTALL_PYTHON_EXAMPLES=ON -DBUILD_EXAMPLES=ON -DWITH_QT=ON -DWITH_GTK=ON -DWITH_OPENGL=ON ..

make -j12
sudo make install
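
To double-check that the new build is the one installed, a tiny standalone program can be compiled against the OpenCV in /usr/local (a minimal sketch; the file name check_cv.cpp and how you compile it are up to you):

// check_cv.cpp - print the OpenCV version and confirm the dnn module is usable
#include <opencv2/opencv.hpp>
#include <opencv2/dnn.hpp>
#include <iostream>

int main()
{
    std::cout << "OpenCV version: " << CV_VERSION << std::endl;
    cv::dnn::Net net;   // constructing an (empty) Net proves the dnn module is present
    std::cout << "dnn module available, empty net: " << std::boolalpha << net.empty() << std::endl;
    return 0;
}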

2. Create the ROS package

cd ~
mkdir -p ros_ws/src
cd ros_ws
catkin_make
cd src
catkin_create_pkg detection_pkg roscpp rospy std_msgs cv_bridge
cd ..
catkin_make

3. Download vision_opencv

cd ros_ws/src
git clone https://gitcode.net/mirrors/ros-perception/vision_opencv

The src directory should now look like this:

src
    ├── CMakeLists.txt -> /opt/ros/noetic/share/catkin/cmake/toplevel.cmake
    ├── detection_pkg
    └── vision_opencv

Then go into detection_pkg and create a data folder to hold the YOLOv4 weights and related files:

cd detection_pkg
mkdir data

4. Write the files in detection_pkg

There are three main files: Detection.cpp, Detection.h, and main.cpp.

main.cpp

#include "Detection.h"
#include 
#include 
#include 
#include 
#include "std_msgs/Int8.h"
#include "std_msgs/String.h"
using namespace std;
using namespace cv;
using namespace dnn;

ros::Publisher pub;
Detection detection = Detection();

void imageCallback(const sensor_msgs::ImageConstPtr &msg)
{
    Mat img;
    cv_bridge::CvImageConstPtr cv_ptr;
    cv_ptr = cv_bridge::toCvShare(msg, "bgr8");
    img = cv_ptr->image;
    detection.Initalize(img.cols, img.rows);
    detection.Detecting(img);
    imshow("Show RGBImage", detection.GetFrame());
    char ch = waitKey(10);
    string output = detection.GetResult();
    std_msgs::String out_msg;
    std::stringstream ss;
    ss << output;
    out_msg.data = ss.str();
    pub.publish(out_msg);
}

int main(int argc, char **argv){
    ROS_INFO("Opencv version: %s\n", CV_VERSION);
    ros::init(argc, argv,"yolo4");
    ros::NodeHandle nh;
    ros::Subscriber sub = nh.subscribe("/camera/color/image_raw",1,imageCallback);
    pub = nh.advertise<std_msgs::String>("yolo4_result",1);
    ros::spin();
    return 0;
}
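
A note on this node: the Detection object is a global, so the network and coco.names are loaded once in its constructor before any callback runs; each callback then shows the annotated frame with imshow and publishes the detection string on the yolo4_result topic as a std_msgs/String.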

Detection.h

#pragma once
#ifndef __DETECTION_H__
#define __DETECTION_H__

#include <opencv2/opencv.hpp>
#include <opencv2/dnn.hpp>
#include <fstream>
#include <sstream>
#include <iostream>
#include <string>
#include <vector>

using namespace std;
using namespace cv;
using namespace dnn;

class Detection {
public:
    Detection();
    ~Detection();
    void Initalize(int width, int height);
    void ReadModel();
    bool Detecting(Mat frame);
    vector<String> GetOutputNames();
    void PostProcess();
    void Drawer();

    void DrawBoxes(int classId, float conf, int left, int top, int right, int bottom);
    Mat GetFrame();
    string GetResult();
    int GetResWidth();
    int GetResHeight();
private:
    int m_width;
    int m_height;
    Net m_model;
    Mat m_frame;
    Mat m_blob;
    vector<Mat> m_outs;
    vector<float> m_confs;
    vector<Rect> m_boxes;
    vector<int> m_classIds;
    vector<int> m_perfIndx;
    int m_inpWidth;
    int m_inpHeight;
    float m_confThro;
    float m_NMSThro;
    vector<string> m_classes;

    void Dump();
};

#endif

Detection.cpp

#include "Detection.h"

using namespace std;
using namespace cv;
using namespace dnn;

Detection::Detection()
{
    m_width = 0;
    m_height = 0;
    m_inpWidth = 320;
    m_inpHeight = 320;
    m_confThro = 0.25;
    m_NMSThro = 0.4;

    ReadModel();
}

Detection::~Detection(){};

void Detection::Dump()
{
    m_outs.clear();
    m_boxes.clear();
    m_confs.clear();
    m_classIds.clear();
    m_perfIndx.clear();
}

void Detection::Initalize(int width, int height)
{
    m_width = width;
    m_height = height;
}

void Detection::ReadModel()
{
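    // NOTE: absolute paths from the author's machine; point these at your own <workspace>/src/detection_pkg/data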
    string classesFile = "/home/lqz/catkin_ws/src/detection_pkg/data/coco.names";
    string modelConfig = "/home/lqz/catkin_ws/src/detection_pkg/data/yolov4-tiny.cfg";
    string modelWeights = "/home/lqz/catkin_ws/src/detection_pkg/data/yolov4-tiny.weights";
    ifstream ifs(classesFile.c_str());
    string line;
    while (getline(ifs, line))
        m_classes.push_back(line);
    m_model = readNetFromDarknet(modelConfig, modelWeights);
    m_model.setPreferableBackend(DNN_BACKEND_OPENCV);
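    // DNN_TARGET_OPENCL runs inference via OpenCL when available; OpenCV falls back to the CPU target otherwise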
    m_model.setPreferableTarget(DNN_TARGET_OPENCL);
}

bool Detection::Detecting(Mat frame)
{
    Dump();
    m_frame = frame.clone();
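    // Scale pixels to [0,1] and resize to the 320x320 network input (no mean subtraction, channels stay in BGR order)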
    blobFromImage(m_frame, m_blob, 1 / 255.0, Size(m_inpWidth, m_inpHeight));
    m_model.setInput(m_blob);
    m_model.forward(m_outs, GetOutputNames());
    PostProcess();
    Drawer();
    return true;
}

vector<String> Detection::GetOutputNames()
{
    static vector<String> names;
    if (names.empty())
    {
        vector<int> outLayers = m_model.getUnconnectedOutLayers();
        vector<String> layersNames = m_model.getLayerNames();
        names.resize(outLayers.size());
        for (int i = 0; i < outLayers.size(); ++i)
        {
            names[i] = layersNames[outLayers[i] - 1];
        }
    }
    return names;
}

void Detection::PostProcess()
{
    for (int num = 0; num < m_outs.size(); num++)
    {
        Point Position;
        double confidence;
        float *data = (float *)m_outs[num].data;
        for (int j = 0; j < m_outs[num].rows; j++, data += m_outs[num].cols)
        {
            Mat scores = m_outs[num].row(j).colRange(5, m_outs[num].cols);
            minMaxLoc(scores, 0, &confidence, 0, &Position);
            if (confidence > m_confThro)
            {
                int centerX = (int)(data[0] * m_width);
                int centerY = (int)(data[1] * m_height);
                int width = (int)(data[2] * m_width);
                int height = (int)(data[3] * m_height);
                int left = centerX - width / 2;
                int top = centerY - height / 2;

                m_classIds.push_back(Position.x);
                m_confs.push_back((float)confidence);
                m_boxes.push_back(Rect(left, top, width, height));
            }
        }
    }
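    // Non-maximum suppression removes overlapping boxes; the indices of the kept boxes end up in m_perfIndx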
    NMSBoxes(m_boxes, m_confs, m_confThro, m_NMSThro, m_perfIndx);
}

void Detection::Drawer()
{
    for (int i = 0; i < m_perfIndx.size(); i++)
    {
        int idx = m_perfIndx[i];
        Rect box = m_boxes[idx];
        if (m_classIds[idx] == 0)
        {
            DrawBoxes(m_classIds[idx], m_confs[idx], box.x, box.y, box.x + box.width, box.y + box.height);
        }
    }
}

void Detection::DrawBoxes(int classId, float conf, int left, int top, int right, int bottom)
{
    rectangle(m_frame, Point(left, top), Point(right, bottom), Scalar(255, 178, 50), 3);
    string label = format("%.2f", conf);
    if (!m_classes.empty())
    {
        CV_Assert(classId < (int)m_classes.size());
        label = m_classes[classId] + ":" + label;
    }

    int baseLine;
    Size labelSize = getTextSize(label, FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
    top = max(top, labelSize.height);
    rectangle(m_frame, Point(left, top - round(1.5 * labelSize.height)), Point(left + round(1.5 * labelSize.width), top + baseLine), Scalar(255, 255, 255), FILLED);
    putText(m_frame, label, Point(left, top), FONT_HERSHEY_SIMPLEX, 0.75, Scalar(0, 0, 0), 1);
}

Mat Detection::GetFrame()
{
    return m_frame;
}

string Detection::GetResult()
{
    string result = "";

    // Collect the info of every box kept after NMS
    for (int i = 0; i < m_perfIndx.size(); i++)
    {
        int idx = m_perfIndx[i];
        Rect box = m_boxes[idx];

        // std::cout << "m_classIds[idx]"<< m_classIds[idx] << std::endl;
        // std::cout << "m_confs[idx]"<< m_confs[idx] << std::endl;
        // std::cout << "box.x"<< box.x << std::endl;
        // std::cout << "box.y"<< box.y << std::endl;
        // std::cout << "box.width"<< box.width << std::endl;
        // std::cout << "box.height"<< box.height << std::endl;

        if (m_classIds[idx] == 0) // keep only the person class
        {
            stringstream strStream;
            // both int and float values can be inserted into a stringstream
            strStream << m_classIds[idx] << "," << m_confs[idx] << "," << box.x << "," << box.y << "," << box.width << "," << box.height;
            result = strStream.str();
        }
    }

    return result;
}

int Detection::GetResWidth(){
    return m_width;
}

int Detection::GetResHeight(){
    return m_height;
}
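
Downstream code (for example on the FAR Planner side) only ever sees the comma-separated string published on yolo4_result. Below is a minimal sketch of a consumer node, assuming the classId,conf,x,y,width,height format produced by GetResult() above; the node name and file layout are just examples:

// result_listener.cpp - hypothetical consumer of the yolo4_result string
#include <ros/ros.h>
#include <std_msgs/String.h>
#include <sstream>
#include <string>
#include <vector>

void resultCallback(const std_msgs::String::ConstPtr &msg)
{
    if (msg->data.empty())
        return;                               // empty string: no person in this frame
    std::stringstream ss(msg->data);
    std::string field;
    std::vector<float> v;
    while (std::getline(ss, field, ','))      // split "classId,conf,x,y,width,height"
        v.push_back(std::stof(field));
    if (v.size() == 6)
        ROS_INFO("class %d conf %.2f box [%d %d %d %d]",
                 (int)v[0], v[1], (int)v[2], (int)v[3], (int)v[4], (int)v[5]);
}

int main(int argc, char **argv)
{
    ros::init(argc, argv, "yolo4_result_listener");
    ros::NodeHandle nh;
    ros::Subscriber sub = nh.subscribe("yolo4_result", 1, resultCallback);
    ros::spin();
    return 0;
}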

5. Modify the CMakeLists.txt in detection_pkg

The only changes to the auto-generated file are the lines appended at the very end (finding OpenCV and building the yolo4 executable); everything else stays as catkin_create_pkg generated it.

cmake_minimum_required(VERSION 3.0.2)
project(detection_pkg)

## Compile as C++11, supported in ROS Kinetic and newer
# add_compile_options(-std=c++11)

## Find catkin macros and libraries
## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
## is used, also find other catkin packages
find_package(catkin REQUIRED COMPONENTS
  cv_bridge
  roscpp
  rospy
  std_msgs
)

## System dependencies are found with CMake's conventions
# find_package(Boost REQUIRED COMPONENTS system)


## (auto-generated template comments for python setup, messages/services and
## dynamic_reconfigure are unchanged and omitted here)

###################################
## catkin specific configuration ##
###################################
## The catkin_package macro generates cmake config files for your package
## Declare things to be passed to dependent projects
## INCLUDE_DIRS: uncomment this if your package contains header files
## LIBRARIES: libraries you create in this project that dependent projects also need
## CATKIN_DEPENDS: catkin_packages dependent projects also need
## DEPENDS: system dependencies of this project that dependent projects also need
catkin_package(
#  INCLUDE_DIRS include
#  LIBRARIES detection_pkg
  CATKIN_DEPENDS cv_bridge roscpp rospy std_msgs
#  DEPENDS system_lib
)

###########
## Build ##
###########

## Specify additional locations of header files
## Your package locations should be listed before other locations
include_directories(
# include
  ${catkin_INCLUDE_DIRS}
)

## (auto-generated template comments for libraries, executables, install and
## testing are unchanged and omitted here)

set(DETECTION Detection.cpp)
add_executable(yolo4 main.cpp ${DETECTION})
find_package(OpenCV REQUIRED)
include_directories(${OpenCV_INCLUDE_DIRS})
target_link_libraries(yolo4 ${OpenCV_LIBS})
target_link_libraries(yolo4 ${catkin_LIBRARIES})
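
Because step 1 installed OpenCV 4.5.4 into /usr/local, find_package(OpenCV REQUIRED) here resolves to that copy, and the overlay cv_bridge from step 3 is built against the same version; the "Found OpenCV: /usr/local ... 4.5.4" lines in the build log below confirm it.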

6. Build normally

lqz@lqz-PC:~/ros_ws$ catkin_make
Base path: /home/lqz/ros_ws
Source space: /home/lqz/ros_ws/src
Build space: /home/lqz/ros_ws/build
Devel space: /home/lqz/ros_ws/devel
Install space: /home/lqz/ros_ws/install
####
#### Running command: "cmake /home/lqz/ros_ws/src -DCATKIN_DEVEL_PREFIX=/home/lqz/ros_ws/devel -DCMAKE_INSTALL_PREFIX=/home/lqz/ros_ws/install -G Unix Makefiles" in "/home/lqz/ros_ws/build"
####
-- Using CATKIN_DEVEL_PREFIX: /home/lqz/ros_ws/devel
-- Using CMAKE_PREFIX_PATH: /opt/ros/noetic
-- This workspace overlays: /opt/ros/noetic
-- Found PythonInterp: /usr/bin/python3 (found suitable version "3.7.3", minimum required is "3") 
-- Using PYTHON_EXECUTABLE: /usr/bin/python3
-- Using Debian Python package layout
-- Using empy: /usr/lib/python3/dist-packages/em.py
-- Using CATKIN_ENABLE_TESTING: ON
-- Call enable_testing()
-- Using CATKIN_TEST_RESULTS_DIR: /home/lqz/ros_ws/build/test_results
-- Forcing gtest/gmock from source, though one was otherwise available.
-- Found gtest sources under '/usr/src/googletest': gtests will be built
-- Found gmock sources under '/usr/src/googletest': gmock will be built
-- Found PythonInterp: /usr/bin/python3 (found version "3.7.3") 
-- Using Python nosetests: /usr/bin/nosetests3
-- catkin 0.8.10
-- BUILD_SHARED_LIBS is on
-- BUILD_SHARED_LIBS is on
-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-- ~~  traversing 5 packages in topological order:
-- ~~  - opencv_tests
-- ~~  - vision_opencv (metapackage)                                                                          
-- ~~  - cv_bridge
-- ~~  - detection_pkg                                                                                        
-- ~~  - image_geometry                                                                                       
-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~                                                          
-- +++ processing catkin package: 'opencv_tests'
-- ==> add_subdirectory(vision_opencv/opencv_tests)
-- +++ processing catkin metapackage: 'vision_opencv'
-- ==> add_subdirectory(vision_opencv/vision_opencv)
-- +++ processing catkin package: 'cv_bridge'
-- ==> add_subdirectory(vision_opencv/cv_bridge)
-- Found PythonLibs: /usr/lib/x86_64-linux-gnu/libpython3.7m.so (found version "3.7.3") 
-- Found Boost: /usr/include (found version "1.67.0") found components: python37 
-- Found OpenCV: /usr/local (found suitable version "4.5.4", minimum required is "4") found components: opencv_core opencv_imgproc opencv_imgcodecs 
-- Found PythonLibs: /usr/lib/x86_64-linux-gnu/libpython3.7m.so (found suitable version "3.7.3", minimum required is "3.7") 
-- +++ processing catkin package: 'detection_pkg'
-- ==> add_subdirectory(detection_pkg)
-- Found OpenCV: /usr/local (found version "4.5.4") 
-- +++ processing catkin package: 'image_geometry'
-- ==> add_subdirectory(vision_opencv/image_geometry)
-- Configuring done
-- Generating done
-- Build files have been written to: /home/lqz/ros_ws/build
####
#### Running command: "make -j12 -l12" in "/home/lqz/ros_ws/build"
####
Scanning dependencies of target std_msgs_generate_messages_eus
Scanning dependencies of target std_msgs_generate_messages_py
Scanning dependencies of target geometry_msgs_generate_messages_cpp
Scanning dependencies of target geometry_msgs_generate_messages_nodejs
Scanning dependencies of target sensor_msgs_generate_messages_nodejs
Scanning dependencies of target geometry_msgs_generate_messages_eus
Scanning dependencies of target std_msgs_generate_messages_cpp
Scanning dependencies of target sensor_msgs_generate_messages_cpp
Scanning dependencies of target sensor_msgs_generate_messages_lisp
Scanning dependencies of target sensor_msgs_generate_messages_py
Scanning dependencies of target geometry_msgs_generate_messages_py
Scanning dependencies of target geometry_msgs_generate_messages_lisp
[  0%] Built target std_msgs_generate_messages_eus
[  0%] Built target geometry_msgs_generate_messages_nodejs
[  0%] Built target geometry_msgs_generate_messages_eus
[  0%] Built target sensor_msgs_generate_messages_cpp
[  0%] Built target geometry_msgs_generate_messages_cpp
[  0%] Built target sensor_msgs_generate_messages_nodejs
[  0%] Built target std_msgs_generate_messages_py
[  0%] Built target sensor_msgs_generate_messages_lisp
[  0%] Built target std_msgs_generate_messages_cpp
[  0%] Built target geometry_msgs_generate_messages_py
[  0%] Built target sensor_msgs_generate_messages_py
[  0%] Built target geometry_msgs_generate_messages_lisp
Scanning dependencies of target sensor_msgs_generate_messages_eus
Scanning dependencies of target std_msgs_generate_messages_nodejs
Scanning dependencies of target std_msgs_generate_messages_lisp
[  0%] Built target std_msgs_generate_messages_nodejs
[  0%] Built target sensor_msgs_generate_messages_eus
[  0%] Built target std_msgs_generate_messages_lisp
Scanning dependencies of target image_geometry
Scanning dependencies of target cv_bridge
[ 16%] Building CXX object vision_opencv/image_geometry/CMakeFiles/image_geometry.dir/src/pinhole_camera_model.cpp.o                                                                                                        
[ 16%] Building CXX object vision_opencv/image_geometry/CMakeFiles/image_geometry.dir/src/stereo_camera_model.cpp.o                                                                                                         
[ 33%] Building CXX object vision_opencv/cv_bridge/src/CMakeFiles/cv_bridge.dir/rgb_colors.cpp.o
[ 33%] Building CXX object vision_opencv/cv_bridge/src/CMakeFiles/cv_bridge.dir/cv_bridge.cpp.o
[ 41%] Linking CXX shared library /home/lqz/ros_ws/devel/lib/libimage_geometry.so
[ 41%] Built target image_geometry
[ 50%] Linking CXX shared library /home/lqz/ros_ws/devel/lib/libcv_bridge.so
[ 50%] Built target cv_bridge
Scanning dependencies of target cv_bridge_boost
Scanning dependencies of target yolo4
[ 66%] Building CXX object vision_opencv/cv_bridge/src/CMakeFiles/cv_bridge_boost.dir/module_opencv4.cpp.o
[ 66%] Building CXX object vision_opencv/cv_bridge/src/CMakeFiles/cv_bridge_boost.dir/module.cpp.o
[ 83%] Building CXX object detection_pkg/CMakeFiles/yolo4.dir/Detection.cpp.o
[ 83%] Building CXX object detection_pkg/CMakeFiles/yolo4.dir/main.cpp.o
[ 91%] Linking CXX shared library /home/lqz/ros_ws/devel/lib/python3/dist-packages/cv_bridge/boost/cv_bridge_boost.so                                                                                                       
[ 91%] Built target cv_bridge_boost
[100%] Linking CXX executable /home/lqz/ros_ws/devel/lib/detection_pkg/yolo4
[100%] Built target yolo4

7. Download the YOLOv4 cfg, weights, and coco.names

mirrors / alexeyab / darknet · GitCode

Go into the cfg folder and download the files matching your model. I use yolov4-tiny, so I downloaded yolov4-tiny.cfg and https://github.com/AlexeyAB/darknet/releases/download/darknet_yolo_v4_pre/yolov4-tiny.weights,

along with coco.names.

Then put them into detection_pkg/data.

8. Run

## terminal 1

roscore

## terminal 2

cd ros_ws
source ./devel/setup.bash
rosrun detection_pkg yolo4


## terminal 3 

rosbag play xxxx.bag    # any bag containing video with a person in it will do

9. Change the camera topic and run with FAR Planner

In main.cpp, change the subscribed camera topic to:

 ros::Subscriber sub = nh.subscribe("/camera/image",1,imageCallback);

You should then see the YOLOv4 detections on the subscribed video stream.

Gazebo model library:

mirrors / osrf / gazebo_models · GitCode

In autonomous_exploration_development_environment-noetic, pick the launch file that starts Gazebo,

then add the walking person model to the world. If you need it to actually move, see my earlier post on how to add dynamic obstacles.
