Point Cloud Deep Learning: Building Your Own PointConv Dataset (ModelNet40)

    • 1. Paper and Code Links
    • 2. Environment
    • 3. Generating PCD Files
    • 4. Converting PCD Files to Properly Formatted txt Files
    • 5. Adding the Generated Data to the Original Dataset
    • 6. Experimental Validation

1. Paper and Code Links

Original paper:
PointConv: Deep Convolutional Networks on 3D Point Clouds

Original dataset template:
modelnet40_normal_resampled

2. Environment

Hardware: i7-6700HQ, GTX 960M (2 GB)
Software: Ubuntu 18.04, C++11, PCL 1.9.1, OpenCV 4.4.0

3. Generating PCD Files

pcd shengcheng/2.cpp

Use a depth camera such as the Azure Kinect or RealSense to capture depth and RGB images. This experiment uses the Azure Kinect DK as the example device.

Build and run commands

mkdir -p build
cd build
cmake ..
make -j4
./2

CMakeLists.txt

cmake_minimum_required(VERSION 2.6)
project(2)

set(CMAKE_CXX_FLAGS "-std=c++11")

find_package(OpenCV REQUIRED)
find_package(k4a REQUIRED)
find_package(PCL 1.2 REQUIRED)

include_directories(${OpenCV_INCLUDE_DIRS} ${PCL_INCLUDE_DIRS})
link_directories(${PCL_LIBRARY_DIRS})
add_definitions(${PCL_DEFINITIONS})

add_executable(2 2.cpp)
target_link_libraries(2 ${OpenCV_LIBS} k4a::k4a ${PCL_LIBRARIES})

install(TARGETS 2 RUNTIME DESTINATION bin)

2.cpp

#include <iostream>                  // C++ standard library
#include <fstream>
#include <string>

#include <opencv2/opencv.hpp>        // OpenCV
#include <k4a/k4a.hpp>               // Azure Kinect Sensor SDK (C++ wrapper)

#include <pcl/point_types.h>         // PCL point types
#include <pcl/point_cloud.h>
#include <pcl/io/pcd_io.h>           // PCD file I/O
#include <pcl/filters/voxel_grid.h>  // voxel-grid filter

using namespace pcl;

using namespace std;
using namespace cv;
int main()
{


  k4a::device device = k4a::device::open(K4A_DEVICE_DEFAULT);
  

  k4a_device_configuration_t config = K4A_DEVICE_CONFIG_INIT_DISABLE_ALL; // sensor configuration
  config.camera_fps = K4A_FRAMES_PER_SECOND_30;
  config.color_format = K4A_IMAGE_FORMAT_COLOR_BGRA32;
  config.color_resolution = K4A_COLOR_RESOLUTION_720P;
  config.depth_mode = K4A_DEPTH_MODE_NFOV_UNBINNED; //K4A_DEPTH_MODE_WFOV_2X2BINNED  K4A_DEPTH_MODE_NFOV_UNBINNED
  config.synchronized_images_only = true;
  device.start_cameras(&config);
  k4a::capture capture;
  Mat cv_rgbImage_with_alpha;
  Mat cv_rgbImage_no_alpha;
  Mat cv_depth;
  Mat cv_depth_8U;
  k4a::image rgbImage;
  k4a::image depthImage;
  k4a::image irImage;
  k4a::image transformed_depthImage;

  k4a::calibration k4aCalibration = device.get_calibration(config.depth_mode, config.color_resolution);
  k4a::transformation k4aTransformation = k4a::transformation(k4aCalibration);

 

  for (int it = 1; it < 101; it++) // the loop count is also the number of PCD files recorded for the dataset
  { 
     pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>); // must be created inside the loop, otherwise points from previous frames pile up
    if (device.get_capture(&capture)) 
    {

      rgbImage = capture.get_color_image();
      depthImage = capture.get_depth_image();
      transformed_depthImage = k4aTransformation.depth_image_to_color_camera(depthImage);
    
      cv_depth = cv::Mat(transformed_depthImage.get_height_pixels(), transformed_depthImage.get_width_pixels(), CV_16U, (void *) transformed_depthImage.get_buffer(), static_cast<size_t>(transformed_depthImage.get_stride_bytes()));
      cv_depth.convertTo(cv_depth_8U, CV_8U, 1);
      cv_rgbImage_with_alpha = cv::Mat(rgbImage.get_height_pixels(), rgbImage.get_width_pixels(), CV_8UC4, (void *) rgbImage.get_buffer());
      cv::cvtColor(cv_rgbImage_with_alpha, cv_rgbImage_no_alpha, cv::COLOR_BGRA2BGR);
      cv::imshow("color", cv_rgbImage_no_alpha);
      waitKey(5);
      //cv::cvtColor(cv_rgbImage_with_alpha, cv_rgbImage_no_alpha, cv::COLOR_BGRA2BGR);
    

    

      int color_image_width_pixels = rgbImage.get_width_pixels();
      int color_image_height_pixels = rgbImage.get_height_pixels();
      k4a::image transformed_depth_image = NULL;
      transformed_depth_image = k4a::image::create(K4A_IMAGE_FORMAT_DEPTH16, color_image_width_pixels, color_image_height_pixels, color_image_width_pixels * (int)sizeof(uint16_t));
      k4a::image point_cloud_image = NULL;
      point_cloud_image = k4a::image::create(K4A_IMAGE_FORMAT_CUSTOM, color_image_width_pixels, color_image_height_pixels, color_image_width_pixels * 3 * (int)sizeof(int16_t));
      k4aTransformation.depth_image_to_color_camera(depthImage, &transformed_depth_image);
      k4aTransformation.depth_image_to_point_cloud(transformed_depth_image, K4A_CALIBRATION_TYPE_COLOR, &point_cloud_image);

    /*  k4a_calibration_intrinsic_parameters_t *intrinsics = &k4aCalibration.color_camera_calibration.intrinsics.parameters;  // color camera intrinsics

      cout << "camera.fx: "<< intrinsics->param.fx << endl;
      cout << "camera.cx: "<< intrinsics->param.cx << endl;
      cout << "camera.fy: "<< intrinsics->param.fy << endl;
      cout << "camera.cy: "<< intrinsics->param.cy << endl;
    */

      int width = rgbImage.get_width_pixels();
      int height = rgbImage.get_height_pixels();

//      cloud->width = width;
//      cloud->height = height;
//      cloud->is_dense=false;
//      cloud->points.resize(cloud->height * cloud->width);

      int16_t *point_cloud_image_data = (int16_t *)(void *)point_cloud_image.get_buffer();
      uint8_t *color_image_data = rgbImage.get_buffer();
      for(int i = 0; i < width * height; ++i)
      {
        PointXYZ point;

        point.z = point_cloud_image_data[3 * i + 2]/ 1000.0f;
        if (point.z == 0)
        {
         continue;
        }
        point.x = point_cloud_image_data[3 * i + 0]/ 1000.0f;
        point.y = point_cloud_image_data[3 * i + 1]/ 1000.0f;
        cloud->points.push_back(point); // add the point to the cloud
      }
      cloud->height=1;
      cloud->width=cloud->points.size();
      cloud->is_dense=false;

      PointCloud<PointXYZ>::Ptr cloud_filtered(new PointCloud<PointXYZ>);
      pcl::VoxelGrid<pcl::PointXYZ> sor;
      sor.setInputCloud(cloud);           // set the input cloud
      sor.setLeafSize(0.03f,0.03f,0.03f); // downsample with 3 cm voxels
      sor.filter(*cloud_filtered);        // store the filtered cloud

      std::string str1,str_name,str_filelist;
      char buffer[5];
      std::sprintf(buffer,"%03d",it);
      cout<< it << endl;
      std::string str2=buffer;
      str1=("./002/002_"+str2+".pcd");
      str_name=("002_"+str2);

      
      ofstream out("./002/name.txt",ios::app);   // append this frame's file name to name.txt
      out<<str_name<<endl;
      out.close();

      str_filelist=("002/002_"+str2+".txt");
      ofstream out2("./002/list.txt",ios::app);
      out2<<str_filelist<<endl;
      out2.close();

      io::savePCDFile(str1,*cloud_filtered);
    }
  }
  //k4aTransformation.destroy();
  device.close();
  return 0;
}
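One practical note: the program saves with relative paths (./002/...), and neither std::ofstream nor pcl::io::savePCDFile creates missing directories, so the output folder has to exist in the working directory (here, inside build/) before running:

mkdir -p 002    # or 001, depending on which class is being recorded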

Run results

The files are saved in the build/001 or build/002 folder; there are three types of files.
1. PCD files, 100 in total
(screenshot)
2. list.txt
(screenshot)
3. name.txt
(screenshot)
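To sanity-check a generated cloud before converting it, it can be opened with PCL's pcl_viewer tool (assuming the pcl-tools package is installed):

pcl_viewer build/002/002_001.pcd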

4. Converting PCD Files to Properly Formatted txt Files

Each line in the original dataset contains six comma-separated values: x, y, z, normal_x, normal_y, normal_z, and, unlike a PCD file, there is no header. The CMakeLists.txt is the same as before, so it is not repeated here.
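For reference, a data line in the original modelnet40_normal_resampled files looks like this (the numbers are illustrative):

0.1694,0.2062,-0.1223,-0.3304,0.9437,0.0154

Note that the txt files produced by the conversion step below contain only the first three values (x, y, z), because the captured PCD files store XYZ points without normals.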
zhizuoshujuji/2.cpp

2.cpp

#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <cstdio>   // sprintf
using namespace std;
 
string itos(int i)
{
    ostringstream str;
    str << i;
    return str.str();
}
 
int main()
{
    for(int it = 1; it <101;it++)
    {
        char buffer[5];
        std::sprintf(buffer,"%03d",it);
        std::string str2=buffer;
        string openname = ("./002/002_"+str2+".pcd");  // adjust this path to wherever your files are saved
        string savename = ("./0002/002_"+str2+".txt");
        fstream file(openname.c_str());
        ofstream outfile(savename,ios::out|ios::trunc);
 
        int count = 0;
        string line;
        while(getline(file,line))
        {
            if(count > 10)   // the first 11 lines are the PCD header; skip them
            {
                // replace every space in the data line with a comma
                size_t pos = line.find(" ");
                while(pos != string::npos){
                    line.replace(pos,1,",");
                    pos = line.find(" ");
                }
                outfile << line << endl;
            }
            count++;
        }
        outfile.close();
        file.close();
    }
    return 0;
}

Run results

The files are saved in the build/0001 or build/0002 folder; there are 100 txt files in total.
(screenshot)
Contents of a txt file:
(screenshot)

5. Adding the Generated Data to the Original Dataset

Copy the txt point cloud files from 0001 and 0002 into pointconv_pytorch-master/data/modelnet40_normal_resampled, and add the entries from the generated name.txt and list.txt to the dataset's index files as needed, as shown below.
(screenshot)
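The exact files depend on your copy of the dataset; assuming the standard ModelNet40 split files (modelnet40_shape_names.txt, modelnet40_train.txt, modelnet40_test.txt, filelist.txt), the new class and samples would be registered roughly like this (illustrative entries only):

# modelnet40_shape_names.txt: add the new class name
002

# modelnet40_train.txt (and/or modelnet40_test.txt): entries from name.txt
002_001
002_002

# filelist.txt: entries from list.txt
002/002_001.txt
002/002_002.txt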

6. Experimental Validation

Since the number of points in each generated point cloud file is not fixed, change --num_point in train_cls_conv.py and eval_cls_conv.py to 512 to avoid cases where a cloud has too few points. The results are as follows:
(screenshot of the training/evaluation results)
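With that change in place, training and evaluation could be launched roughly like this (a sketch; all other arguments are left at the repository defaults, and eval_cls_conv.py will additionally need to be pointed at the trained checkpoint, so check its argparse options):

python train_cls_conv.py --num_point 512
python eval_cls_conv.py --num_point 512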
