记录下,以防下次又要重新配置
sudo apt-get update
sudo apt-get install cmake make gcc
uname -a
Linux l-ThinkStation-P340 5.4.0-122-generic #138~18.04.1-Ubuntu SMP Fri Jun 24 14:14:03 UTC 2022 x86_64 x86_64 x86_64 GNU/Linux
dpkg -l | grep linux
sudo apt-mark hold linux-image-5.4.0-122-generic linux-headers-5.4.0-122-generic linux-hwe-5.4-headers-5.4.0-122 linux-modules-5.4.0-122-generic linux-modules-extra-5.4.0-122-generic
dpkg --get-selections | grep hold
linux-headers-5.4.0-122-generic hold
linux-hwe-5.4-headers-5.4.0-122 hold
linux-image-5.4.0-122-generic hold
linux-modules-5.4.0-122-generic hold
linux-modules-extra-5.4.0-122-generic hold
插件
sudo apt install ros-melodic-usb-cam
rosdep update 一直无法通过的问题(常见为网络原因,需另行解决)
mkdir build
cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local/opencv34 ..
make -j16
sudo make install
安装torch1.10.2
对应cuda和cudnn
https://gist.github.com/ax3l/9489132
nvcc --version 查看cuda版本
set cuda 查看cuda位置
(1)下载安装文件
按需求下载cudnn的安装文件
(2)安装cudnn
解压下载的文件,可以看到cuda文件夹,在当前目录打开终端,执行如下命令:
sudo cp cuda/include/cudnn* /usr/local/cuda/include/
sudo cp cuda/lib64/cudnn* /usr/local/cuda/lib64/
sudo chmod a+r /usr/local/cuda/include/cudnn*
sudo chmod a+r /usr/local/cuda/lib64/cudnn*
cat /usr/local/cuda/include/cudnn_version.h | grep CUDNN_MAJOR -A 2 # 查看到版本信息即是安装成功
升级cmake
参考博客
libtorch 源码安装
conda install astunparse numpy ninja pyyaml mkl mkl-include setuptools cmake cffi typing_extensions future six requests dataclasses
conda install -c pytorch magma-cuda110
git clone --recursive https://github.com/pytorch/pytorch
cd pytorch
# if you are updating an existing checkout
git submodule sync
git submodule update --init --recursive --jobs 0
# 选择自己需要的Pytorch版本,需要与anaconda安装的pytorch版本对应
git tag -l
git checkout v1.10.2
git submodule update --init --force --recursive
#编译 cmake最好做下升级
export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"}
mkdir build
cd build
cmake ..
make -j16 # 时间还挺久的
sudo make install
#官方教程
export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"}
python setup.py install
# 编译成功后,会生成libtorch.so动态链接库,存储位置
动态链接库: /usr/local/lib/libtorch.so
功能包: /usr/local/share/cmake/Torch
测试程序
#include
#include
int main() {
torch::Tensor tensor = torch::rand({2, 3});
std::cout << tensor << std::endl;
}
# Example CMakeLists.txt linking a test executable against libtorch + OpenCV.
cmake_minimum_required(VERSION 3.0 FATAL_ERROR)
project(example)

# Path to TorchConfig.cmake. The notes above state libtorch was installed to
# /usr/local (package dir: /usr/local/share/cmake/Torch) — the original value
# (/usr/share/libtorch/...) contradicted that; adjust if your install differs.
set(Torch_DIR "/usr/local/share/cmake/Torch")
# Original line was `set(OpenCV_DIR OpenCV_DIR ...)` — the repeated variable
# name made OpenCV_DIR a two-element list; fixed to a single path value.
set(OpenCV_DIR "/usr/local/opencv34/share/OpenCV")

find_package(Torch REQUIRED)
find_package(OpenCV REQUIRED)

message(STATUS "Pytorch status:")
message(STATUS "    libraries: ${TORCH_LIBRARIES}")
message(STATUS "OpenCV library status:")
message(STATUS "    version: ${OpenCV_VERSION}")
message(STATUS "    libraries: ${OpenCV_LIBS}")
message(STATUS "    include path: ${OpenCV_INCLUDE_DIRS}")

add_executable(libtorch_test libtorch_test.cpp)
target_link_libraries(libtorch_test ${TORCH_LIBRARIES} ${OpenCV_LIBS})
# libtorch requires at least C++14.
set_property(TARGET libtorch_test PROPERTY CXX_STANDARD 14)
If you want to disable CUDA support, export the environment variable USE_CUDA=0
参考博客pytorch的github
TorchVision provides an example project for how to use the models on C++ using JIT Script.
Installation From source:
mkdir build
cd build
# Add -DWITH_CUDA=on support for the CUDA if needed
cmake ..
make
make install
Once installed, the library can be accessed in cmake (after properly configuring CMAKE_PREFIX_PATH) via the TorchVision::TorchVision target:
cmake示例
# TorchVision exports an imported target; linking it pulls in includes/libs.
find_package(TorchVision REQUIRED)
target_link_libraries(my-target PUBLIC TorchVision::TorchVision)