The PyTorch version must match the libtorch version. The existing environment used PyTorch 1.6.0; here PyTorch 1.7.0 is used instead.
Windows 10, CUDA 11.0 (check the CUDA version with:)
nvcc -V
conda create -n sotcuda11 --clone sot
conda activate sotcuda11
conda install pytorch==1.7.0 torchvision==0.8.0 torchaudio==0.7.0 cudatoolkit=11.0 -c pytorch
For a detailed walkthrough, see: https://blog.csdn.net/weixin_41449637/article/details/120036685
Export the model to TorchScript by tracing:
```python
import torch
import torchvision

print(torch.__version__)

# An instance of your model.
model = torchvision.models.resnet18()

# An example input you would normally provide to your model's forward() method.
example = torch.rand(1, 3, 224, 224)

# Use torch.jit.trace to generate a torch.jit.ScriptModule via tracing.
traced_script_module = torch.jit.trace(model, example)
traced_script_module.save("model.pt")
```
Open the exported model in Netron to check the version information: PyTorch 1.7.0, TorchScript format 1.6.
Loading and running the traced model on the CPU (main.cpp):
```cpp
#include <torch/script.h> // One-stop header.

#include <iostream>
#include <memory>
#include <vector>

int main() {
    // Deserialize the ScriptModule from a file using torch::jit::load().
    // Note: torch::jit::load() returns a Module by value, so the old
    // assert(module != nullptr) check no longer applies.
    torch::jit::script::Module module = torch::jit::load("E:\\vspro\\mysiam\\myLibtorch\\models\\model.pt");
    std::cout << "ok\n";

    // Create a vector of inputs.
    std::vector<torch::jit::IValue> inputs;
    inputs.push_back(torch::ones({ 1, 3, 224, 224 }));

    // Execute the model and turn its output into a tensor.
    at::Tensor output = module.forward(inputs).toTensor();
    std::cout << output.slice(/*dim=*/1, /*start=*/0, /*end=*/5) << '\n';

    while (1); // keep the console window open
}
```
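If the .pt path is wrong, or the file was exported by an incompatible PyTorch version, torch::jit::load() throws. Below is a minimal sketch of the more defensive loading pattern from the official libtorch tutorial, catching c10::Error; the model path is the same one used above:
```cpp
#include <torch/script.h> // One-stop header.

#include <iostream>

int main() {
    torch::jit::script::Module module;
    try {
        // torch::jit::load() throws c10::Error on a bad path or an
        // incompatible TorchScript file.
        module = torch::jit::load("E:\\vspro\\mysiam\\myLibtorch\\models\\model.pt");
    }
    catch (const c10::Error& e) {
        std::cerr << "error loading the model\n" << e.what() << std::endl;
        return -1;
    }
    std::cout << "model loaded ok\n";
    return 0;
}
```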
note:
E:\vspro\mysiam\myLibtorch\libtorch_170_cuda11\libtorch\share\cmake\Torch
contains the two CMake config files (TorchConfig.cmake and TorchConfigVersion.cmake) that find_package(Torch) looks for; when configuring with the CMake GUI, point Torch_DIR at this folder (or add the libtorch root to CMAKE_PREFIX_PATH).
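Before debugging model loading, it can also help to confirm that the project's libtorch include and library settings are correct with a tensor-only smoke test (no TorchScript involved); this is essentially the stock libtorch hello-world:
```cpp
#include <torch/torch.h>
#include <iostream>

int main() {
    // If the include/lib paths and the torch DLLs are set up correctly,
    // this prints a 2x3 tensor of random values.
    torch::Tensor tensor = torch::rand({ 2, 3 });
    std::cout << tensor << std::endl;
    return 0;
}
```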
----------------------------------------------------------------------------------------------------------------------
For the GPU version, build with CMake.
The CMakeLists.txt is shown below. The OpenCV path is hard-coded in it; the libtorch path is not, and is supplied in the CMake GUI during configuration (see the note about share\cmake\Torch above). Once CMake generation succeeds, open the generated .sln in VS2019 and rebuild; refer to the settings notes above.
```cmake
cmake_minimum_required(VERSION 3.12 FATAL_ERROR)
project(myresnet)

find_package(Torch REQUIRED)

set(OpenCV_DIR E:/OpenCV/opencv/build)
find_package(OpenCV REQUIRED)

if(NOT Torch_FOUND)
    message(FATAL_ERROR "Pytorch Not Found!")
endif(NOT Torch_FOUND)

message(STATUS "Pytorch status:")
message(STATUS "    libraries: ${TORCH_LIBRARIES}")
message(STATUS "OpenCV library status:")
message(STATUS "    version: ${OpenCV_VERSION}")
message(STATUS "    libraries: ${OpenCV_LIBS}")
message(STATUS "    include path: ${OpenCV_INCLUDE_DIRS}")

add_executable(myresnet main.cpp)
target_link_libraries(myresnet ${TORCH_LIBRARIES} ${OpenCV_LIBS})
# libtorch 1.5+ is built with C++14, so request at least that standard.
set_property(TARGET myresnet PROPERTY CXX_STANDARD 14)
```
Code to verify the GPU build, main.cpp:
```cpp
#include <torch/torch.h>
#include <iostream>
#include <vector>
#include <string>
#include <memory>
#include <torch/script.h> // One-stop header

int main() {
    std::cout << "CUDA available: " << torch::cuda::is_available() << std::endl;
    std::cout << "cuDNN available: " << torch::cuda::cudnn_is_available() << std::endl;

    // Deserialize the ScriptModule from a file using torch::jit::load().
    torch::jit::script::Module module = torch::jit::load("E:\\vspro\\mysiam\\myLibtorch\\models\\model.pt");
    module.to(torch::DeviceType::CUDA);

    // Create a vector of inputs on the GPU.
    std::vector<torch::jit::IValue> inputs;
    inputs.push_back(torch::ones({ 1, 3, 224, 224 }).to(torch::DeviceType::CUDA));

    // Execute the model and turn its output into a tensor.
    auto output = module.forward(inputs).toTensor();
    std::cout << output.device() << std::endl;
    // Move the result back to the CPU and print the first 5 values.
    std::cout << output.to(torch::DeviceType::CPU).slice(/*dim=*/1, /*start=*/0, /*end=*/5) << '\n';

    return 0;
}
```
note: the header for `#include <torch/torch.h>` lives under
myLibtorch\libtorch_170_cuda11\libtorch\include\torch\csrc\api\include\torch
while the one-stop header for `#include <torch/script.h>` lives under
myLibtorch\libtorch_170_cuda11\libtorch\include\torch
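The CMakeLists above links OpenCV, but the verification program never uses it. As a follow-up, here is a hedged sketch of how a real image could be fed through the traced ResNet-18 on the GPU: the test image path is hypothetical, and the mean/std values are the usual torchvision ImageNet constants, assumed to match how the model was trained and exported.
```cpp
#include <torch/script.h>
#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>

int main() {
    // Hypothetical test image path; replace with your own file.
    cv::Mat img = cv::imread("E:\\vspro\\mysiam\\myLibtorch\\models\\test.jpg");
    if (img.empty()) {
        std::cerr << "failed to read image\n";
        return -1;
    }

    // OpenCV loads BGR uint8; convert to 224x224 RGB float in [0, 1].
    cv::cvtColor(img, img, cv::COLOR_BGR2RGB);
    cv::resize(img, img, cv::Size(224, 224));
    img.convertTo(img, CV_32FC3, 1.0 / 255.0);

    // HWC buffer -> NCHW float tensor, then ImageNet normalization
    // (mean/std assumed here, matching torchvision's defaults).
    torch::Tensor input = torch::from_blob(img.data, { 1, 224, 224, 3 }, torch::kFloat)
                              .permute({ 0, 3, 1, 2 })
                              .clone(); // own the memory independently of img
    input = input.sub_(torch::tensor({ 0.485, 0.456, 0.406 }, torch::kFloat).view({ 1, 3, 1, 1 }))
                 .div_(torch::tensor({ 0.229, 0.224, 0.225 }, torch::kFloat).view({ 1, 3, 1, 1 }));

    torch::jit::script::Module module = torch::jit::load("E:\\vspro\\mysiam\\myLibtorch\\models\\model.pt");
    module.to(torch::kCUDA);
    module.eval();

    std::vector<torch::jit::IValue> inputs;
    inputs.push_back(input.to(torch::kCUDA));

    torch::NoGradGuard no_grad; // inference only
    torch::Tensor output = module.forward(inputs).toTensor();

    // Index of the highest-scoring ImageNet class.
    std::cout << "predicted class: " << output.argmax(1).item<int64_t>() << std::endl;
    return 0;
}
```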