Environment: Windows + VSCode + OpenCV 3.4.15
① Convert the trained model to ONNX with Python.
② Load the ONNX model from C++ and run inference.
① Python code
ResUnet.py
import torch
from torch import nn
import torch.nn.functional as F
class DoubleConv(nn.Module):
    """(convolution => [BN] => ReLU) * 2"""
    def __init__(self, inChannels, outChannels, midChannels=None):
        super().__init__()
        if not midChannels:
            midChannels = outChannels
        self.doubleConv = nn.Sequential(
            nn.Conv2d(inChannels, midChannels, kernel_size=3, padding=1),
            nn.BatchNorm2d(midChannels),
            nn.ReLU(inplace=True),
            nn.Conv2d(midChannels, outChannels, kernel_size=3, padding=1),
            nn.BatchNorm2d(outChannels),
            nn.ReLU(inplace=True)
        )

    def forward(self, inNet):
        return self.doubleConv(inNet)


class ResBlock(nn.Module):
    def __init__(self, inChannels, outChannels):
        super(ResBlock, self).__init__()
        self.down1Sample = nn.Sequential(
            nn.Conv2d(inChannels, outChannels, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(outChannels))
        self.doubleConv = DoubleConv(inChannels, outChannels)
        self.down2Sample = nn.MaxPool2d(2)
        self.relu = nn.ReLU()

    def forward(self, inNet):
        identity = self.down1Sample(inNet)
        outNet = self.doubleConv(inNet)
        outNet = self.relu(outNet + identity)
        return self.down2Sample(outNet), outNet


class UpBlock(nn.Module):
    def __init__(self, inChannels, outChannels):
        super(UpBlock, self).__init__()
        self.upSample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        self.doubleConv = DoubleConv(inChannels, outChannels)

    def forward(self, downInput, skipInput):
        inNet = self.upSample(downInput)
        # input is CHW
        dify = skipInput.size()[2] - inNet.size()[2]
        difx = skipInput.size()[3] - inNet.size()[3]
        inNet = F.pad(inNet, [difx // 2, difx - difx // 2,
                              dify // 2, dify - dify // 2])
        inNet = torch.cat([inNet, skipInput], dim=1)
        return self.doubleConv(inNet)


class OutConv(nn.Module):
    def __init__(self, inChannels, outChannels):
        super(OutConv, self).__init__()
        self.conv = nn.Conv2d(inChannels, outChannels, kernel_size=1)

    def forward(self, inNet):
        return self.conv(inNet)


class ResUnet(nn.Module):
    """
    Hybrid solution of resnet blocks and double conv blocks
    """
    def __init__(self, inChannels=3, nClasses=1):
        super(ResUnet, self).__init__()
        self.downConv = nn.ModuleList()
        neuronNum = [64, 128, 256, 512, 1024]
        preChannels = inChannels
        for num in neuronNum[0:-1]:
            self.downConv.append(ResBlock(preChannels, num))
            preChannels = num
        self.doubleConv = DoubleConv(preChannels, neuronNum[-1])
        self.upConv = nn.ModuleList()
        for num1, num2 in zip(neuronNum[1::][::-1], neuronNum[0:-1][::-1]):
            self.upConv.append(UpBlock(num1 + num2, num2))
        self.convFinal = OutConv(num2, nClasses)

    def forward(self, inNet):
        skipOuts = []
        for cnt, down in enumerate(self.downConv):
            inNet, skipOut = down(inNet)
            skipOuts.append(skipOut)
        inNet = self.doubleConv(inNet)
        for cnt, up in enumerate(self.upConv):
            inNet = up(inNet, skipOuts[-1 - cnt])
        outNet = self.convFinal(inNet)
        return outNet
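Before exporting, it is worth sanity-checking that the network builds and that a 512×512 input comes back at the same resolution. A minimal sketch (not part of the original project; the file name and dummy input are only illustrative):
checkShape.py
import torch
from ResUnet import ResUnet

if __name__ == '__main__':
    model = ResUnet(inChannels=3, nClasses=1).eval()
    dummy = torch.rand(1, 3, 512, 512)   # N x C x H x W, matching the 512x512 size used later
    with torch.no_grad():
        out = model(dummy)
    print(out.shape)                     # expected: torch.Size([1, 1, 512, 512])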
export.py
import torch
from collections import OrderedDict

from ResUnet import ResUnet

DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
calSize = (512, 512)               # input H, W; matches the 512x512 resize on the C++ side
modelOnnx = 'weights/ctSeg.onnx'   # output path; matches the path loaded by the C++ code


def export(ckpt):
    model = ResUnet().to(DEVICE)
    stateDict = torch.load(ckpt)['state_dict']
    new_state_dict = OrderedDict()
    for key, val in stateDict.items():
        name = key[7:]  # remove the "module." prefix added by nn.DataParallel
        new_state_dict[name] = val
    model.load_state_dict(new_state_dict)
    model.eval()
    inNet = torch.rand(1, 3, calSize[0], calSize[1]).to(DEVICE)
    torch.onnx.export(model, inNet, modelOnnx, opset_version=11, verbose=True,
                      export_params=True, do_constant_folding=True)
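After export it is worth validating the ONNX file and comparing it against the PyTorch model before moving to C++. A minimal sketch, assuming the onnx and onnxruntime packages are installed and reusing model, modelOnnx and calSize from export.py:
verifyOnnx.py
import numpy as np
import onnx
import onnxruntime as ort
import torch

def verify(model, modelOnnx, calSize):
    onnx.checker.check_model(onnx.load(modelOnnx))   # structural check of the exported graph
    sess = ort.InferenceSession(modelOnnx)
    inNet = torch.rand(1, 3, calSize[0], calSize[1])
    with torch.no_grad():
        refOut = model.cpu()(inNet).numpy()          # reference output from the PyTorch model
    inputName = sess.get_inputs()[0].name
    onnxOut = sess.run(None, {inputName: inNet.numpy()})[0]
    print(np.abs(refOut - onnxOut).max())            # should be close to 0 (e.g. below 1e-4)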
② C++ code:
#include <iostream>
#include <string>
#include <vector>
#include <algorithm>
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
using namespace cv::dnn;
cv::Scalar meanDefault {0.485, 0.456, 0.406};
cv::Scalar stdDefault {0.229, 0.224, 0.225};
std::vector<std::string> extensions {"jpg", "bmp", "png", "jpeg"};
const int topk = 2;
static const string kWinName = "CT lung seg in OpenCV";
typedef struct
{
    int index;
    double value;
} sortArray;
cv::Mat Preprocess(cv::Mat pendImg)
{
    cv::Mat postImg;
    cv::resize(pendImg, postImg, cv::Size(512, 512));
    postImg.convertTo(postImg, CV_32F, 1.0 / 255.0);
    cv::subtract(postImg, meanDefault, postImg);
    cv::divide(postImg, stdDefault, postImg);
    return postImg;
}
bool cmp(sortArray a, sortArray b)
{
    return a.value > b.value;
}
// Keep only the topk largest external contours of the predicted mask.
std::vector<std::vector<cv::Point>> Postprocess(cv::Mat pendMask)
{
    cv::Mat bwImg;
    std::vector<std::vector<cv::Point>> contours;
    cv::threshold(pendMask, bwImg, 1, 255.0, cv::THRESH_BINARY | cv::THRESH_OTSU);
    cv::findContours(bwImg, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_NONE);
    std::vector<sortArray> sortedArray(contours.size());
    for (int i = 0; i < (int)contours.size(); i++)
    {
        sortedArray[i].index = i;
        sortedArray[i].value = cv::contourArea(contours[i]);
    }
    std::sort(sortedArray.begin(), sortedArray.end(), cmp);
    std::vector<std::vector<cv::Point>> nContours;
    for (int i = 0; i < std::min((int)contours.size(), topk); i++)
    {
        nContours.push_back(contours[sortedArray[i].index]);
    }
    return nContours;
}
void GetImgFilenames(std::string path, std::vector<std::string>& imgFilenames)
{
    // imgFilenames.clear();
    if (path.find(".") != std::string::npos)
    {
        imgFilenames.push_back(path);
    }
    else
    {
        std::string fpath = path.append("*.*");
        std::vector<cv::String> allfiles;
        cv::glob(fpath, allfiles);
        for (int i = 0; i < allfiles.size(); i++)
        {
            size_t iPos = allfiles[i].rfind('.');
            std::string fileEx = allfiles[i].substr(iPos + 1, allfiles[i].length());
            // cout << fileEx << endl;
            if (std::find(extensions.begin(), extensions.end(), fileEx) != extensions.end())
            {
                imgFilenames.push_back(allfiles[i]);
            }
        }
    }
    // return;
}
cv::Mat Seg(std::string modelPath, std::string imgPath){
    // Net net = cv::dnn::readNetFromONNX(modelPath);
    cv::dnn::Net net = cv::dnn::readNet(modelPath);
    net.setPreferableBackend(cv::dnn::DNN_BACKEND_OPENCV);
    net.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);
    cv::Mat Oimg = imread(imgPath);
    cv::Mat img = Preprocess(Oimg.clone());
    cv::Mat blob = cv::dnn::blobFromImage(img, 1.0, Size(512, 512), cv::Scalar(0, 0, 0), true, false);
    net.setInput(blob);
    cv::Mat predOut = net.forward();
    std::vector<cv::Mat> predTmp;
    cv::dnn::imagesFromBlob(predOut, predTmp);
    cv::Mat predMask;
    predTmp[0] = (predTmp[0] > 0);
    predTmp[0].convertTo(predMask, CV_8UC1);
    return predMask;
}
int main()
{
    std::string modelBin = "weights/ctSeg.onnx";
    std::string path = "imgs/";
    std::vector<std::string> imgfiles;
    GetImgFilenames(path, imgfiles);
    for (int n = 0; n < (int)imgfiles.size(); n++)
    {
        cv::Mat imgShow = cv::imread(imgfiles[n]);
        // contours come from the 512x512 mask frame, so bring the display image to the same size
        cv::resize(imgShow, imgShow, cv::Size(512, 512));
        cv::Mat predMask = Seg(modelBin, imgfiles[n]);
        std::vector<std::vector<cv::Point>> nContours = Postprocess(predMask);
        for (int i = 0; i < nContours.size(); i++)
        {
            cv::drawContours(imgShow, nContours, i, Scalar(0, 255, 0), 2, 8);
        }
        cv::imshow("iShow", imgShow);
        cv::waitKey(0);
    }
    return 0;
}
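Before wiring up the C++ build, the same pipeline can also be cross-checked from Python with cv2.dnn. A minimal sketch, assuming opencv-python 3.4.x or newer and the paths used above (the test image name is hypothetical):
dnnCheck.py
import cv2
import numpy as np

net = cv2.dnn.readNet('weights/ctSeg.onnx')
img = cv2.imread('imgs/example.png')                       # hypothetical test image
img = cv2.resize(img, (512, 512)).astype(np.float32) / 255.0
img -= np.array([0.485, 0.456, 0.406], dtype=np.float32)   # same mean/std as the C++ Preprocess
img /= np.array([0.229, 0.224, 0.225], dtype=np.float32)
blob = cv2.dnn.blobFromImage(img, 1.0, (512, 512), (0, 0, 0), swapRB=True, crop=False)
net.setInput(blob)
mask = ((net.forward()[0, 0] > 0) * 255).astype(np.uint8)  # binarize the single-channel output
cv2.imwrite('maskPreview.png', mask)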
Link: https://pan.baidu.com/s/1DDweuwcpSubLotU79c-jFw
Extraction code: ZDWD
Note: this model was built when I had just started with deep learning, so it uses a network that was common at the time, and the resulting model is on the large side.