Calling a PyTorch-trained classification model from C++ with OpenCV

Exporting the trained model from PyTorch


import torch.onnx

# dummy input with the shape the network expects (NCHW), on the same device as the model
d = torch.rand(1, 3, 224, 224, dtype=torch.float, device='cuda')
m = model_ft
o = model_ft(d)  # optional sanity-check forward pass

onnx_path = "onnx_model_name.onnx"
torch.onnx.export(m, d, onnx_path)
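
Before moving to C++, it is worth a quick sanity check that OpenCV's DNN module can read the exported file and produces roughly the same output as PyTorch. A minimal sketch (assuming opencv-python is installed and that model_ft and the dummy input d from the export cell are still in scope):

import cv2
import numpy as np

# load the exported graph with OpenCV's DNN module
net = cv2.dnn.readNetFromONNX(onnx_path)

# feed the same dummy input that was used for the export (NCHW float blob)
net.setInput(d.detach().cpu().numpy())
cv_out = net.forward()

# recompute the PyTorch output in eval mode for a fair comparison
model_ft.eval()
with torch.no_grad():
    torch_out = model_ft(d).cpu().numpy()

print(np.abs(cv_out - torch_out).max())  # should be close to 0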
 

Calling the model from C++

#include "opencv2/dnn/dnn.hpp"

using namespace cv;
using namespace cv::dnn;
using namespace std;

void Classification_good()
{
	// Load the model and set input parameters
	clock_t  st = clock();
	string model = "C:\\Users\\Ring\\Desktop\\A_jupyter\\pytorch\\test\\onnx_model_name.onnx";
	ClassificationModel dnn_model(model);
	dnn_model.setPreferableBackend(DNN_BACKEND_CUDA);
	dnn_model.setPreferableTarget(DNN_TARGET_CUDA);
	float scale = 1.0 / 255;
	int inpWidth = 224, inpHeight = 224;
	Scalar mean(0, 0, 0);
	dnn_model.setInputParams(scale, Size(inpWidth, inpHeight), mean, true, false);

	clock_t  end = clock();
	cout << end - st << endl;  // model load time, in clock ticks

	// Iterate over an image folder and classify each image
	String folder = "C:\\Users\\Ring\\Desktop\\A_jupyter\\pytorch\\test\\Neu\\val\\Rs/";
	vector<String> imagePathList;
	glob(folder, imagePathList);

	cout << "test In C++!" << endl;
	for (int i = 0; i < imagePathList.size(); i++)
	{
		Mat img = imread(imagePathList[i]);
		resize(img, img, Size(224, 224), 0, 0, INTER_LANCZOS4);

		// Note: classify() below does its own blob conversion via setInputParams,
		// so this explicit float copy is not actually used for inference.
		Mat img_t;
		img.convertTo(img_t, CV_32FC3);

		int classIds;
		float confs;
		double time1 = static_cast<double>(getTickCount());
		dnn_model.classify(img, classIds, confs);  // forward pass; classIds is the class index (0 = scratch, 1 = particle)
		double time2 = (static_cast<double>(getTickCount()) - time1) / getTickFrequency();
		cout << classIds << endl;
		cout << "time: " << time2 << endl;
	}
}

 

 

Training the model

 

import numpy as np
import torchvision
from torchvision import datasets, transforms, models
import torch
import torch.nn as nn
import torch.optim as optim

import matplotlib.pyplot as plt
import time
import os
import copy
print("Torchvision Version: ", torchvision.__version__)
print("PyTorch Version: ", torch.__version__)

data_dir = "./Neu"
batch_size = 32
input_size = 224

all_imgs = datasets.ImageFolder(os.path.join(data_dir, "train"),
                                transforms.Compose([
        transforms.RandomResizedCrop(input_size),  # crop/resize each image to the 224x224 input ResNet expects
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
    ]))
loader = torch.utils.data.DataLoader(all_imgs, batch_size=batch_size, shuffle=True, num_workers=4)

img = next(iter(loader))[0]

img[0][1].dtype

# display a torch tensor as an image with matplotlib
unloader = transforms.ToPILImage()
plt.imshow(unloader(img[1].squeeze(0)))

data_transforms = {
    "train": transforms.Compose([
        transforms.RandomResizedCrop(input_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        #transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    "val": transforms.Compose([
        transforms.Resize(input_size),
        transforms.CenterCrop(input_size),
        transforms.ToTensor(),
        #transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}

# Create training and validation datasets
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in ['train', 'val']}
# Create training and validation dataloaders
dataloaders_dict = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size, shuffle=True, num_workers=4) for x in ['train', 'val']}
# Store the dataloaders in a dict keyed by 'train' / 'val' so each phase can be looked up later.
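
The C++ snippet earlier relies on knowing which numeric index classify() returns for which class. ImageFolder assigns indices by sorting the class folder names alphabetically, and the mapping can be printed directly (the names shown will be whatever the subdirectories of Neu/train are called):

# ImageFolder sorts the class folders alphabetically and maps them to indices 0..N-1;
# this is the same index that ClassificationModel::classify() returns on the C++ side
print(image_datasets["train"].classes)        # class names in index order
print(image_datasets["train"].class_to_idx)   # {class_name: index} mapping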

# Detect if we have a GPU available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

inputs, labels = next(iter(dataloaders_dict["train"]))  # one batch
print(inputs.shape)
print(labels)



for inputs, labels in dataloaders_dict["train"]:
    print(labels.size())  # the last batch may have fewer than 32 samples

model_name = "resnet"
num_classes = 6
num_epochs = 10
feature_extract = True  # only train the newly added layer(s); keep the pretrained backbone frozen

def set_parameter_requires_grad(model, feature_extracting):
    if feature_extracting:
        for param in model.parameters():
            param.requires_grad = False  # freeze the pretrained parameters (no gradient updates)

# Initialize the model
def initialize_model(model_name, num_classes, feature_extract, use_pretrained=True):
    if model_name == "resnet":
        model_ft = models.resnet50(pretrained=use_pretrained)
        # if True, load the ImageNet-pretrained weights

        set_parameter_requires_grad(model_ft, feature_extract)  # freeze the pretrained parameters
        num_ftrs = model_ft.fc.in_features
        # model_ft.fc is ResNet's final fully connected layer,
        # e.g. (fc): Linear(in_features=2048, out_features=1000, bias=True) for resnet50
        # in_features is the input feature dimension of that layer
        #print(num_ftrs)
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
        # replace out_features=1000 with num_classes
        input_size = 224  # ResNet-18 expects 224x224 input, as do ResNet-34/50/101/152

    return model_ft, input_size
model_ft, input_size = initialize_model(model_name, num_classes, feature_extract, use_pretrained=True)
print(model_ft)

next(iter(model_ft.named_parameters()))

len(next(iter(model_ft.named_parameters())))

for name,param in model_ft.named_parameters():
    print(name)  # list all parameter names

model_ft = model_ft.to(device)
params_to_update = model_ft.parameters()  # parameters that will be updated
print("Params to learn:")
if feature_extract:
    params_to_update = []  # collect only the parameters that should be updated
    for name, param in model_ft.named_parameters():
        # see the cell above for what named_parameters() yields
        if param.requires_grad == True:
            # layers before the new fc have requires_grad == False;
            # the newly added fc layer has requires_grad == True
            params_to_update.append(param)
            print("\t", name)
else:  # otherwise, all parameters are updated
    for name, param in model_ft.named_parameters():
        if param.requires_grad == True:
            print("\t", name)

# Observe that all parameters being optimized are in params_to_update
optimizer_ft = optim.SGD(params_to_update, lr=0.001, momentum=0.9)  # define the optimizer
# Setup the loss function
criterion = nn.CrossEntropyLoss()  # define the loss function
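
Since feature_extract is True here, only the weight and bias of the new fc layer should end up in params_to_update; a quick check (the expected count of 2 assumes only the fc layer was replaced):

print(len(params_to_update))                     # expected: 2 (fc.weight and fc.bias)
print(sum(p.numel() for p in params_to_update))  # number of trainable scalars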

# Training and validation combined in one function
def train_model(model, dataloaders, criterion, optimizer, num_epochs=5):
    since = time.time()
    val_acc_history = []
    best_model_wts = copy.deepcopy(model.state_dict())  # deep-copy the initial model weights
    # on .copy vs .deepcopy see: https://blog.csdn.net/u011630575/article/details/78604226
    best_acc = 0.
    for epoch in range(num_epochs):
        print("Epoch {}/{}".format(epoch, num_epochs-1))
        print("-"*10)
        
        for phase in ["train", "val"]:
            running_loss = 0.
            running_corrects = 0.
            if phase == "train":
                model.train()
            else: 
                model.eval()
            
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                
                with torch.autograd.set_grad_enabled(phase=="train"):
                    # set_grad_enabled toggles gradient tracking on or off;
                    # phase == "train" evaluates to True/False (note the double equals)
                    outputs = model(inputs)
                    loss = criterion(outputs, labels)
                    
                _, preds = torch.max(outputs, 1)
                # torch.max returns the per-row max value and its index; preds holds the indices
                # equivalently: preds = outputs.argmax(dim=1)
                if phase == "train":
                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()
                    
                running_loss += loss.item() * inputs.size(0)  # CrossEntropyLoss is batch-averaged, so scale back up
                running_corrects += torch.sum(preds.view(-1) == labels.view(-1)).item()
                # .view(-1) flattens both tensors to 1-D before comparing
            
            epoch_loss = running_loss / len(dataloaders[phase].dataset)
            epoch_acc = running_corrects / len(dataloaders[phase].dataset)
       
            print("{} Loss: {} Acc: {}".format(phase, epoch_loss, epoch_acc))
            if phase == "val" and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
                # the model improved, so snapshot the updated weights
                
            if phase == "val":
                val_acc_history.append(epoch_acc)  # record validation accuracy for every epoch
            
        print()
    
    time_elapsed = time.time() - since
    print("Training complete in {}m {}s".format(time_elapsed // 60, time_elapsed % 60))
    print("Best val Acc: {}".format(best_acc))
    
    model.load_state_dict(best_model_wts)  # load the best weights back into the model
    return model, val_acc_history

# Train and evaluate
model_ft, ohist = train_model(model_ft, dataloaders_dict, criterion, optimizer_ft, num_epochs=num_epochs)

# Initialize the non-pretrained version of the model used for this run
scratch_model,_ = initialize_model(model_name, 
                                   num_classes, 
                                   feature_extract=False,  # train all parameters
                                   use_pretrained=False)   # do not use the ImageNet weights
scratch_model = scratch_model.to(device)
scratch_optimizer = optim.SGD(scratch_model.parameters(), 
                              lr=0.001, momentum=0.9)
scratch_criterion = nn.CrossEntropyLoss()
_,scratch_hist = train_model(scratch_model, 
                             dataloaders_dict, 
                             scratch_criterion, 
                             scratch_optimizer, 
                             num_epochs=num_epochs)
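
To see what the pretrained weights buy you, the two validation-accuracy histories can be plotted against each other; a minimal sketch (assuming both training runs above finished, with ohist and scratch_hist as produced by train_model):

# compare fine-tuning vs. training from scratch across epochs
plt.title("Validation Accuracy vs. Number of Training Epochs")
plt.xlabel("Training Epochs")
plt.ylabel("Validation Accuracy")
plt.plot(range(1, num_epochs + 1), ohist, label="Pretrained")
plt.plot(range(1, num_epochs + 1), scratch_hist, label="Scratch")
plt.legend()
plt.show()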



# Save the model: this ONNX file can be read and used for prediction in OpenCV

import torch.onnx
import netron

# dummy input with the shape the network expects (NCHW), on the same device as the model
d = torch.rand(1, 3, 224, 224, dtype=torch.float, device='cuda')
m = model_ft
o = model_ft(d)  # optional sanity-check forward pass

onnx_path = "onnx_model_name.onnx"
torch.onnx.export(m, d, onnx_path)

netron.start(onnx_path)  # open the exported graph in the netron viewer
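
If OpenCV refuses to read the file (unsupported layer or shape errors), exporting in eval mode with an explicit opset and named inputs and outputs sometimes helps; a variant of the export call above (opset_version=11 is an assumption, pick whatever your OpenCV build supports):

model_ft.eval()  # make sure BatchNorm / Dropout are traced in inference mode
torch.onnx.export(m, d, onnx_path,
                  export_params=True,
                  opset_version=11,          # assumed value; adjust to your OpenCV version
                  input_names=["input"],
                  output_names=["output"])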





 
