深度学习中用到的一些函数
---
- 1.zip文件解压到任意路径下(python)
- 2.遍历文件夹下的内容,根据后缀名进行筛选,只保存除后缀名外的名字(python)
- 3.设置使用断点续训(python)
- 4.优化器设置及学习率衰减策略(python)
- 5.Tensorboard可视化代码(python)
- 6.模型训练及测试及Tensorboard可视化的主要步骤(python)
1.zip文件解压到任意路径下(python)
# 1. Extract a zip archive into an arbitrary directory.
import zipfile

# Context manager guarantees the archive is closed even if extraction fails
# (the original leaked the handle on any exception before f.close()).
with zipfile.ZipFile("mobilenet-v4-torch-AI.zip", 'r') as f:
    # extractall() processes every member in one call, replacing the manual
    # per-member extract() loop, and preserves the archive's directory layout.
    f.extractall("work/")
2.遍历文件夹下的内容,根据后缀名进行筛选,只保存除后缀名外的名字(python)
# 2. Scan a directory and collect file names (without extension) filtered by suffix.
import os

path = r"G:\pycharm"
# Comprehension replaces the manual append loop: keep the stem
# (os.path.splitext(entry)[0]) of every entry ending in ".png".
name = [os.path.splitext(entry)[0]
        for entry in os.listdir(path)
        if entry.endswith(".png")]
print(name)
3.设置使用断点续训(python)
# 3. Checkpoint saving / resume-from-checkpoint snippet.
# NOTE(review): this is a fragment — `model`, `optimizer`, `lr_scheduler`,
# `epoch`, `torch` and `args` must be defined by the surrounding training
# script, and `parser.parse_args()` is never called here; confirm before reuse.
import argparse
parser = argparse.ArgumentParser(description=__doc__)
# Path of a checkpoint file to resume from; empty string means train from scratch.
parser.add_argument('--resume', default='', type=str, help='resume from checkpoint')
# Bundle every piece of state needed to resume training exactly where it stopped.
save_files = {
'model': model.state_dict(),
'optimizer': optimizer.state_dict(),
'lr_scheduler': lr_scheduler.state_dict(),
'epoch': epoch}
# One checkpoint file per epoch under ./save_weights/.
torch.save(save_files, "./save_weights/model_{}.pth".format(epoch))
# Restore all saved states when a checkpoint path was supplied.
if args.resume:
# map_location='cpu' lets a GPU-trained checkpoint load on any machine.
checkpoint = torch.load(args.resume, map_location='cpu')
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
# Resume at the epoch after the one that was saved.
args.start_epoch = checkpoint['epoch'] + 1
4.优化器设置及学习率衰减策略(python)
# 4. Optimizer setup and learning-rate decay strategy.
if __name__ == '__main__':  # fixed: original "if__name__" was a syntax error
    import argparse

    parser = argparse.ArgumentParser(description=__doc__)
    # Initial learning rate — the original read args.lr but never defined it.
    parser.add_argument('--lr', default=0.01, type=float,
                        help='initial learning rate')
    parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                        help='momentum')
    parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
                        metavar='W', help='weight decay (default: 1e-4)',
                        dest='weight_decay')
    parser.add_argument('--lr-steps', default=[5, 8], nargs='+', type=int,
                        help='decrease lr every step-size epochs')
    parser.add_argument('--lr-gamma', default=0.1, type=float,
                        help='decrease lr by a factor of lr-gamma')
    args = parser.parse_args()

    # NOTE(review): `xxxx` is a placeholder for the real model class;
    # `device` and `torch` must be provided by the surrounding script.
    model = xxxx()
    model.to(device)

    # Only optimize parameters that require gradients (frozen layers are skipped).
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(params, lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    # Multiply the lr by lr-gamma at each epoch listed in lr-steps.
    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                        milestones=args.lr_steps,
                                                        gamma=args.lr_gamma)
5.Tensorboard可视化代码(python)
# 5. Minimal TensorBoard logging example.
from torch.utils.tensorboard import SummaryWriter

# The original created two writers and leaked the first; one writer is enough.
# Event files go under ./tensorboard (view with: tensorboard --logdir tensorboard).
writer = SummaryWriter("tensorboard")
# Log the curve y = 2x: add_scalar(tag, scalar_value, global_step).
# The original referenced an undefined `i`; supply it with a loop.
for i in range(100):
    writer.add_scalar('y=2x', i * 2, i)
writer.close()  # flush pending events to disk
6.模型训练及测试及Tensorboard可视化的主要步骤(python)
# 6. Main steps of model training, validation and TensorBoard logging.
from torch.utils.tensorboard import SummaryWriter

# The original created two writers and leaked the first; keep the one that
# actually writes under ./tensorboard.
writer = SummaryWriter("tensorboard")

if __name__ == '__main__':  # fixed: original "if__name__" was a syntax error
    import argparse

    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--start_epoch', default=0, type=int, help='start epoch')
    parser.add_argument('--epochs', default=10, type=int, metavar='N',
                        help='number of total epochs to run')
    args = parser.parse_args()

    # NOTE(review): fragment — `model`, `tqdm`, `sys`, `train_loader`,
    # `validate_loader`, `optimizer`, `loss_function`, `device`, `val_num`,
    # `train_steps`, `best_acc` and `save_path` come from the surrounding script.
    for epoch in range(args.start_epoch, args.epochs):
        # ---- training ----
        model.train()
        train_loss = 0.0
        train_bar = tqdm(train_loader, file=sys.stdout)
        for step, data in enumerate(train_bar):
            images, labels = data
            optimizer.zero_grad()
            logits = model(images.to(device))
            loss = loss_function(logits, labels.to(device))
            loss.backward()
            optimizer.step()
            train_loss += loss.item()
            # fixed: original formatted with the undefined name `epochs`
            train_bar.desc = "train epoch[{}/{}] loss:{:.3f}".format(
                epoch + 1, args.epochs, loss)

        # ---- validation ----
        model.eval()
        acc = 0.0
        # no_grad() skips gradient bookkeeping during evaluation.
        with torch.no_grad():
            val_bar = tqdm(validate_loader, file=sys.stdout)
            for val_data in val_bar:
                val_images, val_labels = val_data
                outputs = model(val_images.to(device))
                # Index of the max logit along dim 1 is the predicted class.
                predict_y = torch.max(outputs, dim=1)[1]
                acc += torch.eq(predict_y, val_labels.to(device)).sum().item()
                # fixed: original formatted with the undefined name `epochs`
                val_bar.desc = "valid epoch[{}/{}]".format(epoch + 1, args.epochs)
        val_accurate = acc / val_num

        print('[epoch %d] train_loss: %.3f val_accuracy: %.3f'
              % (epoch + 1, train_loss / train_steps, val_accurate))
        # ---- TensorBoard logging ----
        writer.add_scalar('train loss', train_loss / train_steps, epoch)
        writer.add_scalar('val accurate', val_accurate, epoch)

        # Keep only the best checkpoint by validation accuracy.
        if val_accurate > best_acc:
            best_acc = val_accurate
            torch.save(model.state_dict(), save_path)