CenterNet (Objects as Points) open-source code: https://github.com/xingyizhou/CenterNet
Code structure:
CenterNet-master
|
|--data # datasets are placed here
|
|--models # trained models
|
|--src # source code
We mainly look at the structure of src:
src # source code structure
|
|-- lib # this project's own library code
|
|-- tools # helper tools
|
|-- _init_paths.py # adds lib to sys.path so that this project's lib is searched first on import (see the sketch after this list)
|
|-- demo.py # a handy, ready-to-use demo
|
|-- main.py # the overall training pipeline
|
|-- test.py # evaluation, as the name suggests
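For orientation, _init_paths.py follows the usual path-injection pattern: it prepends the project's lib directory to sys.path so that imports such as from opts import opts resolve to this repo's code rather than to any installed package. A minimal sketch of that pattern (the file in the repo may differ in details):

import os.path as osp
import sys

def add_path(path):
  # Insert at the front of sys.path so this repo's lib wins over installed packages
  if path not in sys.path:
    sys.path.insert(0, path)

this_dir = osp.dirname(__file__)
add_path(osp.join(this_dir, 'lib'))  # <repo>/src/lib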
lib structure:
lib # modules written by the authors for this project
|
|-- datasets # dataset construction
|
|-- detectors # detector construction
|
|-- external # external code pulled in, e.g. NMS
|
|-- models # network models
|
|-- trains # the training procedure
|
|-- utils # utilities, e.g. image augmentation
|
|-- logger.py # logging
|
|-- opts.py # defines and parses command-line arguments (see the sketch after this list)
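As a rough illustration of opts.py (not its full option list): it wraps argparse, and parse() builds the opt namespace that main.py consumes, also post-processing string options such as lr_step into Python lists. The sketch below is illustrative only; the names mirror what main.py uses, but the real class (named opts, lower-case, in lib/opts.py) has many more arguments and the defaults shown here are not authoritative.

import argparse

class opts(object):  # illustrative sketch, not the repo's full implementation
  def __init__(self):
    self.parser = argparse.ArgumentParser()
    self.parser.add_argument('task', help='e.g. ctdet')
    self.parser.add_argument('--dataset', default='coco')
    self.parser.add_argument('--load_model', default='', help='path to a checkpoint to load')
    self.parser.add_argument('--lr', type=float, default=1.25e-4)
    self.parser.add_argument('--lr_step', type=str, default='90,120')
    self.parser.add_argument('--num_epochs', type=int, default=140)

  def parse(self):
    opt = self.parser.parse_args()
    opt.lr_step = [int(i) for i in opt.lr_step.split(',')]  # '90,120' -> [90, 120]
    return opt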
tools structure:
tools # mostly per-dataset evaluation modules and data-processing tools
|
|-- kitti_eval # evaluation on the KITTI dataset
|
|-- voc_eval_lib
|
|-- _init_paths.py # adds lib to sys.path
|
|-- calc_coco_overlap.py # IoU-related computation (an illustrative IoU snippet follows this list)
|
|-- convert_hourglass_weight.py
|
|-- convert_kitti_to_coco.py
|
|-- eval_coco.py
|
|-- eval_coco_hp.py
|
|-- merge_pascal_json.py
|
|-- reval.py
|
|-- vis_pred.py
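calc_coco_overlap.py works with box overlaps; the basic quantity behind it is the standard IoU between two boxes. A self-contained illustration (not the repo's actual implementation) for boxes given as corner coordinates (x1, y1, x2, y2):

def box_iou(a, b):
  # a, b: boxes as (x1, y1, x2, y2)
  ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
  ix2, iy2 = min(a[2], b[2]), min(a[3], b[3])
  inter = max(ix2 - ix1, 0.0) * max(iy2 - iy1, 0.0)
  area_a = (a[2] - a[0]) * (a[3] - a[1])
  area_b = (b[2] - b[0]) * (b[3] - b[1])
  union = area_a + area_b - inter
  return inter / union if union > 0 else 0.0

print(box_iou((0, 0, 10, 10), (5, 5, 15, 15)))  # 25 / 175 ≈ 0.143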
Let's follow main.py to get familiar with how the whole of CenterNet runs:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import torch
import torch.utils.data
from opts import opts
from models.model import create_model, load_model, save_model
from models.data_parallel import DataParallel
from logger import Logger
from datasets.dataset_factory import get_dataset
from trains.train_factory import train_factory
def main(opt):
  # Fix the random seed and let cuDNN benchmark conv algorithms (unless disabled or testing)
  torch.manual_seed(opt.seed)
  torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test

  # The dataset factory returns the Dataset class for the chosen dataset and task;
  # dataset-dependent fields and the output heads are then filled into opt
  # (an example of the resulting opt.heads is given after this listing).
  Dataset = get_dataset(opt.dataset, opt.task)
  opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
  print(opt)

  logger = Logger(opt)

  os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
  opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')

  print('Creating model...')
  # Backbone architecture (e.g. dla_34, hourglass) plus the task-specific output heads
  model = create_model(opt.arch, opt.heads, opt.head_conv)
  optimizer = torch.optim.Adam(model.parameters(), opt.lr)
  start_epoch = 0
  if opt.load_model != '':
    # Resume weights (and optionally optimizer state / start epoch) from a checkpoint
    model, optimizer, start_epoch = load_model(
      model, opt.load_model, optimizer, opt.resume, opt.lr, opt.lr_step)

  # Pick the task-specific trainer (e.g. ctdet) and move the model to the chosen devices
  Trainer = train_factory[opt.task]
  trainer = Trainer(opt, model, optimizer)
  trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)

  print('Setting up data...')
  val_loader = torch.utils.data.DataLoader(
      Dataset(opt, 'val'),
      batch_size=1,
      shuffle=False,
      num_workers=1,
      pin_memory=True
  )

  # --test: run one pass over the validation set, evaluate, and exit
  if opt.test:
    _, preds = trainer.val(0, val_loader)
    val_loader.dataset.run_eval(preds, opt.save_dir)
    return

  train_loader = torch.utils.data.DataLoader(
      Dataset(opt, 'train'),
      batch_size=opt.batch_size,
      shuffle=True,
      num_workers=opt.num_workers,
      pin_memory=True,
      drop_last=True
  )

  print('Starting training...')
  best = 1e10
  for epoch in range(start_epoch + 1, opt.num_epochs + 1):
    mark = epoch if opt.save_all else 'last'
    # Train one epoch and log the averaged training metrics
    log_dict_train, _ = trainer.train(epoch, train_loader)
    logger.write('epoch: {} |'.format(epoch))
    for k, v in log_dict_train.items():
      logger.scalar_summary('train_{}'.format(k), v, epoch)
      logger.write('{} {:8f} | '.format(k, v))
    if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
      # Periodic validation: save a checkpoint, evaluate, and keep the best model by opt.metric
      save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)),
                 epoch, model, optimizer)
      with torch.no_grad():
        log_dict_val, preds = trainer.val(epoch, val_loader)
      for k, v in log_dict_val.items():
        logger.scalar_summary('val_{}'.format(k), v, epoch)
        logger.write('{} {:8f} | '.format(k, v))
      if log_dict_val[opt.metric] < best:
        best = log_dict_val[opt.metric]
        save_model(os.path.join(opt.save_dir, 'model_best.pth'),
                   epoch, model)
    else:
      save_model(os.path.join(opt.save_dir, 'model_last.pth'),
                 epoch, model, optimizer)
    logger.write('\n')
    if epoch in opt.lr_step:
      # Step learning-rate schedule: decay by 10x at each epoch listed in opt.lr_step
      save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
                 epoch, model, optimizer)
      lr = opt.lr * (0.1 ** (opt.lr_step.index(epoch) + 1))
      print('Drop LR to', lr)
      for param_group in optimizer.param_groups:
        param_group['lr'] = lr
  logger.close()

if __name__ == '__main__':
  opt = opts().parse()
  main(opt)
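To make the flow above concrete: update_dataset_info_and_set_heads builds opt.heads, which tells create_model which output branches to attach to the backbone. For 2D detection (the ctdet task) on COCO this works out to roughly the following; this is only a sketch based on the head design described in the paper, and the exact keys depend on options such as whether the offset head is enabled.

# Illustrative opt.heads for ctdet on COCO (80 classes)
heads = {
  'hm': 80,   # per-class center-point heatmap
  'wh': 2,    # object width and height regressed at the center
  'reg': 2,   # sub-pixel center offset
}
# model = create_model('dla_34', heads, head_conv=256)  # hypothetical call mirroring main.py

Training is then launched from src/ with something like python main.py ctdet --exp_id coco_dla --batch_size 32 --gpus 0; the exact flags vary by backbone and hardware, and the shell scripts under the repo's experiments/ directory give the commands the authors actually used.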