注意
点击此处下载完整的示例代码
02. 在 COCO、VID、DET、Youtube_bb 数据集上训练 SiamRPN¶
这是一个使用 Gluon CV 工具包进行的单目标跟踪教程,是一个循序渐进的示例。读者应该具备深度学习基础知识并熟悉 Gluon API。新用户可以先阅读 Gluon 60 分钟速成课程。您可以立即开始训练或深入探索。
立即开始训练¶
注意
您可以跳过本教程,因为训练脚本是完整独立的,可以直接启动。
下载 完整的 Python 脚本: train.py
下载 完整的 Python 脚本: test.py
示例训练命令
python train.py --ngpus 8 --epochs 50 --base-lr 0.005
示例测试命令
python test.py --model-path <model-path> --results-path <results-path>
请查阅模型动物园,以获取复现预训练模型的训练和测试命令。
网络结构¶
首先,让我们将必要的库导入 Python。
import mxnet as mx
import time
import numpy as np
from mxnet import gluon, nd, autograd
from mxnet.contrib import amp
import gluoncv
from gluoncv.utils import LRScheduler, LRSequential, split_and_load
from gluoncv.data.tracking_data.track import TrkDataset
from gluoncv.model_zoo import get_model
from gluoncv.loss import SiamRPNLoss
SiamRPN 是一种广泛采用的单目标跟踪方法。将模板帧和检测帧发送到孪生网络,通过 RPN 网络和互相关层获得锚点的得分图和坐标回归。
# number of GPUs to use
# NOTE(review): num_gpus is not referenced below — the demo pins ctx to the
# CPU; switch ctx to [mx.gpu(i) for i in range(num_gpus)] for real training.
num_gpus = 1
ctx = [mx.cpu(0)]
batch_size = 32 # adjust to 128 if memory is sufficient
epochs = 1
# Get the model siamrpn_alexnet with SiamRPN backbone.
# `bz` is presumably the train-time batch size the network is built for,
# and is_train=True selects the training graph — confirm in the model zoo docs.
net = get_model('siamrpn_alexnet_v2_otb15', bz=batch_size, is_train=True, ctx=ctx)
net.collect_params().reset_ctx(ctx)
print(net)
# We provide Single Object datasets in :class:`gluoncv.data`.
# For example, we can easily get the vid,det,coco dataset:
'''``python scripts/datasets/ilsvrc_det.py``
``python scripts/datasets/ilsvrc_vid.py``
``python scripts/datasets/coco_tracking.py``'''
# If you want to download the youtube_bb dataset, you can follow the instructions at the following `link <https://github.com/STVIR/pysot/tree/master/training_dataset/yt_bb>`:
# prepare dataset and dataloader
# Prepare the tracking dataset and its DataLoader.
train_dataset = TrkDataset(train_epoch=epochs)
print('Training images:', len(train_dataset))
workers = 0  # DataLoader worker processes; 0 loads batches in the main process
train_loader = gluon.data.DataLoader(train_dataset,
                                     batch_size=batch_size,
                                     last_batch='discard',  # drop the final short batch
                                     num_workers=workers)
def train_batch_fn(data, ctx):
    """Shard one mini-batch across the training contexts.

    ``data`` holds, in order: template images, search images, classification
    labels, localization targets, and localization weights.  Each entry is
    split along the batch axis so every context gets its own slice.

    Returns the five sharded fields as a tuple, in the same order.
    """
    def _shard(field):
        # One chunk per context, split along axis 0 (the batch axis).
        return split_and_load(field, ctx_list=ctx, batch_axis=0)

    return (_shard(data[0]),   # template
            _shard(data[1]),   # search
            _shard(data[2]),   # label_cls
            _shard(data[3]),   # label_loc
            _shard(data[4]))   # label_loc_weight
输出
SiamRPN(
(backbone): AlexNetLegacy(
(features): HybridSequential(
(0): Conv2D(None -> 96, kernel_size=(11, 11), stride=(2, 2))
(1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=None)
(2): MaxPool2D(size=(3, 3), stride=(2, 2), padding=(0, 0), ceil_mode=False, global_pool=False, pool_type=max, layout=NCHW)
(3): Activation(relu)
(4): Conv2D(None -> 256, kernel_size=(5, 5), stride=(1, 1))
(5): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=None)
(6): MaxPool2D(size=(3, 3), stride=(2, 2), padding=(0, 0), ceil_mode=False, global_pool=False, pool_type=max, layout=NCHW)
(7): Activation(relu)
(8): Conv2D(None -> 384, kernel_size=(3, 3), stride=(1, 1))
(9): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=None)
(10): Activation(relu)
(11): Conv2D(None -> 384, kernel_size=(3, 3), stride=(1, 1))
(12): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=None)
(13): Activation(relu)
(14): Conv2D(None -> 256, kernel_size=(3, 3), stride=(1, 1))
(15): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=None)
)
)
(rpn_head): DepthwiseRPN(
(cls): DepthwiseXCorr(
(conv_kernel): HybridSequential(
(0): Conv2D(None -> 256, kernel_size=(3, 3), stride=(1, 1), bias=False)
(1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=None)
(2): Activation(relu)
)
(conv_search): HybridSequential(
(0): Conv2D(None -> 256, kernel_size=(3, 3), stride=(1, 1), bias=False)
(1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=None)
(2): Activation(relu)
)
(head): HybridSequential(
(0): Conv2D(None -> 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=None)
(2): Activation(relu)
(3): Conv2D(None -> 10, kernel_size=(1, 1), stride=(1, 1))
)
)
(loc): DepthwiseXCorr(
(conv_kernel): HybridSequential(
(0): Conv2D(None -> 256, kernel_size=(3, 3), stride=(1, 1), bias=False)
(1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=None)
(2): Activation(relu)
)
(conv_search): HybridSequential(
(0): Conv2D(None -> 256, kernel_size=(3, 3), stride=(1, 1), bias=False)
(1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=None)
(2): Activation(relu)
)
(head): HybridSequential(
(0): Conv2D(None -> 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=None)
(2): Activation(relu)
(3): Conv2D(None -> 20, kernel_size=(1, 1), stride=(1, 1))
)
)
)
)
Training images: 600000
训练细节¶
训练损失
我们使用 Softmax 交叉熵损失和 L2 损失来训练 SiamRPN。
学习率和调度
# Step-mode LR schedule with a single step boundary at epoch 0, base LR 0.005.
# NOTE(review): `power=0.9` is presumably only consulted by the 'poly' mode —
# confirm against the gluoncv LRScheduler documentation.
lr_scheduler = LRScheduler(mode='step', base_lr=0.005, step_epoch=[0],
                           nepochs=epochs, iters_per_epoch=len(train_loader), power=0.9)
用于多 GPU 训练的数据并行,仅在演示中使用 CPU
随机梯度下降
# Plain SGD with momentum drives the update; the LR comes from the scheduler.
optimizer = 'sgd'
# Set parameters.
# NOTE(review): both 'lr_scheduler' and 'learning_rate' are passed; with a
# scheduler attached the scheduler should drive the LR — confirm how the MXNet
# optimizer resolves the two before relying on 'learning_rate' here.
optimizer_params = {'lr_scheduler': lr_scheduler,
                    'wd': 1e-4,        # weight decay
                    'momentum': 0.9,
                    'learning_rate': 0.005}
trainer = gluon.Trainer(net.collect_params(), optimizer, optimizer_params)
# Relative weights of the classification and localization loss terms.
cls_weight = 1.0
loc_weight = 1.2
训练¶
所有准备工作完成后,我们终于可以开始训练了!以下是脚本。
注意
在您的实验中,我们建议将 epochs 设置为 50。本教程将跳过训练阶段。
# NOTE(review): the original loop called `criterion(...)` without ever
# defining it — a guaranteed NameError once epochs > 0.  Instantiate the
# SiamRPN loss (softmax cross-entropy for classification + weighted L2 for
# localization; imported at the top of the file) so the script actually runs.
criterion = SiamRPNLoss(batch_size)

epochs = 0  # the tutorial skips training; set e.g. 50 for a real experiment
for epoch in range(epochs):
    # Running sums for logging, averaged over devices and batches below.
    loss_total_val = 0
    loss_loc_val = 0
    loss_cls_val = 0
    batch_time = time.time()
    for i, data in enumerate(train_loader):
        template, search, label_cls, label_loc, label_loc_weight = \
            train_batch_fn(data, ctx)
        cls_losses = []
        loc_losses = []
        total_losses = []
        with autograd.record():
            for j in range(len(ctx)):
                cls, loc = net(template[j], search[j])
                # Gather positive/negative anchor indices on the CPU via numpy.
                label_cls_temp = label_cls[j].reshape(-1).asnumpy()
                pos_index = np.argwhere(label_cls_temp == 1).reshape(-1)
                neg_index = np.argwhere(label_cls_temp == 0).reshape(-1)
                # Move the (possibly empty) index arrays onto the device.
                if len(pos_index):
                    pos_index = nd.array(pos_index, ctx=ctx[j])
                else:
                    pos_index = nd.array(np.array([]), ctx=ctx[j])
                if len(neg_index):
                    neg_index = nd.array(neg_index, ctx=ctx[j])
                else:
                    neg_index = nd.array(np.array([]), ctx=ctx[j])
                cls_loss, loc_loss = criterion(cls, loc, label_cls[j], pos_index,
                                               neg_index, label_loc[j],
                                               label_loc_weight[j])
                total_loss = cls_weight * cls_loss + loc_weight * loc_loss
                cls_losses.append(cls_loss)
                loc_losses.append(loc_loss)
                total_losses.append(total_loss)
        autograd.backward(total_losses)
        trainer.step(batch_size)
        loss_total_val += sum(l.mean().asscalar() for l in total_losses) / len(total_losses)
        loss_loc_val += sum(l.mean().asscalar() for l in loc_losses) / len(loc_losses)
        loss_cls_val += sum(l.mean().asscalar() for l in cls_losses) / len(cls_losses)
        # NOTE(review): the original format string used a backslash
        # line-continuation, which embedded a run of spaces in the printed
        # message; implicit string concatenation gives one clean line.
        print('Epoch %d iteration %04d/%04d: loc loss %.3f, cls loss %.3f, '
              'training loss %.3f, batch time %.3f' %
              (epoch, i, len(train_loader), loss_loc_val / (i + 1),
               loss_cls_val / (i + 1), loss_total_val / (i + 1),
               time.time() - batch_time))
        batch_time = time.time()
    # Block until all asynchronous MXNet ops for this epoch have finished.
    mx.nd.waitall()
您可以立即开始训练。