from torch.optim.optimizer import Optimizer

It has been proposed in Incorporating Nesterov Momentum into Adam__.

:param params: (iterable): iterable of parameters to optimize or dicts defining
parameter groups
:param lr: (float, optional): learning rate (default: 2e-3)
:param betas: (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
:param eps: (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
:param weight_decay: (float, optional): weight decay (L2 penalty) (default: 0)
:param schedule_decay: (float, optional): momentum schedule decay (default: 4e-3)

__ http://cs229.stanford.edu/proj2015/054_report.pdf
__ http://www.cs.toronto.edu/~fritz/absps/momentum.pdf

"""

def __init__(self, params, lr=2e-3, betas=(0.9, 0.999), eps=1e-8,
             weight_decay=0, schedule_decay=4e-3):
    """Set up the optimizer's hyperparameters and register parameter groups.

    :param params: iterable of parameters to optimize or dicts defining
        parameter groups
    :param lr: learning rate (default: 2e-3)
    :param betas: coefficients used for computing running averages of the
        gradient and its square (default: (0.9, 0.999))
    :param eps: term added to the denominator to improve numerical
        stability (default: 1e-8)
    :param weight_decay: weight decay / L2 penalty (default: 0)
    :param schedule_decay: momentum schedule decay (default: 4e-3)
    :raises ValueError: if any hyperparameter is outside its valid range
    """
    # Fail fast on invalid hyperparameters, matching the convention of
    # the built-in torch.optim optimizers.
    if lr < 0.0:
        raise ValueError("Invalid learning rate: {}".format(lr))
    if eps < 0.0:
        raise ValueError("Invalid epsilon value: {}".format(eps))
    if not 0.0 <= betas[0] < 1.0:
        raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
    if not 0.0 <= betas[1] < 1.0:
        raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
    if weight_decay < 0.0:
        raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
    defaults = dict(lr=lr, betas=betas, eps=eps,
                    weight_decay=weight_decay, schedule_decay=schedule_decay)
    # The original pasted code built `defaults` but never handed it to the
    # base class, leaving self.param_groups / self.state uninitialized.
    # Registering with Optimizer is mandatory for step() to work at all.
    super().__init__(params, defaults)

[docs]    def step(self, closure=None):
"""Performs a single optimization step.

:param closure: (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
# NOTE(review): this whole method appears to be a Sphinx-HTML paste with
# indentation stripped — as written it is not valid Python. The comments
# below flag each spot where text was evidently lost; confirm against the
# original source before relying on this file.
loss = None
# Re-evaluate the model to obtain a fresh loss if a closure was supplied.
if closure is not None:
loss = closure()

for group in self.param_groups:
for p in group['params']:
# NOTE(review): a bare `continue` makes everything below it in the
# loop unreachable; the original almost certainly read
# `if p.grad is None: continue` — the condition was lost in the paste.
continue
# Per-parameter optimizer state (step count, moment estimates, schedule).
state = self.state[p]

# State initialization
# NOTE(review): this init block looks truncated — `state['exp_avg']`
# and `state['exp_avg_sq']` are read a few lines below but are never
# created here (would raise KeyError on first use). The original
# presumably zero-initialized both moment buffers as well.
if len(state) == 0:
state['step'] = 0
state['m_schedule'] = 1.

# Warming momentum schedule
m_schedule = state['m_schedule']
schedule_decay = group['schedule_decay']
# First (m_t) and second (v_t) moment running averages.
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
eps = group['eps']

state['step'] += 1

# NOTE(review): the body of this branch is missing — weight decay is
# conventionally applied here as an L2 term folded into the gradient;
# the momentum-schedule lines below should not be nested under it.
if group['weight_decay'] != 0:

# Nadam momentum schedule: mu_t = beta1 * (1 - 0.5 * 0.96^(t * psi)),
# computed for the current step t and the lookahead step t+1.
momentum_cache_t = beta1 * \
(1. - 0.5 * (0.96 ** (state['step'] * schedule_decay)))
momentum_cache_t_1 = beta1 * \
(1. - 0.5 *
(0.96 ** ((state['step'] + 1) * schedule_decay)))
# Running products of the schedule: prod(mu_i) up to t and up to t+1,
# used for Nesterov-style bias correction of the first moment.
m_schedule_new = m_schedule * momentum_cache_t
m_schedule_next = m_schedule * momentum_cache_t * momentum_cache_t_1
state['m_schedule'] = m_schedule_new

# Decay the first and second moment running average coefficient
# NOTE(review): the method is cut off here — the moment updates, the
# denominator, and the parameter update (and `return loss`) are missing
# from this chunk.
bias_correction2 = 1 - beta2 ** state['step']