
import torch
from torch import Tensor

from .optimizer import Optimizer
from typing import List, Optional

class Adadelta(Optimizer):
    r"""Implements Adadelta algorithm.

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{input}      : \gamma \text{ (lr)}, \: \theta_0 \text{ (params)},
                \: f(\theta) \text{ (objective)}, \: \rho \text{ (decay)},
                \: \lambda \text{ (weight decay)}                                                \\
            &\textbf{initialize} :  v_0  \leftarrow 0 \: \text{ (square avg)},
                \: u_0 \leftarrow 0 \: \text{ (accumulate variables)}                            \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                         \\
            &\hspace{5mm}g_t           \leftarrow   \nabla_{\theta} f_t (\theta_{t-1})           \\
            &\hspace{5mm}\textbf{if} \: \lambda \neq 0                                           \\
            &\hspace{10mm} g_t \leftarrow g_t + \lambda  \theta_{t-1}                            \\
            &\hspace{5mm} v_t      \leftarrow v_{t-1} \rho + g^2_t (1 - \rho)                    \\
            &\hspace{5mm}\Delta x_t    \leftarrow   \frac{\sqrt{u_{t-1} +
                \epsilon }}{ \sqrt{v_t + \epsilon}  }g_t \hspace{21mm}                           \\
            &\hspace{5mm} u_t  \leftarrow   u_{t-1}  \rho +
                 \Delta x^2_t  (1 - \rho)                                                        \\
            &\hspace{5mm}\theta_t      \leftarrow   \theta_{t-1} - \gamma  \Delta x_t            \\
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
            &\bf{return} \:  \theta_t                                                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
       \end{aligned}

    For further details regarding the algorithm we refer to `ADADELTA: An Adaptive Learning Rate Method`_.

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        rho (float, optional): coefficient used for computing a running average
            of squared gradients (default: 0.9)
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-6)
        lr (float, optional): coefficient that scales delta before it is applied
            to the parameters (default: 1.0)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        foreach (bool, optional): whether the foreach (multi-tensor) implementation
            of the optimizer is used (default: None)
        maximize (bool, optional): maximize the params based on the objective, instead of
            minimizing (default: False)
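
    A minimal usage sketch (``model``, ``input``, ``target`` and ``loss_fn`` are
    placeholders assumed to be defined by the caller)::

        >>> optimizer = torch.optim.Adadelta(model.parameters(), lr=1.0, rho=0.9)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()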

    .. _ADADELTA\: An Adaptive Learning Rate Method:
        https://arxiv.org/abs/1212.5701
    """

    def __init__(self, params, lr=1.0, rho=0.9, eps=1e-6, weight_decay=0,
                 foreach: Optional[bool] = None, *, maximize: bool = False):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= rho <= 1.0:
            raise ValueError("Invalid rho value: {}".format(rho))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= weight_decay:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))

        defaults = dict(lr=lr, rho=rho, eps=eps, weight_decay=weight_decay,
                        maximize=maximize, foreach=foreach)
        super().__init__(params, defaults)

    def __setstate__(self, state):
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault('foreach', None)
            group.setdefault('maximize', False)

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Args:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
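
        Example (an illustrative sketch; ``model``, ``inputs``, ``targets`` and
        ``loss_fn`` are placeholders, not defined in this module):
            >>> def closure():
            ...     optimizer.zero_grad()
            ...     loss = loss_fn(model(inputs), targets)
            ...     loss.backward()
            ...     return loss
            >>> loss = optimizer.step(closure)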
"""
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad = []
            grads = []
            square_avgs = []
            acc_deltas = []
            lr, rho, eps, weight_decay, foreach, maximize = (group['lr'],
                                                             group['rho'],
                                                             group['eps'],
                                                             group['weight_decay'],
                                                             group['foreach'],
                                                             group['maximize'])

            for p in group['params']:
                if p.grad is None:
                    continue
                params_with_grad.append(p)
                if p.grad.is_sparse:
                    raise RuntimeError('Adadelta does not support sparse gradients')
                grads.append(p.grad)

                state = self.state[p]

                # Lazy state initialization
                if len(state) == 0:
                    state['step'] = 0
                    state['square_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    state['acc_delta'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                square_avgs.append(state['square_avg'])
                acc_deltas.append(state['acc_delta'])

                state['step'] += 1

            adadelta(params_with_grad,
                     grads,
                     square_avgs,
                     acc_deltas,
                     lr=lr,
                     rho=rho,
                     eps=eps,
                     weight_decay=weight_decay,
                     foreach=foreach,
                     maximize=maximize)

        return loss

def adadelta(params: List[Tensor],
             grads: List[Tensor],
             square_avgs: List[Tensor],
             acc_deltas: List[Tensor],
             # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
             # setting this as kwarg for now as functional API is compiled by torch/distributed/optim
             foreach: bool = None,
             *,
             lr: float,
             rho: float,
             eps: float,
             weight_decay: float,
             maximize: bool):
    r"""Functional API that performs Adadelta algorithm computation.

    See :class:`~torch.optim.Adadelta` for details.
    """

    if foreach is None:
        # Placeholder for more complex foreach logic to be added when value is not set
        foreach = False

    if foreach and torch.jit.is_scripting():
        raise RuntimeError('torch.jit.script not supported with foreach optimizers')

    if foreach and not torch.jit.is_scripting():
        func = _multi_tensor_adadelta
    else:
        func = _single_tensor_adadelta

    func(params,
         grads,
         square_avgs,
         acc_deltas,
         lr=lr,
         rho=rho,
         eps=eps,
         weight_decay=weight_decay,
         maximize=maximize)

def _single_tensor_adadelta(params: List[Tensor],
                            grads: List[Tensor],
                            square_avgs: List[Tensor],
                            acc_deltas: List[Tensor],
                            *,
                            lr: float,
                            rho: float,
                            eps: float,
                            weight_decay: float,
                            maximize: bool):

    for (param, grad, square_avg, acc_delta) in zip(params, grads, square_avgs, acc_deltas):
        grad = grad if not maximize else -grad

        if weight_decay != 0:
            grad = grad.add(param, alpha=weight_decay)

        if torch.is_complex(param):
            square_avg = torch.view_as_real(square_avg)
            acc_delta = torch.view_as_real(acc_delta)
            grad = torch.view_as_real(grad)

        # v_t = rho * v_{t-1} + (1 - rho) * g_t^2   (running average of squared gradients)
        square_avg.mul_(rho).addcmul_(grad, grad, value=1 - rho)
        std = square_avg.add(eps).sqrt_()
        # delta = sqrt(u_{t-1} + eps) / sqrt(v_t + eps) * g_t, then accumulate into u_t
        delta = acc_delta.add(eps).sqrt_().div_(std).mul_(grad)
        acc_delta.mul_(rho).addcmul_(delta, delta, value=1 - rho)

        if torch.is_complex(param):
            delta = torch.view_as_complex(delta)
        param.add_(delta, alpha=-lr)

def _multi_tensor_adadelta(params: List[Tensor],
                           grads: List[Tensor],
                           square_avgs: List[Tensor],
                           acc_deltas: List[Tensor],
                           *,
                           lr: float,
                           weight_decay: float,
                           rho: float,
                           eps: float,
                           maximize: bool):

    if len(params) == 0:
        return

    if maximize:
        grads = torch._foreach_neg(grads)

    if weight_decay != 0:
        grads = torch._foreach_add(grads, params, alpha=weight_decay)

    # Running average of squared gradients
    torch._foreach_mul_(square_avgs, rho)
    torch._foreach_addcmul_(square_avgs, grads, grads, value=1 - rho)

    std = torch._foreach_add(square_avgs, eps)
    torch._foreach_sqrt_(std)

    # delta = sqrt(acc_delta + eps) / sqrt(square_avg + eps) * grad
    deltas = torch._foreach_add(acc_deltas, eps)
    torch._foreach_sqrt_(deltas)
    torch._foreach_div_(deltas, std)
    torch._foreach_mul_(deltas, grads)

    # Accumulate squared deltas and apply the update
    torch._foreach_mul_(acc_deltas, rho)
    torch._foreach_addcmul_(acc_deltas, deltas, deltas, value=1 - rho)

    torch._foreach_add_(params, deltas, alpha=-lr)

