# mypy: allow-untyped-decorators
# mypy: allow-untyped-defs
r"""Implementation for the NAdam algorithm."""
from typing import cast, List, Optional, Tuple, Union

import torch
from torch import Tensor

from .optimizer import (
    _capturable_doc,
    _default_to_fused_or_foreach,
    _differentiable_doc,
    _disable_dynamo_if_unsupported,
    _foreach_doc,
    _get_capturable_supported_devices,
    _get_scalar_dtype,
    _get_value,
    _maximize_doc,
    _params_doc,
    _stack_if_compiling,
    _use_grad_for_differentiable,
    _view_as_real,
    Optimizer,
    ParamsT,
)

__all__ = ["NAdam", "nadam"]

class NAdam(Optimizer):  # noqa: D101
    def __init__(
        self,
        params: ParamsT,
        lr: Union[float, Tensor] = 2e-3,
        betas: Tuple[float, float] = (0.9, 0.999),
        eps: float = 1e-8,
        weight_decay: float = 0,
        momentum_decay: float = 4e-3,
        decoupled_weight_decay: bool = False,
        *,
        foreach: Optional[bool] = None,
        maximize: bool = False,
        capturable: bool = False,
        differentiable: bool = False,
    ):  # noqa: D107
        if isinstance(lr, Tensor) and lr.numel() != 1:
            raise ValueError("Tensor lr must be 1-element")
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps}")
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")
        if not 0.0 <= weight_decay:
            raise ValueError(f"Invalid weight_decay value: {weight_decay}")
        if not 0.0 <= momentum_decay:
            raise ValueError(f"Invalid momentum_decay value: {momentum_decay}")
        defaults = dict(
            lr=lr,
            betas=betas,
            eps=eps,
            weight_decay=weight_decay,
            momentum_decay=momentum_decay,
            decoupled_weight_decay=decoupled_weight_decay,
            maximize=maximize,
            foreach=foreach,
            capturable=capturable,
            differentiable=differentiable,
        )
        super().__init__(params, defaults)

    def __setstate__(self, state):  # noqa: D105
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault("maximize", False)
            group.setdefault("foreach", None)
            group.setdefault("capturable", False)
            group.setdefault("differentiable", False)
            group.setdefault("decoupled_weight_decay", False)
            for p in group["params"]:
                p_state = self.state.get(p, [])
                if len(p_state) != 0:
                    if not torch.is_tensor(p_state["step"]):
                        step_val = float(p_state["step"])
                        p_state["step"] = (
                            torch.tensor(
                                step_val, dtype=_get_scalar_dtype(), device=p.device
                            )
                            if group["capturable"]
                            else torch.tensor(step_val, dtype=_get_scalar_dtype())
                        )
                    if not torch.is_tensor(p_state["mu_product"]):
                        mu_prod_val = p_state["mu_product"]
                        p_state["mu_product"] = (
                            torch.tensor(
                                mu_prod_val, dtype=_get_scalar_dtype(), device=p.device
                            )
                            if group["capturable"]
                            else torch.tensor(mu_prod_val, dtype=_get_scalar_dtype())
                        )

    def _init_group(
        self,
        group,
        params_with_grad,
        grads,
        exp_avgs,
        exp_avg_sqs,
        mu_products,
        state_steps,
    ):
        has_complex = False
        for p in group["params"]:
            if p.grad is not None:
                has_complex |= torch.is_complex(p)
                params_with_grad.append(p)
                if p.grad.is_sparse:
                    raise RuntimeError("NAdam does not support sparse gradients")
                grads.append(p.grad)

                state = self.state[p]
                # Lazy state initialization
                if len(state) == 0:
                    # note(crcrpar): [special device hosting for step]
                    # Deliberately host `step` and `mu_product` on CPU if capturable is False.
                    # This is because kernel launches are costly on CUDA and XLA.
                    state["step"] = (
                        torch.zeros((), dtype=_get_scalar_dtype(), device=p.device)
                        if group["capturable"]
                        else torch.tensor(0.0, dtype=_get_scalar_dtype())
                    )
                    state["mu_product"] = (
                        torch.ones((), dtype=_get_scalar_dtype(), device=p.device)
                        if group["capturable"]
                        else torch.tensor(1.0, dtype=_get_scalar_dtype())
                    )
                    # Exponential moving average of gradient values
                    state["exp_avg"] = torch.zeros_like(
                        p, memory_format=torch.preserve_format
                    )
                    # Exponential moving average of squared gradient values
                    state["exp_avg_sq"] = torch.zeros_like(
                        p, memory_format=torch.preserve_format
                    )

                exp_avgs.append(state["exp_avg"])
                exp_avg_sqs.append(state["exp_avg_sq"])
                mu_products.append(state["mu_product"])
                state_steps.append(state["step"])
        return has_complex
    @_use_grad_for_differentiable
    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        self._cuda_graph_capture_health_check()

        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad: List[Tensor] = []
            grads: List[Tensor] = []
            exp_avgs: List[Tensor] = []
            exp_avg_sqs: List[Tensor] = []
            mu_products: List[Tensor] = []
            state_steps: List[Tensor] = []
            beta1, beta2 = cast(Tuple[float, float], group["betas"])

            has_complex = self._init_group(
                group,
                params_with_grad,
                grads,
                exp_avgs,
                exp_avg_sqs,
                mu_products,
                state_steps,
            )

            nadam(
                params_with_grad,
                grads,
                exp_avgs,
                exp_avg_sqs,
                mu_products,
                state_steps,
                beta1=beta1,
                beta2=beta2,
                lr=group["lr"],
                weight_decay=group["weight_decay"],
                momentum_decay=group["momentum_decay"],
                eps=group["eps"],
                maximize=group["maximize"],
                decoupled_weight_decay=group["decoupled_weight_decay"],
                foreach=group["foreach"],
                capturable=group["capturable"],
                differentiable=group["differentiable"],
                has_complex=has_complex,
            )

        return loss

NAdam.__doc__ = (
    r"""Implements NAdam algorithm.

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{input}      : \gamma_t \text{ (lr)}, \: \beta_1,\beta_2 \text{ (betas)},
                \: \theta_0 \text{ (params)}, \: f(\theta) \text{ (objective)}                   \\
            &\hspace{13mm} \: \lambda \text{ (weight decay)}, \:\psi \text{ (momentum decay)}    \\
            &\hspace{13mm} \: \textit{decoupled\_weight\_decay}, \:\textit{maximize}             \\
            &\textbf{initialize} :  m_0 \leftarrow 0 \text{ ( first moment)},
                v_0 \leftarrow 0 \text{ ( second moment)}                                        \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                         \\
            &\hspace{5mm}\textbf{if} \: \textit{maximize}:                                       \\
            &\hspace{10mm}g_t           \leftarrow   -\nabla_{\theta} f_t (\theta_{t-1})         \\
            &\hspace{5mm}\textbf{else}                                                           \\
            &\hspace{10mm}g_t           \leftarrow   \nabla_{\theta} f_t (\theta_{t-1})          \\
            &\hspace{5mm} \theta_t \leftarrow \theta_{t-1}                                       \\
            &\hspace{5mm} \textbf{if} \: \lambda \neq 0                                          \\
            &\hspace{10mm}\textbf{if} \: \textit{decoupled\_weight\_decay}                       \\
            &\hspace{15mm} \theta_t \leftarrow \theta_{t-1} - \gamma \lambda \theta_{t-1}        \\
            &\hspace{10mm}\textbf{else}                                                          \\
            &\hspace{15mm} g_t \leftarrow g_t + \lambda \theta_{t-1}                             \\
            &\hspace{5mm} \mu_t \leftarrow \beta_1 \big(1 - \frac{1}{2}  0.96^{t \psi} \big)     \\
            &\hspace{5mm} \mu_{t+1} \leftarrow \beta_1 \big(1 - \frac{1}{2} 0.96^{(t+1)\psi}\big)\\
            &\hspace{5mm}m_t           \leftarrow   \beta_1 m_{t-1} + (1 - \beta_1) g_t          \\
            &\hspace{5mm}v_t           \leftarrow   \beta_2 v_{t-1} + (1-\beta_2) g^2_t          \\
            &\hspace{5mm}\widehat{m_t} \leftarrow \mu_{t+1} m_t/(1-\prod_{i=1}^{t+1}\mu_i)\\[-1.ex]
            & \hspace{11mm} + (1-\mu_t) g_t /(1-\prod_{i=1}^{t} \mu_{i})                         \\
            &\hspace{5mm}\widehat{v_t} \leftarrow   v_t/\big(1-\beta_2^t \big)                   \\
            &\hspace{5mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}/
                \big(\sqrt{\widehat{v_t}} + \epsilon \big)                                       \\
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
            &\bf{return} \:  \theta_t                                                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
       \end{aligned}

    For further details regarding the algorithm we refer to `Incorporating Nesterov Momentum into Adam`_.
    """
    + rf"""
    Args:
        {_params_doc}
        lr (float, Tensor, optional): learning rate (default: 2e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        momentum_decay (float, optional): momentum decay (default: 4e-3)
        decoupled_weight_decay (bool, optional): whether to use decoupled weight
            decay as in AdamW to obtain NAdamW (default: False)
        {_foreach_doc}
        {_maximize_doc}
        {_capturable_doc}
        {_differentiable_doc}

    .. _Incorporating Nesterov Momentum into Adam:
        https://openreview.net/forum?id=OM0jvwB8jIp57ZJjtNEZ
    .. _Decoupled Weight Decay Regularization:
        https://arxiv.org/abs/1711.05101

    """
)

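# Illustrative usage sketch: a minimal, assumed example of driving the NAdam
# class documented above from an ordinary training step. The toy model and
# random data below are hypothetical placeholders, not part of this module.
def _example_nadam_usage():
    import torch.nn as nn

    model = nn.Linear(10, 1)  # hypothetical toy model
    optimizer = NAdam(model.parameters(), lr=2e-3, betas=(0.9, 0.999))

    inputs, targets = torch.randn(8, 10), torch.randn(8, 1)
    loss = nn.functional.mse_loss(model(inputs), targets)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()  # dispatches to the functional nadam() defined below
    return loss.item()
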
def _single_tensor_nadam(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    mu_products: List[Tensor],
    state_steps: List[Tensor],
    *,
    beta1: float,
    beta2: float,
    lr: float,
    weight_decay: float,
    momentum_decay: float,
    eps: float,
    decoupled_weight_decay: bool,
    maximize: bool,
    capturable: bool,
    differentiable: bool,
    has_complex: bool,
):
    for i, param in enumerate(params):
        grad = grads[i] if not maximize else -grads[i]
        exp_avg = exp_avgs[i]
        exp_avg_sq = exp_avg_sqs[i]
        mu_product = mu_products[i]
        step_t = state_steps[i]

        if torch.is_complex(param):
            param = torch.view_as_real(param)
            grad = torch.view_as_real(grad)
            exp_avg = torch.view_as_real(exp_avg)
            exp_avg_sq = torch.view_as_real(exp_avg_sq)

        # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
        if not torch.compiler.is_compiling() and capturable:
            capturable_supported_devices = _get_capturable_supported_devices()
            assert (
                param.device.type == mu_product.device.type == step_t.device.type
                and param.device.type in capturable_supported_devices
            ), (
                f"If capturable=True, params, mu_products and state_steps must be "
                f"on supported devices: {capturable_supported_devices}."
            )

        # update step
        step_t += 1

        if capturable:
            step = step_t
        else:
            step = _get_value(step_t)

        bias_correction2 = 1 - beta2**step

        if weight_decay != 0:
            if decoupled_weight_decay:
                # Perform step weight decay
                param.mul_(1 - lr * weight_decay)
            else:
                grad = grad.add(param, alpha=weight_decay)

        # calculate the momentum cache \mu^{t} and \mu^{t+1}
        mu = beta1 * (1.0 - 0.5 * (0.96 ** (step * momentum_decay)))
        mu_next = beta1 * (1.0 - 0.5 * (0.96 ** ((step + 1) * momentum_decay)))

        # update mu_product
        mu_product *= mu

        # decay the first and second moment running average coefficient
        exp_avg.lerp_(grad, 1 - beta1)
        exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
        denom = exp_avg_sq.div(bias_correction2).sqrt()

        if differentiable or capturable:
            denom = denom.add(eps)
            # Make autograd track the operations
            # by updating the grad and exp_avg directly and not using the
            # scalar "value" argument of addcdiv.
            mu_product_next = mu_product * mu_next
            grad = grad * (-lr * (1.0 - mu) / (1.0 - mu_product))
            exp_avg = exp_avg * (-lr * mu_next / (1.0 - mu_product_next))
            param.addcdiv_(grad, denom)
            param.addcdiv_(exp_avg, denom)
        else:
            mu_product_next = _get_value(mu_product) * mu_next
            denom.add_(eps)
            param.addcdiv_(
                grad, denom, value=(-lr * (1.0 - mu) / (1.0 - _get_value(mu_product)))
            )
            param.addcdiv_(
                exp_avg, denom, value=(-lr * mu_next) / (1.0 - mu_product_next)
            )

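# A plain-Python sketch of the per-parameter update that _single_tensor_nadam
# above performs on its default (non-capturable, non-differentiable) path,
# assuming dense real tensors and no weight decay. It only restates the
# Nesterov-style bias correction in one place and is not used by the optimizer.
def _nadam_update_sketch(
    param: Tensor,
    grad: Tensor,
    exp_avg: Tensor,
    exp_avg_sq: Tensor,
    mu_product: float,
    step: int,
    *,
    beta1: float = 0.9,
    beta2: float = 0.999,
    lr: float = 2e-3,
    momentum_decay: float = 4e-3,
    eps: float = 1e-8,
) -> float:
    # momentum cache for the current step and the next step
    mu = beta1 * (1.0 - 0.5 * 0.96 ** (step * momentum_decay))
    mu_next = beta1 * (1.0 - 0.5 * 0.96 ** ((step + 1) * momentum_decay))
    mu_product *= mu

    # first and second moment estimates
    exp_avg.lerp_(grad, 1 - beta1)
    exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)

    # bias-corrected denominator and the two-part Nesterov update
    denom = exp_avg_sq.div(1 - beta2**step).sqrt().add_(eps)
    param.addcdiv_(grad, denom, value=-lr * (1.0 - mu) / (1.0 - mu_product))
    param.addcdiv_(exp_avg, denom, value=-lr * mu_next / (1.0 - mu_product * mu_next))
    return mu_product  # the caller carries this product into the next step
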
def _multi_tensor_nadam(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    mu_products: List[Tensor],
    state_steps: List[Tensor],
    *,
    beta1: float,
    beta2: float,
    lr: float,
    weight_decay: float,
    momentum_decay: float,
    eps: float,
    decoupled_weight_decay: bool,
    maximize: bool,
    capturable: bool,
    differentiable: bool,
    has_complex: bool,
):
    if len(params) == 0:
        return

    assert not differentiable, "_foreach ops don't support autograd"

    # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
    if not torch.compiler.is_compiling() and capturable:
        capturable_supported_devices = _get_capturable_supported_devices(
            supports_xla=False
        )
        assert all(
            p.device.type == mp.device.type == step.device.type
            and p.device.type in capturable_supported_devices
            for p, mp, step in zip(params, mu_products, state_steps)
        ), f"If capturable=True, params, mu_products, and state_steps must be on supported devices: {capturable_supported_devices}."

    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, exp_avgs, exp_avg_sqs, mu_products, state_steps]  # type: ignore[list-item]
    )
    for (
        grouped_params_,
        grouped_grads_,
        grouped_exp_avgs_,
        grouped_exp_avg_sqs_,
        grouped_mu_products_,
        grouped_state_steps_,
    ), _ in grouped_tensors.values():
        grouped_params = cast(List[Tensor], grouped_params_)
        grouped_grads = cast(List[Tensor], grouped_grads_)
        grouped_exp_avgs = cast(List[Tensor], grouped_exp_avgs_)
        grouped_exp_avg_sqs = cast(List[Tensor], grouped_exp_avg_sqs_)
        grouped_mu_products = cast(List[Tensor], grouped_mu_products_)
        grouped_state_steps = cast(List[Tensor], grouped_state_steps_)

        # handle complex
        if has_complex:
            _view_as_real(
                grouped_params, grouped_grads, grouped_exp_avgs, grouped_exp_avg_sqs
            )

        if maximize:
            grouped_grads = torch._foreach_neg(grouped_grads)  # type: ignore[assignment]

        # Update steps
        # If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over
        # and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just
        # wrapped it once now. The alpha is required to assure we go to the right overload.
        if not torch.compiler.is_compiling() and grouped_state_steps[0].is_cpu:
            torch._foreach_add_(
                grouped_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0
            )
        else:
            torch._foreach_add_(grouped_state_steps, 1)

        if weight_decay != 0:
            if decoupled_weight_decay:
                # Perform step weight decay
                torch._foreach_mul_(grouped_params, 1 - lr * weight_decay)
            else:
                # Re-use the intermediate memory (grouped_grads) already allocated for maximize
                if maximize:
                    torch._foreach_add_(
                        grouped_grads, grouped_params, alpha=weight_decay
                    )
                else:
                    grouped_grads = torch._foreach_add(  # type: ignore[assignment]
                        grouped_grads, grouped_params, alpha=weight_decay
                    )

        # Decay the first and second moment running average coefficient
        torch._foreach_lerp_(grouped_exp_avgs, grouped_grads, 1 - beta1)

        torch._foreach_mul_(grouped_exp_avg_sqs, beta2)
        torch._foreach_addcmul_(
            grouped_exp_avg_sqs, grouped_grads, grouped_grads, 1 - beta2
        )

        exp_avg_sq_sqrt = torch._foreach_sqrt(grouped_exp_avg_sqs)

        bias_correction_sqrt: Union[Tuple[Tensor, ...], List[Tensor]]
        mus: Union[Tuple[Tensor, ...], List[Tensor]]
        mu_nexts: Union[Tuple[Tensor, ...], List[Tensor]]
        if capturable:
            # mus will be beta1 * (1 - 0.5 * 0.96 ** (step * momentum_decay))
            exponent = torch._foreach_mul(grouped_state_steps, momentum_decay)
            mus = torch._foreach_pow(0.96, exponent)
            torch._foreach_mul_(mus, -0.5)
            torch._foreach_add_(mus, 1.0)
            torch._foreach_mul_(mus, beta1)

            # mu_nexts will be beta1 * (1 - 0.5 * 0.96 ** ((step + 1) * momentum_decay))
            torch._foreach_add_(exponent, momentum_decay)
            mu_nexts = torch._foreach_pow(0.96, exponent)
            torch._foreach_mul_(mu_nexts, -0.5)
            torch._foreach_add_(mu_nexts, 1.0)
            torch._foreach_mul_(mu_nexts, beta1)

            # save peak memory as we don't need exponent anymore
            del exponent

            bias_correction_sqrt = torch._foreach_pow(beta2, grouped_state_steps)
            # foreach_sub doesn't allow a scalar as the first arg
            torch._foreach_sub_(bias_correction_sqrt, 1.0)
            torch._foreach_neg_(bias_correction_sqrt)
            torch._foreach_sqrt_(bias_correction_sqrt)
        else:
            bias_correction_sqrt = [
                (1 - beta2 ** _get_value(step)) ** 0.5 for step in grouped_state_steps
            ]
            mus = [
                beta1 * (1.0 - 0.5 * (0.96 ** (_get_value(step) * momentum_decay)))
                for step in grouped_state_steps
            ]
            mu_nexts = [
                beta1
                * (1.0 - 0.5 * (0.96 ** ((_get_value(step) + 1) * momentum_decay)))
                for step in grouped_state_steps
            ]

        # update mu_products
        torch._foreach_mul_(grouped_mu_products, mus)

        torch._foreach_div_(exp_avg_sq_sqrt, bias_correction_sqrt)
        torch._foreach_add_(exp_avg_sq_sqrt, eps)

        # explicitly delete bias_correction refs to save memory
        del bias_correction_sqrt

        if capturable:
            # Build up the step_size multiplier for grad, reusing mus' memory
            torch._foreach_sub_(mus, 1.0)
            torch._foreach_mul_(mus, lr)
            # foreach_sub doesn't allow a scalar as the first arg
            denom = torch._foreach_sub(grouped_mu_products, 1.0)
            torch._foreach_neg_(denom)
            torch._foreach_div_(mus, denom)
            # - lr * (1 - mu) / (1 - mu_product)
            step_size_grads = mus
            # explicitly delete denom to save memory
            del denom

            # Build up the step_size multiplier for exp_avg, reusing mu_nexts' memory
            denom = torch._foreach_mul(grouped_mu_products, mu_nexts)
            torch._foreach_mul_(mu_nexts, lr)
            # foreach_sub doesn't allow a scalar as the first arg, but it's okay because
            # we need a negative here anyway
            torch._foreach_sub_(denom, 1.0)
            torch._foreach_div_(mu_nexts, denom)
            # - lr * mu_next / (1 - mu_product * mu_next)
            step_size_expavg = mu_nexts
            # explicitly delete denom to save memory
            del denom

            # we cannot inplace into step_size_grads because it is a list of ScalarTensors
            # and mul'ing with grouped_grads will result in a list of bigger Tensors
            numerator = torch._foreach_mul(step_size_grads, grouped_grads)
            torch._foreach_addcmul_(numerator, step_size_expavg, grouped_exp_avgs)

            # finally, update params
            torch._foreach_addcdiv_(grouped_params, numerator, exp_avg_sq_sqrt)
        else:
            step_size_grads = _stack_if_compiling(
                [
                    (_get_value(lr) * (1.0 - mu) / (1.0 - _get_value(mu_product))) * -1
                    for mu_product, mu in zip(grouped_mu_products, mus)
                ]
            )
            step_size_expavg = _stack_if_compiling(
                [
                    (
                        _get_value(lr)
                        * mu_next
                        / (1.0 - _get_value(mu_product) * mu_next)
                    )
                    * -1
                    for mu_product, mu_next in zip(grouped_mu_products, mu_nexts)
                ]
            )

            torch._foreach_addcdiv_(
                grouped_params, grouped_grads, exp_avg_sq_sqrt, step_size_grads  # type: ignore[arg-type]
            )
            torch._foreach_addcdiv_(
                grouped_params, grouped_exp_avgs, exp_avg_sq_sqrt, step_size_expavg  # type: ignore[arg-type]
            )


@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_nadam)
def nadam(
    params: List[Tensor],
    grads: List[Tensor],
    exp_avgs: List[Tensor],
    exp_avg_sqs: List[Tensor],
    mu_products: List[Tensor],
    state_steps: List[Tensor],
    # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
    # setting this as kwarg for now as functional API is compiled by torch/distributed/optim
    decoupled_weight_decay: bool = False,
    foreach: Optional[bool] = None,
    capturable: bool = False,
    differentiable: bool = False,
    has_complex: bool = False,
    maximize: bool = False,
    *,
    beta1: float,
    beta2: float,
    lr: float,
    weight_decay: float,
    momentum_decay: float,
    eps: float,
):
    r"""Functional API that performs NAdam algorithm computation.

    See :class:`~torch.optim.NAdam` for details.
    """
    if not all(isinstance(t, torch.Tensor) for t in state_steps):
        raise RuntimeError(
            "API has changed, `state_steps` argument must contain a list of singleton tensors"
        )

    if not all(isinstance(t, torch.Tensor) for t in mu_products):
        raise RuntimeError(
            "API has changed, `mu_products` argument must contain a list of singleton tensors"
        )

    if foreach is None:
        _, foreach = _default_to_fused_or_foreach(
            params, differentiable, use_fused=False
        )

    if foreach and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with foreach optimizers")

    if foreach and not torch.jit.is_scripting():
        func = _multi_tensor_nadam
    else:
        func = _single_tensor_nadam

    func(
        params,
        grads,
        exp_avgs,
        exp_avg_sqs,
        mu_products,
        state_steps,
        beta1=beta1,
        beta2=beta2,
        lr=lr,
        weight_decay=weight_decay,
        momentum_decay=momentum_decay,
        maximize=maximize,
        decoupled_weight_decay=decoupled_weight_decay,
        eps=eps,
        capturable=capturable,
        differentiable=differentiable,
        has_complex=has_complex,
    )
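
# A minimal sketch of calling the functional nadam() entry point above directly
# with manually managed state, per its stated contract: `state_steps` and
# `mu_products` must be lists of singleton tensors. The concrete shapes and
# hyperparameter values here are hypothetical and chosen only for illustration.
def _example_functional_nadam():
    param = torch.randn(3)
    grad = torch.randn(3)
    exp_avg = torch.zeros_like(param)
    exp_avg_sq = torch.zeros_like(param)
    mu_product = torch.tensor(1.0)
    step = torch.tensor(0.0)

    nadam(
        [param],
        [grad],
        [exp_avg],
        [exp_avg_sq],
        [mu_product],
        [step],
        beta1=0.9,
        beta2=0.999,
        lr=2e-3,
        weight_decay=0.0,
        momentum_decay=4e-3,
        eps=1e-8,
    )
    return param  # updated in place; step and mu_product were advanced as well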