class Adamax(Optimizer):
    def __init__(
        self,
        params: ParamsT,
        lr: Union[float, Tensor] = 2e-3,
        betas: Tuple[float, float] = (0.9, 0.999),
        eps: float = 1e-8,
        weight_decay: float = 0,
        foreach: Optional[bool] = None,
        *,
        maximize: bool = False,
        differentiable: bool = False,
        capturable: bool = False,
    ):
        if isinstance(lr, Tensor) and lr.numel() != 1:
            raise ValueError("Tensor lr must be 1-element")
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps}")
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")
        if not 0.0 <= weight_decay:
            raise ValueError(f"Invalid weight_decay value: {weight_decay}")

        defaults = dict(
            lr=lr,
            betas=betas,
            eps=eps,
            weight_decay=weight_decay,
            foreach=foreach,
            maximize=maximize,
            differentiable=differentiable,
            capturable=capturable,
        )
        super().__init__(params, defaults)

    def __setstate__(self, state):
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault("foreach", None)
            group.setdefault("maximize", False)
            group.setdefault("differentiable", False)
            group.setdefault("capturable", False)
            for p in group["params"]:
                p_state = self.state.get(p, [])
                if len(p_state) != 0 and not torch.is_tensor(p_state["step"]):
                    step_val = float(p_state["step"])
                    p_state["step"] = (
                        torch.tensor(
                            step_val, dtype=_get_scalar_dtype(), device=p.device
                        )
                        if group["capturable"]
                        else torch.tensor(step_val, dtype=_get_scalar_dtype())
                    )

    def _init_group(
        self, group, params_with_grad, grads, exp_avgs, exp_infs, state_steps
    ):
        has_complex = False
        for p in group["params"]:
            if p.grad is None:
                continue
            has_complex |= torch.is_complex(p)
            params_with_grad.append(p)
            if p.grad.is_sparse:
                raise RuntimeError("Adamax does not support sparse gradients")
            grads.append(p.grad)

            state = self.state[p]

            # State initialization
            if len(state) == 0:
                state["step"] = (
                    torch.zeros((), dtype=_get_scalar_dtype(), device=p.device)
                    if group["capturable"]
                    else torch.tensor(0.0, dtype=_get_scalar_dtype())
                )
                state["exp_avg"] = torch.zeros_like(
                    p, memory_format=torch.preserve_format
                )
                state["exp_inf"] = torch.zeros_like(
                    p, memory_format=torch.preserve_format
                )

            exp_avgs.append(state["exp_avg"])
            exp_infs.append(state["exp_inf"])
            state_steps.append(state["step"])

        return has_complex
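    # Per-parameter state ("step", "exp_avg", "exp_inf") is created lazily by
    # _init_group on the first step() call, so parameters that never receive a
    # gradient never allocate optimizer state.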
    @_use_grad_for_differentiable
    def step(self, closure=None):
        """Performs a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        self._cuda_graph_capture_health_check()

        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad: List[Tensor] = []
            grads: List[Tensor] = []
            exp_avgs: List[Tensor] = []
            exp_infs: List[Tensor] = []
            state_steps: List[Tensor] = []

            beta1, beta2 = group["betas"]
            eps = group["eps"]
            lr = group["lr"]
            weight_decay = group["weight_decay"]
            foreach = group["foreach"]
            maximize = group["maximize"]
            differentiable = group["differentiable"]
            capturable = group["capturable"]

            has_complex = self._init_group(
                group, params_with_grad, grads, exp_avgs, exp_infs, state_steps
            )

            adamax(
                params_with_grad,
                grads,
                exp_avgs,
                exp_infs,
                state_steps,
                eps=eps,
                beta1=beta1,
                beta2=beta2,
                lr=lr,
                weight_decay=weight_decay,
                foreach=foreach,
                maximize=maximize,
                differentiable=differentiable,
                capturable=capturable,
                has_complex=has_complex,
            )

        return loss
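

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It shows the
# class above driven from a minimal training loop, including the optional
# closure form that step() documents. The model, data, and the helper name
# _adamax_usage_example are hypothetical stand-ins; the sketch is wrapped in
# a function so the listing stays side-effect free on import.
# ---------------------------------------------------------------------------
def _adamax_usage_example():
    model = torch.nn.Linear(10, 1)
    optimizer = torch.optim.Adamax(model.parameters(), lr=2e-3, betas=(0.9, 0.999))
    inputs, targets = torch.randn(32, 10), torch.randn(32, 1)

    for _ in range(5):
        def closure():
            # Re-evaluate the model and return the loss, as documented on step().
            optimizer.zero_grad()
            loss = torch.nn.functional.mse_loss(model(inputs), targets)
            loss.backward()
            return loss

        optimizer.step(closure)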
Adamax.__doc__=(r"""Implements Adamax algorithm (a variant of Adam based on infinity norm). .. math:: \begin{aligned} &\rule{110mm}{0.4pt} \\ &\textbf{input} : \gamma \text{ (lr)}, \beta_1, \beta_2 \text{ (betas)},\theta_0 \text{ (params)},f(\theta) \text{ (objective)}, \: \lambda \text{ (weight decay)}, \\ &\hspace{13mm} \epsilon \text{ (epsilon)} \\ &\textbf{initialize} : m_0 \leftarrow 0 \text{ ( first moment)}, u_0 \leftarrow 0 \text{ ( infinity norm)} \\[-1.ex] &\rule{110mm}{0.4pt} \\ &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\ &\hspace{5mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\ &\hspace{5mm}if \: \lambda \neq 0 \\ &\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\ &\hspace{5mm}m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) g_t \\ &\hspace{5mm}u_t \leftarrow \mathrm{max}(\beta_2 u_{t-1}, |g_{t}|+\epsilon) \\ &\hspace{5mm}\theta_t \leftarrow \theta_{t-1} - \frac{\gamma m_t}{(1-\beta^t_1) u_t} \\ &\rule{110mm}{0.4pt} \\[-1.ex] &\bf{return} \: \theta_t \\[-1.ex] &\rule{110mm}{0.4pt} \\[-1.ex] \end{aligned} For further details regarding the algorithm we refer to `Adam: A Method for Stochastic Optimization`_. """+rf""" Args:{_params_doc} lr (float, Tensor, optional): learning rate (default: 2e-3) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8) weight_decay (float, optional): weight decay (L2 penalty) (default: 0){_foreach_doc}{_maximize_doc}{_differentiable_doc}{_capturable_doc} .. _Adam\: A Method for Stochastic Optimization: https://arxiv.org/abs/1412.6980 """)def_single_tensor_adamax(params:List[Tensor],grads:List[Tensor],exp_avgs:List[Tensor],exp_infs:List[Tensor],state_steps:List[Tensor],*,eps:float,beta1:float,beta2:float,lr:float,weight_decay:float,maximize:bool,differentiable:bool,capturable:bool,has_complex:bool,):fori,paraminenumerate(params):grad=grads[i]grad=gradifnotmaximizeelse-gradexp_avg=exp_avgs[i]exp_inf=exp_infs[i]step_t=state_steps[i]# If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]ifnottorch.compiler.is_compiling()andcapturable:capturable_supported_devices=_get_capturable_supported_devices()assert(param.device.type==step_t.device.typeandparam.device.typeincapturable_supported_devices),f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."# update stepstep_t+=1ifweight_decay!=0:grad=grad.add(param,alpha=weight_decay)iftorch.is_complex(param):param=torch.view_as_real(param)grad=torch.view_as_real(grad)exp_avg=torch.view_as_real(exp_avg)exp_inf=torch.view_as_real(exp_inf)# Update biased first moment estimate.exp_avg.lerp_(grad,1-beta1)# Update the exponentially weighted infinity norm.ifnotdifferentiable:torch.maximum(exp_inf.mul_(beta2),grad.abs().add_(eps),out=exp_inf,)else:norm_buf=torch.cat([exp_inf.mul_(beta2).unsqueeze(0),grad.abs().add_(eps).unsqueeze_(0)],0,)exp_inf.copy_(torch.amax(norm_buf,0,keepdim=False))ifcapturable:# why jump through extra hoops and negate bias_correction? 
check out #121238# once fixed, we should use bias_correction with addcdiv value=-1 for readabilityneg_bias_correction=beta1**step_t-1neg_bias_correction.div_(lr)denom=exp_inf*neg_bias_correctionparam.addcdiv_(exp_avg,denom)else:bias_correction=1-beta1**_get_value(step_t)clr=lr/bias_correctionparam.addcdiv_(exp_avg,exp_inf,value=-clr)def_multi_tensor_adamax(params:List[Tensor],grads:List[Tensor],exp_avgs:List[Tensor],exp_infs:List[Tensor],state_steps:List[Tensor],*,eps:float,beta1:float,beta2:float,lr:float,weight_decay:float,maximize:bool,differentiable:bool,capturable:bool,has_complex:bool,):assertnotdifferentiable,"_foreach ops don't support autograd"iflen(params)==0:return# If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]ifnottorch.compiler.is_compiling()andcapturable:capturable_supported_devices=_get_capturable_supported_devices(supports_xla=False)assertall(p.device.type==step.device.typeandp.device.typeincapturable_supported_devicesforp,stepinzip(params,state_steps)),f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."grouped_tensors=Optimizer._group_tensors_by_device_and_dtype([params,grads,exp_avgs,exp_infs,state_steps]# type: ignore[list-item])for(grouped_params_,grouped_grads_,grouped_exp_avgs_,grouped_exp_infs_,grouped_state_steps_,),_ingrouped_tensors.values():grouped_params=cast(List[Tensor],grouped_params_)grouped_grads=cast(List[Tensor],grouped_grads_)grouped_exp_avgs=cast(List[Tensor],grouped_exp_avgs_)grouped_exp_infs=cast(List[Tensor],grouped_exp_infs_)grouped_state_steps=cast(List[Tensor],grouped_state_steps_)ifhas_complex:_view_as_real(grouped_params,grouped_grads,grouped_exp_avgs,grouped_exp_infs)ifmaximize:grouped_grads=torch._foreach_neg(grouped_grads)# type: ignore[assignment]# Update steps# If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over# and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just# wrapped it once now. 
The alpha is required to assure we go to the right overload.ifnottorch.compiler.is_compiling()andgrouped_state_steps[0].is_cpu:torch._foreach_add_(grouped_state_steps,torch.tensor(1.0,device="cpu"),alpha=1.0)else:torch._foreach_add_(grouped_state_steps,1)ifweight_decay!=0:ifmaximize:# Re-use the intermediate memory (grouped_grads) already allocated for maximizetorch._foreach_add_(grouped_grads,grouped_params,alpha=weight_decay)else:grouped_grads=torch._foreach_add(# type: ignore[assignment]grouped_grads,grouped_params,alpha=weight_decay)# Update biased first moment estimate.torch._foreach_lerp_(grouped_exp_avgs,grouped_grads,1-beta1)# Update the exponentially weighted infinity norm.torch._foreach_mul_(grouped_exp_infs,beta2)# in this case, we need to introduce a copy of the grads# since one has not been introduced previouslyifnotmaximizeandweight_decay==0:grouped_grads=torch._foreach_abs(grouped_grads)# type: ignore[assignment]else:torch._foreach_abs_(grouped_grads)torch._foreach_add_(grouped_grads,eps)torch._foreach_maximum_(grouped_exp_infs,grouped_grads)bias_corrections:Union[Tuple[Tensor,...],List[Tensor]]ifcapturable:bias_corrections=torch._foreach_pow(beta1,grouped_state_steps)# foreach_sub doesn't allow a scalar as the first argtorch._foreach_sub_(bias_corrections,1)torch._foreach_div_(bias_corrections,lr)denom=torch._foreach_mul(grouped_exp_infs,bias_corrections)torch._foreach_addcdiv_(grouped_params,grouped_exp_avgs,denom)else:bias_corrections=[1-beta1**_get_value(step)forstepingrouped_state_steps]step_size=[(_get_value(lr)/bc)*-1forbcinbias_corrections]torch._foreach_addcdiv_(grouped_params,grouped_exp_avgs,grouped_exp_infs,step_size)@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_adamax)defadamax(params:List[Tensor],grads:List[Tensor],exp_avgs:List[Tensor],exp_infs:List[Tensor],state_steps:List[Tensor],# kwonly args with defaults are not supported by functions compiled with torchscript issue #70627# setting this as kwarg for now as functional API is compiled by torch/distributed/optimforeach:Optional[bool]=None,maximize:bool=False,differentiable:bool=False,capturable:bool=False,has_complex:bool=False,*,eps:float,beta1:float,beta2:float,lr:float,weight_decay:float,):r"""Functional API that performs adamax algorithm computation. See :class:`~torch.optim.Adamax` for details. """ifnottorch.compiler.is_compiling()andnotall(isinstance(t,torch.Tensor)fortinstate_steps):raiseRuntimeError("API has changed, `state_steps` argument must contain a list of singleton tensors")ifforeachisNone:_,foreach=_default_to_fused_or_foreach(params,differentiable,use_fused=False)ifforeachandtorch.jit.is_scripting():raiseRuntimeError("torch.jit.script not supported with foreach optimizers")ifforeachandnottorch.jit.is_scripting():func=_multi_tensor_adamaxelse:func=_single_tensor_adamaxfunc(params,grads,exp_avgs,exp_infs,state_steps,eps=eps,beta1=beta1,beta2=beta2,lr=lr,weight_decay=weight_decay,maximize=maximize,differentiable=differentiable,has_complex=has_complex,capturable=capturable,)
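

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the docstring's
# update rule written out as plain tensor ops, to make the infinity-norm
# recursion concrete. The helper name _adamax_reference_step and its
# arguments are hypothetical; unlike the capturable paths above, `step`
# here is a plain Python int (the 1-based iteration count t).
# ---------------------------------------------------------------------------
def _adamax_reference_step(
    param, grad, exp_avg, exp_inf, step, *,
    lr=2e-3, beta1=0.9, beta2=0.999, eps=1e-8, weight_decay=0.0,
):
    if weight_decay != 0:
        # g_t <- g_t + lambda * theta_{t-1}
        grad = grad + weight_decay * param
    # m_t <- beta1 * m_{t-1} + (1 - beta1) * g_t
    exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
    # u_t <- max(beta2 * u_{t-1}, |g_t| + eps)
    torch.maximum(exp_inf.mul_(beta2), grad.abs() + eps, out=exp_inf)
    # theta_t <- theta_{t-1} - lr * m_t / ((1 - beta1**t) * u_t)
    bias_correction = 1 - beta1**step
    param.addcdiv_(exp_avg, exp_inf, value=-lr / bias_correction)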