from collections import defaultdict, abc as container_abcs

import torch
from copy import deepcopy
from itertools import chain
import warnings
import functools


class _RequiredParameter(object):
    """Singleton class representing a required parameter for an Optimizer."""
    def __repr__(self):
        return "<required parameter>"

required = _RequiredParameter()


class Optimizer(object):
    r"""Base class for all optimizers.

    .. warning::
        Parameters need to be specified as collections that have a deterministic
        ordering that is consistent between runs. Examples of objects that don't
        satisfy those properties are sets and iterators over values of dictionaries.

    Args:
        params (iterable): an iterable of :class:`torch.Tensor` s or
            :class:`dict` s. Specifies what Tensors should be optimized.
        defaults (dict): a dict containing default values of optimization
            options (used when a parameter group doesn't specify them).
    """

    def __init__(self, params, defaults):
        torch._C._log_api_usage_once("python.optimizer")
        self.defaults = defaults
        self._hook_for_profile()

        if isinstance(params, torch.Tensor):
            raise TypeError("params argument given to the optimizer should be "
                            "an iterable of Tensors or dicts, but got " +
                            torch.typename(params))

        self.state = defaultdict(dict)
        self.param_groups = []

        param_groups = list(params)
        if len(param_groups) == 0:
            raise ValueError("optimizer got an empty parameter list")
        if not isinstance(param_groups[0], dict):
            param_groups = [{'params': param_groups}]

        for param_group in param_groups:
            self.add_param_group(param_group)

    def __getstate__(self):
        return {
            'defaults': self.defaults,
            'state': self.state,
            'param_groups': self.param_groups,
        }

    def __setstate__(self, state):
        self.__dict__.update(state)
        self._hook_for_profile()  # To support multiprocessing pickle/unpickle.

    def __repr__(self):
        format_string = self.__class__.__name__ + ' ('
        for i, group in enumerate(self.param_groups):
            format_string += '\n'
            format_string += 'Parameter Group {0}\n'.format(i)
            for key in sorted(group.keys()):
                if key != 'params':
                    format_string += '    {0}: {1}\n'.format(key, group[key])
        format_string += ')'
        return format_string

    def _hook_for_profile(self):
        self._zero_grad_profile_name = "Optimizer.zero_grad#{}.zero_grad".format(self.__class__.__name__)

        def profile_hook_step(func):

            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                obj, *_ = args
                profile_name = "Optimizer.step#{}.step".format(obj.__class__.__name__)
                with torch.autograd.profiler.record_function(profile_name):
                    return func(*args, **kwargs)
            return wrapper

        hooked = getattr(self.__class__.step, "hooked", None)
        if not hooked:
            self.__class__.step = profile_hook_step(self.__class__.step)
            self.__class__.step.hooked = True
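
    # Construction sketch: ``torch.optim.SGD`` stands in for any concrete subclass
    # and ``model`` is a placeholder module. Per-group keys override the constructor
    # defaults; keys a group omits fall back to them:
    #
    #     torch.optim.SGD([
    #         {'params': model.base.parameters()},
    #         {'params': model.head.parameters(), 'lr': 1e-3},
    #     ], lr=1e-2, momentum=0.9)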

    def state_dict(self):
        r"""Returns the state of the optimizer as a :class:`dict`.

        It contains two entries:

        * state - a dict holding current optimization state. Its content
            differs between optimizer classes.
        * param_groups - a list containing all parameter groups where each
            parameter group is a dict
        """
        # Save order indices instead of Tensors
        param_mappings = {}
        start_index = 0

        def pack_group(group):
            nonlocal start_index
            packed = {k: v for k, v in group.items() if k != 'params'}
            param_mappings.update({id(p): i for i, p in enumerate(group['params'], start_index)
                                   if id(p) not in param_mappings})
            packed['params'] = [param_mappings[id(p)] for p in group['params']]
            start_index += len(packed['params'])
            return packed

        param_groups = [pack_group(g) for g in self.param_groups]
        # Remap state to use order indices as keys
        packed_state = {(param_mappings[id(k)] if isinstance(k, torch.Tensor) else k): v
                        for k, v in self.state.items()}
        return {
            'state': packed_state,
            'param_groups': param_groups,
        }
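
    # ``state_dict`` sketch: the returned dict keys per-parameter state by integer
    # position rather than by Tensor, so it can be serialized. ``torch.optim.SGD``,
    # ``model`` and the file name below are placeholders:
    #
    #     model = torch.nn.Linear(4, 2)
    #     opt = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
    #     ckpt = opt.state_dict()      # {'state': {...}, 'param_groups': [...]}
    #     torch.save(ckpt, 'opt.pt')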

    def load_state_dict(self, state_dict):
        r"""Loads the optimizer state.

        Args:
            state_dict (dict): optimizer state. Should be an object returned
                from a call to :meth:`state_dict`.
        """
        # deepcopy, to be consistent with module API
        state_dict = deepcopy(state_dict)
        # Validate the state_dict
        groups = self.param_groups
        saved_groups = state_dict['param_groups']

        if len(groups) != len(saved_groups):
            raise ValueError("loaded state dict has a different number of "
                             "parameter groups")
        param_lens = (len(g['params']) for g in groups)
        saved_lens = (len(g['params']) for g in saved_groups)
        if any(p_len != s_len for p_len, s_len in zip(param_lens, saved_lens)):
            raise ValueError("loaded state dict contains a parameter group "
                             "that doesn't match the size of optimizer's group")

        # Update the state
        id_map = {old_id: p for old_id, p in
                  zip(chain.from_iterable((g['params'] for g in saved_groups)),
                      chain.from_iterable((g['params'] for g in groups)))}

        def cast(param, value):
            r"""Make a deep copy of value, casting all tensors to device of param."""
            if isinstance(value, torch.Tensor):
                # Floating-point types are a bit special here. They are the only ones
                # that are assumed to always match the type of params.
                if param.is_floating_point():
                    value = value.to(param.dtype)
                value = value.to(param.device)
                return value
            elif isinstance(value, dict):
                return {k: cast(param, v) for k, v in value.items()}
            elif isinstance(value, container_abcs.Iterable):
                return type(value)(cast(param, v) for v in value)
            else:
                return value

        # Copy state assigned to params (and cast tensors to appropriate types).
        # State that is not assigned to params is copied as is (needed for
        # backward compatibility).
        state = defaultdict(dict)
        for k, v in state_dict['state'].items():
            if k in id_map:
                param = id_map[k]
                state[param] = cast(param, v)
            else:
                state[k] = v

        # Update parameter groups, setting their 'params' value
        def update_group(group, new_group):
            new_group['params'] = group['params']
            return new_group

        param_groups = [update_group(g, ng) for g, ng in zip(groups, saved_groups)]
        self.__setstate__({'state': state, 'param_groups': param_groups})
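
    # ``load_state_dict`` sketch: restore into an optimizer built over the same
    # parameters, in the same order, as the one that produced the checkpoint
    # (``opt`` and the file name are placeholders):
    #
    #     opt.load_state_dict(torch.load('opt.pt'))
    #
    # Tensor-valued state (momentum buffers, etc.) is cast to the device, and for
    # floating-point values also the dtype, of the parameter it belongs to.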

    def zero_grad(self, set_to_none: bool = False):
        r"""Sets the gradients of all optimized :class:`torch.Tensor` s to zero.

        Args:
            set_to_none (bool): instead of setting to zero, set the grads to None.
                This will in general have lower memory footprint, and can modestly improve performance.
                However, it changes certain behaviors. For example:
                1. When the user tries to access a gradient and perform manual ops on it,
                a None attribute or a Tensor full of 0s will behave differently.
                2. If the user requests ``zero_grad(set_to_none=True)`` followed by a backward pass, ``.grad``\ s
                are guaranteed to be None for params that did not receive a gradient.
                3. ``torch.optim`` optimizers have a different behavior if the gradient is 0 or None
                (in one case it does the step with a gradient of 0 and in the other it skips
                the step altogether).
        """
        foreach = self.defaults.get('foreach', False)

        if not hasattr(self, "_zero_grad_profile_name"):
            self._hook_for_profile()
        if foreach:
            per_device_and_dtype_grads = defaultdict(lambda: defaultdict(list))
        with torch.autograd.profiler.record_function(self._zero_grad_profile_name):
            for group in self.param_groups:
                for p in group['params']:
                    if p.grad is not None:
                        if set_to_none:
                            p.grad = None
                        else:
                            if p.grad.grad_fn is not None:
                                p.grad.detach_()
                            else:
                                p.grad.requires_grad_(False)
                            if (not foreach or p.grad.is_sparse):
                                p.grad.zero_()
                            else:
                                per_device_and_dtype_grads[p.grad.device][p.grad.dtype].append(p.grad)
            if foreach:
                for _, per_dtype_grads in per_device_and_dtype_grads.items():
                    for grads in per_dtype_grads.values():
                        torch._foreach_zero_(grads)
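
    # ``zero_grad`` sketch: typical placement in a training loop (``loss_fn``,
    # ``data`` and ``target`` are placeholders):
    #
    #     opt.zero_grad(set_to_none=True)   # usually cheaper than writing zeros
    #     loss = loss_fn(model(data), target)
    #     loss.backward()
    #     opt.step()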

    def step(self, closure):
        r"""Performs a single optimization step (parameter update).

        Args:
            closure (callable): A closure that reevaluates the model and
                returns the loss. Optional for most optimizers.

        .. note::
            Unless otherwise specified, this function should not modify the
            ``.grad`` field of the parameters.
        """
        raise NotImplementedError
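
    # ``step`` sketch: most optimizers are called as ``opt.step()`` after
    # ``backward()``; optimizers that reevaluate the loss several times per
    # update, such as ``torch.optim.LBFGS``, must be passed a closure
    # (``opt``, ``loss_fn``, ``model``, ``data`` and ``target`` are placeholders):
    #
    #     def closure():
    #         opt.zero_grad()
    #         loss = loss_fn(model(data), target)
    #         loss.backward()
    #         return loss
    #
    #     opt.step(closure)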

    def add_param_group(self, param_group):
        r"""Add a param group to the :class:`Optimizer` s `param_groups`.

        This can be useful when fine tuning a pre-trained network as frozen layers can be made
        trainable and added to the :class:`Optimizer` as training progresses.

        Args:
            param_group (dict): Specifies what Tensors should be optimized along with group
                specific optimization options.
        """
        assert isinstance(param_group, dict), "param group must be a dict"

        params = param_group['params']
        if isinstance(params, torch.Tensor):
            param_group['params'] = [params]
        elif isinstance(params, set):
            raise TypeError('optimizer parameters need to be organized in ordered collections, but '
                            'the ordering of tensors in sets will change between runs. Please use a list instead.')
        else:
            param_group['params'] = list(params)

        for param in param_group['params']:
            if not isinstance(param, torch.Tensor):
                raise TypeError("optimizer can only optimize Tensors, "
                                "but one of the params is " + torch.typename(param))
            if not param.is_leaf:
                raise ValueError("can't optimize a non-leaf Tensor")

        for name, default in self.defaults.items():
            if default is required and name not in param_group:
                raise ValueError("parameter group didn't specify a value of required optimization parameter " +
                                 name)
            else:
                param_group.setdefault(name, default)

        params = param_group['params']
        if len(params) != len(set(params)):
            warnings.warn("optimizer contains a parameter group with duplicate parameters; "
                          "in future, this will cause an error; "
                          "see github.com/pytorch/pytorch/issues/40967 for more information", stacklevel=3)

        param_set = set()
        for group in self.param_groups:
            param_set.update(set(group['params']))

        if not param_set.isdisjoint(set(param_group['params'])):
            raise ValueError("some parameters appear in more than one parameter group")

        self.param_groups.append(param_group)
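

# Minimal end-to-end sketch of the Optimizer API defined above, assuming a working
# torch install; torch.optim.SGD stands in for any concrete subclass. It shows
# add_param_group() for unfreezing extra layers, zero_grad()/step() in a loop, and
# a state_dict()/load_state_dict() round trip.
if __name__ == "__main__":
    backbone = torch.nn.Linear(8, 4)
    head = torch.nn.Linear(4, 2)

    # Optimize only the head at first, then "unfreeze" the backbone by adding a
    # second parameter group with its own learning rate.
    opt = torch.optim.SGD(head.parameters(), lr=0.1, momentum=0.9)
    opt.add_param_group({'params': backbone.parameters(), 'lr': 0.01})

    x = torch.randn(16, 8)
    loss = head(backbone(x)).pow(2).mean()
    opt.zero_grad(set_to_none=True)
    loss.backward()
    opt.step()

    # Round-trip the optimizer state (momentum buffers and per-group options).
    snapshot = deepcopy(opt.state_dict())
    opt.load_state_dict(snapshot)
    print(opt)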