import torch
import torch._C as _C
from torch._C import _functions
import torch.utils.hooks as hooks
from torch._six import with_metaclass
import functools
import warnings
from collections import OrderedDict
from typing import Any, List, Optional


# Formerly known as: _ContextMethodMixin
class FunctionCtx(object):

    def save_for_backward(self, *tensors: torch.Tensor):
        r"""Saves given tensors for a future call to :func:`~Function.backward`.

        ``save_for_backward`` should be called at most once, only from inside the
        :func:`forward` method, and only with tensors.

        All tensors intended to be used in the backward pass should be saved
        with ``save_for_backward`` (as opposed to directly on ``ctx``) to prevent
        incorrect gradients and memory leaks, and enable the application of saved
        tensor hooks. See :class:`torch.autograd.graph.saved_tensors_hooks`.

        In :func:`backward`, saved tensors can be accessed through the
        :attr:`saved_tensors` attribute. Before returning them to the user, a
        check is made to ensure they weren't used in any in-place operation that
        modified their content.

        Arguments can also be ``None``. This is a no-op.

        See :ref:`extending-autograd` for more details on how to use this method.

        Example::
            >>> class Func(Function):
            >>>     @staticmethod
            >>>     def forward(ctx, x: torch.Tensor, y: torch.Tensor, z: int):
            >>>         w = x * y * z
            >>>         out = x * y + y * z + w
            >>>         ctx.save_for_backward(x, y, w, out)
            >>>         ctx.z = z  # z is not a tensor
            >>>         return out
            >>>
            >>>     @staticmethod
            >>>     def backward(ctx, grad_out):
            >>>         x, y, w, out = ctx.saved_tensors
            >>>         z = ctx.z
            >>>         gx = grad_out * (y + y * z)
            >>>         gy = grad_out * (x + z + x * z)
            >>>         gz = None
            >>>         return gx, gy, gz
            >>>
            >>> a = torch.tensor(1., requires_grad=True, dtype=torch.double)
            >>> b = torch.tensor(2., requires_grad=True, dtype=torch.double)
            >>> c = 4
            >>> d = Func.apply(a, b, c)
        """
        self.to_save = tensors

    def save_for_forward(self, *tensors: torch.Tensor):
        r"""Saves given tensors for a future call to :func:`~Function.jvp`.

        ``save_for_forward`` should be called only once, from inside the
        :func:`forward` method, and only with tensors.

        In :func:`jvp`, saved objects can be accessed through the
        :attr:`saved_tensors` attribute.

        Arguments can also be ``None``. This is a no-op.

        See :ref:`extending-autograd` for more details on how to use this method.

        Example::
            >>> import torch.autograd.forward_ad as fwAD
            >>>
            >>> class Func(torch.autograd.Function):
            >>>     @staticmethod
            >>>     def forward(ctx, x: torch.Tensor, y: torch.Tensor, z: int):
            >>>         ctx.save_for_backward(x, y)
            >>>         ctx.save_for_forward(x, y)
            >>>         ctx.z = z
            >>>         return x * y * z
            >>>
            >>>     @staticmethod
            >>>     def jvp(ctx, x_t, y_t, _):
            >>>         x, y = ctx.saved_tensors
            >>>         z = ctx.z
            >>>         return z * (y * x_t + x * y_t)
            >>>
            >>>     @staticmethod
            >>>     def vjp(ctx, grad_out):
            >>>         x, y = ctx.saved_tensors
            >>>         z = ctx.z
            >>>         return z * grad_out * y, z * grad_out * x, None
            >>>
            >>> a = torch.tensor(1., requires_grad=True, dtype=torch.double)
            >>> t = torch.tensor(1., dtype=torch.double)
            >>> b = torch.tensor(2., requires_grad=True, dtype=torch.double)
            >>> c = 4
            >>>
            >>> with fwAD.dual_level():
            >>>     a_dual = fwAD.make_dual(a, t)
            >>>     d = Func.apply(a_dual, b, c)
        """
        for tensor in tensors:
            assert isinstance(tensor, torch.Tensor) or tensor is None, (
                "save_for_forward expects all arguments to be tensors; you should "
                "save non-tensors as attributes on ctx.")

        self.saved_for_forward = tensors

    def mark_dirty(self, *args: torch.Tensor):
        r"""Marks given tensors as modified in an in-place operation.

        **This should be called at most once, only from inside the**
        :func:`forward` **method, and all arguments should be inputs.**

        Every tensor that's been modified in-place in a call to :func:`forward`
        should be given to this function, to ensure correctness of our checks.
        It doesn't matter whether the function is called before or after
        modification.

        Examples::
            >>> class Inplace(Function):
            >>>     @staticmethod
            >>>     def forward(ctx, x):
            >>>         x_npy = x.numpy()  # x_npy shares storage with x
            >>>         x_npy += 1
            >>>         ctx.mark_dirty(x)
            >>>         return x
            >>>
            >>>     @staticmethod
            >>>     @once_differentiable
            >>>     def backward(ctx, grad_output):
            >>>         return grad_output
            >>>
            >>> a = torch.tensor(1., requires_grad=True, dtype=torch.double).clone()
            >>> b = a * a
            >>> Inplace.apply(a)  # This would lead to wrong gradients!
            >>>                   # but the engine would not know unless we mark_dirty
            >>> b.backward()  # RuntimeError: one of the variables needed for gradient
            >>>               # computation has been modified by an inplace operation
        """
        self.dirty_tensors = args

    def mark_shared_storage(self, *pairs):
        warnings.warn(
            'mark_shared_storage is deprecated. '
            'Tensors with shared storages are automatically tracked. Note '
            'that calls to `set_()` are not tracked')

    def mark_non_differentiable(self, *args: torch.Tensor):
        r"""Marks outputs as non-differentiable.

        **This should be called at most once, only from inside the**
        :func:`forward` **method, and all arguments should be tensor outputs.**

        This will mark outputs as not requiring gradients, increasing the
        efficiency of backward computation. You still need to accept a gradient
        for each output in :meth:`~Function.backward`, but it's always going to
        be a zero tensor with the same shape as the corresponding output.

        This is used e.g. for indices returned from a sort. See example::
            >>> class Func(Function):
            >>>     @staticmethod
            >>>     def forward(ctx, x):
            >>>         sorted, idx = x.sort()
            >>>         ctx.mark_non_differentiable(idx)
            >>>         ctx.save_for_backward(x, idx)
            >>>         return sorted, idx
            >>>
            >>>     @staticmethod
            >>>     @once_differentiable
            >>>     def backward(ctx, g1, g2):  # still need to accept g2
            >>>         x, idx = ctx.saved_tensors
            >>>         grad_input = torch.zeros_like(x)
            >>>         grad_input.index_add_(0, idx, g1)
            >>>         return grad_input
        """
        self.non_differentiable = args

    def set_materialize_grads(self, value: bool):
        r"""Sets whether to materialize output grad tensors. Default is ``True``.

        **This should be called only from inside the** :func:`forward` **method.**

        If ``True``, undefined output grad tensors will be expanded to tensors
        full of zeros prior to calling the :func:`backward` method.

        Example::
            >>> class SimpleFunc(Function):
            >>>     @staticmethod
            >>>     def forward(ctx, x):
            >>>         return x.clone(), x.clone()
            >>>
            >>>     @staticmethod
            >>>     @once_differentiable
            >>>     def backward(ctx, g1, g2):
            >>>         return g1 + g2  # No check for None necessary
            >>>
            >>> # We modify SimpleFunc to handle non-materialized grad outputs
            >>> class Func(Function):
            >>>     @staticmethod
            >>>     def forward(ctx, x):
            >>>         ctx.set_materialize_grads(False)
            >>>         ctx.save_for_backward(x)
            >>>         return x.clone(), x.clone()
            >>>
            >>>     @staticmethod
            >>>     @once_differentiable
            >>>     def backward(ctx, g1, g2):
            >>>         x, = ctx.saved_tensors
            >>>         grad_input = torch.zeros_like(x)
            >>>         if g1 is not None:  # We must check for None now
            >>>             grad_input += g1
            >>>         if g2 is not None:
            >>>             grad_input += g2
            >>>         return grad_input
            >>>
            >>> a = torch.tensor(1., requires_grad=True)
            >>> b, _ = Func.apply(a)  # induces g2 to be undefined
        """
        self.materialize_grads = value


# DO NOT USE: This is only defined to be able to load old serialized models
_ContextMethodMixin = FunctionCtx


class _HookMixin(object):

    @staticmethod
    def _register_hook(backward_hooks, hook):
        if backward_hooks is None:
            backward_hooks = OrderedDict()
        handle = hooks.RemovableHandle(backward_hooks)
        backward_hooks[handle.id] = hook
        return backward_hooks, handle


class BackwardCFunction(_C._FunctionBase, FunctionCtx, _HookMixin):

    def apply(self, *args):
        # _forward_cls is defined by derived class
        # The user should define either backward or vjp but never both.
        backward_fn = self._forward_cls.backward  # type: ignore[attr-defined]
        vjp_fn = self._forward_cls.vjp  # type: ignore[attr-defined]
        if backward_fn is not Function.backward and vjp_fn is not Function.vjp:
            raise RuntimeError("Implementing both 'backward' and 'vjp' for a custom "
                               "Function is not allowed. You should only implement one "
                               "of them.")
        user_fn = vjp_fn if vjp_fn is not Function.vjp else backward_fn
        return user_fn(self, *args)

    def apply_jvp(self, *args):
        # _forward_cls is defined by derived class
        return self._forward_cls.jvp(self, *args)  # type: ignore[attr-defined]


class FunctionMeta(type):
    """Function metaclass.

    This metaclass sets up the following properties:
        _backward_cls: The Function class corresponding to the differentiated
            version of this function (which is generated on the fly by this
            metaclass).
    """
    def __init__(cls, name, bases, attrs):
        backward_fn = type(name + 'Backward', (BackwardCFunction,), {'_forward_cls': cls})
        cls._backward_cls = backward_fn
        super(FunctionMeta, cls).__init__(name, bases, attrs)
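

# A small usage sketch of what ``FunctionMeta`` sets up. ``_example_function_meta``
# and ``_ExampleFn`` are throwaway illustrative names, not part of this module;
# the class mirrors the bases used by ``Function`` below. For a class named ``X``,
# the metaclass synthesizes an ``XBackward`` node class (a ``BackwardCFunction``
# subclass) and stores it as ``X._backward_cls``.
def _example_function_meta():
    class _ExampleFn(with_metaclass(FunctionMeta, _C._FunctionBase, FunctionCtx, _HookMixin)):  # type: ignore[misc]
        pass

    # The generated backward node class is named after the forward class and
    # keeps a reference back to it.
    assert _ExampleFn._backward_cls.__name__ == "_ExampleFnBackward"
    assert _ExampleFn._backward_cls._forward_cls is _ExampleFn
    assert issubclass(_ExampleFn._backward_cls, BackwardCFunction)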


# mypy doesn't understand `with_metaclass` from torch._six
class Function(with_metaclass(FunctionMeta, _C._FunctionBase, FunctionCtx, _HookMixin)):  # type: ignore[misc]
    r"""Base class to create custom `autograd.Function`.

    To create a custom `autograd.Function`, subclass this class and implement
    the :meth:`forward` and :meth:`backward` static methods. Then, to use your custom
    op in the forward pass, call the class method ``apply``. Do not call
    :meth:`forward` directly.

    To ensure correctness and best performance, make sure you are calling the
    correct methods on ``ctx`` and validating your backward function using
    :func:`torch.autograd.gradcheck`.

    See :ref:`extending-autograd` for more details on how to use this class.

    Examples::
        >>> class Exp(Function):
        >>>     @staticmethod
        >>>     def forward(ctx, i):
        >>>         result = i.exp()
        >>>         ctx.save_for_backward(result)
        >>>         return result
        >>>
        >>>     @staticmethod
        >>>     def backward(ctx, grad_output):
        >>>         result, = ctx.saved_tensors
        >>>         return grad_output * result
        >>>
        >>> # Use it by calling the apply method:
        >>> output = Exp.apply(input)
    """
    def __init__(self, *args, **kwargs):
        cls = self.__class__
        warnings.warn(f"{cls} should not be instantiated. Methods on autograd functions "
                      "are all static, so you should invoke them on the class itself. "
                      "Instantiating an autograd function will raise an "
                      "error in a future version of PyTorch.", DeprecationWarning)

    def __call__(self, *args, **kwargs):
        raise RuntimeError(
            "Legacy autograd function with non-static forward method is deprecated. "
            "Please use new-style autograd function with static forward method. "
            "(Example: https://pytorch.org/docs/stable/autograd.html#torch.autograd.Function)")

    # for the tracer
    is_traceable = False

    @staticmethod
    def forward(ctx: Any, *args: Any, **kwargs: Any) -> Any:
        r"""Performs the operation.

        This function is to be overridden by all subclasses.

        It must accept a context ctx as the first argument, followed by any
        number of arguments (tensors or other types).

        The context can be used to store arbitrary data that can then be
        retrieved during the backward pass. Tensors should not be stored
        directly on `ctx` (though this is not currently enforced for
        backward compatibility). Instead, tensors should be saved either with
        :func:`ctx.save_for_backward` if they are intended to be used in
        ``backward`` (equivalently, ``vjp``) or :func:`ctx.save_for_forward`
        if they are intended to be used in ``jvp``.
        """
        raise NotImplementedError("You must implement the forward function for custom"
                                  " autograd.Function.")

    @staticmethod
    def backward(ctx: Any, *grad_outputs: Any) -> Any:
        r"""Defines a formula for differentiating the operation with backward mode
        automatic differentiation (alias to the vjp function).

        This function is to be overridden by all subclasses.

        It must accept a context :attr:`ctx` as the first argument, followed by
        as many outputs as :func:`forward` returned (None will be passed in for
        non-tensor outputs of the forward function), and it should return as
        many tensors as there were inputs to :func:`forward`. Each argument is
        the gradient w.r.t. the given output, and each returned value should be
        the gradient w.r.t. the corresponding input. If an input is not a Tensor
        or is a Tensor not requiring grads, you can just pass None as a gradient
        for that input.

        The context can be used to retrieve tensors saved during the forward
        pass. It also has an attribute :attr:`ctx.needs_input_grad` as a tuple
        of booleans representing whether each input needs gradient. E.g.,
        :func:`backward` will have ``ctx.needs_input_grad[0] = True`` if the
        first input to :func:`forward` needs the gradient computed w.r.t. the
        output.
"""raiseNotImplementedError("You must implement either the backward or vjp method for ""your custom autograd.Function to use it with backward ""mode AD.")# vjp and backward are alias of each othervjp=backward@staticmethoddefjvp(ctx:Any,*grad_inputs:Any)->Any:r"""Defines a formula for differentiating the operation with forward mode automatic differentiation. This function is to be overridden by all subclasses. It must accept a context :attr:`ctx` as the first argument, followed by as many inputs as the :func:`forward` got (None will be passed in for non tensor inputs of the forward function), and it should return as many tensors as there were outputs to :func:`forward`. Each argument is the gradient w.r.t the given input, and each returned value should be the gradient w.r.t. the corresponding output. If an output is not a Tensor or the function is not differentiable with respect to that output, you can just pass None as a gradient for that input. You can use the :attr:`ctx` object to pass any value from the forward to this functions. """raiseNotImplementedError("You must implement the jvp function for custom ""autograd.Function to use it with forward mode AD.")


def once_differentiable(fn):

    @functools.wraps(fn)
    def wrapper(ctx, *args):
        with torch.no_grad():
            outputs = fn(ctx, *args)

        if not torch.is_grad_enabled():
            return outputs

        # If any of the inputs have requires_grad=True, we force the outputs
        # to have requires_grad=True but point to a grad_fn which throws an
        # error message during (double) back-propagation.
        # XXX: this is only an approximation of requires_grad - there's no way
        # to figure out if fn didn't use ctx.saved_tensors and as a result
        # some Tensors might require grad, even if no args do.
        # Unfortunately, this leads to unexpected error messages ("no nodes
        # require computing gradients"), but I don't have a better idea.
        # These functions would raise an error in backward anyway.
        requires_grad = any(isinstance(arg, torch.Tensor) and arg.requires_grad
                            for arg in args)
        if not requires_grad:
            return outputs

        if not isinstance(outputs, tuple):
            outputs = (outputs,)

        err_fn = _functions.DelayedError(
            b"trying to differentiate twice a function that was marked "
            b"with @once_differentiable", len(outputs))

        # Create aliases of each output that has requires_grad=True. We need
        # at least one of the inputs to err_fn to require grad so that the
        # output will have a grad_fn.
        def fake_requires_grad(var):
            if var is not None:
                var = var.detach()
                var.requires_grad = True
            return var

        return err_fn(*[fake_requires_grad(v) for v in outputs])
    return wrapper


def traceable(fn_cls):
    r"""Marks Function as traceable for the JIT.

    Traceable functions have additional restrictions - they can't pass any
    data-dependent values to backward (e.g. Prod passes the output, which makes
    it non-traceable), and their backward should be implemented entirely in
    terms of operations on autograd Tensors in all cases.

    DON'T USE THIS DECORATOR. IT IS FOR INTERNAL USE ONLY AND SHOULD BE HANDLED
    WITH CARE (or can give incorrect results otherwise).
    """
    fn_cls.is_traceable = True
    return fn_cls


class InplaceFunction(Function):

    def __init__(self, inplace=False):
        super(InplaceFunction, self).__init__()
        self.inplace = inplace


def _nested_map(condition, fn, condition_msg=None):
    def _map(obj):
        if condition(obj):
            return fn(obj)
        elif obj is None:
            return None
        elif isinstance(obj, (list, tuple)):
            mapped = (_map(x) for x in obj)
            if hasattr(obj, '_fields'):
                # obj is namedtuple
                return type(obj)(*mapped)
            return type(obj)(mapped)
        elif isinstance(obj, dict):
            return {x: _map(obj[x]) for x in obj}
        else:
            raise ValueError("Auto nesting doesn't know how to process "
                             "an input object of type " + torch.typename(obj) +
                             (". Accepted types: " + condition_msg +
                              ", or lists/tuples of them"
                              if condition_msg else ""))

    return _map


def _jit_unwrap_structured(obj):
    if hasattr(obj, "_jit_unwrap"):
        return obj._jit_unwrap()
    return obj


def _iter_filter(condition, allow_unknown=False, condition_msg=None,
                 conversion=None):
    def _iter(obj):
        if conversion is not None:
            obj = conversion(obj)
        if condition(obj):
            yield obj
        elif obj is None:
            return
        elif isinstance(obj, (list, tuple)):
            for o in obj:
                for var in _iter(o):
                    yield var
        elif isinstance(obj, dict):
            # We only accept primitive key types, so we needn't inspect them
            for o in obj.values():
                for var in _iter(o):
                    yield var
        elif allow_unknown:
            yield obj
        else:
            raise ValueError("Auto nesting doesn't know how to process "
                             "an input object of type " + torch.typename(obj) +
                             (". Accepted types: " + condition_msg +
                              ", or lists/tuples of them"
                              if condition_msg else ""))

    return _iter


def _unflatten(input, proto):
    # unflatten a list or tuple input into a nested list/tuple structure
    # specified by proto
    def unflatten_helper(input, proto):
        res: List[Optional[torch.Tensor]] = []
        if hasattr(proto, "_jit_wrap"):
            return proto._jit_wrap(input)
        if not isinstance(proto, (list, tuple)):
            return input[0], input[1:]
        for e in proto:
            if e is None:
                res.append(e)
            else:
                res_e, input = unflatten_helper(input, e)
                res.append(res_e)
        return type(proto)(res), input

    return unflatten_helper(input, proto)[0]


_iter_jit_values = _iter_filter(lambda o: o is None or isinstance(o, torch._C.Value),
                                condition_msg="jit's Values or None")
_iter_tensors = _iter_filter(lambda x: isinstance(x, torch.Tensor), condition_msg="Tensors",
                             conversion=_jit_unwrap_structured)
_iter_tensors_permissive = _iter_filter(lambda x: isinstance(x, torch.Tensor),
                                        allow_unknown=True,
                                        condition_msg="Tensors (permissive)")
_iter_None_tensors = _iter_filter(lambda o: o is None or isinstance(o, torch.Tensor),
                                  condition_msg="Tensors or None")
_map_tensor_data = _nested_map(lambda x: isinstance(x, torch.Tensor), lambda o: o.data,
                               condition_msg="Tensors")


class NestedIOFunction(Function):
    # The 'type: ignore' statements are needed here because these functions are declared as '@staticmethod' in the
    # superclass (Function) but are instance methods here, which mypy reports as incompatible.

    def _do_forward(self, *input):
        self._nested_input = input
        flat_input = tuple(_iter_tensors(input))
        flat_output = super(NestedIOFunction, self)._do_forward(*flat_input)
        nested_output = self._nested_output
        nested_tensors = _unflatten(flat_output, self._nested_output)
        return nested_tensors

    def _do_backward(self, gradients, retain_variables):
        self.retain_variables = retain_variables
        result = super(NestedIOFunction, self)._do_backward(gradients, retain_variables)
        if not retain_variables:
            del self._nested_output
            del self._to_save_nested
        return result

    def backward(self, *gradients: Any) -> Any:  # type: ignore[override]
        nested_gradients = _unflatten(gradients, self._nested_output)
        result = self.backward_extended(*nested_gradients)  # type: ignore[func-returns-value]
        return tuple(_iter_None_tensors(result))

    __call__ = _do_forward

    def forward(self, *args: Any) -> Any:  # type: ignore[override]
        nested_tensors = _map_tensor_data(self._nested_input)
        result = self.forward_extended(*nested_tensors)  # type: ignore[func-returns-value]
        del self._nested_input
        self._nested_output = result
        return tuple(_iter_tensors(result))

    def save_for_backward(self, *args: Any) -> None:
        self.to_save = tuple(_iter_tensors(args))
        self._to_save_nested = args

    @property
    def saved_tensors(self):
        flat_tensors = super(NestedIOFunction, self).saved_tensors
        return _unflatten(flat_tensors, self._to_save_nested)

    def mark_dirty(self, *args: Any, **kwargs: Any) -> None:
        self.dirty_tensors = tuple(_iter_tensors((args, kwargs)))

    def mark_non_differentiable(self, *args: Any, **kwargs: Any) -> None:
        self.non_differentiable = tuple(_iter_tensors((args, kwargs)))

    def forward_extended(self, *input: Any) -> None:
        raise NotImplementedError

    def backward_extended(self, *grad_output: Any) -> None:
        raise NotImplementedError
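

# A small round-trip sketch of the private nesting helpers above: ``_iter_tensors``
# flattens an arbitrarily nested list/tuple structure into its tensors, and
# ``_unflatten`` rebuilds the original nesting from a flat sequence using that
# structure as a prototype. ``_example_flatten_unflatten`` and the particular
# nested structure are illustrative only.
def _example_flatten_unflatten():
    nested = (torch.zeros(1), [torch.zeros(2), torch.zeros(3)])

    # Flatten: yields the three tensors in depth-first order.
    flat = tuple(_iter_tensors(nested))
    assert [t.numel() for t in flat] == [1, 2, 3]

    # Unflatten: rebuilds the (tuple, [list]) nesting around the same tensors.
    rebuilt = _unflatten(flat, nested)
    assert isinstance(rebuilt, tuple) and isinstance(rebuilt[1], list)
    assert all(a is b for a, b in zip(flat, _iter_tensors(rebuilt)))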