def set_module(obj, mod):
    """
    Set the module attribute on a Python object for nicer printing.
    """
    if not isinstance(mod, str):
        raise TypeError("The mod argument should be a string")
    obj.__module__ = mod
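As a quick illustration (assuming this helper is exposed publicly as torch.utils.set_module, as in current PyTorch releases), it can make an object report a friendlier module name in reprs and documentation; the mylib.ops name below is purely hypothetical:

import torch

def _internal_helper():
    # Hypothetical function defined in a private submodule
    pass

# Make the helper print as if it lived in a public namespace
torch.utils.set_module(_internal_helper, "mylib.ops")
print(_internal_helper.__module__)  # mylib.ops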
if torch._running_with_deploy():
    # not valid inside the torch_deploy interpreter; no paths exist for frozen modules
    cmake_prefix_path = None
else:
    cmake_prefix_path = _osp.join(_osp.dirname(_osp.dirname(__file__)), "share", "cmake")
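For reference, this value is exposed as torch.utils.cmake_prefix_path and points at the directory containing PyTorch's CMake package configuration, so it can be passed to CMAKE_PREFIX_PATH when building C++ code against libtorch; a minimal sketch:

import torch

# Print the CMake prefix path so a build script can hand it to CMake, e.g.
#   cmake -DCMAKE_PREFIX_PATH="$(python -c 'import torch; print(torch.utils.cmake_prefix_path)')" ..
print(torch.utils.cmake_prefix_path)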
def swap_tensors(t1, t2):
    """
    This function swaps the content of the two Tensor objects.
    At a high level, this will make t1 have the content of t2 while preserving
    t1's identity.

    This will not work if t1 and t2 have different slots.
    """
    # Ensure there are no weakrefs
    if weakref.getweakrefs(t1):
        raise RuntimeError("Cannot swap t1 because it has weakref associated with it")
    if weakref.getweakrefs(t2):
        raise RuntimeError("Cannot swap t2 because it has weakref associated with it")
    t1_slots = set(copyreg._slotnames(t1.__class__))  # type: ignore[attr-defined]
    t2_slots = set(copyreg._slotnames(t2.__class__))  # type: ignore[attr-defined]
    if t1_slots != t2_slots:
        raise RuntimeError("Cannot swap t1 and t2 if they have different slots")

    def swap_attr(name):
        tmp = getattr(t1, name)
        setattr(t1, name, getattr(t2, name))
        setattr(t2, name, tmp)

    def error_pre_hook(grad_outputs):
        raise RuntimeError(
            "Trying to execute AccumulateGrad node that was poisoned by swap_tensors; "
            "this can happen when you try to run backward on a tensor that was swapped. "
            "For a module m with `torch.__future__.set_swap_module_params_on_conversion(True)` "
            "you should not change the device or dtype of the module (e.g. `m.cpu()` or `m.half()`) "
            "between running forward and backward. To resolve this, please only change the "
            "device/dtype before running forward (or after both forward and backward)."
        )

    def check_use_count(t, name="t1"):
        use_count = t._use_count()
        error_str = (
            f"Expected use_count of {name} to be 1 or 2 with an AccumulateGrad node but got {use_count}; "
            f"make sure you are not holding references to the tensor in other places."
        )
        if use_count > 1:
            if use_count == 2 and t.is_leaf:
                accum_grad_node = torch.autograd.graph.get_gradient_edge(t).node
                # Make sure that the accumulate_grad node was not lazy_init-ed by get_gradient_edge
                if t._use_count() == 2:
                    accum_grad_node.register_prehook(error_pre_hook)
                else:
                    raise RuntimeError(error_str)
            else:
                raise RuntimeError(error_str)

    check_use_count(t1, "t1")
    check_use_count(t2, "t2")

    # Swap the types
    # Note that this will fail if there are mismatched slots
    swap_attr("__class__")

    # Swap the dynamic attributes
    swap_attr("__dict__")

    # Swap the slots
    for slot in t1_slots:
        if hasattr(t1, slot) and hasattr(t2, slot):
            swap_attr(slot)
        elif hasattr(t1, slot):
            setattr(t2, slot, getattr(t1, slot))
            delattr(t1, slot)
        elif hasattr(t2, slot):
            setattr(t1, slot, getattr(t2, slot))
            delattr(t2, slot)

    # Swap the at::Tensor they point to
    torch._C._swap_tensor_impl(t1, t2)
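A minimal usage sketch, assuming the function is exposed as torch.utils.swap_tensors (as in recent PyTorch releases). Per the checks above, it raises if either tensor has weakrefs or is referenced from other places:

import torch

t1 = torch.zeros(3)
t2 = torch.ones(5)

# After the swap, the Python objects t1 and t2 keep their identities but
# hold each other's original class, __dict__, slots and underlying at::Tensor.
torch.utils.swap_tensors(t1, t2)

print(t1)  # tensor([1., 1., 1., 1., 1.])
print(t2)  # tensor([0., 0., 0.])

This is the mechanism used when `torch.__future__.set_swap_module_params_on_conversion(True)` is enabled, which is why the AccumulateGrad poisoning hook above warns against changing a module's device/dtype between forward and backward.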