importgcimporttorchfrom._utilsimport_dummy_typeifnothasattr(torch._C,'_CudaStreamBase'):# Define dummy base classestorch._C.__dict__['_CUDAGraph']=_dummy_type('_CUDAGraph')torch._C.__dict__['_graph_pool_handle']=_dummy_type('_graph_pool_handle')fromtorch._Cimport_CUDAGraph# noqa: F401fromtorch._Cimport_graph_pool_handle# Python shim helps Sphinx process docstrings more reliably.defgraph_pool_handle():r""" Returns an opaque token representing the id of a graph memory pool. See :ref:`Graph memory management<graph-memory-management>`. .. warning:: This API is in beta and may change in future releases. """return_graph_pool_handle()# Python shim helps Sphinx process docstrings more reliably.
[docs]classCUDAGraph(torch._C._CUDAGraph):r""" Wrapper around a CUDA graph. .. warning:: This API is in beta and may change in future releases. """def__new__(cls):returnsuper(CUDAGraph,cls).__new__(cls)def__init__(self):super(CUDAGraph,self).__init__()
[docs]defcapture_begin(self,pool=None):r""" Begins capturing CUDA work on the current stream. Typically, you shouldn't call ``capture_begin`` yourself. Use :class:`~torch.cuda.graph` or :func:`~torch.cuda.make_graphed_callables`, which call ``capture_begin`` internally. Arguments: pool (optional): Token (returned by :func:`~torch.cuda.graph_pool_handle` or :meth:`other_Graph_instance.pool()<torch.cuda.CUDAGraph.pool>`) that hints this graph may share memory with the indicated pool. See :ref:`Graph memory management<graph-memory-management>`. """# I'm not sure if pybind11 converts a None arg to the default defined on the C++ side,# so I'm not taking any chances.ifpoolisNone:super(CUDAGraph,self).capture_begin()else:super(CUDAGraph,self).capture_begin(pool)
[docs]defcapture_end(self):r""" Ends CUDA graph capture on the current stream. After ``capture_end``, ``replay`` may be called on this instance. Typically, you shouldn't call ``capture_end`` yourself. Use :class:`~torch.cuda.graph` or :func:`~torch.cuda.make_graphed_callables`, which call ``capture_end`` internally. """super(CUDAGraph,self).capture_end()
[docs]defreplay(self):r""" Replays the CUDA work captured by this graph. """super(CUDAGraph,self).replay()
[docs]defreset(self):r""" Deletes the graph currently held by this instance. """super(CUDAGraph,self).reset()
[docs]defpool(self):r""" Returns an opaque token representing the id of this graph's memory pool. This id can optionally be passed to another graph's ``capture_begin``, which hints the other graph may share the same memory pool. """returnsuper(CUDAGraph,self).pool()
classgraph(object):r""" Context-manager that captures CUDA work into a :class:`torch.cuda.CUDAGraph` object for later replay. See :ref:`CUDA Graphs <cuda-graph-semantics>` for a general introduction, detailed use, and constraints. Arguments: cuda_graph (torch.cuda.CUDAGraph): Graph object used for capture. pool (optional): Opaque token (returned by a call to :func:`~torch.cuda.graph_pool_handle()` or :meth:`other_Graph_instance.pool()<torch.cuda.CUDAGraph.pool>`) hinting this graph's capture may share memory from the specified pool. See :ref:`Graph memory management<graph-memory-management>`. stream (torch.cuda.Stream, optional): If supplied, will be set as the current stream in the context. If not supplied, ``graph`` sets its own internal side stream as the current stream in the context. .. note:: For effective memory sharing, if you pass a ``pool`` used by a previous capture and the previous capture used an explicit ``stream`` argument, you should pass the same ``stream`` argument to this capture. .. warning:: This API is in beta and may change in future releases. """default_capture_stream=Nonedef__init__(self,cuda_graph,pool=None,stream=None):# Lazy-init of default_capture_stream helps avoid circular-import errors.# Not thread safe, but graphs already have the general (explicitly documented)# restriction that only one capture may be underway at a time in the process.ifself.__class__.default_capture_streamisNone:self.__class__.default_capture_stream=torch.cuda.Stream()self.pool=()ifpoolisNoneelse(pool,)self.capture_stream=streamifstreamisnotNoneelseself.__class__.default_capture_streamassertself.capture_streamisnotNoneself.stream_ctx=torch.cuda.stream(self.capture_stream)self.cuda_graph=cuda_graphdef__enter__(self):# Free as much memory as we can for the graphtorch.cuda.synchronize()gc.collect()torch.cuda.empty_cache()# Stackoverflow seems comfortable with this pattern# https://stackoverflow.com/questions/26635684/calling-enter-and-exit-manually#39172487self.stream_ctx.__enter__()self.cuda_graph.capture_begin(*self.pool)def__exit__(self,exc_type,exc_value,traceback):self.cuda_graph.capture_end()self.stream_ctx.__exit__(exc_type,exc_value,traceback)# returning None should propagate exceptions from either capture_end or stream_ctx.__exit__()defmake_graphed_callables(callables,sample_args):r""" Accepts callables (functions or :class:`nn.Module<torch.nn.Module>`\ s) and returns graphed versions. Each graphed callable's forward pass runs its source callable's forward CUDA work as a CUDA graph inside a single autograd node. The graphed callable's forward pass also appends a backward node to the autograd graph. During backward, this node runs the callable's backward work as a CUDA graph. Therefore, each graphed callable should be a drop-in replacement for its source callable in an autograd-enabled training loop. See :ref:`Partial-network capture<partial-network-capture>` for detailed use and constraints. If you pass a tuple of several callables, their captures will use the same memory pool. See :ref:`Graph memory management<graph-memory-management>` for when this is appropriate. Arguments: callables (torch.nn.Module or Python function, or tuple of these): Callable or callables to graph. See :ref:`Graph memory management<graph-memory-management>` for when passing a tuple of callables is appropriate. If you pass a tuple of callables, their order in the tuple must be the same order they'll run in the live workload. sample_args (tuple of Tensors, or tuple of tuples of Tensors): Samples args for each callable. If a single callable was passed, ``sample_args`` must be a single tuple of argument Tensors. If a tuple of callables was passed, ``sample_args`` must be tuple of tuples of argument Tensors. .. note:: The ``requires_grad`` state of each Tensor in ``sample_args`` must match the state that's expected for the corresponding real input in the training loop. .. warning:: This API is in beta and may change in future releases. .. warning:: ``sample_args`` for each callable must be a tuple of Tensors. Other types and keyword args are not allowed. .. warning:: Returned callables do not support higher order differentiation (e.g., double backward). .. warning:: In any :class:`~torch.nn.Module` passed to :func:`~make_graphed_callables`, only parameters may be trainable. Buffers must have ``requires_grad=False``. .. warning:: After you pass a :class:`torch.nn.Module` through :func:`~make_graphed_callables`, you may not add or remove any of that Module's parameters or buffers. .. warning:: :class:`torch.nn.Module`\s passed to :func:`~torch.cuda.make_graphed_callables` must not have module hooks registered on them at the time they are passed. However, registering hooks on modules *after* passing them through :func:`~torch.cuda.make_graphed_callables` is allowed. .. warning:: When running a graphed callable, you must pass its arguments in the same order and format they appeared in that callable's ``sample_args``. .. warning:: All Tensor outputs of graphed callables must require grad. """just_one_callable=Falseifnotisinstance(callables,tuple):just_one_callable=Truecallables=(callables,)sample_args=(sample_args,)forc,argsinzip(callables,sample_args):ifisinstance(c,torch.nn.Module):assertlen(c._backward_hooks)==0andlen(c._forward_hooks)==0andlen(c._forward_pre_hooks)==0, \
"Modules must not have hooks registered at the time they are passed. However, registering hooks "+ \
"on modules after passing them through make_graphed_callables is allowed."assertall(b.requires_gradisFalseforbinc.buffers()),"In any :class:`~torch.nn.Module` passed to "+ \
":func:`~make_graphed_callables`, only parameters may be trainable. All buffers must have "+ \
"``requires_grad=False``."assertall(isinstance(arg,torch.Tensor)forarginargs),"In the beta API, sample_args "+ \
"for each callable must be a tuple of Tensors. Other types and keyword args are not allowed."# If a callable is an nn.Module, its graph's full input surface is the args the user explicitly# passes to forward (ie, its sample_args) AND the module's parameter attributes.per_callable_len_user_args=[len(args)forargsinsample_args]per_callable_module_params=[tuple(c.parameters())ifisinstance(c,torch.nn.Module)else()forcincallables]per_callable_static_input_surfaces=[sample_args[i]+per_callable_module_params[i]foriinrange(len(callables))]fwd_graphs=[torch.cuda.CUDAGraph()for_inrange(len(callables))]bwd_graphs=[torch.cuda.CUDAGraph()for_inrange(len(callables))]mempool=graph_pool_handle()# Warmup# Hopefully prevents cudnn benchmarking and other lazy-initialization cuda work# from ending up in any captures.torch.cuda.synchronize()withtorch.cuda.stream(torch.cuda.Stream()):forfunc,args,static_input_surfaceinzip(callables,sample_args,per_callable_static_input_surfaces):for_inrange(3):outputs=func(*args)outputs=(outputs,)ifisinstance(outputs,torch.Tensor)elseoutputsgrad_inputs=torch.autograd.grad(outputs=outputs,inputs=tuple(iforiinstatic_input_surfaceifi.requires_grad),grad_outputs=tuple(torch.empty_like(o)foroinoutputs),only_inputs=True,allow_unused=False)deloutputs,grad_inputstorch.cuda.synchronize()# All captures here share a mempool. To avoid replays corrupting each other's memory,# the safest approach is to capture all passes in the same order they'll run:# fwd 1, fwd 2, ... fwd N, then bwd N, bwd N-1, ... bwd 1.# Capture forward graphsper_callable_static_outputs=[]per_callable_output_was_tensor=[]forfunc,args,fwd_graphinzip(callables,sample_args,fwd_graphs):withtorch.cuda.graph(fwd_graph,pool=mempool):outputs=func(*args)# Assumes model output is a tensor or tuple of tensorsifisinstance(outputs,torch.Tensor):per_callable_output_was_tensor.append(True)outputs=(outputs,)else:per_callable_output_was_tensor.append(False)per_callable_static_outputs.append(outputs)# Capture backward graphs in reverse orderper_callable_static_grad_outputs=[]per_callable_static_grad_inputs=[]forstatic_input_surface,static_outputs,bwd_graph,module_paramsin \
zip(reversed(per_callable_static_input_surfaces),reversed(per_callable_static_outputs),reversed(bwd_graphs),reversed(per_callable_module_params)):# For now, assumes all static_outputs require gradassertall(o.requires_gradforoinstatic_outputs),"Outputs of graphed callables must require grad."static_grad_outputs=tuple(torch.empty_like(o)foroinstatic_outputs)withtorch.cuda.graph(bwd_graph,pool=mempool):grad_inputs=torch.autograd.grad(outputs=static_outputs,inputs=tuple(iforiinstatic_input_surfaceifi.requires_grad),grad_outputs=static_grad_outputs,only_inputs=True,allow_unused=False)# Constructs a tuple suitable for returning from Graphed.backward:# Pads out the actually-needed grads with Nones in gradient slots for inputs that don't require grad.# I couldn't think of a slick one-liner for this pattern.static_grad_inputs=[]grad_idx=0forarginstatic_input_surface:ifarg.requires_grad:static_grad_inputs.append(grad_inputs[grad_idx])grad_idx+=1else:static_grad_inputs.append(None)# type: ignore[arg-type]static_grad_inputs=tuple(static_grad_inputs)# type: ignore[assignment]per_callable_static_grad_outputs.append(static_grad_outputs)per_callable_static_grad_inputs.append(static_grad_inputs)# Reverses the most recent two listsper_callable_static_grad_outputs=list(reversed(per_callable_static_grad_outputs))per_callable_static_grad_inputs=list(reversed(per_callable_static_grad_inputs))# Now for every per_callable list, per_callable_*[i] holds the stuff for the ith callable.defmake_graphed_autograd_function(fwd_graph,bwd_graph,module_params,len_user_args,output_was_tensor,static_input_surface,static_outputs,static_grad_outputs,static_grad_inputs):classGraphed(torch.autograd.Function):@staticmethoddefforward(ctx,*inputs):# At this stage, only the user args may (potentially) be new tensors.foriinrange(len_user_args):ifstatic_input_surface[i].data_ptr()!=inputs[i].data_ptr():static_input_surface[i].copy_(inputs[i])fwd_graph.replay()assertisinstance(static_outputs,tuple)returntuple(o.detach()foroinstatic_outputs)@staticmethod@torch.autograd.function.once_differentiabledefbackward(ctx,*grads):forg,gradinzip(static_grad_outputs,grads):ifgisNone:assertgradisNoneelse:# don't copy if autograd gods have been kind and the# incoming grad is already in the right placeifg.data_ptr()!=grad.data_ptr():g.copy_(grad)bwd_graph.replay()# Input args that didn't require grad expect a None gradient.assertisinstance(static_grad_inputs,tuple)returntuple(b.detach()ifbisnotNoneelsebforbinstatic_grad_inputs)deffunctionalized(*user_args):# Runs the autograd function with inputs == all inputs to the graph that might require grad# (explicit user args + module parameters)# Assumes module params didn't change since capture.out=Graphed.apply(*(user_args+module_params))returnout[0]ifoutput_was_tensorelseoutreturnfunctionalized# Put together the final graphed callablesret=[]fori,funcinenumerate(callables):graphed=make_graphed_autograd_function(fwd_graphs[i],bwd_graphs[i],per_callable_module_params[i],per_callable_len_user_args[i],per_callable_output_was_tensor[i],per_callable_static_input_surfaces[i],per_callable_static_outputs[i],per_callable_static_grad_outputs[i],per_callable_static_grad_inputs[i])ifisinstance(func,torch.nn.Module):defmake_graphed_forward(func,graph_training_state,graphed,orig_fwd):defnew_fwd(*user_args):# If the module's training-or-eval state matches what we graphed,# run the graph, otherwise run the original forward methodiffunc.training==graph_training_state:returngraphed(*user_args)else:returnorig_fwd(*user_args)returnnew_fwdfunc.forward=make_graphed_forward(func,func.training,graphed,func.forward)# type: ignore[assignment]ret.append(func)else:ret.append(graphed)ifjust_one_callable:returnret[0]returntuple(ret)
Docs
Access comprehensive developer documentation for PyTorch
To analyze traffic and optimize your experience, we serve cookies on this site. By clicking or navigating, you agree to allow our usage of cookies. As the current maintainers of this site, Facebook’s Cookies Policy applies. Learn more, including about available controls: Cookies Policy.