python.closure
==================
cond_closed_over_variable
^^^^^^^^^^^^^^^^^^^^^^^^^

.. note::

    Tags: :doc:`torch.cond <torch.cond>`, :doc:`python.closure <python.closure>`

    Support Level: SUPPORTED

Original source code:

.. code-block:: python

    # mypy: allow-untyped-defs
    import torch
    
    from functorch.experimental.control_flow import cond
    
    class CondClosedOverVariable(torch.nn.Module):
        """
        torch.cond() supports branches closed over arbitrary variables.
        """
    
        def forward(self, pred, x):
            def true_fn(val):
                return x * 2
    
            def false_fn(val):
                return x - 2
    
            return cond(pred, true_fn, false_fn, [x + 1])
    
    example_args = (torch.tensor(True), torch.randn(3, 2))
    tags = {"torch.cond", "python.closure"}
    model = CondClosedOverVariable()
    

    torch.export.export(model, example_args)

Result:

.. code-block::

    ExportedProgram:
        class GraphModule(torch.nn.Module):
            def forward(self, pred: "b8[]", x: "f32[3, 2]"):
                     add: "f32[3, 2]" = torch.ops.aten.add.Tensor(x, 1);  add = None
                
                     true_graph_0 = self.true_graph_0
                false_graph_0 = self.false_graph_0
                cond = torch.ops.higher_order.cond(pred, true_graph_0, false_graph_0, [x]);  pred = true_graph_0 = false_graph_0 = x = None
                getitem: "f32[3, 2]" = cond[0];  cond = None
                return (getitem,)
                
            class true_graph_0(torch.nn.Module):
                def forward(self, x: "f32[3, 2]"):
                             mul: "f32[3, 2]" = torch.ops.aten.mul.Tensor(x, 2);  x = None
                    return (mul,)
                    
            class false_graph_0(torch.nn.Module):
                def forward(self, x: "f32[3, 2]"):
                             sub: "f32[3, 2]" = torch.ops.aten.sub.Tensor(x, 2);  x = None
                    return (sub,)
                    
    Graph signature: ExportGraphSignature(input_specs=[InputSpec(kind=<InputKind.USER_INPUT: 1>, arg=TensorArgument(name='pred'), target=None, persistent=None), InputSpec(kind=<InputKind.USER_INPUT: 1>, arg=TensorArgument(name='x'), target=None, persistent=None)], output_specs=[OutputSpec(kind=<OutputKind.USER_OUTPUT: 1>, arg=TensorArgument(name='getitem'), target=None)])
    Range constraints: {}
    

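Because both branches close over ``x`` instead of using their ``val`` argument, export lifts ``x`` into the operands of ``torch.ops.higher_order.cond`` and the ``x + 1`` operand becomes dead code (note the ``add = None`` in the graph above). Below is a minimal sketch of how one might check this behavior, assuming the ``CondClosedOverVariable`` module and ``example_args`` from the listing above are in scope:

.. code-block:: python

    import torch

    # Sketch: export once, then run the exported module with both predicate values.
    # The branches ignore their operand and operate on the captured ``x`` instead.
    ep = torch.export.export(CondClosedOverVariable(), example_args)

    x = torch.randn(3, 2)
    out_true = ep.module()(torch.tensor(True), x)
    out_false = ep.module()(torch.tensor(False), x)

    # Eager behavior for comparison: true branch returns x * 2, false branch x - 2.
    torch.testing.assert_close(out_true, x * 2)
    torch.testing.assert_close(out_false, x - 2)
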

nested_function
^^^^^^^^^^^^^^^

.. note::

    Tags: :doc:`python.closure <python.closure>`

    Support Level: SUPPORTED

Original source code:

.. code-block:: python

    # mypy: allow-untyped-defs
    import torch
    
    class NestedFunction(torch.nn.Module):
        """
        Nested functions are traced through. Side effects on global captures
        are not supported though.
        """
    
        def forward(self, a, b):
            x = a + b
            z = a - b
    
            def closure(y):
                nonlocal x
                x += 1
                return x * y + z
    
            return closure(x)
    
    example_args = (torch.randn(3, 2), torch.randn(2))
    tags = {"python.closure"}
    model = NestedFunction()
    

    torch.export.export(model, example_args)

Result:

.. code-block::

    ExportedProgram:
        class GraphModule(torch.nn.Module):
            def forward(self, a: "f32[3, 2]", b: "f32[2]"):
                     add: "f32[3, 2]" = torch.ops.aten.add.Tensor(a, b)
                
                     sub: "f32[3, 2]" = torch.ops.aten.sub.Tensor(a, b);  a = b = None
                
                     add_: "f32[3, 2]" = torch.ops.aten.add_.Tensor(add, 1);  add = None
                
                     mul: "f32[3, 2]" = torch.ops.aten.mul.Tensor(add_, add_);  add_ = None
                add_1: "f32[3, 2]" = torch.ops.aten.add.Tensor(mul, sub);  mul = sub = None
                return (add_1,)
                
    Graph signature: ExportGraphSignature(input_specs=[InputSpec(kind=<InputKind.USER_INPUT: 1>, arg=TensorArgument(name='a'), target=None, persistent=None), InputSpec(kind=<InputKind.USER_INPUT: 1>, arg=TensorArgument(name='b'), target=None, persistent=None)], output_specs=[OutputSpec(kind=<OutputKind.USER_OUTPUT: 1>, arg=TensorArgument(name='add_1'), target=None)])
    Range constraints: {}
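
The nested ``closure`` is inlined into the graph: the ``nonlocal x; x += 1`` mutation appears as the in-place ``aten.add_`` node, and because ``closure(x)`` receives the same (already incremented) tensor, the multiply becomes ``mul(add_, add_)``. A minimal sketch comparing the exported program against eager execution, assuming the ``NestedFunction`` module and ``example_args`` from the listing above are in scope:

.. code-block:: python

    import torch

    # Sketch: the exported graph should match eager execution of the nested closure.
    ep = torch.export.export(NestedFunction(), example_args)

    a, b = torch.randn(3, 2), torch.randn(2)
    eager_out = NestedFunction()(a, b)   # (a + b + 1) * (a + b + 1) + (a - b)
    exported_out = ep.module()(a, b)

    torch.testing.assert_close(exported_out, eager_out)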