"""Functions to export models into the ONNX IR format.These models can be loaded with the ONNX library and thenconverted to models which run on other deep learning frameworks."""from__future__importannotationsimportcontextlibimportcopyimportinspectimportioimportosimportreimporttextwrapimporttypingimportwarningsimportzipfilefromtypingimport(Any,Callable,cast,Collection,Dict,List,Mapping,Optional,Sequence,Set,Tuple,Type,Union,)importtorchimporttorch._C._onnxas_C_onnximporttorch.jit._traceimporttorch.serializationfromtorchimport_Cfromtorch.onnximport(# noqa: F401_constants,_deprecation,_exporter_states,_patch_torch,errors,symbolic_caffe2,symbolic_helper,)fromtorch.onnx._globalsimportGLOBALSfromtorch.onnx._internalimport_beartype,diagnostics,jit_utils,registration__all__=["is_in_onnx_export","select_model_mode_for_export","disable_apex_o2_state_dict_hook","setup_onnx_logging","exporter_context","export","warn_on_static_input_change","unpack_quantized_tensor","export_to_pretty_string","unconvertible_ops","register_custom_op_symbolic","unregister_custom_op_symbolic",]
def is_in_onnx_export() -> bool:
    """Returns whether it is in the middle of ONNX export."""
    return GLOBALS.in_onnx_export
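# --- Hedged example (illustrative sketch, not part of the original module) ----
# User code can branch on is_in_onnx_export() to pick an export-friendly path
# while the model is being traced for ONNX. The class name `ExportAwareRelu`
# below is hypothetical and exists only for illustration.


class ExportAwareRelu(torch.nn.Module):
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if is_in_onnx_export():
            # This branch is the one traced into the exported ONNX graph.
            return torch.clamp(x, min=0.0)
        return torch.nn.functional.relu(x)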
# TODO(justinchuby): Remove dependency to this global variable from constant_fold.cpp
# The type annotation is skipped because IValue cannot be imported from torch._C
_params_dict = {}  # type: ignore[var-annotated]
@contextlib.contextmanager
@_beartype.beartype
def select_model_mode_for_export(model, mode: _C_onnx.TrainingMode):
    r"""A context manager to temporarily set the training mode of ``model``
    to ``mode``, resetting it when we exit the with-block.

    Args:
        model: Same type and meaning as ``model`` arg to :func:`export`.
        mode: Same type and meaning as ``training`` arg to :func:`export`.
    """
    if not isinstance(mode, _C_onnx.TrainingMode):
        raise TypeError(
            f"'mode' should be a torch.onnx.TrainingMode enum, but got '{type(mode)}'."
        )
    originally_training: bool = False

    if hasattr(model, "training"):
        originally_training = model.training

        # ONNX opset 12 has better support for training amenable models, with updated
        # versions of the dropout and batch_norm operators
        if mode == _C_onnx.TrainingMode.TRAINING or (
            mode == _C_onnx.TrainingMode.PRESERVE and originally_training
        ):
            GLOBALS.export_training = True
            if GLOBALS.export_onnx_opset_version < 12:
                warnings.warn(
                    "You are exporting the model in training mode with onnx opset "
                    f"version {GLOBALS.export_onnx_opset_version}. "
                    "Opset versions lower than opset 12 will not be able to export "
                    "nodes such as Dropout and BatchNorm correctly."
                )
        else:
            GLOBALS.export_training = False

        GLOBALS.training_mode = mode
        if mode == _C_onnx.TrainingMode.TRAINING:
            model.train(True)
        elif mode == _C_onnx.TrainingMode.EVAL:
            model.train(False)
        # else mode == _C_onnx.TrainingMode.PRESERVE, do nothing

    try:
        yield
    finally:
        if hasattr(model, "training") and not mode == _C_onnx.TrainingMode.PRESERVE:
            model.train(originally_training)
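# --- Hedged usage sketch (illustrative only, not part of the original module) --
# select_model_mode_for_export() is normally driven by exporter_context(), but it
# can be exercised directly. The function below is a hypothetical demonstration
# of the temporary mode switch and the restore-on-exit behavior.


def _demo_select_model_mode_for_export() -> None:
    model = torch.nn.Dropout(p=0.5)
    model.train()  # model.training is now True
    with select_model_mode_for_export(model, _C_onnx.TrainingMode.EVAL):
        assert model.training is False  # temporarily switched to eval for export
    assert model.training is True  # original training mode restored on exit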
@contextlib.contextmanager
@_beartype.beartype
def disable_apex_o2_state_dict_hook(
    model: Union[torch.nn.Module, torch.jit.ScriptFunction]
):
    # Apex O2 hook state_dict to return fp16 weights as fp32.
    # Exporter cannot identify them as same tensors.
    # Since this hook is only used by optimizer, it is safe to
    # remove this hook while exporting.
    if not isinstance(model, torch.jit.ScriptFunction):
        model_hooks = {}  # type: ignore[var-annotated]
        for module in model.modules():
            for key, hook in module._state_dict_hooks.items():
                if type(hook).__name__ == "O2StateDictHook":
                    if module not in model_hooks:
                        model_hooks[module] = {}
                    model_hooks[module][key] = hook
            if module in model_hooks:
                for key in model_hooks[module]:
                    module._state_dict_hooks.pop(key)
        try:
            yield
        finally:
            # Add the hooks back
            for module, m_map in model_hooks.items():
                for key, hook in m_map.items():
                    module._state_dict_hooks[key] = hook
    else:
        try:
            yield
        finally:
            pass


@contextlib.contextmanager
@_beartype.beartype
def setup_onnx_logging(verbose: bool):
    is_originally_enabled = torch.onnx.is_onnx_log_enabled()
    if is_originally_enabled or verbose:
        torch.onnx.enable_log()
    try:
        yield
    finally:
        if not is_originally_enabled:
            torch.onnx.disable_log()


@contextlib.contextmanager
@_beartype.beartype
def exporter_context(model, mode: _C_onnx.TrainingMode, verbose: bool):
    with select_model_mode_for_export(
        model, mode
    ) as mode_ctx, disable_apex_o2_state_dict_hook(
        model
    ) as apex_ctx, setup_onnx_logging(
        verbose
    ) as log_ctx, diagnostics.create_export_diagnostic_context() as diagnostic_ctx:
        yield (mode_ctx, apex_ctx, log_ctx, diagnostic_ctx)
[docs]@_beartype.beartypedefexport(model:Union[torch.nn.Module,torch.jit.ScriptModule,torch.jit.ScriptFunction],args:Union[Tuple[Any,...],torch.Tensor],f:Union[str,io.BytesIO],export_params:bool=True,verbose:bool=False,training:_C_onnx.TrainingMode=_C_onnx.TrainingMode.EVAL,input_names:Optional[Sequence[str]]=None,output_names:Optional[Sequence[str]]=None,operator_export_type:_C_onnx.OperatorExportTypes=_C_onnx.OperatorExportTypes.ONNX,opset_version:Optional[int]=None,do_constant_folding:bool=True,dynamic_axes:Optional[Union[Mapping[str,Mapping[int,str]],Mapping[str,Sequence[int]]]]=None,keep_initializers_as_inputs:Optional[bool]=None,custom_opsets:Optional[Mapping[str,int]]=None,export_modules_as_functions:Union[bool,Collection[Type[torch.nn.Module]]]=False,)->None:r"""Exports a model into ONNX format. If ``model`` is not a :class:`torch.jit.ScriptModule` nor a :class:`torch.jit.ScriptFunction`, this runs ``model`` once in order to convert it to a TorchScript graph to be exported (the equivalent of :func:`torch.jit.trace`). Thus this has the same limited support for dynamic control flow as :func:`torch.jit.trace`. Args: model (:class:`torch.nn.Module`, :class:`torch.jit.ScriptModule` or :class:`torch.jit.ScriptFunction`): the model to be exported. args (tuple or torch.Tensor): args can be structured either as: 1. ONLY A TUPLE OF ARGUMENTS:: args = (x, y, z) The tuple should contain model inputs such that ``model(*args)`` is a valid invocation of the model. Any non-Tensor arguments will be hard-coded into the exported model; any Tensor arguments will become inputs of the exported model, in the order they occur in the tuple. 2. A TENSOR:: args = torch.Tensor([1]) This is equivalent to a 1-ary tuple of that Tensor. 3. A TUPLE OF ARGUMENTS ENDING WITH A DICTIONARY OF NAMED ARGUMENTS:: args = ( x, { "y": input_y, "z": input_z } ) All but the last element of the tuple will be passed as non-keyword arguments, and named arguments will be set from the last element. If a named argument is not present in the dictionary, it is assigned the default value, or None if a default value is not provided. .. note:: If a dictionary is the last element of the args tuple, it will be interpreted as containing named arguments. In order to pass a dict as the last non-keyword arg, provide an empty dict as the last element of the args tuple. For example, instead of:: torch.onnx.export( model, ( x, # WRONG: will be interpreted as named arguments {y: z} ), "test.onnx.pb" ) Write:: torch.onnx.export( model, ( x, {y: z}, {} ), "test.onnx.pb" ) f: a file-like object (such that ``f.fileno()`` returns a file descriptor) or a string containing a file name. A binary protocol buffer will be written to this file. export_params (bool, default True): if True, all parameters will be exported. Set this to False if you want to export an untrained model. In this case, the exported model will first take all of its parameters as arguments, with the ordering as specified by ``model.state_dict().values()`` verbose (bool, default False): if True, prints a description of the model being exported to stdout. In addition, the final ONNX graph will include the field ``doc_string``` from the exported model which mentions the source code locations for ``model``. If True, ONNX exporter logging will be turned on. training (enum, default TrainingMode.EVAL): * ``TrainingMode.EVAL``: export the model in inference mode. 
* ``TrainingMode.PRESERVE``: export the model in inference mode if model.training is False and in training mode if model.training is True. * ``TrainingMode.TRAINING``: export the model in training mode. Disables optimizations which might interfere with training. input_names (list of str, default empty list): names to assign to the input nodes of the graph, in order. output_names (list of str, default empty list): names to assign to the output nodes of the graph, in order. operator_export_type (enum, default OperatorExportTypes.ONNX): * ``OperatorExportTypes.ONNX``: Export all ops as regular ONNX ops (in the default opset domain). * ``OperatorExportTypes.ONNX_FALLTHROUGH``: Try to convert all ops to standard ONNX ops in the default opset domain. If unable to do so (e.g. because support has not been added to convert a particular torch op to ONNX), fall back to exporting the op into a custom opset domain without conversion. Applies to `custom ops <https://pytorch.org/tutorials/advanced/torch_script_custom_ops.html>`_ as well as ATen ops. For the exported model to be usable, the runtime must support these non-standard ops. * ``OperatorExportTypes.ONNX_ATEN``: All ATen ops (in the TorchScript namespace "aten") are exported as ATen ops (in opset domain "org.pytorch.aten"). `ATen <https://pytorch.org/cppdocs/#aten>`_ is PyTorch's built-in tensor library, so this instructs the runtime to use PyTorch's implementation of these ops. .. warning:: Models exported this way are probably runnable only by Caffe2. This may be useful if the numeric differences in implementations of operators are causing large differences in behavior between PyTorch and Caffe2 (which is more common on untrained models). * ``OperatorExportTypes.ONNX_ATEN_FALLBACK``: Try to export each ATen op (in the TorchScript namespace "aten") as a regular ONNX op. If we are unable to do so (e.g. because support has not been added to convert a particular torch op to ONNX), fall back to exporting an ATen op. See documentation on OperatorExportTypes.ONNX_ATEN for context. For example:: graph(%0 : Float): %3 : int = prim::Constant[value=0]() # conversion unsupported %4 : Float = aten::triu(%0, %3) # conversion supported %5 : Float = aten::mul(%4, %0) return (%5) Assuming ``aten::triu`` is not supported in ONNX, this will be exported as:: graph(%0 : Float): %1 : Long() = onnx::Constant[value={0}]() # not converted %2 : Float = aten::ATen[operator="triu"](%0, %1) # converted %3 : Float = onnx::Mul(%2, %0) return (%3) If PyTorch was built with Caffe2 (i.e. with ``BUILD_CAFFE2=1``), then Caffe2-specific behavior will be enabled, including special support for ops are produced by the modules described in `Quantization <https://pytorch.org/docs/stable/quantization.html>`_. .. warning:: Models exported this way are probably runnable only by Caffe2. opset_version (int, default 14): The version of the `default (ai.onnx) opset <https://github.com/onnx/onnx/blob/master/docs/Operators.md>`_ to target. Must be >= 7 and <= 16. do_constant_folding (bool, default True): Apply the constant-folding optimization. Constant-folding will replace some of the ops that have all constant inputs with pre-computed constant nodes. dynamic_axes (dict[string, dict[int, string]] or dict[string, list(int)], default empty dict): By default the exported model will have the shapes of all input and output tensors set to exactly match those given in ``args``. To specify axes of tensors as dynamic (i.e. 
known only at run-time), set ``dynamic_axes`` to a dict with schema: * KEY (str): an input or output name. Each name must also be provided in ``input_names`` or ``output_names``. * VALUE (dict or list): If a dict, keys are axis indices and values are axis names. If a list, each element is an axis index. For example:: class SumModule(torch.nn.Module): def forward(self, x): return torch.sum(x, dim=1) torch.onnx.export( SumModule(), (torch.ones(2, 2),), "onnx.pb", input_names=["x"], output_names=["sum"] ) Produces:: input { name: "x" ... shape { dim { dim_value: 2 # axis 0 } dim { dim_value: 2 # axis 1 ... output { name: "sum" ... shape { dim { dim_value: 2 # axis 0 ... While:: torch.onnx.export( SumModule(), (torch.ones(2, 2),), "onnx.pb", input_names=["x"], output_names=["sum"], dynamic_axes={ # dict value: manually named axes "x": {0: "my_custom_axis_name"}, # list value: automatic names "sum": [0], } ) Produces:: input { name: "x" ... shape { dim { dim_param: "my_custom_axis_name" # axis 0 } dim { dim_value: 2 # axis 1 ... output { name: "sum" ... shape { dim { dim_param: "sum_dynamic_axes_1" # axis 0 ... keep_initializers_as_inputs (bool, default None): If True, all the initializers (typically corresponding to parameters) in the exported graph will also be added as inputs to the graph. If False, then initializers are not added as inputs to the graph, and only the non-parameter inputs are added as inputs. This may allow for better optimizations (e.g. constant folding) by backends/runtimes. If ``opset_version < 9``, initializers MUST be part of graph inputs and this argument will be ignored and the behavior will be equivalent to setting this argument to True. If None, then the behavior is chosen automatically as follows: * If ``operator_export_type=OperatorExportTypes.ONNX``, the behavior is equivalent to setting this argument to False. * Else, the behavior is equivalent to setting this argument to True. custom_opsets (dict[str, int], default empty dict): A dict with schema: * KEY (str): opset domain name * VALUE (int): opset version If a custom opset is referenced by ``model`` but not mentioned in this dictionary, the opset version is set to 1. Only custom opset domain name and version should be indicated through this argument. export_modules_as_functions (bool or set of type of nn.Module, default False): Flag to enable exporting all ``nn.Module`` forward calls as local functions in ONNX. Or a set to indicate the particular types of modules to export as local functions in ONNX. This feature requires ``opset_version`` >= 15, otherwise the export will fail. This is because ``opset_version`` < 15 implies IR version < 8, which means no local function support. Module variables will be exported as function attributes. There are two categories of function attributes. 1. Annotated attributes: class variables that have type annotations via `PEP 526-style <https://www.python.org/dev/peps/pep-0526/#class-and-instance-variable-annotations>`_ will be exported as attributes. Annotated attributes are not used inside the subgraph of ONNX local function because they are not created by PyTorch JIT tracing, but they may be used by consumers to determine whether or not to replace the function with a particular fused kernel. 2. Inferred attributes: variables that are used by operators inside the module. Attribute names will have prefix "inferred::". This is to differentiate from predefined attributes retrieved from python module annotations. 
            Inferred attributes are used inside the subgraph of ONNX local function.

        * ``False`` (default): export ``nn.Module`` forward calls as fine grained nodes.
        * ``True``: export all ``nn.Module`` forward calls as local function nodes.
        * Set of type of nn.Module: export ``nn.Module`` forward calls as local function nodes,
          only if the type of the ``nn.Module`` is found in the set.

    Raises:
        :class:`torch.onnx.errors.CheckerError`: If the ONNX checker detects an invalid ONNX graph.
        :class:`torch.onnx.errors.UnsupportedOperatorError`: If the ONNX graph cannot be exported because it
            uses an operator that is not supported by the exporter.
        :class:`torch.onnx.errors.OnnxExporterError`: Other errors that can occur during export.
            All errors are subclasses of :class:`errors.OnnxExporterError`.
    """

    _export(
        model,
        args,
        f,
        export_params,
        verbose,
        training,
        input_names,
        output_names,
        operator_export_type=operator_export_type,
        opset_version=opset_version,
        do_constant_folding=do_constant_folding,
        dynamic_axes=dynamic_axes,
        keep_initializers_as_inputs=keep_initializers_as_inputs,
        custom_opsets=custom_opsets,
        export_modules_as_functions=export_modules_as_functions,
    )
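# --- Hedged usage sketch (illustrative only, not part of the original module) --
# A minimal end-to-end call of export(), mirroring the docstring above: a single
# positional tensor input, named inputs/outputs, and one dynamic batch axis.
# The model, tensor shapes, and output file name below are hypothetical.


def _demo_export_usage() -> None:
    model = torch.nn.Linear(4, 2)
    dummy_input = torch.randn(3, 4)
    export(
        model,
        (dummy_input,),
        "linear.onnx",
        input_names=["x"],
        output_names=["y"],
        # Axis 0 of both the input and the output may vary at run time.
        dynamic_axes={"x": {0: "batch"}, "y": {0: "batch"}},
        opset_version=14,
    )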
@_beartype.beartypedef_is_constant_tensor_list(node):ifnode.kind()!="prim::Constant":returnFalseoutput_type=node.output().type()ifoutput_type.isSubtypeOf(_C.ListType.ofTensors()):returnTrueifoutput_type.isSubtypeOf(_C.ListType(_C.OptionalType.ofTensor())):returnTrue# ONNX can't handle constants that are lists of tensors, which can# get generated in constant prop. So we split them back into prim::ListConstructs@_beartype.beartypedef_split_tensor_list_constants(g,block):fornodeinblock.nodes():forsubblockinnode.blocks():_split_tensor_list_constants(g,subblock)if_is_constant_tensor_list(node):inputs=[]forvalinnode.output().toIValue():input=g.insertConstant(val)input.node().moveBefore(node)input.node().copyMetadata(node)inputs.append(input)lc=(g.create("prim::ListConstruct",inputs).insertBefore(node).output().setType(_C.ListType.ofTensors()))lc.node().copyMetadata(node)node.output().replaceAllUsesWith(lc)@_beartype.beartypedef_optimize_graph(graph:_C.Graph,operator_export_type:_C_onnx.OperatorExportTypes,_disable_torch_constant_prop:bool=False,fixed_batch_size:bool=False,params_dict=None,dynamic_axes=None,input_names=None,module=None,):ifparams_dictisNone:params_dict={}# Inline everything_C._jit_pass_inline(graph)# Remove fork/wait nodes_C._jit_pass_inline_fork_wait(graph)_C._jit_pass_lint(graph)_C._jit_pass_onnx_autograd_function_process(graph)_C._jit_pass_lower_all_tuples(graph)# we now record some ops like ones/zeros# into a trace where we previously recorded constants.# use constant prop to maintain our current level of onnx support# without implementing symbolics for all of themif_disable_torch_constant_propisFalse:_C._jit_pass_constant_propagation(graph)_split_tensor_list_constants(graph,graph)# run dce to eliminate dead parts of the graph that might have been# left behind by things like symbolic_override_C._jit_pass_dce(graph)_C._jit_pass_lint(graph)# CSE should improve perf when Autocast is used with disabled cache# Autocast is disabled due to a limitation on tracer as described at https://github.com/pytorch/pytorch/issues/84092# Must run before _C._jit_pass_erase_number_types to prevent type substitutionif_C._jit_pass_cse(graph):_C._jit_pass_onnx_lint(graph)_C._jit_pass_canonicalize_graph_fuser_ops(graph)_C._jit_pass_lint(graph)_C._jit_pass_peephole(graph,True)_C._jit_pass_fuse_addmm(graph)_C._jit_pass_lint(graph)_C._jit_pass_peephole(graph,True)_C._jit_pass_lower_all_tuples(graph)# in _jit_pass_onnx, symbolic functions are called for each node for conversion.# However, there are nodes that cannot be converted without additional context.# For example, the number of outputs from split (and whether it is static or dynamic) is unknown# until the point where it is unpacked by listUnpack node.# This pass does a preprocess, and prepares the nodes such that enough context can be received# by the symbolic function._C._jit_pass_onnx_remove_inplace_ops_for_onnx(graph,module)_C._jit_pass_onnx_preprocess(graph)# onnx does not support tuples, so try to remove them_C._jit_pass_lint(graph)# onnx only supports tensors, but 1 / 2 = 0.5 and tensor(1) / tensor(2) = 0_C._jit_pass_prepare_division_for_onnx(graph)_C._jit_pass_onnx_remove_print(graph)_C._jit_pass_onnx_preprocess_caffe2(graph)symbolic_helper._quantized_ops.clear()# Unpack quantized weights for conv and linear ops and insert into graph._C._jit_pass_onnx_unpack_quantized_weights(graph,params_dict,symbolic_helper.is_caffe2_aten_fallback())ifsymbolic_helper.is_caffe2_aten_fallback():# Insert permutes before and after each conv op to ensure 
correct order._C._jit_pass_onnx_quantization_insert_permutes(graph,params_dict)# Find consecutive permutes that are no-ops and remove them._C._jit_pass_custom_pattern_based_rewrite_graph(textwrap.dedent("""\ graph(%Pi): %Pq = quantized::nhwc2nchw(%Pi) %Pr = quantized::nchw2nhwc(%Pq) return (%Pr)"""),textwrap.dedent("""\ graph(%Ri): return (%Ri)"""),graph,)# onnx only supports tensors, so we turn all out number types into tensors_C._jit_pass_erase_number_types(graph)ifGLOBALS.onnx_shape_inference:input_names=[]ifinput_namesisNoneelseinput_namesdynamic_axes={}ifdynamic_axesisNoneelsedynamic_axes_C._jit_pass_onnx_set_dynamic_input_shape(graph,dynamic_axes,input_names)_C._jit_pass_onnx_lint(graph)graph=_C._jit_pass_onnx(graph,operator_export_type)_C._jit_pass_onnx_lint(graph)_C._jit_pass_lint(graph)_C._jit_pass_onnx_scalar_type_analysis(graph,True,GLOBALS.export_onnx_opset_version)_C._jit_pass_lint(graph)_C._jit_pass_onnx_peephole(graph,GLOBALS.export_onnx_opset_version,fixed_batch_size)_C._jit_pass_lint(graph)# graph is not a valid jit graph anymore because types have been replaced# (e.g. int with Tensor), so it now contains operators that don't actually# exist. We can't run normal dead code elimination because it'd fail trying# to look up if an operator has side effects, but we can run a dead code# elimination variant that doesn't need to look up if an op has side effects._C._jit_pass_dce_allow_deleting_nodes_with_side_effects(graph)_C._jit_pass_lint(graph)graph=_C._jit_pass_canonicalize(graph)_C._jit_pass_lint(graph)ifGLOBALS.onnx_shape_inference:_C._jit_pass_onnx_graph_shape_type_inference(graph,params_dict,GLOBALS.export_onnx_opset_version)returngraph@_beartype.beartypedefwarn_on_static_input_change(input_states):"""Warns that changes to input dictionaries and strings won't take effect in the traced ONNX graph. We accept dictionaries and strings as ONNX inputs, but they should be only for configuration use. we detect here if these inputs are modified, and if so we warn the user that the changes won't take effect in the traced ONNX graph. """forinput,traced_inputinzip(input_states[0],input_states[1]):ifisinstance(input,dict):iflist(input.keys())!=list(traced_input.keys()):warning=("We detected that you are modifying a dictionary that is an input to your ""model. ""Note that dictionaries are allowed as inputs in ONNX but they should be ""handled with care. ""Usages of dictionaries is not recommended, and should not be used except ""for configuration use. ""Also note that the order and values of the keys must remain the same. ")warnings.warn(warning)elifisinstance(input,str):ifinput!=traced_input:warning=("The model seems to have string inputs/outputs. ""Note that strings will not appear as inputs/outputs of the ONNX graph. ")warnings.warn(warning)@_beartype.beartypedef_resolve_args_by_export_type(arg_name,arg_value,operator_export_type):"""Resolves the arguments that are ignored when export_type != operator_export_type.ONNX."""if(operator_export_typeisnotoperator_export_type.ONNXand_C_onnx._CAFFE2_ATEN_FALLBACK):ifarg_valueisTrue:warnings.warn(f"'{arg_name}' can be set to True only when 'operator_export_type' is ""`ONNX`. Since 'operator_export_type' is not set to 'ONNX', "f"'{arg_name}' argument will be ignored.")arg_value=Falsereturnarg_value@_beartype.beartypedef_decide_keep_init_as_input(keep_initializers_as_inputs:Optional[bool],operator_export_type:_C_onnx.OperatorExportTypes,opset_version:int,):"""Decides whether the initializers in the graph should be listed as ONNX graph inputs. 
This method encapsulates the logic to decide whether the initializers in the graph should be listed as ONNX graph inputs (i.e., whether to choose ONNX IR v3 or v4). If keep_initializers_as_inputs is not specified (None), then we decide whether to keep initializers as graph inputs (val_keep_init_as_ip) based on export type. If export type is ONNX, then do not keep initializers as input (val_keep_init_as_ip=False). For all other export types keep initializers as input (val_keep_init_as_ip=True). If keep_initializers_as_inputs is specified, then respect it. Unless opset version <= 8, in which case it must be ignored because for opset version <= 8, all initializers MUST be part of graph input (only ONNX IR v3 is allowed), i.e. val_keep_init_as_ip=True. Special handling is needed for opset version 8 or lower, because irrespective of user input for keep_initializers_as_inputs, the graph must follow ONNX IR v3 semantics, i.e. all initializers must be listed as ONNX graph input. """ifopset_version<9:ifkeep_initializers_as_inputsisFalse:warnings.warn("Setting 'keep_initializers_as_inputs=False' for opset version""8 or lower would lead to an invalid ONNX graph. Therefore, ""'keep_initializers_as_inputs=False' is ignored during export.""Exported model will have initializers as graph inputs (compliant "" to ONNX IR v3).")returnTrue# i.e. True == initializers are part of graph input (ONNX IR v3)val_keep_init_as_ip=(Trueifkeep_initializers_as_inputsisNoneelsekeep_initializers_as_inputs)if(keep_initializers_as_inputsisNoneandoperator_export_typeis_C_onnx.OperatorExportTypes.ONNX):val_keep_init_as_ip=Falsereturnval_keep_init_as_ip@_beartype.beartypedef_decide_add_node_names(add_node_names,operator_export_type):return_resolve_args_by_export_type("add_node_names",add_node_names,operator_export_type)@_beartype.beartypedef_decide_constant_folding(do_constant_folding,operator_export_type,training):do_constant_folding=_resolve_args_by_export_type("do_constant_folding",do_constant_folding,operator_export_type)ifdo_constant_foldingand(trainingisnotNoneandtrainingisnot_C_onnx.TrainingMode.EVAL):warnings.warn("It is recommended that constant folding be turned off ('do_constant_folding=False') ""when exporting the model in training-amenable mode, i.e. with 'training=TrainingMode.TRAIN' ""or 'training=TrainingMode.PRESERVE' (when model is in training mode). Otherwise, some ""learnable model parameters may not translate correctly in the exported ONNX model ""because constant folding mutates model parameters. 
Please consider ""turning off constant folding or setting the training=TrainingMode.EVAL.")returndo_constant_folding@_beartype.beartypedef_signature(model)->inspect.Signature:should_be_callable=getattr(model,"forward",model)ifcallable(should_be_callable):returninspect.signature(should_be_callable)raiseValueError("model has no forward method and is not callable")@_beartype.beartypedef_decide_input_format(model,args):try:sig=_signature(model)exceptValueErrorase:warnings.warn(f"{e}, skipping _decide_input_format")returnargstry:ordered_list_keys=list(sig.parameters.keys())ifordered_list_keys[0]=="self":ordered_list_keys=ordered_list_keys[1:]args_dict:Dict={}ifisinstance(args,list):args_list=argselifisinstance(args,tuple):args_list=list(args)else:args_list=[args]ifisinstance(args_list[-1],dict):args_dict=args_list[-1]args_list=args_list[:-1]n_nonkeyword=len(args_list)foroptional_arginordered_list_keys[n_nonkeyword:]:ifoptional_arginargs_dict:args_list.append(args_dict[optional_arg])# Check if this arg has a default valueelse:param=sig.parameters[optional_arg]ifparam.default!=param.empty:args_list.append(param.default)args=args_listifisinstance(args,list)elsetuple(args_list)# Cases of models with no input argsexceptIndexError:warnings.warn("No input args, skipping _decide_input_format")exceptExceptionase:warnings.warn(f"Skipping _decide_input_format\n{e.args[0]}")returnargs@_beartype.beartypedef_trace(func,args,operator_export_type,return_outs=False):# Special case for common case of passing a single Tensorifisinstance(args,torch.Tensor):args=(args,)trace_graph,torch_out,inputs_states=torch.jit._get_trace_graph(func,args,strict=False,_force_outplace=False,_return_inputs_states=True,)warn_on_static_input_change(inputs_states)trace_graph=_optimize_graph(trace_graph,operator_export_type,params_dict={})ifreturn_outs:returntrace_graph,torch_outreturntrace_graph@_beartype.beartypedef_trace_and_get_graph_from_model(model,args):# A basic sanity check: make sure the state_dict keys are the same# before and after running the model. Fail fast!orig_state_dict_keys=torch.jit._unique_state_dict(model).keys()# Disable Autocast cache because it replaces kernel's weight and bias# by (undesired) constants.# No perf impact for when there are reused weights since https://github.com/pytorch/pytorch/pull/85665# TODO: https://github.com/pytorch/pytorch/issues/84092prev_autocast_cache_enabled=torch.is_autocast_cache_enabled()torch.set_autocast_cache_enabled(False)trace_graph,torch_out,inputs_states=torch.jit._get_trace_graph(model,args,strict=False,_force_outplace=False,_return_inputs_states=True,)torch.set_autocast_cache_enabled(prev_autocast_cache_enabled)warn_on_static_input_change(inputs_states)iforig_state_dict_keys!=torch.jit._unique_state_dict(model).keys():raiseRuntimeError("state_dict changed after running the tracer; ""something weird is happening in your model!")returntrace_graph,torch_out@_beartype.beartypedef_get_param_count_list(method_graph,args_params):param_count_list=[]forinput_,arg_params_inzip(method_graph.inputs(),args_params):if"PackedParams"instr(input_.type()):in_vars,_=torch.jit._flatten(arg_params_)param_count_list.append(len(in_vars))else:param_count_list.append(arg_params_isnotNone)returnparam_count_list@_beartype.beartypedef_check_flatten_did_not_remove(original,jit_flattened):"""torch.jit._flatten removes None. 
Check if it did so in this case."""@_beartype.beartypedefflatten(x):ifisinstance(x,(list,tuple)):forinnerinx:yield fromflatten(inner)elifisinstance(x,dict):forinnerinx.values():yield fromflatten(inner)else:yieldxflattened_with_none=list(flatten(original))num_none=len(flattened_with_none)-len(jit_flattened)assertnum_none>=0ifnum_none:raiseValueError(f"args contained {num_none} None's after flattening. ""When exporting a ScriptModule or ScriptFunction, no args may ""be None because that breaks type propagation.")def_create_jit_graph(model:Union[torch.nn.Module,torch.jit.ScriptFunction],args:Sequence[Any])->Tuple[_C.Graph,List[_C.IValue],Optional[Any],Optional[_C.ScriptModule]]:ifisinstance(model,(torch.jit.ScriptFunction,torch.jit.ScriptModule)):flattened_args=tuple(torch.jit._flatten(tuple(args))[0])_check_flatten_did_not_remove(args,flattened_args)torch_out=Noneifisinstance(model,torch.jit.ScriptModule):try:graph=model.forward.graphexceptAttributeErrorase:raiseRuntimeError("'forward' method must be a script method")frome_C._jit_pass_onnx_function_substitution(graph)freezed_module=_C._freeze_module(cast(_C.ScriptModule,model._c),preserveParameters=True)module,params=_C._jit_onnx_list_model_parameters(freezed_module)method_graph=module._get_method("forward").graphargs_params=tuple(args)+tuple(params)param_count_list=_get_param_count_list(method_graph,args_params)in_vars,_=torch.jit._flatten(args_params)graph=_C._propagate_and_assign_input_shapes(method_graph,tuple(in_vars),param_count_list,False,False)returngraph,params,torch_out,module# torch.jit.ScriptFunctionparams=[]graph=model.graph_C._jit_pass_onnx_function_substitution(graph)param_count_list=_get_param_count_list(graph,args)graph=_C._propagate_and_assign_input_shapes(graph,flattened_args,param_count_list,False,False)returngraph,params,torch_out,Nonegraph,torch_out=_trace_and_get_graph_from_model(model,args)_C._jit_pass_onnx_lint(graph)state_dict=torch.jit._unique_state_dict(model)params=list(state_dict.values())graph_inputs=list(graph.inputs())user_input_num=len(graph_inputs)-len(state_dict)param_names=list(state_dict.keys())fori,inpinenumerate(graph_inputs):ifi>=user_input_num:inp.setDebugName(param_names[i-user_input_num])_C._jit_pass_onnx_function_substitution(graph)returngraph,params,torch_out,None@_beartype.beartypedef_get_named_param_dict(graph,params):input_and_param_names=[val.debugName()forvalingraph.inputs()]param_names=input_and_param_names[len(input_and_param_names)-len(params):]_params_dict=dict(zip(param_names,params))return_params_dict@_beartype.beartypedef_get_example_outputs(model,args):input_args=copy.deepcopy(args)input_kwargs={}ifinput_argsandisinstance(input_args[-1],dict):input_kwargs=input_args[-1]input_args=input_args[:-1]example_outputs=model(*input_args,**input_kwargs)ifisinstance(example_outputs,list):example_outputs=[example_outputs]elifnotisinstance(example_outputs,tuple):example_outputs=(example_outputs,)returnexample_outputs_qtype_vtype_map={torch.quint8:torch.uint8,torch.qint8:torch.int8,torch.qint32:torch.int32,torch.quint4x2:torch.int8,}@_beartype.beartypedefunpack_quantized_tensor(value,cast_onnx_accepted=True):ifisinstance(value,torch.Tensor)andvalue.dtypein_qtype_vtype_map:q_value_dequantize=value.dequantize()q_scale=(torch.tensor(value.q_scale(),dtype=torch.double)ifcast_onnx_acceptedelsetorch.tensor(value.q_scale(),dtype=torch.float32))q_zero_point=(torch.tensor(value.q_zero_point(),dtype=torch.int64)ifcast_onnx_acceptedelsetorch.tensor(value.q_zero_point(),dtype=_qtype_vtype_map[value.dtype]))q_v
alue=q_value_dequantize/q_scale+q_zero_pointq_value=q_value.to(dtype=_qtype_vtype_map[value.dtype])returnq_value,q_scale,q_zero_pointelse:return(value,)@_beartype.beartypedef_pre_trace_quant_model(model,args):r"""Returns `torch.jit.trace(model, args)` if model is quantized. Otherwise do nothing and return original model. This is due to https://github.com/pytorch/pytorch/issues/75761. """ifany(hasattr(m,"_packed_params")formingetattr(model,"modules",lambda:[])())orany(getattr(arg,"is_quantized",False)forarginargs):returntorch.jit.trace(model,args)returnmodel@_beartype.beartypedef_model_to_graph(model,args,verbose=False,input_names=None,output_names=None,operator_export_type=_C_onnx.OperatorExportTypes.ONNX,do_constant_folding=True,_disable_torch_constant_prop=False,fixed_batch_size=False,training=_C_onnx.TrainingMode.EVAL,dynamic_axes=None,)->Tuple[_C.Graph,Dict[str,torch.Tensor],Optional[Union[torch.Tensor,Tuple[torch.Tensor,...],List[torch.Tensor],Dict[str,torch.Tensor],Any,# Can be nested tuples etc.]],]:"""Converts model into an ONNX graph. Returns: graph: A TorchScript IR Graph with ONNX nodes. params_dict: Dict from input param name to param value. torch_out: The output tensors resulting from the trace of ``model``. If ``model`` is a :class:`torch.jit.ScriptModule` or :class:`torch.jit.ScriptFunction`, this will be None, since we are not doing any tracing. """# TODO: can we simplify this to always return a tuple of Tensor or None?# Special case for common case of passing a single Tensorifisinstance(args,(torch.Tensor,int,float,bool)):args=(args,)model=_pre_trace_quant_model(model,args)graph,params,torch_out,module=_create_jit_graph(model,args)params_dict=_get_named_param_dict(graph,params)try:graph=_optimize_graph(graph,operator_export_type,_disable_torch_constant_prop=_disable_torch_constant_prop,fixed_batch_size=fixed_batch_size,params_dict=params_dict,dynamic_axes=dynamic_axes,input_names=input_names,module=module,)exceptExceptionase:torch.onnx.log("Torch IR graph at exception: ",graph)raiseis_script=isinstance(model,(torch.jit.ScriptFunction,torch.jit.ScriptModule))ifis_script:example_outputs=_get_example_outputs(model,args)example_outputs_final=()forexample_outputinexample_outputs:example_outputs_final+=unpack_quantized_tensor(example_output)out_vars,desc=torch.jit._flatten(example_outputs_final)_C._jit_pass_onnx_assign_output_shape(graph,out_vars,desc,GLOBALS.onnx_shape_inference,is_script)# NB: ONNX requires complete information about output types, which might be# erased by some optimizations, so we need to set it explicitly again.else:ifnotisinstance(torch_out,(list,tuple)):output_wrapped=[torch_out]else:output_wrapped=torch_out# type: ignore[assignment]output_tensors,out_desc=torch.jit._flatten(tuple(output_wrapped))# assign_output_shape pass is not compatible with quantized outputs.# Quantized outputs are flattened to 3 values in ONNX, while packed as# single value in 
PyTorch.ifnotany(getattr(out,"is_quantized",False)foroutinoutput_tensors):_C._jit_pass_onnx_assign_output_shape(graph,output_tensors,out_desc,GLOBALS.onnx_shape_inference,is_script,)_set_input_and_output_names(graph,input_names,output_names)params_dict=_get_named_param_dict(graph,params)iftrainingisNoneortraining==_C_onnx.TrainingMode.EVAL:params_dict=_C._jit_pass_onnx_eval_peephole(graph,params_dict)if(do_constant_foldingandGLOBALS.export_onnx_opset_version>=_constants.ONNX_CONSTANT_FOLDING_MIN_OPSET):params_dict=_C._jit_pass_onnx_constant_fold(graph,params_dict,GLOBALS.export_onnx_opset_version)_C._jit_pass_dce_allow_deleting_nodes_with_side_effects(graph)ifGLOBALS.onnx_shape_inference:_C._jit_pass_onnx_graph_shape_type_inference(graph,params_dict,GLOBALS.export_onnx_opset_version)params_dict=_C._jit_pass_onnx_eliminate_unused_items(graph,params_dict)# For ONNX opset < 9, constants only have three data types: float16, float, double.# In this pass transform constants of other data types to float/double + cast operator.ifGLOBALS.export_onnx_opset_version<9:_C._jit_pass_onnx_cast_all_constant_to_floating(graph)params_dict=_C._jit_pass_filter_non_tensor_arguments(params_dict)_C._jit_decay_packed_param_input_types(graph)# If output names lack a proper name and are identified only by their unique# give them a legible name for debugging purposes_apply_friendly_debug_names(graph,params_dict)returngraph,params_dict,torch_out
@_beartype.beartype
def export_to_pretty_string(
    model,
    args,
    export_params=True,
    verbose=False,
    training=_C_onnx.TrainingMode.EVAL,
    input_names=None,
    output_names=None,
    operator_export_type=_C_onnx.OperatorExportTypes.ONNX,
    export_type=None,
    google_printer=False,
    opset_version=None,
    keep_initializers_as_inputs=None,
    custom_opsets=None,
    add_node_names=True,
    do_constant_folding=True,
    dynamic_axes=None,
):
    r"""
    Similar to :func:`export`, but returns a text representation of the ONNX model.

    Only differences in args listed below. All other args are the same
    as :func:`export`.

    Args:
        add_node_names (bool, default True): Whether or not to set
            NodeProto.name. This makes no difference unless
            ``google_printer=True``.
        google_printer (bool, default False): If False, will return a custom,
            compact representation of the model. If True will return the
            protobuf's `Message::DebugString()`, which is more verbose.

    Returns:
        A UTF-8 str containing a human-readable representation of the ONNX model.
    """
    if opset_version is None:
        opset_version = _constants.ONNX_DEFAULT_OPSET
    if custom_opsets is None:
        custom_opsets = {}
    GLOBALS.export_onnx_opset_version = opset_version
    GLOBALS.operator_export_type = operator_export_type

    with exporter_context(model, training, verbose):
        val_keep_init_as_ip = _decide_keep_init_as_input(
            keep_initializers_as_inputs, operator_export_type, opset_version
        )
        val_add_node_names = _decide_add_node_names(
            add_node_names, operator_export_type
        )
        val_do_constant_folding = _decide_constant_folding(
            do_constant_folding, operator_export_type, training
        )
        args = _decide_input_format(model, args)
        graph, params_dict, torch_out = _model_to_graph(
            model,
            args,
            verbose,
            input_names,
            output_names,
            operator_export_type,
            val_do_constant_folding,
            training=training,
            dynamic_axes=dynamic_axes,
        )

        return graph._pretty_print_onnx(  # type: ignore[attr-defined]
            params_dict,
            opset_version,
            False,
            operator_export_type,
            google_printer,
            val_keep_init_as_ip,
            custom_opsets,
            val_add_node_names,
        )
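# --- Hedged usage sketch (illustrative only, not part of the original module) --
# export_to_pretty_string() returns text instead of writing a file, which can be
# handy for quick inspection in tests or debugging sessions. The model and input
# below are hypothetical.


def _demo_export_to_pretty_string() -> None:
    model = torch.nn.Linear(4, 2)
    dummy_input = torch.randn(1, 4)
    text = export_to_pretty_string(model, (dummy_input,), verbose=False)
    print(text)  # human-readable representation of the ONNX model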
@_beartype.beartypedefunconvertible_ops(model,args,training:_C_onnx.TrainingMode=_C_onnx.TrainingMode.EVAL,opset_version:Optional[int]=None,)->Tuple[_C.Graph,List[str]]:"""Returns an approximated list of all ops that are yet supported by :mod:`torch.onnx`. The list is approximated because some ops may be removed during the conversion process and don't need to be converted. Some other ops may have partial support that will fail conversion with particular inputs. Please open a Github Issue for op support requests. Args: model: Same as the `model` parameter in :func:`torch.onnx.export`. args: Same as the `args` parameter in :func:`torch.onnx.export`. training: Same as the `training` parameter in :func:`torch.onnx.export`. opset_version: Same as the `opset_version` parameter in :func:`torch.onnx.export`. Returns: The JIT graph and a list of unconvertible ops in the format of "domain::op". """opset_version=opset_versionor_constants.ONNX_DEFAULT_OPSETGLOBALS.export_onnx_opset_version=opset_versiontry:withexporter_context(model,training,verbose=False):# Create a mostly clean JIT graph that contains the plain aten and# other ops we can check with the symbolic registry.# NOTE: We don't want to actually convert any ops to ONNX or run any# symbolic functions because there is a higher chance that a pass# fails or an unconvertible op messes up the graph during ONNX conversion.# This way we can always generate a list just by looking at the names# of the ops in the graph.args=_decide_input_format(model,args)model=_pre_trace_quant_model(model,args)graph,_,_,module=_create_jit_graph(model,args)_C._jit_pass_inline(graph)_C._jit_pass_onnx_remove_inplace_ops_for_onnx(graph,module)_C._jit_pass_erase_number_types(graph)_C._jit_pass_dce_allow_deleting_nodes_with_side_effects(graph)exceptExceptionase:raiseerrors.OnnxExporterError("Failed to discover unconvertible ops because of errors during the JIT graph ""generation process.")fromeunsupported_ops=[]fornodeingraph.nodes():domain_op=node.kind()ifdomain_op.startswith("onnx::")ordomain_op.startswith("prim::"):# We consider onnx and prim ops as supported ops, even though some "prim"# ops are not implemented as symbolic functions, because they may be# eliminated in the conversion passes. 
Users may still see errors caused# by prim ops even though they don't show up in the list.continueifnotregistration.registry.is_registered_op(domain_op,opset_version):# We consider all registered ops supported, even though some of them are# only partially supported, because there is not yet a good way to check# if an op is fully supported.# TODO(justinchuby): Create a way to check if an op is fully supported.unsupported_ops.append(domain_op)returngraph,unsupported_ops@_beartype.beartypedef_setup_trace_module_map(model:Union[torch.nn.Module,torch.jit.ScriptModule],export_modules_as_functions:Union[bool,Collection[Type[torch.nn.Module]]],)->Set[str]:def__register_attribute_hook():attr_name="_onnx_attrs"def_track_module_attributes_forward_pre_hook(module,input):setattr(module,attr_name,_get_module_attributes(module))def_track_module_attributes_forward_hook(module,input,output):tracing_state=_C._get_tracing_state()ifnottracing_state:returngraph=tracing_state.graph()onnx_attrs={}ifhasattr(module,attr_name):onnx_attrs=getattr(module,attr_name)delattr(module,attr_name)_C._jit_pass_onnx_track_scope_attributes(graph,onnx_attrs)forminmodel.modules():m.register_forward_hook(_track_module_attributes_forward_hook)m.register_forward_pre_hook(_track_module_attributes_forward_pre_hook)def_unqualified_variable_name(qualified_name:str)->str:""" Parse qualified variable name and return the unqualified version. Pure numeric atoms are considered inadequate, so this function will look past them, and start from the first non-numeric atom. Example: >>> _unqualified_variable_name('__main__.Foo.bar') 'bar' >>> _unqualified_variable_name('__main__.Foo.bar.0') 'bar.0' """name_atoms=qualified_name.split(".")fori,atominreversed(list(enumerate(name_atoms))):ifnotatom.isnumeric():return".".join(name_atoms[i:])returnqualified_nametrace_module_map={_m:torch._C._jit_onnx_create_full_scope_name(torch.typename(type(_m)),_unqualified_variable_name(_n))for_n,_minmodel.named_modules()}torch.jit._trace._trace_module_map=trace_module_mapifisinstance(export_modules_as_functions,bool)andexport_modules_as_functions:module_typenames={torch.typename(type(module))formoduleintrace_module_map}elifisinstance(export_modules_as_functions,set)andexport_modules_as_functions:def_find_typename(v):ifisinstance(v,type):returntorch.typename(v)else:raiseRuntimeError("Only type of the `nn.Module` should be ""passed in the set for argument `export_modules_as_functions`. 
""Got `%s`."%(type(v).__name__))module_typenames={_find_typename(v)forvinexport_modules_as_functions}else:module_typenames=set()ifmodule_typenames:__register_attribute_hook()returnmodule_typenames@_beartype.beartypedef_reset_trace_module_map():torch.jit._trace._trace_module_map=None_C._jit_pass_onnx_clear_scope_records()@_beartype.beartypedef_get_module_attributes(module):annotations=typing.get_type_hints(type(module))base_m_annotations=typing.get_type_hints(torch.nn.Module)[annotations.pop(k,None)forkinbase_m_annotations]return{k:getattr(module,k)forkinannotations}@_beartype.beartypedef_export(model,args,f,export_params=True,verbose=False,training=_C_onnx.TrainingMode.EVAL,input_names=None,output_names=None,operator_export_type=_C_onnx.OperatorExportTypes.ONNX,export_type=None,opset_version=None,do_constant_folding=True,dynamic_axes=None,keep_initializers_as_inputs=None,fixed_batch_size=False,custom_opsets=None,add_node_names=True,onnx_shape_inference=True,export_modules_as_functions=False,):assertGLOBALS.in_onnx_exportisFalseifexport_typeisNone:export_type=_exporter_states.ExportTypes.PROTOBUF_FILEifisinstance(model,torch.nn.DataParallel):raiseValueError("torch.nn.DataParallel is not supported by ONNX ""exporter, please use 'attribute' module to ""unwrap model from torch.nn.DataParallel. Try ""torch.onnx.export(model.module, ...)")GLOBALS.onnx_shape_inference=onnx_shape_inferenceifopset_versionisNone:opset_version=_constants.ONNX_DEFAULT_OPSETifexport_modules_as_functionsandopset_version<15:raiseValueError("`export_modules_as_functions` is not supported for `opset_version` < 15.""This is because `opset_version` < 15 implies IR version < 8, which means ""no local function support. ")ifnotoperator_export_type:if_C_onnx._CAFFE2_ATEN_FALLBACK:operator_export_type=_C_onnx.OperatorExportTypes.ONNX_ATEN_FALLBACKelse:operator_export_type=_C_onnx.OperatorExportTypes.ONNX# By default, training=TrainingMode.EVAL,# which is good because running a model in training mode could result in# internal buffers getting updated, dropout getting applied, etc.# If you really know what you're doing, you can turn# training=TrainingMode.TRAINING or training=TrainingMode.PRESERVE,# (to preserve whatever the original training mode was.)GLOBALS.export_onnx_opset_version=opset_versionGLOBALS.operator_export_type=operator_export_typetry:GLOBALS.in_onnx_export=Truemodule_typenames_to_export_as_functions:Set[str]=set()ifisinstance(model,(torch.nn.Module,torch.jit.ScriptModule)):module_typenames_to_export_as_functions=_setup_trace_module_map(model,export_modules_as_functions)withexporter_context(model,training,verbose):val_keep_init_as_ip=_decide_keep_init_as_input(keep_initializers_as_inputs,operator_export_type,opset_version,)val_add_node_names=_decide_add_node_names(add_node_names,operator_export_type)val_do_constant_folding=_decide_constant_folding(do_constant_folding,operator_export_type,training)# Normally f can be a file-like object, but for large models, the external data format requires a# valid `model_file_location`. 
Code in export.cpp will enforce this.ifisinstance(f,str):model_file_location=felse:model_file_location=""args=_decide_input_format(model,args)ifdynamic_axesisNone:dynamic_axes={}_validate_dynamic_axes(dynamic_axes,model,input_names,output_names)graph,params_dict,torch_out=_model_to_graph(model,args,verbose,input_names,output_names,operator_export_type,val_do_constant_folding,fixed_batch_size=fixed_batch_size,training=training,dynamic_axes=dynamic_axes,)# TODO: Don't allocate a in-memory string for the protobufdefer_weight_export=(export_typeisnot_exporter_states.ExportTypes.PROTOBUF_FILE)ifcustom_opsetsisNone:custom_opsets={}_C._jit_pass_dce_allow_deleting_nodes_with_side_effects(graph)node_attr_to_name={}# type: ignore[var-annotated]ifmodule_typenames_to_export_as_functions:# NOTE: cannot call DCE after this pass. DCE will remove function definition nodes.node_attr_to_name=_C._jit_pass_onnx_function_extraction(graph,module_typenames_to_export_as_functions,list(params_dict.keys()),)params_dict=_C._jit_pass_onnx_deduplicate_initializers(# type: ignore[assignment]graph,params_dict,getattr(model,"training",False)# type: ignore[arg-type])_C._jit_pass_onnx_assign_scoped_names_for_node_and_value(graph)ifexport_params:(proto,export_map,val_use_external_data_format,node_names,)=graph._export_onnx(# type: ignore[attr-defined]params_dict,opset_version,dynamic_axes,defer_weight_export,operator_export_type,notverbose,val_keep_init_as_ip,custom_opsets,val_add_node_names,model_file_location,node_attr_to_name,)else:(proto,export_map,val_use_external_data_format,node_names,)=graph._export_onnx(# type: ignore[attr-defined]{},opset_version,dynamic_axes,False,operator_export_type,notverbose,val_keep_init_as_ip,custom_opsets,val_add_node_names,model_file_location,node_attr_to_name,)ifverbose:torch.onnx.log("Exported graph: ",graph)ifexport_type==_exporter_states.ExportTypes.PROTOBUF_FILE:assertlen(export_map)==0withtorch.serialization._open_file_like(f,"wb")asopened_file:opened_file.write(proto)elifexport_typein[_exporter_states.ExportTypes.ZIP_ARCHIVE,_exporter_states.ExportTypes.COMPRESSED_ZIP_ARCHIVE,]:compression=(zipfile.ZIP_DEFLATEDifexport_type==_exporter_states.ExportTypes.COMPRESSED_ZIP_ARCHIVEelsezipfile.ZIP_STORED)withzipfile.ZipFile(f,"w",compression=compression)asz:z.writestr(_constants.ONNX_ARCHIVE_MODEL_PROTO_NAME,proto)fork,vinexport_map.items():z.writestr(k,v)elifexport_type==_exporter_states.ExportTypes.DIRECTORY:ifos.path.exists(f):assertos.path.isdir(f)else:os.makedirs(f)model_proto_file=os.path.join(f,_constants.ONNX_ARCHIVE_MODEL_PROTO_NAME)withtorch.serialization._open_file_like(model_proto_file,"wb")asopened_file:opened_file.write(proto)fork,vinexport_map.items():weight_proto_file=os.path.join(f,k)withtorch.serialization._open_file_like(weight_proto_file,"wb")asopened_file:opened_file.write(v)else:raiseRuntimeError("Unknown export type")# The ONNX checker only works for ONNX graph. 
So if the operator_export_type is not ONNX,# we can skip this check.# If large model format export is enabled, proto will only contain data location instead of# raw data and _check_onnx_proto() will fail because it can only handle the raw ONNX proto# string in memory.if(operator_export_typeis_C_onnx.OperatorExportTypes.ONNX)and(notval_use_external_data_format):try:_C._check_onnx_proto(proto,full_check=True)exceptRuntimeErrorase:raiseerrors.CheckerError(e)finally:assertGLOBALS.in_onnx_exportGLOBALS.in_onnx_export=False_reset_trace_module_map()returntorch_out@_beartype.beartypedef_apply_friendly_debug_names(graph,params):forningraph.nodes():forvinn.inputs():old_name=v.debugName()ifold_name!=str(v.unique()):continuenew_name=f"{n.kind()}_{v.unique()}"v.setDebugName(new_name)ifold_nameinparams:params[new_name]=params.pop(old_name)@_beartype.beartypedef_set_input_and_output_names(graph,input_names,output_names):@_beartype.beartypedefset_names(node_list,name_list,descriptor):ifname_listisNone:returniflen(name_list)>len(node_list):raiseRuntimeError("number of %s names provided (%d) exceeded number of %ss (%d)"%(descriptor,len(name_list),descriptor,len(node_list)))# Mark if the output node DebugName is set before.output_node_set=set()fori,(name,node)inenumerate(zip(name_list,node_list)):# Duplicated output node, insert onnx::Identity to avoid setting the same DebugName after setDebugName().ifdescriptor=="output":ifnodeinoutput_node_set:identity_node=graph.create("onnx::Identity")identity_node.insertAfter(node.node())identity_node.addInput(node)identity_node.output().setType(node.type())graph.return_node().replaceInput(i,identity_node.output())node=identity_node.output()output_node_set.add(node)ifnode.debugName()!=name:node.setDebugName(name)set_names(list(graph.inputs()),input_names,"input")set_names(list(graph.outputs()),output_names,"output")@_beartype.beartypedef_run_symbolic_method(g,op_name,symbolic_fn,args):r""" This trampoline function gets invoked for every symbolic method call from C++. """try:returnsymbolic_fn(g,*args)exceptTypeErrorase:# Handle the specific case where we didn't successfully dispatch# to symbolic_fn. Otherwise, the backtrace will have the clues# you need.e.args=(f"{e.args[0]} (occurred when translating {op_name})",)raise@_beartype.beartypedef_add_block(node:_C.Node)->_C.Block:returnnode.addBlock()@_beartype.beartypedef_add_input_to_block(block:_C.Block):returnblock.addInputToBlock()# type: ignore[attr-defined]@_beartype.beartypedef_add_output_to_block(block:_C.Block,value:_C.Value)->int:returnblock.registerOutput(value)@_beartype.beartypedef_should_aten_fallback(name:str,opset_version:int,operator_export_type:_C_onnx.OperatorExportTypes,):is_exportable_aten_op=registration.registry.is_registered_op(name,opset_version)is_onnx_aten_export=operator_export_type==_C_onnx.OperatorExportTypes.ONNX_ATENis_aten_fallback_export=(operator_export_type==_C_onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK)returnis_onnx_aten_exportor(notis_exportable_aten_opandis_aten_fallback_export)@_beartype.beartypedef_need_symbolic_context(symbolic_fn:Callable)->bool:"""Checks if the first argument to symbolic_fn is annotated as type `torch.onnx.SymbolicContext`."""params=tuple(inspect.signature(symbolic_fn).parameters.values())# When the annotation is postpone-evaluated, the annotation is a string# and not a type. 
We need to use get_type_hints to get the real type.ifnotparams:returnFalsefirst_param_name=params[0].nametype_hints=typing.get_type_hints(symbolic_fn)iffirst_param_namenotintype_hints:returnFalseparam_type=type_hints[first_param_name]returnissubclass(param_type,_exporter_states.SymbolicContext)@_beartype.beartypedef_symbolic_context_handler(symbolic_fn:Callable)->Callable:"""Decorator that provides the symbolic context to the symbolic function if needed."""if_need_symbolic_context(symbolic_fn):# TODO(justinchuby): Update the module name of GraphContext when it is publicwarnings.warn("The first argument to symbolic functions is deprecated in 1.13 and will be ""removed in the future. Please annotate treat the first argument (g) as GraphContext ""and use context information from the object instead.",category=FutureWarning,)defwrapper(graph_context:jit_utils.GraphContext,*args,**kwargs):symbolic_context=_exporter_states.SymbolicContext(params_dict=graph_context.params_dict,env=graph_context.env,cur_node=graph_context.original_node,onnx_block=graph_context.block,)returnsymbolic_fn(symbolic_context,graph_context,*args,**kwargs)returnwrapperreturnsymbolic_fn@_beartype.beartypedef_get_aten_op_overload_name(n:_C.Node)->str:# Returns `overload_name` attribute to ATen ops on non-Caffe2 buildsschema=n.schema()ifnotschema.startswith("aten::")orsymbolic_helper.is_caffe2_aten_fallback():return""return_C.parse_schema(schema).overload_name@_beartype.beartypedef_run_symbolic_function(graph:_C.Graph,block:_C.Block,node:_C.Node,inputs:Any,env:Dict[_C.Value,_C.Value],operator_export_type=_C_onnx.OperatorExportTypes.ONNX,)->Optional[Union[_C.Value,Sequence[Optional[_C.Value]]]]:"""Runs a symbolic function. The function is used in C++ to export the node to ONNX. Returns: A single or a tuple of Values. None when the node gets cloned as is into the new graph. 
"""opset_version=GLOBALS.export_onnx_opset_version# See Note [Export inplace]node_kind=node.kind()ifnode_kind.endswith("_"):# Treat relu_ -> relu; add_ -> add etc.ns_op_name=node_kind[:-1]else:ns_op_name=node_kindnamespace,op_name=ns_op_name.split("::")graph_context=jit_utils.GraphContext(graph=graph,block=block,opset=opset_version,original_node=node,params_dict=_params_dict,env=env,)try:# Caffe2-specific: Quantized op symbolics are registered for opset 9 only.ifsymbolic_helper.is_caffe2_aten_fallback()andopset_version==9:symbolic_caffe2.register_quantized_ops("caffe2",opset_version)ifnamespace=="quantized"andsymbolic_helper.is_caffe2_aten_fallback():domain="caffe2"else:domain=namespacesymbolic_function_name=f"{domain}::{op_name}"symbolic_function_group=registration.registry.get_function_group(symbolic_function_name)ifsymbolic_function_groupisnotNone:symbolic_fn=symbolic_function_group.get(opset_version)ifsymbolic_fnisnotNone:attrs={k:symbolic_helper._node_get(node,k)forkinnode.attributeNames()}returnsymbolic_fn(graph_context,*inputs,**attrs)attrs={k+"_"+node.kindOf(k)[0]:symbolic_helper._node_get(node,k)forkinnode.attributeNames()}ifnamespace=="onnx":# Clone node to trigger ONNX shape inferencereturngraph_context.op(op_name,*inputs,**attrs,outputs=node.outputsSize())# type: ignore[attr-defined]if_should_aten_fallback(ns_op_name,opset_version,operator_export_type):# Direct ATen export requestedoutputs=node.outputsSize()attrs["outputs"]=outputs# `overload_name` is set for non-Caffe2 builds onlyreturngraph_context.at(op_name,*inputs,overload_name=_get_aten_op_overload_name(node),**attrs,)raiseerrors.UnsupportedOperatorError(domain,op_name,opset_version,symbolic_function_group.get_min_supported()ifsymbolic_function_groupelseNone,)exceptRuntimeError:ifoperator_export_type==_C_onnx.OperatorExportTypes.ONNX_FALLTHROUGH:returnNoneelif(operator_export_type==_C_onnx.OperatorExportTypes.ONNX_ATEN_FALLBACKandnotsymbolic_helper.is_caffe2_aten_fallback()):# Emit ATen op for non-Caffe2 builds when `operator_export_type==ONNX_ATEN_FALLBACK`attrs={k+"_"+node.kindOf(k)[0]:symbolic_helper._node_get(node,k)forkinnode.attributeNames()}returngraph_context.at(op_name,*inputs,overload_name=_get_aten_op_overload_name(node),**attrs,)raiseexceptTypeErrorase:# Handle the specific case where we didn't successfully dispatch.# Otherwise, the backtrace will have the clues you need.e.args=(f"{e.args[0]}\n(Occurred when translating {op_name}).",)raise@_beartype.beartypedef_verify_custom_op_name(symbolic_name:str):ifnotre.match(r"^[a-zA-Z0-9-_]+::[a-zA-Z-_]+[a-zA-Z0-9-_]*$",symbolic_name):raiseerrors.OnnxExporterError(f"Failed to register operator {symbolic_name}. ""The symbolic name must match the format domain::name, ""and should start with a letter and contain only ""alphanumerical characters")ns,_=symbolic_name.split("::")ifns=="onnx":raiseValueError(f"Failed to register operator {symbolic_name}. {ns} domain cannot be modified.")
@_beartype.beartype
def register_custom_op_symbolic(
    symbolic_name: str,
    symbolic_fn: Callable,
    opset_version: int,
):
    """Registers a symbolic function for a custom operator.

    When the user registers a symbolic function for custom/contrib ops,
    it is highly recommended to add shape inference for that operator via the setType API,
    otherwise the exported graph may have incorrect shape inference in some extreme cases.
    An example of setType is `test_aten_embedding_2` in `test_operators.py`.

    See "Custom Operators" in the module documentation for an example usage.

    Args:
        symbolic_name (str): The name of the custom operator in "<domain>::<op>"
            format.
        symbolic_fn (Callable): A function that takes in the ONNX graph and
            the input arguments to the current operator, and returns new
            operator nodes to add to the graph.
        opset_version (int): The ONNX opset version in which to register.
    """
    if symbolic_name.startswith("::"):
        symbolic_name = f"aten{symbolic_name}"

    _verify_custom_op_name(symbolic_name)

    registration.custom_onnx_symbolic(
        symbolic_name,
        opset_version,
        decorate=[
            _symbolic_context_handler,
        ],
    )(symbolic_fn)
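# --- Hedged usage sketch (illustrative only, not part of the original module) --
# Registering an ONNX symbolic for a hypothetical TorchScript custom operator
# "custom_ops::my_relu". The operator itself must already be registered with
# TorchScript (e.g. via TORCH_LIBRARY / torch.ops); only the ONNX mapping is
# shown here, and the names are assumptions made for illustration.


def _demo_register_custom_op_symbolic() -> None:
    def my_relu_symbolic(g, input):
        # Map the custom op onto the standard ONNX Relu node.
        return g.op("Relu", input)

    register_custom_op_symbolic("custom_ops::my_relu", my_relu_symbolic, 9)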
@_beartype.beartype
def unregister_custom_op_symbolic(symbolic_name: str, opset_version: int):
    """Unregisters ``symbolic_name``.

    See "Custom Operators" in the module documentation for an example usage.

    Args:
        symbolic_name (str): The name of the custom operator in "<domain>::<op>"
            format.
        opset_version (int): The ONNX opset version in which to unregister.
    """
    if symbolic_name.startswith("::"):
        symbolic_name = f"aten{symbolic_name}"

    _verify_custom_op_name(symbolic_name)

    registration.registry.unregister(symbolic_name, opset_version)
@_beartype.beartype
def _validate_dynamic_axes(dynamic_axes, model, input_names, output_names):
    """Ensures that the dynamic axes argument follows the expected format."""
    if len(dynamic_axes) == 0:
        return

    if hasattr(model, "graph"):
        # Extracting set of valid input/output names that shall be used for dynamic_axes
        if (input_names is None) or len(input_names) == 0:
            input_names = [x.debugName() for x in model.graph.inputs()]
        if (output_names is None) or len(output_names) == 0:
            output_names = [y.debugName() for y in model.graph.outputs()]

    valid_names = set((input_names or []) + (output_names or []))

    # If dynamic axes are provided as a list rather than dictionary, they should
    # first get converted to a dictionary in expected format. If desired axes names
    # are not provided for dynamic axes, automatic names shall be generated for
    # provided dynamic axes of specified input/output
    for key, value in dynamic_axes.items():
        if key not in valid_names:
            warnings.warn(
                f"Provided key {key} for dynamic axes is not a valid input/output name"
            )
        if isinstance(value, list):
            warnings.warn(
                "No names were found for specified dynamic axes of provided input. "
                f"Automatically generated names will be applied to each dynamic axis of input {key}"
            )

            value_dict = {}

            for i, x in enumerate(value):
                if not isinstance(x, int):
                    raise ValueError(
                        "The type of axis index is expected to be an integer"
                    )
                if x in value_dict:
                    warnings.warn(
                        f"Duplicate dynamic axis index {x} was provided for input {key}."
                    )
                else:
                    value_dict[x] = str(key) + "_dynamic_axes_" + str(i + 1)
            dynamic_axes[key] = value_dict
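# --- Hedged usage sketch (illustrative only, not part of the original module) --
# _validate_dynamic_axes() normalizes list-valued entries into the dict form,
# generating names like "<input>_dynamic_axes_<i>". A hypothetical walk-through:


def _demo_validate_dynamic_axes() -> None:
    dynamic_axes = {"x": [0, 1]}
    # model=None is fine here: the hasattr(model, "graph") branch is simply skipped.
    _validate_dynamic_axes(dynamic_axes, None, ["x"], [])
    # dynamic_axes is mutated in place to:
    # {"x": {0: "x_dynamic_axes_1", 1: "x_dynamic_axes_2"}}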