class QConfig(namedtuple('QConfig', ['activation', 'weight'])):
    """
    Describes how to quantize a layer or a part of the network by providing
    settings (observer classes) for activations and weights respectively.

    Note that QConfig needs to contain observer **classes** (like MinMaxObserver) or a callable that returns
    instances on invocation, not the concrete observer instances themselves.
    The quantization preparation function will instantiate observers for each of the layers.

    Observer classes usually have reasonable default arguments, but they can be overridden with the `with_args`
    method (which behaves like functools.partial)::

      my_qconfig = QConfig(
          activation=MinMaxObserver.with_args(dtype=torch.qint8),
          weight=default_observer.with_args(dtype=torch.qint8))

    """
    def __new__(cls, activation, weight):
        # catch common mistakes
        if isinstance(activation, nn.Module) or isinstance(weight, nn.Module):
            raise ValueError("QConfig received observer instance, please pass observer class instead. " +
                             "Use MyObserver.with_args(x=1) to override arguments to constructor if needed")
        return super(QConfig, cls).__new__(cls, activation, weight)
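# Illustrative sketch (not part of the original module): QConfig stores observer
# *factories*, so each prepared layer gets a fresh observer instance. Passing an
# already-constructed observer triggers the ValueError above.
#
#     from torch.quantization import MinMaxObserver
#
#     ok = QConfig(activation=MinMaxObserver.with_args(dtype=torch.quint8),
#                  weight=MinMaxObserver.with_args(dtype=torch.qint8))
#     QConfig(activation=MinMaxObserver(), weight=MinMaxObserver())  # raises ValueError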
class QConfigDynamic(namedtuple('QConfigDynamic', ['activation', 'weight'])):
    """
    Describes how to dynamically quantize a layer or a part of the network by providing
    settings (observer classes) for weights. It is like QConfig, but for dynamic quantization.

    Note that QConfigDynamic needs to contain observer **classes** (like MinMaxObserver) or a callable that returns
    instances on invocation, not the concrete observer instances themselves.
    The quantization function will instantiate observers for each of the layers.

    Observer classes usually have reasonable default arguments, but they can be overridden with the `with_args`
    method (which behaves like functools.partial)::

      my_qconfig = QConfigDynamic(weight=default_observer.with_args(dtype=torch.qint8))

    """
    def __new__(cls, activation=torch.nn.Identity, weight=torch.nn.Identity):
        # catch common mistakes
        if isinstance(weight, nn.Module):
            raise ValueError("QConfigDynamic received observer instance, please pass observer class instead. " +
                             "Use MyObserver.with_args(x=1) to override arguments to constructor if needed")
        return super(QConfigDynamic, cls).__new__(cls, activation, weight)
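# Sketch of typical dynamic-quantization use (hedged: the toy model is an
# arbitrary placeholder; quantize_dynamic is the public eager-mode API that
# consumes the dynamic qconfigs defined below):
#
#     import torch
#
#     float_model = torch.nn.Sequential(torch.nn.Linear(8, 8))
#     quantized_model = torch.quantization.quantize_dynamic(
#         float_model, {torch.nn.Linear}, dtype=torch.qint8)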
default_dynamic_qconfig = QConfigDynamic(activation=default_dynamic_quant_observer,
                                         weight=default_weight_observer)
float16_dynamic_qconfig = QConfigDynamic(activation=PlaceholderObserver.with_args(dtype=torch.float32),
                                         weight=PlaceholderObserver.with_args(dtype=torch.float16))
float16_static_qconfig = QConfigDynamic(activation=PlaceholderObserver.with_args(dtype=torch.float16),
                                        weight=PlaceholderObserver.with_args(dtype=torch.float16))
per_channel_dynamic_qconfig = QConfigDynamic(activation=default_dynamic_quant_observer,
                                             weight=default_per_channel_weight_observer)

# TODO: this is weight only quant, change this to QConfigWeightOnly
# or remove the QConfigDynamic later
float_qparams_weight_only_qconfig = QConfigDynamic(
    activation=default_placeholder_observer,
    weight=default_float_qparams_observer)

default_qat_qconfig = QConfig(activation=default_fake_quant,
                              weight=default_weight_fake_quant)

default_weight_only_qconfig = QConfig(activation=torch.nn.Identity,
                                      weight=default_weight_fake_quant)
default_activation_only_qconfig = QConfig(activation=default_fake_quant,
                                          weight=torch.nn.Identity)

# QAT config that uses fused observer + fake_quant modules for optimized training performance.
# To modify the activation/weight observers, the default entries in fake_quantize.py can be modified.
default_qat_qconfig_v2 = QConfig(activation=default_fused_act_fake_quant,
                                 weight=default_fused_wt_fake_quant)

def get_default_qconfig(backend='fbgemm'):
    if backend == 'fbgemm':
        qconfig = QConfig(activation=HistogramObserver.with_args(reduce_range=True),
                          weight=default_per_channel_weight_observer)
    elif backend == 'qnnpack':
        qconfig = QConfig(activation=HistogramObserver.with_args(reduce_range=False),
                          weight=default_weight_observer)
    else:
        qconfig = default_qconfig
    return qconfig

def get_default_qat_qconfig(backend='fbgemm', version=1):
    # Histogram observer is too slow for quantization aware training
    if version is None:
        if backend == 'fbgemm':
            qconfig = QConfig(activation=FakeQuantize.with_args(observer=MovingAverageMinMaxObserver,
                                                                quant_min=0,
                                                                quant_max=255,
                                                                reduce_range=True),
                              weight=default_per_channel_weight_fake_quant)
        elif backend == 'qnnpack':
            qconfig = QConfig(activation=FakeQuantize.with_args(observer=MovingAverageMinMaxObserver,
                                                                quant_min=0,
                                                                quant_max=255,
                                                                reduce_range=False),
                              weight=default_weight_fake_quant)
        else:
            qconfig = default_qat_qconfig
    # Use the fused observer + fake_quant modules for doing QAT.
    elif version == 1:
        if backend == 'fbgemm':
            qconfig = QConfig(activation=FusedMovingAvgObsFakeQuantize.with_args(observer=MovingAverageMinMaxObserver,
                                                                                 quant_min=0,
                                                                                 quant_max=255,
                                                                                 reduce_range=True),
                              weight=default_fused_per_channel_wt_fake_quant)
        elif backend == 'qnnpack':
            qconfig = QConfig(activation=FusedMovingAvgObsFakeQuantize.with_args(observer=MovingAverageMinMaxObserver,
                                                                                 quant_min=0,
                                                                                 quant_max=255,
                                                                                 reduce_range=False),
                              weight=default_fused_wt_fake_quant)
        else:
            qconfig = default_qat_qconfig_v2
    else:
        # guard against `qconfig` being unbound for unsupported versions
        raise ValueError("Unsupported version for get_default_qat_qconfig: {}".format(version))
    return qconfig

def assert_valid_qconfig(qconfig: Optional[Union[QConfig, QConfigDynamic]],
                         mod: torch.nn.Module) -> None:
    if qconfig is None:
        return
    is_conv_transpose_mod = (
        isinstance(mod, torch.nn.ConvTranspose1d) or
        isinstance(mod, torch.nn.ConvTranspose2d) or
        isinstance(mod, torch.nn.ConvTranspose3d))
    if is_conv_transpose_mod:
        example_observer = qconfig.weight()
        is_per_channel = (
            isinstance(example_observer, torch.quantization.PerChannelMinMaxObserver) or
            isinstance(example_observer, torch.quantization.MovingAveragePerChannelMinMaxObserver))
        assert not is_per_channel, \
            'Per channel weight observer is not supported yet for ConvTranspose{n}d.'
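# Sketch of how these backend defaults are consumed (an assumed, typical
# eager-mode workflow; `float_model` and `qat_model` are placeholders for user
# models prepared with quant/dequant stubs):
#
#     float_model.qconfig = get_default_qconfig('fbgemm')    # post-training static
#     prepared = torch.quantization.prepare(float_model)
#     # ... run calibration data through `prepared` ...
#     quantized = torch.quantization.convert(prepared)
#
#     qat_model.qconfig = get_default_qat_qconfig('fbgemm')  # quantization aware training
#     prepared_qat = torch.quantization.prepare_qat(qat_model)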
QConfigAny = Union[QConfig, QConfigDynamic, None]

def add_module_to_qconfig_obs_ctr(
        qconfig: QConfigAny,
        module: Union[nn.Module, None]) -> Any:
    r"""This is a helper function for use in quantization prepare that updates a qconfig so that
    the constructors stored in the qconfig will create observers on the same device that
    'module' is on. This is intended to be used when the qconfigs are propagated to each
    module in order to avoid potential device alignment issues.

    Args:
        qconfig: QConfig or QConfigDynamic with obs constructors stored in activation
            and weight
        module: module which the qconfig is related to

    Return:
        qconfig: configured so that the obs constructors are set to construct on the
            same device as module
    """

    if module is None or qconfig is None or qconfig._fields != ('activation', 'weight'):
        return qconfig

    def get_factory_kwargs_based_on_module_device():
        assert isinstance(module, torch.nn.Module)
        devices = {p.device for p in module.parameters()} | \
            {p.device for p in module.buffers()}
        device = next(iter(devices)) if len(devices) > 0 else None
        return None if device is None else {'device': device}

    def configure_constructor_to_put_obs_on_module_device(original_constructor):
        try:
            # check if constructor can accept factory_kwargs
            check = original_constructor.with_args(factory_kwargs=None)
            check()
            return original_constructor.with_callable_args(factory_kwargs=get_factory_kwargs_based_on_module_device)
        except AttributeError:  # qconfig doesn't have activation or weight
            return original_constructor
        except TypeError:  # the class doesn't accept factory_kwargs argument
            return original_constructor

    activation = configure_constructor_to_put_obs_on_module_device(qconfig.activation)
    weight = configure_constructor_to_put_obs_on_module_device(qconfig.weight)

    if isinstance(qconfig, QConfig):
        return QConfig(activation, weight)
    else:
        return QConfigDynamic(activation, weight)

def qconfig_equals(q1: QConfigAny, q2: QConfigAny):
    # functools.partial has no __eq__ operator defined so '==' defaults to 'is'
    def partial_equals(p1, p2):
        same = p1.func == p2.func
        same = same and p1.args == p2.args
        return same and p1.keywords == p2.keywords

    if q1 is None or q2 is None:
        return q1 == q2
    else:
        assert q1 is not None and q2 is not None
        try:
            return partial_equals(q1.activation.p, q2.activation.p) and partial_equals(q1.weight.p, q2.weight.p)
        except AttributeError:
            return q1 == q2
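# Sketch (assumes with_args returns the observer wrapper whose underlying
# functools.partial is exposed as `.p`, as accessed above): two structurally
# identical qconfigs compare unequal with '==' but equal via qconfig_equals.
#
#     a = QConfig(activation=MinMaxObserver.with_args(dtype=torch.quint8),
#                 weight=MinMaxObserver.with_args(dtype=torch.qint8))
#     b = QConfig(activation=MinMaxObserver.with_args(dtype=torch.quint8),
#                 weight=MinMaxObserver.with_args(dtype=torch.qint8))
#     assert a != b                # distinct wrapper objects: identity comparison
#     assert qconfig_equals(a, b)  # compares func/args/keywords of the partials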