# Source code for torchvision.models.quantization.mobilenetv2
from functools import partial
from typing import Any, Optional, Union
from torch import nn, Tensor
from torch.ao.quantization import DeQuantStub, QuantStub
from torchvision.models.mobilenetv2 import InvertedResidual, MobileNet_V2_Weights, MobileNetV2
from ...ops.misc import Conv2dNormActivation
from ...transforms._presets import ImageClassification
from .._api import register_model, Weights, WeightsEnum
from .._meta import _IMAGENET_CATEGORIES
from .._utils import _ovewrite_named_param, handle_legacy_interface
from .utils import _fuse_modules, _replace_relu, quantize_model

__all__ = [
"QuantizableMobileNetV2",
"MobileNet_V2_QuantizedWeights",
"mobilenet_v2",
]


class QuantizableInvertedResidual(InvertedResidual):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
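        # Quantized tensors do not support the plain `+` operator, so the
        # residual addition goes through FloatFunctional, which also gives
        # the add its own observer and output scale.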
self.skip_add = nn.quantized.FloatFunctional()

    def forward(self, x: Tensor) -> Tensor:
if self.use_res_connect:
return self.skip_add.add(x, self.conv(x))
else:
return self.conv(x)

    def fuse_model(self, is_qat: Optional[bool] = None) -> None:
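        # Every plain Conv2d in the block's sequential is immediately
        # followed by a BatchNorm2d, so each (conv, bn) pair is fused.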
for idx in range(len(self.conv)):
if type(self.conv[idx]) is nn.Conv2d:
_fuse_modules(self.conv, [str(idx), str(idx + 1)], is_qat, inplace=True)


class QuantizableMobileNetV2(MobileNetV2):
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""
MobileNet V2 main class
Args:
Inherits args from floating point MobileNetV2
"""
super().__init__(*args, **kwargs)
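        # The stubs mark where tensors enter and leave the quantized region;
        # they are replaced by real quantize/dequantize ops during conversion.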
self.quant = QuantStub()
self.dequant = DeQuantStub()

    def forward(self, x: Tensor) -> Tensor:
x = self.quant(x)
x = self._forward_impl(x)
x = self.dequant(x)
return x

    def fuse_model(self, is_qat: Optional[bool] = None) -> None:
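        # Conv2dNormActivation holds a (Conv2d, BatchNorm2d, ReLU) triple at
        # indices "0", "1", "2"; inverted residual blocks fuse themselves.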
for m in self.modules():
if type(m) is Conv2dNormActivation:
_fuse_modules(m, ["0", "1", "2"], is_qat, inplace=True)
if type(m) is QuantizableInvertedResidual:
m.fuse_model(is_qat)
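

# The class above targets eager-mode quantization: the stubs bound the int8
# region and ``fuse_model`` sets up conv/bn(/relu) fusion. Below is a minimal
# post-training static quantization sketch using it (illustrative only, not
# part of the original module; ``_example_ptq`` and ``calibration_batches``
# are assumed names, the latter an iterable of representative input tensors).
def _example_ptq(calibration_batches):
    import torch  # this module does not import torch directly

    model = QuantizableMobileNetV2(block=QuantizableInvertedResidual)
    _replace_relu(model)  # swap ReLU6 for ReLU so the fused kernels apply
    model.eval()
    model.fuse_model(is_qat=False)  # fuse before inserting observers
    model.qconfig = torch.ao.quantization.get_default_qconfig("qnnpack")
    torch.ao.quantization.prepare(model, inplace=True)
    with torch.no_grad():
        for batch in calibration_batches:  # collect activation statistics
            model(batch)
    torch.ao.quantization.convert(model, inplace=True)
    return model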


class MobileNet_V2_QuantizedWeights(WeightsEnum):
IMAGENET1K_QNNPACK_V1 = Weights(
url="https://download.pytorch.org/models/quantized/mobilenet_v2_qnnpack_37f702c5.pth",
transforms=partial(ImageClassification, crop_size=224),
meta={
"num_params": 3504872,
"min_size": (1, 1),
"categories": _IMAGENET_CATEGORIES,
"backend": "qnnpack",
"recipe": "https://github.com/pytorch/vision/tree/main/references/classification#qat-mobilenetv2",
"unquantized": MobileNet_V2_Weights.IMAGENET1K_V1,
"_metrics": {
"ImageNet-1K": {
"acc@1": 71.658,
"acc@5": 90.150,
}
},
"_docs": """
These weights were produced by doing Quantization Aware Training (eager mode) on top of the unquantized
weights listed below.
""",
},
)
DEFAULT = IMAGENET1K_QNNPACK_V1
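

# Each entry above bundles the checkpoint URL, a preprocessing preset, and
# metadata. For example, the top-1 accuracy can be read back via
# ``MobileNet_V2_QuantizedWeights.DEFAULT.meta["_metrics"]["ImageNet-1K"]["acc@1"]``.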


@register_model(name="quantized_mobilenet_v2")
@handle_legacy_interface(
weights=(
"pretrained",
lambda kwargs: MobileNet_V2_QuantizedWeights.IMAGENET1K_QNNPACK_V1
if kwargs.get("quantize", False)
else MobileNet_V2_Weights.IMAGENET1K_V1,
)
)
def mobilenet_v2(
*,
weights: Optional[Union[MobileNet_V2_QuantizedWeights, MobileNet_V2_Weights]] = None,
progress: bool = True,
quantize: bool = False,
**kwargs: Any,
) -> QuantizableMobileNetV2:
"""
Constructs a MobileNetV2 architecture from
`MobileNetV2: Inverted Residuals and Linear Bottlenecks
<https://arxiv.org/abs/1801.04381>`_.
.. note::
Note that ``quantize = True`` returns a quantized model with 8 bit
weights. Quantized models only support inference and run on CPUs.
GPU inference is not yet supported.
Args:
weights (:class:`~torchvision.models.quantization.MobileNet_V2_QuantizedWeights` or :class:`~torchvision.models.MobileNet_V2_Weights`, optional): The
pretrained weights for the model. See
:class:`~torchvision.models.quantization.MobileNet_V2_QuantizedWeights` below for
more details, and possible values. By default, no pre-trained
weights are used.
progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
quantize (bool, optional): If True, returns a quantized version of the model. Default is False.
**kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableMobileNetV2``
base class. Please refer to the `source code
<https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/mobilenetv2.py>`_
for more details about this class.
.. autoclass:: torchvision.models.quantization.MobileNet_V2_QuantizedWeights
:members:
.. autoclass:: torchvision.models.MobileNet_V2_Weights
:members:
:noindex:
"""
weights = (MobileNet_V2_QuantizedWeights if quantize else MobileNet_V2_Weights).verify(weights)
if weights is not None:
_ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
if "backend" in weights.meta:
_ovewrite_named_param(kwargs, "backend", weights.meta["backend"])
backend = kwargs.pop("backend", "qnnpack")
model = QuantizableMobileNetV2(block=QuantizableInvertedResidual, **kwargs)
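    # MobileNetV2 uses ReLU6 activations; swap them for ReLU so that the
    # eager-mode conv/bn/relu fusion and quantized kernels apply.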
_replace_relu(model)
if quantize:
quantize_model(model, backend)
if weights is not None:
model.load_state_dict(weights.get_state_dict(progress=progress))
return model
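

# A hedged usage sketch for the builder above (illustrative, not part of the
# original module; ``_example_usage`` is an assumed name): load the
# QNNPACK-quantized weights and classify a random batch. Quantized models
# support inference on CPU only.
def _example_usage():
    import torch  # this module does not import torch directly

    torch.backends.quantized.engine = "qnnpack"  # match the weights' backend
    weights = MobileNet_V2_QuantizedWeights.DEFAULT
    model = mobilenet_v2(weights=weights, quantize=True)
    model.eval()
    preprocess = weights.transforms()  # resize, center-crop, normalize
    batch = preprocess(torch.rand(1, 3, 256, 256))
    with torch.no_grad():
        logits = model(batch)
    return weights.meta["categories"][logits.argmax(dim=1).item()]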


# The dictionary below is an internal implementation detail and will be removed in v0.15
from .._utils import _ModelURLs
from ..mobilenetv2 import model_urls # noqa: F401
quant_model_urls = _ModelURLs(
{
"mobilenet_v2_qnnpack": MobileNet_V2_QuantizedWeights.IMAGENET1K_QNNPACK_V1.url,
}
)