Source code for torchvision.datapoints._bounding_box
from__future__importannotationsfromenumimportEnumfromtypingimportAny,List,Optional,Sequence,Tuple,Unionimporttorchfromtorchvision.transformsimportInterpolationMode# TODO: this needs to be moved out of transformsfrom._datapointimport_FillTypeJIT,Datapoint
class BoundingBoxFormat(Enum):
    """[BETA] Coordinate format of a bounding box.

    Available formats are

    * ``XYXY``
    * ``XYWH``
    * ``CXCYWH``
    """

    # Corner coordinates: (x1, y1, x2, y2).
    XYXY = "XYXY"
    # Top-left corner plus extent: (x, y, width, height).
    XYWH = "XYWH"
    # Box center plus extent: (cx, cy, width, height).
    CXCYWH = "CXCYWH"
class BoundingBox(Datapoint):
    """[BETA] :class:`torch.Tensor` subclass for bounding boxes.

    Args:
        data: Any data that can be turned into a tensor with :func:`torch.as_tensor`.
        format (BoundingBoxFormat, str): Format of the bounding box.
        spatial_size (two-tuple of ints): Height and width of the corresponding image or video.
        dtype (torch.dtype, optional): Desired data type of the bounding box. If omitted, will be inferred from
            ``data``.
        device (torch.device, optional): Desired device of the bounding box. If omitted and ``data`` is a
            :class:`torch.Tensor`, the device is taken from it. Otherwise, the bounding box is constructed on the CPU.
        requires_grad (bool, optional): Whether autograd should record operations on the bounding box. If omitted and
            ``data`` is a :class:`torch.Tensor`, the value is taken from it. Otherwise, defaults to ``False``.
    """

    # Metadata carried alongside the tensor data.
    format: BoundingBoxFormat
    spatial_size: Tuple[int, int]

    @classmethod
    def _wrap(cls, tensor: torch.Tensor, *, format: BoundingBoxFormat, spatial_size: Tuple[int, int]) -> BoundingBox:
        # Re-tag the plain tensor as this subclass and attach the box metadata.
        wrapped = tensor.as_subclass(cls)
        wrapped.format = format
        wrapped.spatial_size = spatial_size
        return wrapped

    def __new__(
        cls,
        data: Any,
        *,
        format: Union[BoundingBoxFormat, str],
        spatial_size: Tuple[int, int],
        dtype: Optional[torch.dtype] = None,
        device: Optional[Union[torch.device, str, int]] = None,
        requires_grad: Optional[bool] = None,
    ) -> BoundingBox:
        tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
        # Accept the format as a case-insensitive string for convenience.
        if isinstance(format, str):
            format = BoundingBoxFormat[format.upper()]
        return cls._wrap(tensor, format=format, spatial_size=spatial_size)

    @classmethod
    def wrap_like(
        cls,
        other: BoundingBox,
        tensor: torch.Tensor,
        *,
        format: Optional[BoundingBoxFormat] = None,
        spatial_size: Optional[Tuple[int, int]] = None,
    ) -> BoundingBox:
        """Wrap a :class:`torch.Tensor` as :class:`BoundingBox` from a reference.

        Args:
            other (BoundingBox): Reference bounding box.
            tensor (Tensor): Tensor to be wrapped as :class:`BoundingBox`
            format (BoundingBoxFormat, str, optional): Format of the bounding box. If omitted, it is taken from the
                reference.
            spatial_size (two-tuple of ints, optional): Height and width of the corresponding image or video. If
                omitted, it is taken from the reference.
        """
        if isinstance(format, str):
            format = BoundingBoxFormat[format.upper()]
        # Fall back to the reference box's metadata for anything not given explicitly.
        if format is None:
            format = other.format
        if spatial_size is None:
            spatial_size = other.spatial_size
        return cls._wrap(tensor, format=format, spatial_size=spatial_size)
To analyze traffic and optimize your experience, we serve cookies on this site. By clicking or navigating, you agree to allow our usage of cookies. As the current maintainers of this site, Facebook’s Cookies Policy applies. Learn more, including about available controls: Cookies Policy.