# Source code for torchvision.datasets.oxford_iiit_pet
import os
import os.path
import pathlib
from typing import Any, Callable, Optional, Sequence, Tuple, Union
from PIL import Image
from .utils import download_and_extract_archive, verify_str_arg
from .vision import VisionDataset
class OxfordIIITPet(VisionDataset):
"""`Oxford-IIIT Pet Dataset <https://www.robots.ox.ac.uk/~vgg/data/pets/>`_.

    Args:
        root (string): Root directory of the dataset.
        split (string, optional): The dataset split, supports ``"trainval"`` (default) or ``"test"``.
        target_types (string, sequence of strings, optional): Types of target to use. Can be ``category`` (default) or
            ``segmentation``. Can also be a list to output a tuple with all specified target types. The types represent:

                - ``category`` (int): Label for one of the 37 pet categories.
                - ``segmentation`` (PIL image): Segmentation trimap of the image.

            If empty, ``None`` will be returned as target.
        transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed
            version, e.g., ``transforms.RandomCrop``.
        target_transform (callable, optional): A function/transform that takes in the target and transforms it.
        download (bool, optional): If True, downloads the dataset from the internet and puts it into
            ``root/oxford-iiit-pet``. If the dataset is already downloaded, it is not downloaded again.
    """
_RESOURCES = (
("https://www.robots.ox.ac.uk/~vgg/data/pets/data/images.tar.gz", "5c4f3ee8e5d25df40f4fd59a7f44e54c"),
("https://www.robots.ox.ac.uk/~vgg/data/pets/data/annotations.tar.gz", "95a8c909bbe2e81eed6a22bccdf3f68f"),
)
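    # _RESOURCES lists the (URL, MD5 checksum) pairs for the two dataset archives:
    # the JPEG images and the annotations (split lists plus segmentation trimaps).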
_VALID_TARGET_TYPES = ("category", "segmentation")
def __init__(
self,
root: str,
split: str = "trainval",
target_types: Union[Sequence[str], str] = "category",
transforms: Optional[Callable] = None,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
):
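        # Validate the requested split and target types up front, before any filesystem access.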
self._split = verify_str_arg(split, "split", ("trainval", "test"))
if isinstance(target_types, str):
target_types = [target_types]
self._target_types = [
verify_str_arg(target_type, "target_types", self._VALID_TARGET_TYPES) for target_type in target_types
]
super().__init__(root, transforms=transforms, transform=transform, target_transform=target_transform)
self._base_folder = pathlib.Path(self.root) / "oxford-iiit-pet"
self._images_folder = self._base_folder / "images"
self._anns_folder = self._base_folder / "annotations"
self._segs_folder = self._anns_folder / "trimaps"
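        # Optionally download the archives, then make sure the extracted folders are in place.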
if download:
self._download()
if not self._check_exists():
raise RuntimeError("Dataset not found. You can use download=True to download it")
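        # Each line of the split file starts with "<image_id> <class_id>"; the remaining
        # columns are ignored, and the 1-based class ids are converted to 0-based labels.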
image_ids = []
self._labels = []
with open(self._anns_folder / f"{self._split}.txt") as file:
for line in file:
image_id, label, *_ = line.strip().split()
image_ids.append(image_id)
self._labels.append(int(label) - 1)
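        # Derive human-readable class names from the image ids (an id like
        # "american_pit_bull_terrier_12" maps to "American Pit Bull Terrier"),
        # ordered by their label index.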
self.classes = [
" ".join(part.title() for part in raw_cls.split("_"))
for raw_cls, _ in sorted(
{(image_id.rsplit("_", 1)[0], label) for image_id, label in zip(image_ids, self._labels)},
key=lambda image_id_and_label: image_id_and_label[1],
)
]
self.class_to_idx = dict(zip(self.classes, range(len(self.classes))))
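        # Resolve per-sample file paths: the JPEG image and the PNG segmentation trimap
        # of a sample share the same image id.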
self._images = [self._images_folder / f"{image_id}.jpg" for image_id in image_ids]
self._segs = [self._segs_folder / f"{image_id}.png" for image_id in image_ids]
def __len__(self) -> int:
return len(self._images)
    def __getitem__(self, idx: int) -> Tuple[Any, Any]:
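        # Load the image as RGB and collect the targets in the order given by target_types.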
image = Image.open(self._images[idx]).convert("RGB")
target: Any = []
for target_type in self._target_types:
if target_type == "category":
target.append(self._labels[idx])
else: # target_type == "segmentation"
target.append(Image.open(self._segs[idx]))
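        # Match the documented contract: None when no target types were requested,
        # a bare value for a single type, and a tuple otherwise.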
if not target:
target = None
elif len(target) == 1:
target = target[0]
else:
target = tuple(target)
if self.transforms:
image, target = self.transforms(image, target)
return image, target
    def _check_exists(self) -> bool:
        for folder in (self._images_folder, self._anns_folder):
            if not os.path.isdir(folder):
                return False
        return True
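    # Download and extract both archives into root/oxford-iiit-pet, verifying their
    # MD5 checksums; this is a no-op when the data is already present.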
def _download(self) -> None:
if self._check_exists():
return
for url, md5 in self._RESOURCES:
download_and_extract_archive(url, download_root=str(self._base_folder), md5=md5)
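

# Example usage (illustrative sketch, not part of the upstream module): builds the
# trainval split with both target types. The root directory "data" and the
# download=True flag are arbitrary choices for the example, not requirements of the API.
if __name__ == "__main__":
    dataset = OxfordIIITPet(
        root="data",
        split="trainval",
        target_types=("category", "segmentation"),
        download=True,
    )
    # With two target types, the target is a (label, trimap) tuple.
    image, (label, trimap) = dataset[0]
    print(len(dataset), dataset.classes[label], image.size, trimap.size)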