import os
from os.path import abspath, expanduser
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import torch
from PIL import Image

from .utils import download_and_extract_archive, download_file_from_google_drive, extract_archive, verify_str_arg
from .vision import VisionDataset


class WIDERFace(VisionDataset):
    """`WIDERFace <http://shuoyang1213.me/WIDERFACE/>`_ Dataset.

    Args:
        root (str or ``pathlib.Path``): Root directory where images and annotations are downloaded to.
            Expects the following folder structure if download=False:

            .. code::

                <root>
                └── widerface
                    ├── wider_face_split ('wider_face_split.zip' if compressed)
                    ├── WIDER_train ('WIDER_train.zip' if compressed)
                    ├── WIDER_val ('WIDER_val.zip' if compressed)
                    └── WIDER_test ('WIDER_test.zip' if compressed)
        split (string): The dataset split to use. One of {``train``, ``val``, ``test``}.
            Defaults to ``train``.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g., ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.

            .. warning::

                To download the dataset, `gdown <https://github.com/wkentaro/gdown>`_ is required.

    """

    BASE_FOLDER = "widerface"
    FILE_LIST = [
        # File ID                             MD5 Hash                            Filename
        ("15hGDLhsx8bLgLcIRD5DhYt5iBxnjNF1M", "3fedf70df600953d25982bcd13d91ba2", "WIDER_train.zip"),
        ("1GUCogbp16PMGa39thoMMeWxp7Rp5oM8Q", "dfa7d7e790efa35df3788964cf0bbaea", "WIDER_val.zip"),
        ("1HIfDbVEWKmsYKJZm4lchTBDLW5N7dY5T", "e5d8f4248ed24c334bbd12f49c29dd40", "WIDER_test.zip"),
    ]
    ANNOTATIONS_FILE = (
        "http://shuoyang1213.me/WIDERFACE/support/bbx_annotation/wider_face_split.zip",
        "0e3767bcf0e326556d407bf5bff5d27c",
        "wider_face_split.zip",
    )

    def __init__(
        self,
        root: Union[str, Path],
        split: str = "train",
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        super().__init__(
            root=os.path.join(root, self.BASE_FOLDER), transform=transform, target_transform=target_transform
        )
        # check arguments
        self.split = verify_str_arg(split, "split", ("train", "val", "test"))

        if download:
            self.download()

        if not self._check_integrity():
            raise RuntimeError("Dataset not found or corrupted. You can use download=True to download and prepare it")

        self.img_info: List[Dict[str, Union[str, Dict[str, torch.Tensor]]]] = []
        if self.split in ("train", "val"):
            self.parse_train_val_annotations_file()
        else:
            self.parse_test_annotations_file()
    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target) where target is a dict of annotations for all faces in the image.
            target=None for the test split.
        """

        # stay consistent with other datasets and return a PIL Image
        img = Image.open(self.img_info[index]["img_path"])  # type: ignore[arg-type]

        if self.transform is not None:
            img = self.transform(img)

        target = None if self.split == "test" else self.img_info[index]["annotations"]
        if self.target_transform is not None:
            target = self.target_transform(target)

        return img, target
    def __len__(self) -> int:
        return len(self.img_info)

    def extra_repr(self) -> str:
        lines = ["Split: {split}"]
        return "\n".join(lines).format(**self.__dict__)

    def parse_train_val_annotations_file(self) -> None:
        filename = "wider_face_train_bbx_gt.txt" if self.split == "train" else "wider_face_val_bbx_gt.txt"
        filepath = os.path.join(self.root, "wider_face_split", filename)

        with open(filepath) as f:
            lines = f.readlines()
            file_name_line, num_boxes_line, box_annotation_line = True, False, False
            num_boxes, box_counter = 0, 0
            labels = []
            for line in lines:
                line = line.rstrip()
                if file_name_line:
                    img_path = os.path.join(self.root, "WIDER_" + self.split, "images", line)
                    img_path = abspath(expanduser(img_path))
                    file_name_line = False
                    num_boxes_line = True
                elif num_boxes_line:
                    num_boxes = int(line)
                    num_boxes_line = False
                    box_annotation_line = True
                elif box_annotation_line:
                    box_counter += 1
                    line_split = line.split(" ")
                    line_values = [int(x) for x in line_split]
                    labels.append(line_values)
                    if box_counter >= num_boxes:
                        box_annotation_line = False
                        file_name_line = True
                        labels_tensor = torch.tensor(labels)
                        self.img_info.append(
                            {
                                "img_path": img_path,
                                "annotations": {
                                    "bbox": labels_tensor[:, 0:4].clone(),  # x, y, width, height
                                    "blur": labels_tensor[:, 4].clone(),
                                    "expression": labels_tensor[:, 5].clone(),
                                    "illumination": labels_tensor[:, 6].clone(),
                                    "occlusion": labels_tensor[:, 7].clone(),
                                    "pose": labels_tensor[:, 8].clone(),
                                    "invalid": labels_tensor[:, 9].clone(),
                                },
                            }
                        )
                        box_counter = 0
                        labels.clear()
                else:
                    raise RuntimeError(f"Error parsing annotation file {filepath}")

    def parse_test_annotations_file(self) -> None:
        filepath = os.path.join(self.root, "wider_face_split", "wider_face_test_filelist.txt")
        filepath = abspath(expanduser(filepath))
        with open(filepath) as f:
            lines = f.readlines()
            for line in lines:
                line = line.rstrip()
                img_path = os.path.join(self.root, "WIDER_test", "images", line)
                img_path = abspath(expanduser(img_path))
                self.img_info.append({"img_path": img_path})

    def _check_integrity(self) -> bool:
        # Allow original archive to be deleted (zip). Only need the extracted images
        all_files = self.FILE_LIST.copy()
        all_files.append(self.ANNOTATIONS_FILE)
        for (_, md5, filename) in all_files:
            file, ext = os.path.splitext(filename)
            extracted_dir = os.path.join(self.root, file)
            if not os.path.exists(extracted_dir):
                return False
        return True

    def download(self) -> None:
        if self._check_integrity():
            return

        # download and extract image data
        for (file_id, md5, filename) in self.FILE_LIST:
            download_file_from_google_drive(file_id, self.root, filename, md5)
            filepath = os.path.join(self.root, filename)
            extract_archive(filepath)

        # download and extract annotation files
        download_and_extract_archive(
            url=self.ANNOTATIONS_FILE[0], download_root=self.root, md5=self.ANNOTATIONS_FILE[1]
        )
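
A minimal usage sketch of the class above, constructing the validation split and reading one sample. The ``root`` path and the ``download=True`` flag are assumptions for this example (downloading requires ``gdown``, as noted in the docstring); the annotation keys come from ``parse_train_val_annotations_file`` above.

    from torchvision import datasets, transforms

    # Build the validation split; "./data" is a placeholder root directory.
    dataset = datasets.WIDERFace(
        root="./data",
        split="val",
        transform=transforms.ToTensor(),
        download=True,
    )

    # Each sample is (image, target); for train/val, target is a dict of
    # per-face tensors. target["bbox"] has shape (num_faces, 4) holding
    # (x, y, width, height) for every face in the image.
    img, target = dataset[0]
    print(len(dataset), img.shape, target["bbox"].shape)

For the test split, ``target`` is ``None``, since ``wider_face_test_filelist.txt`` only lists image paths.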