class Kinetics(VisionDataset):
    """`Generic Kinetics <https://www.deepmind.com/open-source/kinetics>`_ dataset.

    Kinetics-400/600/700 are action recognition video datasets.
    This dataset considers every video as a collection of video clips of fixed size, specified
    by ``frames_per_clip``, where the step in frames between each clip is given by
    ``step_between_clips``.

    To give an example, for 2 videos with 10 and 15 frames respectively, if ``frames_per_clip=5``
    and ``step_between_clips=5``, the dataset size will be (2 + 3) = 5, where the first two
    elements will come from video 1, and the next three elements from video 2.
    Note that we drop clips which do not have exactly ``frames_per_clip`` elements, so not all
    frames in a video might be present.

    Args:
        root (string): Root directory of the Kinetics Dataset.
            Directory should be structured as follows:
            .. code::

                root/
                ├── split
                │   ├── class1
                │   │   ├── clip1.mp4
                │   │   ├── clip2.mp4
                │   │   ├── clip3.mp4
                │   │   ├── ...
                │   ├── class2
                │   │   ├── clipx.mp4
                │   │   └── ...

            Note: split is appended automatically using the split argument.
        frames_per_clip (int): number of frames in a clip
        num_classes (str): select between Kinetics-400 (default), Kinetics-600, and Kinetics-700
        split (str): split of the dataset to consider; supports ``"train"`` (default) ``"val"`` ``"test"``
        frame_rate (float): If omitted, interpolate different frame rate for each clip.
        step_between_clips (int): number of frames between each clip
        transform (callable, optional): A function/transform that takes in a TxHxWxC video
            and returns a transformed version.
        download (bool): Download the official version of the dataset to root folder.
        num_workers (int): Use multiple workers for VideoClips creation
        num_download_workers (int): Use multiprocessing in order to speed up download.
        output_format (str, optional): The format of the output video tensors (before transforms).
            Can be either "THWC" or "TCHW" (default).
            Note that in most other utils and datasets, the default is actually "THWC".

    Returns:
        tuple: A 3-tuple with the following entries:

            - video (Tensor[T, C, H, W] or Tensor[T, H, W, C]): the `T` video frames in torch.uint8 tensor
            - audio (Tensor[K, L]): the audio frames, where `K` is the number of channels and `L` is the
              number of points in torch.float tensor
            - label (int): class of the video clip

    Raises:
        RuntimeError: If ``download is True`` and the video archives are already extracted.
"""_TAR_URLS={"400":"https://s3.amazonaws.com/kinetics/400/{split}/k400_{split}_path.txt","600":"https://s3.amazonaws.com/kinetics/600/{split}/k600_{split}_path.txt","700":"https://s3.amazonaws.com/kinetics/700_2020/{split}/k700_2020_{split}_path.txt",}_ANNOTATION_URLS={"400":"https://s3.amazonaws.com/kinetics/400/annotations/{split}.csv","600":"https://s3.amazonaws.com/kinetics/600/annotations/{split}.csv","700":"https://s3.amazonaws.com/kinetics/700_2020/annotations/{split}.csv",}def__init__(self,root:str,frames_per_clip:int,num_classes:str="400",split:str="train",frame_rate:Optional[int]=None,step_between_clips:int=1,transform:Optional[Callable]=None,extensions:Tuple[str,...]=("avi","mp4"),download:bool=False,num_download_workers:int=1,num_workers:int=1,_precomputed_metadata:Optional[Dict[str,Any]]=None,_video_width:int=0,_video_height:int=0,_video_min_dimension:int=0,_audio_samples:int=0,_audio_channels:int=0,_legacy:bool=False,output_format:str="TCHW",)->None:# TODO: support testself.num_classes=verify_str_arg(num_classes,arg="num_classes",valid_values=["400","600","700"])self.extensions=extensionsself.num_download_workers=num_download_workersself.root=rootself._legacy=_legacyif_legacy:print("Using legacy structure")self.split_folder=rootself.split="unknown"output_format="THWC"ifdownload:raiseValueError("Cannot download the videos using legacy_structure.")else:self.split_folder=path.join(root,split)self.split=verify_str_arg(split,arg="split",valid_values=["train","val","test"])ifdownload:self.download_and_process_videos()super().__init__(self.root)self.classes,class_to_idx=find_classes(self.split_folder)self.samples=make_dataset(self.split_folder,class_to_idx,extensions,is_valid_file=None)video_list=[x[0]forxinself.samples]self.video_clips=VideoClips(video_list,frames_per_clip,step_between_clips,frame_rate,_precomputed_metadata,num_workers=num_workers,_video_width=_video_width,_video_height=_video_height,_video_min_dimension=_video_min_dimension,_audio_samples=_audio_samples,_audio_channels=_audio_channels,output_format=output_format,)self.transform=transformdefdownload_and_process_videos(self)->None:"""Downloads all the videos to the _root_ folder in the expected format."""tic=time.time()self._download_videos()toc=time.time()print("Elapsed time for downloading in mins ",(toc-tic)/60)self._make_ds_structure()toc2=time.time()print("Elapsed time for processing in mins ",(toc2-toc)/60)print("Elapsed time overall in mins ",(toc2-tic)/60)def_download_videos(self)->None:"""download tarballs containing the video to "tars" folder and extract them into the _split_ folder where split is one of the official dataset splits. Raises: RuntimeError: if download folder exists, break to prevent downloading entire dataset again. """ifpath.exists(self.split_folder):raiseRuntimeError(f"The directory {self.split_folder} already exists. 
"f"If you want to re-download or re-extract the images, delete the directory.")tar_path=path.join(self.root,"tars")file_list_path=path.join(self.root,"files")split_url=self._TAR_URLS[self.num_classes].format(split=self.split)split_url_filepath=path.join(file_list_path,path.basename(split_url))ifnotcheck_integrity(split_url_filepath):download_url(split_url,file_list_path)withopen(split_url_filepath)asfile:list_video_urls=[urllib.parse.quote(line,safe="/,:")forlineinfile.read().splitlines()]ifself.num_download_workers==1:forlineinlist_video_urls:download_and_extract_archive(line,tar_path,self.split_folder)else:part=partial(_dl_wrap,tar_path,self.split_folder)poolproc=Pool(self.num_download_workers)poolproc.map(part,list_video_urls)def_make_ds_structure(self)->None:"""move videos from split_folder/ ├── clip1.avi ├── clip2.avi to the correct format as described below: split_folder/ ├── class1 │ ├── clip1.avi """annotation_path=path.join(self.root,"annotations")ifnotcheck_integrity(path.join(annotation_path,f"{self.split}.csv")):download_url(self._ANNOTATION_URLS[self.num_classes].format(split=self.split),annotation_path)annotations=path.join(annotation_path,f"{self.split}.csv")file_fmtstr="{ytid}_{start:06}_{end:06}.mp4"withopen(annotations)ascsvfile:reader=csv.DictReader(csvfile)forrowinreader:f=file_fmtstr.format(ytid=row["youtube_id"],start=int(row["time_start"]),end=int(row["time_end"]),)label=row["label"].replace(" ","_").replace("'","").replace("(","").replace(")","")os.makedirs(path.join(self.split_folder,label),exist_ok=True)downloaded_file=path.join(self.split_folder,f)ifpath.isfile(downloaded_file):os.replace(downloaded_file,path.join(self.split_folder,label,f),)@propertydefmetadata(self)->Dict[str,Any]:returnself.video_clips.metadatadef__len__(self)->int:returnself.video_clips.num_clips()