class Video(Datapoint):
    """[BETA] :class:`torch.Tensor` subclass for videos.

    A ``Video`` is a tensor with at least 4 dimensions, interpreted as
    ``(..., T, C, H, W)`` (frames, channels, height, width) by the accessor
    properties below.

    Args:
        data (tensor-like): Any data that can be turned into a tensor with
            :func:`torch.as_tensor`.
        dtype (torch.dtype, optional): Desired data type of the video. If
            omitted, will be inferred from ``data``.
        device (torch.device, optional): Desired device of the video. If
            omitted and ``data`` is a :class:`torch.Tensor`, the device is
            taken from it. Otherwise, the video is constructed on the CPU.
        requires_grad (bool, optional): Whether autograd should record
            operations on the video. If omitted and ``data`` is a
            :class:`torch.Tensor`, the value is taken from it. Otherwise,
            defaults to ``False``.
    """

    @classmethod
    def _wrap(cls, tensor: torch.Tensor) -> Video:
        # Re-tag a plain tensor as a Video without copying storage.
        video = tensor.as_subclass(cls)
        return video

    def __new__(
        cls,
        data: Any,
        *,
        dtype: Optional[torch.dtype] = None,
        device: Optional[Union[torch.device, str, int]] = None,
        requires_grad: Optional[bool] = None,
    ) -> Video:
        tensor = cls._to_tensor(data, dtype=dtype, device=device, requires_grad=requires_grad)
        # BUGFIX: validate the converted ``tensor``, not the raw ``data``. ``data``
        # may be a plain Python sequence (e.g. a nested list) that has no ``.ndim``
        # attribute, in which case the original check raised AttributeError instead
        # of the intended ValueError. Also attach a message to the exception.
        if tensor.ndim < 4:
            raise ValueError(f"Expected a tensor with at least 4 dimensions, but got {tensor.ndim}")
        return cls._wrap(tensor)

    @classmethod
    def wrap_like(cls, other: Video, tensor: torch.Tensor) -> Video:
        # ``other`` carries no extra metadata to propagate; wrapping is enough.
        return cls._wrap(tensor)

    def __repr__(self, *, tensor_contents: Any = None) -> str:  # type: ignore[override]
        return self._make_repr()

    @property
    def spatial_size(self) -> Tuple[int, int]:
        # Last two dimensions are (H, W).
        return tuple(self.shape[-2:])  # type: ignore[return-value]

    @property
    def num_channels(self) -> int:
        return self.shape[-3]

    @property
    def num_frames(self) -> int:
        return self.shape[-4]

    def horizontal_flip(self) -> Video:
        output = self._F.horizontal_flip_video(self.as_subclass(torch.Tensor))
        return Video.wrap_like(self, output)

    def vertical_flip(self) -> Video:
        output = self._F.vertical_flip_video(self.as_subclass(torch.Tensor))
        return Video.wrap_like(self, output)

    def resize(  # type: ignore[override]
        self,
        size: List[int],
        interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
        max_size: Optional[int] = None,
        antialias: Optional[Union[str, bool]] = "warn",
    ) -> Video:
        output = self._F.resize_video(
            self.as_subclass(torch.Tensor),
            size,
            interpolation=interpolation,
            max_size=max_size,
            antialias=antialias,
        )
        return Video.wrap_like(self, output)

    def crop(self, top: int, left: int, height: int, width: int) -> Video:
        output = self._F.crop_video(self.as_subclass(torch.Tensor), top, left, height, width)
        return Video.wrap_like(self, output)

    def center_crop(self, output_size: List[int]) -> Video:
        output = self._F.center_crop_video(self.as_subclass(torch.Tensor), output_size=output_size)
        return Video.wrap_like(self, output)

    def resized_crop(
        self,
        top: int,
        left: int,
        height: int,
        width: int,
        size: List[int],
        interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
        antialias: Optional[Union[str, bool]] = "warn",
    ) -> Video:
        output = self._F.resized_crop_video(
            self.as_subclass(torch.Tensor),
            top,
            left,
            height,
            width,
            size=list(size),
            interpolation=interpolation,
            antialias=antialias,
        )
        return Video.wrap_like(self, output)

    def pad(
        self,
        padding: List[int],
        fill: Optional[Union[int, float, List[float]]] = None,
        padding_mode: str = "constant",
    ) -> Video:
        output = self._F.pad_video(self.as_subclass(torch.Tensor), padding, fill=fill, padding_mode=padding_mode)
        return Video.wrap_like(self, output)

    def rotate(
        self,
        angle: float,
        interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
        expand: bool = False,
        center: Optional[List[float]] = None,
        fill: _FillTypeJIT = None,
    ) -> Video:
        output = self._F.rotate_video(
            self.as_subclass(torch.Tensor),
            angle,
            interpolation=interpolation,
            expand=expand,
            fill=fill,
            center=center,
        )
        return Video.wrap_like(self, output)

    def affine(
        self,
        angle: Union[int, float],
        translate: List[float],
        scale: float,
        shear: List[float],
        interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,
        fill: _FillTypeJIT = None,
        center: Optional[List[float]] = None,
    ) -> Video:
        output = self._F.affine_video(
            self.as_subclass(torch.Tensor),
            angle,
            translate=translate,
            scale=scale,
            shear=shear,
            interpolation=interpolation,
            fill=fill,
            center=center,
        )
        return Video.wrap_like(self, output)

    def perspective(
        self,
        startpoints: Optional[List[List[int]]],
        endpoints: Optional[List[List[int]]],
        interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
        fill: _FillTypeJIT = None,
        coefficients: Optional[List[float]] = None,
    ) -> Video:
        output = self._F.perspective_video(
            self.as_subclass(torch.Tensor),
            startpoints,
            endpoints,
            interpolation=interpolation,
            fill=fill,
            coefficients=coefficients,
        )
        return Video.wrap_like(self, output)

    def elastic(
        self,
        displacement: torch.Tensor,
        interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
        fill: _FillTypeJIT = None,
    ) -> Video:
        output = self._F.elastic_video(
            self.as_subclass(torch.Tensor), displacement, interpolation=interpolation, fill=fill
        )
        return Video.wrap_like(self, output)

    def rgb_to_grayscale(self, num_output_channels: int = 1) -> Video:
        # NOTE: intentionally dispatches to the *image* kernel — presumably it
        # broadcasts over the leading frame dimension; there is no dedicated
        # video kernel visible here.
        output = self._F.rgb_to_grayscale_image_tensor(
            self.as_subclass(torch.Tensor), num_output_channels=num_output_channels
        )
        return Video.wrap_like(self, output)

    def adjust_brightness(self, brightness_factor: float) -> Video:
        output = self._F.adjust_brightness_video(self.as_subclass(torch.Tensor), brightness_factor=brightness_factor)
        return Video.wrap_like(self, output)

    def adjust_saturation(self, saturation_factor: float) -> Video:
        output = self._F.adjust_saturation_video(self.as_subclass(torch.Tensor), saturation_factor=saturation_factor)
        return Video.wrap_like(self, output)

    def adjust_contrast(self, contrast_factor: float) -> Video:
        output = self._F.adjust_contrast_video(self.as_subclass(torch.Tensor), contrast_factor=contrast_factor)
        return Video.wrap_like(self, output)

    def adjust_sharpness(self, sharpness_factor: float) -> Video:
        output = self._F.adjust_sharpness_video(self.as_subclass(torch.Tensor), sharpness_factor=sharpness_factor)
        return Video.wrap_like(self, output)

    def adjust_hue(self, hue_factor: float) -> Video:
        output = self._F.adjust_hue_video(self.as_subclass(torch.Tensor), hue_factor=hue_factor)
        return Video.wrap_like(self, output)

    def adjust_gamma(self, gamma: float, gain: float = 1) -> Video:
        output = self._F.adjust_gamma_video(self.as_subclass(torch.Tensor), gamma=gamma, gain=gain)
        return Video.wrap_like(self, output)

    def posterize(self, bits: int) -> Video:
        output = self._F.posterize_video(self.as_subclass(torch.Tensor), bits=bits)
        return Video.wrap_like(self, output)

    def solarize(self, threshold: float) -> Video:
        output = self._F.solarize_video(self.as_subclass(torch.Tensor), threshold=threshold)
        return Video.wrap_like(self, output)

    def autocontrast(self) -> Video:
        output = self._F.autocontrast_video(self.as_subclass(torch.Tensor))
        return Video.wrap_like(self, output)

    def equalize(self) -> Video:
        output = self._F.equalize_video(self.as_subclass(torch.Tensor))
        return Video.wrap_like(self, output)

    def invert(self) -> Video:
        output = self._F.invert_video(self.as_subclass(torch.Tensor))
        return Video.wrap_like(self, output)

    def gaussian_blur(self, kernel_size: List[int], sigma: Optional[List[float]] = None) -> Video:
        output = self._F.gaussian_blur_video(self.as_subclass(torch.Tensor), kernel_size=kernel_size, sigma=sigma)
        return Video.wrap_like(self, output)

    def normalize(self, mean: List[float], std: List[float], inplace: bool = False) -> Video:
        output = self._F.normalize_video(self.as_subclass(torch.Tensor), mean=mean, std=std, inplace=inplace)
        return Video.wrap_like(self, output)
# NOTE(review): the following line was website-scraping residue (a cookie-policy
# notice from the documentation page), not source code. It has been commented out
# so the file remains valid Python.
# To analyze traffic and optimize your experience, we serve cookies on this site.