class RandomErasing(_RandomApplyTransform):
    """[BETA] Randomly select a rectangle region in the input image or video and erase its pixels.

    .. v2betastatus:: RandomErasing transform

    This transform does not support PIL Image.
    'Random Erasing Data Augmentation' by Zhong et al. See https://arxiv.org/abs/1708.04896

    Args:
        p (float, optional): probability that the random erasing operation will be performed.
        scale (tuple of float, optional): range of the proportion of the erased area relative to the input image area.
        ratio (tuple of float, optional): range of the aspect ratio of the erased area.
        value (number or tuple of numbers): erasing value. Default is 0. If a single number, it is used to
            erase all pixels. If a tuple of length 3, it is used to erase the R, G, B channels respectively.
            If the string 'random', each pixel is erased with a random value.
        inplace (bool, optional): if True, perform the erasing in place. Default is False.

    Returns:
        Erased input.

    Example:
        >>> from torchvision.transforms import v2 as transforms
        >>>
        >>> transform = transforms.Compose([
        >>>     transforms.RandomHorizontalFlip(),
        >>>     transforms.PILToTensor(),
        >>>     transforms.ConvertImageDtype(torch.float),
        >>>     transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
        >>>     transforms.RandomErasing(),
        >>> ])
    """

    _v1_transform_cls = _transforms.RandomErasing

    def _extract_params_for_v1_transform(self) -> Dict[str, Any]:
        return dict(
            super()._extract_params_for_v1_transform(),
            value="random" if self.value is None else self.value,
        )

    _transformed_types = (is_simple_tensor, datapoints.Image, PIL.Image.Image, datapoints.Video)

    def __init__(
        self,
        p: float = 0.5,
        scale: Tuple[float, float] = (0.02, 0.33),
        ratio: Tuple[float, float] = (0.3, 3.3),
        value: float = 0.0,
        inplace: bool = False,
    ):
        super().__init__(p=p)
        if not isinstance(value, (numbers.Number, str, tuple, list)):
            raise TypeError("Argument value should be either a number or str or a sequence")
        if isinstance(value, str) and value != "random":
            raise ValueError("If value is str, it should be 'random'")
        if not isinstance(scale, (tuple, list)):
            raise TypeError("Scale should be a sequence")
        if not isinstance(ratio, (tuple, list)):
            raise TypeError("Ratio should be a sequence")
        if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
            warnings.warn("Scale and ratio should be of kind (min, max)")
        if scale[0] < 0 or scale[1] > 1:
            raise ValueError("Scale should be between 0 and 1")

        self.scale = scale
        self.ratio = ratio
        if isinstance(value, (int, float)):
            self.value = [float(value)]
        elif isinstance(value, str):
            self.value = None
        elif isinstance(value, (list, tuple)):
            self.value = [float(v) for v in value]
        else:
            self.value = value
        self.inplace = inplace

        self._log_ratio = torch.log(torch.tensor(self.ratio))

    def _get_params(self, flat_inputs: List[Any]) -> Dict[str, Any]:
        img_c, img_h, img_w = query_chw(flat_inputs)

        if self.value is not None and not (len(self.value) in (1, img_c)):
            raise ValueError(
                f"If value is a sequence, it should have either a single value or {img_c} (number of inpt channels)"
            )

        area = img_h * img_w

        log_ratio = self._log_ratio
        for _ in range(10):
            erase_area = area * torch.empty(1).uniform_(self.scale[0], self.scale[1]).item()
            aspect_ratio = torch.exp(
                torch.empty(1).uniform_(
                    log_ratio[0],  # type: ignore[arg-type]
                    log_ratio[1],  # type: ignore[arg-type]
                )
            ).item()

            h = int(round(math.sqrt(erase_area * aspect_ratio)))
            w = int(round(math.sqrt(erase_area / aspect_ratio)))
            if not (h < img_h and w < img_w):
                continue

            if self.value is None:
                v = torch.empty([img_c, h, w], dtype=torch.float32).normal_()
            else:
                v = torch.tensor(self.value)[:, None, None]

            i = torch.randint(0, img_h - h + 1, size=(1,)).item()
            j = torch.randint(0, img_w - w + 1, size=(1,)).item()
            break
        else:
            i, j, h, w, v = 0, 0, img_h, img_w, None

        return dict(i=i, j=j, h=h, w=w, v=v)

    def _transform(
        self, inpt: Union[datapoints._ImageType, datapoints._VideoType], params: Dict[str, Any]
    ) -> Union[datapoints._ImageType, datapoints._VideoType]:
        if params["v"] is not None:
            inpt = F.erase(inpt, **params, inplace=self.inplace)

        return inpt
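
A minimal usage sketch, not part of the torchvision source above: it assumes only the public v2 API shown in the docstring, applies RandomErasing directly to a CHW float tensor, uses p=1.0 so the erasing always fires, and value="random" so the selected rectangle is filled with standard-normal noise (the self.value is None branch in _get_params). The input tensor here is hypothetical.

import torch
from torchvision.transforms import v2 as transforms

# Hypothetical 3-channel float image in [0, 1]; any CHW image tensor works.
img = torch.rand(3, 224, 224)

# p=1.0 guarantees the transform is applied; value="random" fills the chosen
# rectangle with per-pixel Gaussian noise instead of a constant value.
eraser = transforms.RandomErasing(p=1.0, scale=(0.02, 0.33), ratio=(0.3, 3.3), value="random")

out = eraser(img)  # same shape as img, with one random rectangle overwritten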