class Laplace(Distribution):
    r"""
    Creates a Laplace distribution parameterized by :attr:`loc` and :attr:`scale`.

    Example::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = Laplace(torch.tensor([0.0]), torch.tensor([1.0]))
        >>> m.sample()  # Laplace distributed with loc=0, scale=1
        tensor([ 0.1046])

    Args:
        loc (float or Tensor): mean of the distribution
        scale (float or Tensor): scale of the distribution
    """
    arg_constraints = {"loc": constraints.real, "scale": constraints.positive}
    support = constraints.real
    has_rsample = True

    @property
    def mean(self):
        # The mean of a Laplace distribution is its location parameter.
        return self.loc

    @property
    def mode(self):
        # The mode coincides with the location parameter as well.
        return self.loc

    @property
    def variance(self):
        # Var[X] = 2 * scale^2.
        return 2 * self.scale**2

    @property
    def stddev(self):
        # sqrt(Var[X]) = sqrt(2) * scale.
        return self.scale * (2**0.5)

    def __init__(self, loc, scale, validate_args=None):
        self.loc, self.scale = broadcast_all(loc, scale)
        # Two plain Python numbers yield a scalar (empty) batch shape;
        # otherwise the batch shape follows the broadcasted parameters.
        if isinstance(loc, Number) and isinstance(scale, Number):
            batch_dims = torch.Size()
        else:
            batch_dims = self.loc.size()
        super().__init__(batch_dims, validate_args=validate_args)
    def rsample(self, sample_shape=torch.Size()):
        """Draw a reparameterized sample via the Laplace inverse CDF.

        Samples ``u`` approximately uniform on ``(-1, 1)`` and maps it through
        ``loc - scale * sign(u) * log1p(-|u|)``, which is differentiable with
        respect to ``loc`` and ``scale`` (reparameterization trick).

        Args:
            sample_shape (torch.Size): extra leading sample dimensions to draw
                on top of the distribution's batch shape.

        Returns:
            Tensor of shape ``self._extended_shape(sample_shape)``.
        """
        shape = self._extended_shape(sample_shape)
        # dtype-specific float limits; eps/tiny below keep log1p(-|u|) finite.
        finfo = torch.finfo(self.loc.dtype)
        if torch._C._get_tracing_state():
            # [JIT WORKAROUND] lack of support for .uniform_()
            # torch.rand is in [0, 1), so u is in [-1, 1).
            u = torch.rand(shape, dtype=self.loc.dtype, device=self.loc.device) * 2 - 1
            # NOTE(review): clamp(min=finfo.tiny) keeps |u| strictly positive
            # before log1p — presumably for numerical stability of the traced
            # graph; it does not cap |u| below 1. Confirm against upstream.
            return self.loc - self.scale * u.sign() * torch.log1p(-u.abs().clamp(min=finfo.tiny))
        # Eager path: uniform_ on [eps - 1, 1) guarantees |u| <= 1 - eps < 1,
        # so log1p(-|u|) never hits log1p(-1) = -inf.
        u = self.loc.new(shape).uniform_(finfo.eps - 1, 1)
        # TODO: If we ever implement tensor.nextafter, below is what we want ideally.
        # u = self.loc.new(shape).uniform_(self.loc.nextafter(-.5, 0), .5)
        return self.loc - self.scale * u.sign() * torch.log1p(-u.abs())
To analyze traffic and optimize your experience, we serve cookies on this site. By clicking or navigating, you agree to allow our usage of cookies. As the current maintainers of this site, Facebook’s Cookies Policy applies. Learn more, including about available controls: Cookies Policy.