TorchScript Builtins
This is a full reference of the functions and Tensor methods accessible in TorchScript.
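Any of the signatures listed on this page can be called from code compiled with torch.jit.script, with the overload chosen according to the argument types at the call site. A minimal sketch of what that looks like in practice (the function fused_stats and its eps parameter are illustrative, not part of this reference; torch.jit.script and the Tensor methods it calls are standard PyTorch APIs):

import torch

@torch.jit.script
def fused_stats(x: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
    # mean, sub, abs, clamp_min and div are Tensor methods listed in this
    # reference; TorchScript type-checks each call against these signatures.
    mu = x.mean(dim=[-1], keepdim=True)
    centered = x.sub(mu)
    scale = centered.abs().mean(dim=[-1], keepdim=True).clamp_min(eps)
    return centered.div(scale)

print(fused_stats(torch.randn(4, 8)).shape)  # torch.Size([4, 8])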
Supported Tensor Methods
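Many of the entries below form overload sets: the same method is listed once per accepted argument type (for example Tensor versus number, or with and without an out argument), and TorchScript selects among them based on the types at the call site. A small illustrative sketch (clip_and_compare is a hypothetical function name; clamp and eq are the methods listed below):

import torch

@torch.jit.script
def clip_and_compare(x: torch.Tensor, lo: torch.Tensor, hi: torch.Tensor) -> torch.Tensor:
    # Resolves to clamp(min : Optional[number], max : Optional[number]).
    unit = x.clamp(min=0.0, max=1.0)
    # Resolves to clamp(min : Optional[Tensor], max : Optional[Tensor]).
    banded = x.clamp(min=lo, max=hi)
    # eq likewise has Tensor and number overloads; the Tensor overload is used here.
    return unit.eq(banded)

x = torch.randn(3, 3)
print(clip_and_compare(x, torch.zeros_like(x), torch.ones_like(x)))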
Tensor.__and__(other : number) -> Tensor
Tensor.__and__(other : Tensor) -> Tensor
Tensor.__iand__(other : Tensor) -> Tensor
Tensor.__iand__(other : number) -> Tensor
Tensor.__ilshift__(other : Tensor) -> Tensor
Tensor.__ilshift__(other : number) -> Tensor
Tensor.__ior__(other : Tensor) -> Tensor
Tensor.__ior__(other : number) -> Tensor
Tensor.__irshift__(other : Tensor) -> Tensor
Tensor.__irshift__(other : number) -> Tensor
Tensor.__ixor__(other : Tensor) -> Tensor
Tensor.__ixor__(other : number) -> Tensor
Tensor.__lshift__(other : Tensor) -> Tensor
Tensor.__lshift__(other : number) -> Tensor
Tensor.__lshift__(other : number,
out : Tensor) -> Tensor
Tensor.__lshift__(other : Tensor,
out : Tensor) -> Tensor
Tensor.__or__(other : Tensor) -> Tensor
Tensor.__or__(other : number) -> Tensor
Tensor.__rshift__(other : Tensor) -> Tensor
Tensor.__rshift__(other : number) -> Tensor
Tensor.__rshift__(other : number,
out : Tensor) -> Tensor
Tensor.__rshift__(other : Tensor,
out : Tensor) -> Tensor
Tensor.__xor__(other : Tensor) -> Tensor
Tensor.__xor__(other : number) -> Tensor
Tensor.abs() -> Tensor
Tensor.abs(out : Tensor) -> Tensor
Tensor.abs_() -> Tensor
Tensor.absolute() -> Tensor
Tensor.absolute(out : Tensor) -> Tensor
Tensor.absolute_() -> Tensor
Tensor.acos() -> Tensor
Tensor.acos(out : Tensor) -> Tensor
Tensor.acos_() -> Tensor
Tensor.acosh() -> Tensor
Tensor.acosh(out : Tensor) -> Tensor
Tensor.acosh_() -> Tensor
Tensor.add(other : Tensor,
alpha : number=1) -> Tensor
Tensor.add(other : number,
alpha : number=1) -> Tensor
Tensor.add(other : Tensor,
alpha : number=1,
out : Tensor) -> Tensor
Tensor.add(other : number,
alpha : number=1,
out : Tensor) -> Tensor
Tensor.add_(other : Tensor,
alpha : number=1) -> Tensor
Tensor.add_(other : number,
alpha : number=1) -> Tensor
Tensor.addbmm(batch1 : Tensor,
batch2 : Tensor,
beta : number=1,
alpha : number=1) -> Tensor
Tensor.addbmm(batch1 : Tensor,
batch2 : Tensor,
beta : number=1,
alpha : number=1,
out : Tensor) -> Tensor
Tensor.addbmm_(batch1 : Tensor,
batch2 : Tensor,
beta : number=1,
alpha : number=1) -> Tensor
Tensor.addcdiv(tensor1 : Tensor,
tensor2 : Tensor,
value : number=1) -> Tensor
Tensor.addcdiv(tensor1 : Tensor,
tensor2 : Tensor,
value : number=1,
out : Tensor) -> Tensor
Tensor.addcdiv_(tensor1 : Tensor,
tensor2 : Tensor,
value : number=1) -> Tensor
Tensor.addcmul(tensor1 : Tensor,
tensor2 : Tensor,
value : number=1) -> Tensor
Tensor.addcmul(tensor1 : Tensor,
tensor2 : Tensor,
value : number=1,
out : Tensor) -> Tensor
Tensor.addcmul_(tensor1 : Tensor,
tensor2 : Tensor,
value : number=1) -> Tensor
Tensor.addmm(mat1 : Tensor,
mat2 : Tensor,
beta : number=1,
alpha : number=1) -> Tensor
Tensor.addmm(mat1 : Tensor,
mat2 : Tensor,
beta : number=1,
alpha : number=1,
out : Tensor) -> Tensor
Tensor.addmm_(mat1 : Tensor,
mat2 : Tensor,
beta : number=1,
alpha : number=1) -> Tensor
Tensor.addmv(mat : Tensor,
vec : Tensor,
beta : number=1,
alpha : number=1) -> Tensor
Tensor.addmv(mat : Tensor,
vec : Tensor,
beta : number=1,
alpha : number=1,
out : Tensor) -> Tensor
Tensor.addmv_(mat : Tensor,
vec : Tensor,
beta : number=1,
alpha : number=1) -> Tensor
Tensor.addr(vec1 : Tensor,
vec2 : Tensor,
beta : number=1,
alpha : number=1) -> Tensor
Tensor.addr(vec1 : Tensor,
vec2 : Tensor,
beta : number=1,
alpha : number=1,
out : Tensor) -> Tensor
Tensor.addr_(vec1 : Tensor,
vec2 : Tensor,
beta : number=1,
alpha : number=1) -> Tensor
Tensor.adjoint() -> Tensor
Tensor.align_as(other : Tensor) -> Tensor
Tensor.align_to(names : List[str]) -> Tensor
Tensor.align_to(order : List[str],
ellipsis_idx : int) -> Tensor
Tensor.all() -> Tensor
Tensor.all(dim : int,
keepdim : bool=False) -> Tensor
Tensor.all(dim : Optional[List[int]],
keepdim : bool=False) -> Tensor
Tensor.all(dim : int,
keepdim : bool=False,
out : Tensor) -> Tensor
Tensor.all(dim : Optional[List[int]],
keepdim : bool=False,
out : Tensor) -> Tensor
Tensor.all(out : Tensor) -> Tensor
Tensor.all(dim : str,
keepdim : bool=False) -> Tensor
Tensor.all(dim : str,
keepdim : bool=False,
out : Tensor) -> Tensor
Tensor.allclose(other : Tensor,
rtol : float=1e-05,
atol : float=1e-08,
equal_nan : bool=False) -> bool
Tensor.amax(dim : List[int]=[],
keepdim : bool=False) -> Tensor
Tensor.amax(dim : List[int]=[],
keepdim : bool=False,
out : Tensor) -> Tensor
Tensor.amin(dim : List[int]=[],
keepdim : bool=False) -> Tensor
Tensor.amin(dim : List[int]=[],
keepdim : bool=False,
out : Tensor) -> Tensor
Tensor.aminmax(dim : Optional[int],
keepdim : bool=False) -> Tuple[Tensor, Tensor]
Tensor.aminmax(dim : Optional[int],
keepdim : bool=False,
min : Tensor,
max : Tensor) -> Tuple[Tensor, Tensor]
Tensor.angle() -> Tensor
Tensor.angle(out : Tensor) -> Tensor
Tensor.any() -> Tensor
Tensor.any(dim : int,
keepdim : bool=False) -> Tensor
Tensor.any(dim : Optional[List[int]],
keepdim : bool=False) -> Tensor
Tensor.any(dim : int,
keepdim : bool=False,
out : Tensor) -> Tensor
Tensor.any(dim : Optional[List[int]],
keepdim : bool=False,
out : Tensor) -> Tensor
Tensor.any(out : Tensor) -> Tensor
Tensor.any(dim : str,
keepdim : bool=False) -> Tensor
Tensor.any(dim : str,
keepdim : bool=False,
out : Tensor) -> Tensor
Tensor.arccos() -> Tensor
Tensor.arccos(out : Tensor) -> Tensor
Tensor.arccos_() -> Tensor
Tensor.arccosh() -> Tensor
Tensor.arccosh(out : Tensor) -> Tensor
Tensor.arccosh_() -> Tensor
Tensor.arcsin() -> Tensor
Tensor.arcsin(out : Tensor) -> Tensor
Tensor.arcsin_() -> Tensor
Tensor.arcsinh() -> Tensor
Tensor.arcsinh(out : Tensor) -> Tensor
Tensor.arcsinh_() -> Tensor
Tensor.arctan() -> Tensor
Tensor.arctan(out : Tensor) -> Tensor
Tensor.arctan2(other : Tensor) -> Tensor
Tensor.arctan2(other : Tensor,
out : Tensor) -> Tensor
Tensor.arctan2_(other : Tensor) -> Tensor
Tensor.arctan_() -> Tensor
Tensor.arctanh() -> Tensor
Tensor.arctanh(out : Tensor) -> Tensor
Tensor.arctanh_() -> Tensor
Tensor.argmax(dim : Optional[int],
keepdim : bool=False) -> Tensor
Tensor.argmax(dim : Optional[int],
keepdim : bool=False,
out : Tensor) -> Tensor
Tensor.argmin(dim : Optional[int],
keepdim : bool=False) -> Tensor
Tensor.argmin(dim : Optional[int],
keepdim : bool=False,
out : Tensor) -> Tensor
Tensor.argsort(dim : int=-1,
descending : bool=False) -> Tensor
Tensor.argsort(stable : bool,
dim : int=-1,
descending : bool=False) -> Tensor
Tensor.argsort(stable : bool,
dim : int=-1,
descending : bool=False,
out : Tensor) -> Tensor
Tensor.argsort(dim : str,
descending : bool=False) -> Tensor
Tensor.argwhere() -> Tensor
Tensor.as_strided(size : List[int],
stride : List[int],
storage_offset : Optional[int]) -> Tensor
Tensor.as_strided_(size : List[int],
stride : List[int],
storage_offset : Optional[int]) -> Tensor
Tensor.as_strided_scatter(src : Tensor,
size : List[int],
stride : List[int],
storage_offset : Optional[int]) -> Tensor
Tensor.as_strided_scatter(src : Tensor,
size : List[int],
stride : List[int],
storage_offset : Optional[int],
out : Tensor) -> Tensor
Tensor.asin() -> Tensor
Tensor.asin(out : Tensor) -> Tensor
Tensor.asin_() -> Tensor
Tensor.asinh() -> Tensor
Tensor.asinh(out : Tensor) -> Tensor
Tensor.asinh_() -> Tensor
Tensor.atan() -> Tensor
Tensor.atan(out : Tensor) -> Tensor
Tensor.atan2(other : Tensor) -> Tensor
Tensor.atan2(other : Tensor,
out : Tensor) -> Tensor
Tensor.atan2_(other : Tensor) -> Tensor
Tensor.atan_() -> Tensor
Tensor.atanh() -> Tensor
Tensor.atanh(out : Tensor) -> Tensor
Tensor.atanh_() -> Tensor
Tensor.backward(gradient : Optional[Tensor],
retain_graph : Optional[bool],
create_graph : bool=False) -> Tuple[]
Tensor.baddbmm(batch1 : Tensor,
batch2 : Tensor,
beta : number=1,
alpha : number=1) -> Tensor
Tensor.baddbmm(batch1 : Tensor,
batch2 : Tensor,
beta : number=1,
alpha : number=1,
out : Tensor) -> Tensor
Tensor.baddbmm_(batch1 : Tensor,
batch2 : Tensor,
beta : number=1,
alpha : number=1) -> Tensor
Tensor.bernoulli(generator : Optional[Generator]) -> Tensor
Tensor.bernoulli(generator : Optional[Generator],
out : Tensor) -> Tensor
Tensor.bernoulli(p : float,
generator : Optional[Generator]) -> Tensor
Tensor.bernoulli(p : Tensor,
generator : Optional[Generator]) -> Tensor
Tensor.bernoulli(p : Tensor,
generator : Optional[Generator],
out : Tensor) -> Tensor
Tensor.bernoulli(p : float=0.5,
generator : Optional[Generator],
out : Tensor) -> Tensor
Tensor.bernoulli_(p : Tensor,
generator : Optional[Generator]) -> Tensor
Tensor.bernoulli_(p : float=0.5,
generator : Optional[Generator]) -> Tensor
Tensor.bincount(weights : Optional[Tensor],
minlength : int=0) -> Tensor
Tensor.bincount(weights : Optional[Tensor],
minlength : int=0,
out : Tensor) -> Tensor
Tensor.bitwise_and(other : Tensor) -> Tensor
Tensor.bitwise_and(other : number) -> Tensor
Tensor.bitwise_and(other : Tensor,
out : Tensor) -> Tensor
Tensor.bitwise_and(other : number,
out : Tensor) -> Tensor
Tensor.bitwise_and_(other : Tensor) -> Tensor
Tensor.bitwise_and_(other : number) -> Tensor
Tensor.bitwise_left_shift(other : Tensor) -> Tensor
Tensor.bitwise_left_shift(other : number) -> Tensor
Tensor.bitwise_left_shift(other : Tensor,
out : Tensor) -> Tensor
Tensor.bitwise_left_shift(other : number,
out : Tensor) -> Tensor
Tensor.bitwise_left_shift_(other : number) -> Tensor
Tensor.bitwise_left_shift_(other : Tensor) -> Tensor
Tensor.bitwise_not() -> Tensor
Tensor.bitwise_not(out : Tensor) -> Tensor
Tensor.bitwise_not_() -> Tensor
Tensor.bitwise_or(other : Tensor) -> Tensor
Tensor.bitwise_or(other : number) -> Tensor
Tensor.bitwise_or(other : Tensor,
out : Tensor) -> Tensor
Tensor.bitwise_or(other : number,
out : Tensor) -> Tensor
Tensor.bitwise_or_(other : Tensor) -> Tensor
Tensor.bitwise_or_(other : number) -> Tensor
Tensor.bitwise_right_shift(other : Tensor) -> Tensor
Tensor.bitwise_right_shift(other : number) -> Tensor
Tensor.bitwise_right_shift(other : Tensor,
out : Tensor) -> Tensor
Tensor.bitwise_right_shift(other : number,
out : Tensor) -> Tensor
Tensor.bitwise_right_shift_(other : number) -> Tensor
Tensor.bitwise_right_shift_(other : Tensor) -> Tensor
Tensor.bitwise_xor(other : Tensor) -> Tensor
Tensor.bitwise_xor(other : number) -> Tensor
Tensor.bitwise_xor(other : Tensor,
out : Tensor) -> Tensor
Tensor.bitwise_xor(other : number,
out : Tensor) -> Tensor
Tensor.bitwise_xor_(other : Tensor) -> Tensor
Tensor.bitwise_xor_(other : number) -> Tensor
Tensor.bmm(mat2 : Tensor) -> Tensor
Tensor.bmm(mat2 : Tensor,
out : Tensor) -> Tensor
Tensor.broadcast_to(size : List[int]) -> Tensor
Tensor.cauchy_(median : float=0.0,
sigma : float=1.0,
generator : Optional[Generator]) -> Tensor
Tensor.ccol_indices() -> Tensor
Tensor.ceil() -> Tensor
Tensor.ceil(out : Tensor) -> Tensor
Tensor.ceil_() -> Tensor
Tensor.chalf(memory_format : Optional[int]) -> Tensor
Tensor.cholesky(upper : bool=False) -> Tensor
Tensor.cholesky(upper : bool=False,
out : Tensor) -> Tensor
Tensor.cholesky_inverse(upper : bool=False) -> Tensor
Tensor.cholesky_inverse(upper : bool=False,
out : Tensor) -> Tensor
Tensor.cholesky_solve(input2 : Tensor,
upper : bool=False) -> Tensor
Tensor.cholesky_solve(input2 : Tensor,
upper : bool=False,
out : Tensor) -> Tensor
Tensor.chunk(chunks : int,
dim : int=0) -> List[Tensor]
Tensor.clamp(min : Optional[number],
max : Optional[number]) -> Tensor
Tensor.clamp(min : Optional[Tensor],
max : Optional[Tensor]) -> Tensor
Tensor.clamp(min : Optional[number],
max : Optional[number],
out : Tensor) -> Tensor
Tensor.clamp(min : Optional[Tensor],
max : Optional[Tensor],
out : Tensor) -> Tensor
Tensor.clamp_(min : Optional[number],
max : Optional[number]) -> Tensor
Tensor.clamp_(min : Optional[Tensor],
max : Optional[Tensor]) -> Tensor
Tensor.clamp_max(max : number) -> Tensor
Tensor.clamp_max(max : Tensor) -> Tensor
Tensor.clamp_max(max : number,
out : Tensor) -> Tensor
Tensor.clamp_max(max : Tensor,
out : Tensor) -> Tensor
Tensor.clamp_max_(max : number) -> Tensor
Tensor.clamp_max_(max : Tensor) -> Tensor
Tensor.clamp_min(min : number) -> Tensor
Tensor.clamp_min(min : Tensor) -> Tensor
Tensor.clamp_min(min : number,
out : Tensor) -> Tensor
Tensor.clamp_min(min : Tensor,
out : Tensor) -> Tensor
Tensor.clamp_min_(min : number) -> Tensor
Tensor.clamp_min_(min : Tensor) -> Tensor
Tensor.clip(min : Optional[number],
max : Optional[number]) -> Tensor
Tensor.clip(min : Optional[Tensor],
max : Optional[Tensor]) -> Tensor
Tensor.clip(min : Optional[number],
max : Optional[number],
out : Tensor) -> Tensor
Tensor.clip(min : Optional[Tensor],
max : Optional[Tensor],
out : Tensor) -> Tensor
Tensor.clip_(min : Optional[number],
max : Optional[number]) -> Tensor
Tensor.clip_(min : Optional[Tensor],
max : Optional[Tensor]) -> Tensor
Tensor.clone(memory_format : Optional[int]) -> Tensor
Tensor.clone(memory_format : Optional[int],
out : Tensor) -> Tensor
Tensor.coalesce() -> Tensor
Tensor.col_indices() -> Tensor
Tensor.conj() -> Tensor
Tensor.conj_physical() -> Tensor
Tensor.conj_physical(out : Tensor) -> Tensor
Tensor.conj_physical_() -> Tensor
Tensor.contiguous(memory_format : int=0) -> Tensor
Tensor.copy_(src : Tensor,
non_blocking : bool=False) -> Tensor
Tensor.copy_(other : Tensor) -> Tensor
Tensor.copy_(other : int) -> Tensor
Tensor.copy_(other : float) -> Tensor
Tensor.copysign(other : Tensor) -> Tensor
Tensor.copysign(other : number) -> Tensor
Tensor.copysign(other : Tensor,
out : Tensor) -> Tensor
Tensor.copysign(other : number,
out : Tensor) -> Tensor
Tensor.copysign_(other : Tensor) -> Tensor
Tensor.copysign_(other : number) -> Tensor
Tensor.corrcoef() -> Tensor
Tensor.cos() -> Tensor
Tensor.cos(out : Tensor) -> Tensor
Tensor.cos_() -> Tensor
Tensor.cosh() -> Tensor
Tensor.cosh(out : Tensor) -> Tensor
Tensor.cosh_() -> Tensor
Tensor.count_nonzero(dim : List[int]) -> Tensor
Tensor.count_nonzero(dim : List[int],
out : Tensor) -> Tensor
Tensor.count_nonzero(dim : Optional[int]) -> Tensor
Tensor.count_nonzero(dim : Optional[int],
out : Tensor) -> Tensor
Tensor.cov(correction : int=1,
fweights : Optional[Tensor],
aweights : Optional[Tensor]) -> Tensor
Tensor.cpu() -> Tensor
Tensor.cross(other : Tensor,
dim : Optional[int]) -> Tensor
Tensor.cross(other : Tensor,
dim : Optional[int],
out : Tensor) -> Tensor
Tensor.crow_indices() -> Tensor
Tensor.cuda() -> Tensor
Tensor.cummax(dim : int) -> Tuple[Tensor, Tensor]
Tensor.cummax(dim : str) -> Tuple[Tensor, Tensor]
Tensor.cummax(dim : str,
values : Tensor,
indices : Tensor) -> Tuple[Tensor, Tensor]
Tensor.cummax(dim : int,
values : Tensor,
indices : Tensor) -> Tuple[Tensor, Tensor]
Tensor.cummin(dim : int) -> Tuple[Tensor, Tensor]
Tensor.cummin(dim : str) -> Tuple[Tensor, Tensor]
Tensor.cummin(dim : str,
values : Tensor,
indices : Tensor) -> Tuple[Tensor, Tensor]
Tensor.cummin(dim : int,
values : Tensor,
indices : Tensor) -> Tuple[Tensor, Tensor]
Tensor.cumprod(dim : int,
dtype : Optional[int]) -> Tensor
Tensor.cumprod(dim : str,
dtype : Optional[int]) -> Tensor
Tensor.cumprod(dim : str,
dtype : Optional[int],
out : Tensor) -> Tensor
Tensor.cumprod(dim : int,
dtype : Optional[int],
out : Tensor) -> Tensor
Tensor.cumprod_(dim : int,
dtype : Optional[int]) -> Tensor
Tensor.cumprod_(dim : str,
dtype : Optional[int]) -> Tensor
Tensor.cumsum(dim : int,
dtype : Optional[int]) -> Tensor
Tensor.cumsum(dim : str,
dtype : Optional[int]) -> Tensor
Tensor.cumsum(dim : str,
dtype : Optional[int],
out : Tensor) -> Tensor
Tensor.cumsum(dim : int,
dtype : Optional[int],
out : Tensor) -> Tensor
Tensor.cumsum_(dim : int,
dtype : Optional[int]) -> Tensor
Tensor.cumsum_(dim : str,
dtype : Optional[int]) -> Tensor
Tensor.data() -> Tensor
Tensor.deg2rad() -> Tensor
Tensor.deg2rad(out : Tensor) -> Tensor
Tensor.deg2rad_() -> Tensor
Tensor.dense_dim() -> int
Tensor.dequantize() -> Tensor
Tensor.dequantize(out : Tensor) -> Tensor
Tensor.det() -> Tensor
Tensor.detach() -> Tensor
Tensor.detach_() -> Tensor
Tensor.diag(diagonal : int=0) -> Tensor
Tensor.diag(diagonal : int=0,
out : Tensor) -> Tensor
Tensor.diag_embed(offset : int=0,
dim1 : int=-2,
dim2 : int=-1) -> Tensor
Tensor.diag_embed(offset : int=0,
dim1 : int=-2,
dim2 : int=-1,
out : Tensor) -> Tensor
Tensor.diagflat(offset : int=0) -> Tensor
Tensor.diagonal(offset : int=0,
dim1 : int=0,
dim2 : int=1) -> Tensor
Tensor.diagonal(outdim : str,
dim1 : str,
dim2 : str,
offset : int=0) -> Tensor
Tensor.diagonal_scatter(src : Tensor,
offset : int=0,
dim1 : int=0,
dim2 : int=1) -> Tensor
Tensor.diagonal_scatter(src : Tensor,
offset : int=0,
dim1 : int=0,
dim2 : int=1,
out : Tensor) -> Tensor
Tensor.diff(n : int=1,
dim : int=-1,
prepend : Optional[Tensor],
append : Optional[Tensor]) -> Tensor
Tensor.diff(n : int=1,
dim : int=-1,
prepend : Optional[Tensor],
append : Optional[Tensor],
out : Tensor) -> Tensor
Tensor.digamma() -> Tensor
Tensor.digamma(out : Tensor) -> Tensor
Tensor.digamma_() -> Tensor
Tensor.dim() -> int
Tensor.dist(other : Tensor,
p : number=2) -> Tensor
Tensor.dist(other : Tensor,
p : number=2,
out : Tensor) -> Tensor
Tensor.div(other : Tensor) -> Tensor
Tensor.div(other : number) -> Tensor
Tensor.div(other : Tensor,
rounding_mode : Optional[str]) -> Tensor
Tensor.div(other : number,
rounding_mode : Optional[str]) -> Tensor
Tensor.div(other : Tensor,
out : Tensor) -> Tensor
Tensor.div(other : Tensor,
rounding_mode : Optional[str],
out : Tensor) -> Tensor
Tensor.div(other : number,
out : Tensor) -> Tensor
Tensor.div(other : number,
rounding_mode : Optional[str],
out : Tensor) -> Tensor
Tensor.div_(other : Tensor) -> Tensor
Tensor.div_(other : Tensor,
rounding_mode : Optional[str]) -> Tensor
Tensor.div_(other : number) -> Tensor
Tensor.div_(other : number,
rounding_mode : Optional[str]) -> Tensor
Tensor.divide(other : Tensor) -> Tensor
Tensor.divide(other : number) -> Tensor
Tensor.divide(other : Tensor,
rounding_mode : Optional[str]) -> Tensor
Tensor.divide(other : number,
rounding_mode : Optional[str]) -> Tensor
Tensor.divide(other : Tensor,
out : Tensor) -> Tensor
Tensor.divide(other : Tensor,
rounding_mode : Optional[str],
out : Tensor) -> Tensor
Tensor.divide_(other : Tensor) -> Tensor
Tensor.divide_(other : Tensor,
rounding_mode : Optional[str]) -> Tensor
Tensor.divide_(other : number,
rounding_mode : Optional[str]) -> Tensor
Tensor.divide_(other : number) -> Tensor
Tensor.dot(tensor : Tensor) -> Tensor
Tensor.dot(tensor : Tensor,
out : Tensor) -> Tensor
Tensor.dsplit(sections : int) -> List[Tensor]
Tensor.dsplit(indices : List[int]) -> List[Tensor]
Tensor.element_size() -> int
Tensor.eq(other : Tensor) -> Tensor
Tensor.eq(other : number) -> Tensor
Tensor.eq(other : number,
out : Tensor) -> Tensor
Tensor.eq(other : Tensor,
out : Tensor) -> Tensor
Tensor.eq_(other : number) -> Tensor
Tensor.eq_(other : Tensor) -> Tensor
Tensor.equal(other : Tensor) -> bool
Tensor.erf() -> Tensor
Tensor.erf(out : Tensor) -> Tensor
Tensor.erf_() -> Tensor
Tensor.erfc() -> Tensor
Tensor.erfc(out : Tensor) -> Tensor
Tensor.erfc_() -> Tensor
Tensor.erfinv() -> Tensor
Tensor.erfinv(out : Tensor) -> Tensor
Tensor.erfinv_() -> Tensor
Tensor.exp() -> Tensor
Tensor.exp(out : Tensor) -> Tensor
Tensor.exp2() -> Tensor
Tensor.exp2(out : Tensor) -> Tensor
Tensor.exp2_() -> Tensor
Tensor.exp_() -> Tensor
Tensor.expand(size : List[int],
implicit : bool=False) -> Tensor
Tensor.expand_as(other : Tensor) -> Tensor
Tensor.expm1() -> Tensor
Tensor.expm1(out : Tensor) -> Tensor
Tensor.expm1_() -> Tensor
Tensor.exponential_(lambd : float=1.0,
generator : Optional[Generator]) -> Tensor
Tensor.fill_(value : number) -> Tensor
Tensor.fill_(value : Tensor) -> Tensor
Tensor.fill_diagonal_(fill_value : number,
wrap : bool=False) -> Tensor
Tensor.fix() -> Tensor
Tensor.fix(out : Tensor) -> Tensor
Tensor.fix_() -> Tensor
Tensor.flatten(start_dim : int=0,
end_dim : int=-1) -> Tensor
Tensor.flatten(dims : List[str],
out_dim : str) -> Tensor
Tensor.flatten(start_dim : int,
end_dim : int,
out_dim : str) -> Tensor
Tensor.flatten(start_dim : str,
end_dim : str,
out_dim : str) -> Tensor
Tensor.flip(dims : List[int]) -> Tensor
Tensor.flip(dims : List[int],
out : Tensor) -> Tensor
Tensor.fliplr() -> Tensor
Tensor.flipud() -> Tensor
Tensor.float_power(exponent : Tensor) -> Tensor
Tensor.float_power(exponent : number) -> Tensor
Tensor.float_power(exponent : Tensor,
out : Tensor) -> Tensor
Tensor.float_power(exponent : number,
out : Tensor) -> Tensor
Tensor.float_power_(exponent : Tensor) -> Tensor
Tensor.float_power_(exponent : number) -> Tensor
Tensor.floor() -> Tensor
Tensor.floor(out : Tensor) -> Tensor
Tensor.floor_() -> Tensor
Tensor.floor_divide(other : Tensor) -> Tensor
Tensor.floor_divide(other : number) -> Tensor
Tensor.floor_divide(other : Tensor,
out : Tensor) -> Tensor
Tensor.floor_divide(other : number,
out : Tensor) -> Tensor
Tensor.floor_divide_(other : number) -> Tensor
Tensor.floor_divide_(other : Tensor) -> Tensor
Tensor.fmax(other : Tensor) -> Tensor
Tensor.fmax(other : Tensor,
out : Tensor) -> Tensor
Tensor.fmin(other : Tensor) -> Tensor
Tensor.fmin(other : Tensor,
out : Tensor) -> Tensor
Tensor.fmod(other : Tensor) -> Tensor
Tensor.fmod(other : number) -> Tensor
Tensor.fmod(other : Tensor,
out : Tensor) -> Tensor
Tensor.fmod(other : number,
out : Tensor) -> Tensor
Tensor.fmod_(other : Tensor) -> Tensor
Tensor.fmod_(other : number) -> Tensor
Tensor.frac() -> Tensor
Tensor.frac(out : Tensor) -> Tensor
Tensor.frac_() -> Tensor
Tensor.frexp() -> Tuple[Tensor, Tensor]
Tensor.frexp(mantissa : Tensor,
exponent : Tensor) -> Tuple[Tensor, Tensor]
Tensor.gather(dim : int,
index : Tensor,
sparse_grad : bool=False) -> Tensor
Tensor.gather(dim : int,
index : Tensor,
sparse_grad : bool=False,
out : Tensor) -> Tensor
Tensor.gather(dim : str,
index : Tensor,
sparse_grad : bool=False) -> Tensor
Tensor.gather(dim : str,
index : Tensor,
sparse_grad : bool=False,
out : Tensor) -> Tensor
Tensor.gcd(other : Tensor) -> Tensor
Tensor.gcd(other : Tensor,
out : Tensor) -> Tensor
Tensor.gcd_(other : Tensor) -> Tensor
Tensor.ge(other : Tensor) -> Tensor
Tensor.ge(other : number) -> Tensor
Tensor.ge(other : number,
out : Tensor) -> Tensor
Tensor.ge(other : Tensor,
out : Tensor) -> Tensor
Tensor.ge_(other : number) -> Tensor
Tensor.ge_(other : Tensor) -> Tensor
Tensor.geometric_(p : float,
generator : Optional[Generator]) -> Tensor
Tensor.geqrf() -> Tuple[Tensor, Tensor]
Tensor.geqrf(a : Tensor,
tau : Tensor) -> Tuple[Tensor, Tensor]
Tensor.ger(vec2 : Tensor) -> Tensor
Tensor.ger(vec2 : Tensor,
out : Tensor) -> Tensor
Tensor.get_device() -> int
Tensor.greater(other : Tensor) -> Tensor
Tensor.greater(other : number) -> Tensor
Tensor.greater(other : number,
out : Tensor) -> Tensor
Tensor.greater(other : Tensor,
out : Tensor) -> Tensor
Tensor.greater_(other : number) -> Tensor
Tensor.greater_(other : Tensor) -> Tensor
Tensor.greater_equal(other : Tensor) -> Tensor
Tensor.greater_equal(other : number) -> Tensor
Tensor.greater_equal(other : number,
out : Tensor) -> Tensor
Tensor.greater_equal(other : Tensor,
out : Tensor) -> Tensor
Tensor.greater_equal_(other : number) -> Tensor
Tensor.greater_equal_(other : Tensor) -> Tensor
Tensor.gt(other : Tensor) -> Tensor
Tensor.gt(other : number) -> Tensor
Tensor.gt(other : number,
out : Tensor) -> Tensor
Tensor.gt(other : Tensor,
out : Tensor) -> Tensor
Tensor.gt_(other : number) -> Tensor
Tensor.gt_(other : Tensor) -> Tensor
Tensor.hardshrink(lambd : number=0.5) -> Tensor
Tensor.hardshrink(lambd : number=0.5,
out : Tensor) -> Tensor
Tensor.heaviside(values : Tensor) -> Tensor
Tensor.heaviside(values : Tensor,
out : Tensor) -> Tensor
Tensor.heaviside_(values : Tensor) -> Tensor
Tensor.histc(bins : int=100,
min : number=0,
max : number=0) -> Tensor
Tensor.histc(bins : int=100,
min : number=0,
max : number=0,
out : Tensor) -> Tensor
Tensor.histogram(bins : Tensor,
weight : Optional[Tensor],
density : bool=False) -> Tuple[Tensor, Tensor]
Tensor.histogram(bins : Tensor,
weight : Optional[Tensor],
density : bool=False,
hist : Tensor,
bin_edges : Tensor) -> Tuple[Tensor, Tensor]
Tensor.histogram(bins : int=100,
range : Optional[List[float]],
weight : Optional[Tensor],
density : bool=False) -> Tuple[Tensor, Tensor]
Tensor.histogram(bins : int=100,
range : Optional[List[float]],
weight : Optional[Tensor],
density : bool=False,
hist : Tensor,
bin_edges : Tensor) -> Tuple[Tensor, Tensor]
Tensor.hsplit(sections : int) -> List[Tensor]
Tensor.hsplit(indices : List[int]) -> List[Tensor]
Tensor.hypot(other : Tensor) -> Tensor
Tensor.hypot(other : Tensor,
out : Tensor) -> Tensor
Tensor.hypot_(other : Tensor) -> Tensor
Tensor.i0() -> Tensor
Tensor.i0(out : Tensor) -> Tensor
Tensor.i0_() -> Tensor
Tensor.igamma(other : Tensor) -> Tensor
Tensor.igamma(other : Tensor,
out : Tensor) -> Tensor
Tensor.igamma_(other : Tensor) -> Tensor
Tensor.igammac(other : Tensor) -> Tensor
Tensor.igammac(other : Tensor,
out : Tensor) -> Tensor
Tensor.igammac_(other : Tensor) -> Tensor
Tensor.imag() -> Tensor
Tensor.index_add(dim : int,
index : Tensor,
source : Tensor,
alpha : number=1) -> Tensor
Tensor.index_add(dim : int,
index : Tensor,
source : Tensor,
alpha : number=1,
out : Tensor) -> Tensor
Tensor.index_add(dim : str,
index : Tensor,
source : Tensor,
alpha : number=1) -> Tensor
Tensor.index_add_(dim : int,
index : Tensor,
source : Tensor,
alpha : number=1) -> Tensor
Tensor.index_copy(dim : int,
index : Tensor,
source : Tensor) -> Tensor
Tensor.index_copy(dim : str,
index : Tensor,
source : Tensor) -> Tensor
Tensor.index_copy(dim : int,
index : Tensor,
source : Tensor,
out : Tensor) -> Tensor
Tensor.index_copy_(dim : int,
index : Tensor,
source : Tensor) -> Tensor
Tensor.index_copy_(dim : str,
index : Tensor,
source : Tensor) -> Tensor
Tensor.index_fill(dim : int,
index : Tensor,
value : Tensor) -> Tensor
Tensor.index_fill(dim : int,
index : Tensor,
value : number) -> Tensor
Tensor.index_fill(dim : str,
index : Tensor,
value : number) -> Tensor
Tensor.index_fill(dim : str,
index : Tensor,
value : Tensor) -> Tensor
Tensor.index_fill(dim : int,
index : Tensor,
value : number,
out : Tensor) -> Tensor
Tensor.index_fill(dim : int,
index : Tensor,
value : Tensor,
out : Tensor) -> Tensor
Tensor.index_fill_(dim : int,
index : Tensor,
value : Tensor) -> Tensor
Tensor.index_fill_(dim : int,
index : Tensor,
value : number) -> Tensor
Tensor.index_fill_(dim : str,
index : Tensor,
value : number) -> Tensor
Tensor.index_fill_(dim : str,
index : Tensor,
value : Tensor) -> Tensor
Tensor.index_put(indices : List[Optional[Tensor]],
values : Tensor,
accumulate : bool=False) -> Tensor
Tensor.index_put(indices : List[Optional[Tensor]],
values : Tensor,
accumulate : bool=False,
out : Tensor) -> Tensor
Tensor.index_put(indices : List[Tensor],
values : Tensor,
accumulate : bool=False) -> Tensor
Tensor.index_put_(indices : List[Optional[Tensor]],
values : Tensor,
accumulate : bool=False) -> Tensor
Tensor.index_put_(indices : List[Tensor],
values : Tensor,
accumulate : bool=False) -> Tensor
Tensor.index_reduce(dim : int,
index : Tensor,
source : Tensor,
reduce : str,
include_self : bool=True) -> Tensor
Tensor.index_reduce(dim : int,
index : Tensor,
source : Tensor,
reduce : str,
include_self : bool=True,
out : Tensor) -> Tensor
Tensor.index_reduce_(dim : int,
index : Tensor,
source : Tensor,
reduce : str,
include_self : bool=True) -> Tensor
Tensor.index_select(dim : int,
index : Tensor) -> Tensor
Tensor.index_select(dim : int,
index : Tensor,
out : Tensor) -> Tensor
Tensor.index_select(dim : str,
index : Tensor) -> Tensor
Tensor.index_select(dim : str,
index : Tensor,
out : Tensor) -> Tensor
Tensor.indices() -> Tensor
Tensor.inner(other : Tensor) -> Tensor
Tensor.inner(other : Tensor,
out : Tensor) -> Tensor
Tensor.int_repr(out : Tensor) -> Tensor
Tensor.int_repr() -> Tensor
Tensor.inverse() -> Tensor
Tensor.inverse(out : Tensor) -> Tensor
Tensor.is_coalesced() -> bool
Tensor.is_complex() -> bool
Tensor.is_conj() -> bool
Tensor.is_contiguous() -> bool
Tensor.is_contiguous(memory_format : int) -> bool
Tensor.is_distributed() -> bool
Tensor.is_floating_point() -> bool
Tensor.is_inference() -> bool
Tensor.is_leaf() -> bool
Tensor.is_neg() -> bool
Tensor.is_nonzero() -> bool
Tensor.is_pinned(device : Optional[Device]) -> bool
Tensor.is_same_size(other : Tensor) -> bool
Tensor.is_set_to(tensor : Tensor) -> bool
Tensor.is_signed() -> bool
Tensor.isclose(other : Tensor,
rtol : float=1e-05,
atol : float=1e-08,
equal_nan : bool=False) -> Tensor
Tensor.isfinite() -> Tensor
Tensor.isinf() -> Tensor
Tensor.isinf(out : Tensor) -> Tensor
Tensor.isnan() -> Tensor
Tensor.isnan(out : Tensor) -> Tensor
Tensor.isneginf() -> Tensor
Tensor.isneginf(out : Tensor) -> Tensor
Tensor.isposinf() -> Tensor
Tensor.isposinf(out : Tensor) -> Tensor
Tensor.isreal() -> Tensor
Tensor.istft(n_fft : int,
hop_length : Optional[int],
win_length : Optional[int],
window : Optional[Tensor],
center : bool=True,
normalized : bool=False,
onesided : Optional[bool],
length : Optional[int],
return_complex : bool=False) -> Tensor
Tensor.item() -> number
Tensor.kron(other : Tensor) -> Tensor
Tensor.kron(other : Tensor,
out : Tensor) -> Tensor
Tensor.kthvalue(k : int,
dim : int=-1,
keepdim : bool=False) -> Tuple[Tensor, Tensor]
Tensor.kthvalue(k : int,
dim : str,
keepdim : bool=False) -> Tuple[Tensor, Tensor]
Tensor.kthvalue(k : int,
dim : str,
keepdim : bool=False,
values : Tensor,
indices : Tensor) -> Tuple[Tensor, Tensor]
Tensor.kthvalue(k : int,
dim : int=-1,
keepdim : bool=False,
values : Tensor,
indices : Tensor) -> Tuple[Tensor, Tensor]
Tensor.lcm(other : Tensor) -> Tensor
Tensor.lcm(other : Tensor,
out : Tensor) -> Tensor
Tensor.lcm_(other : Tensor) -> Tensor
Tensor.ldexp(other : Tensor) -> Tensor
Tensor.ldexp(other : Tensor,
out : Tensor) -> Tensor
Tensor.ldexp_(other : Tensor) -> Tensor
Tensor.le(other : Tensor) -> Tensor
Tensor.le(other : number) -> Tensor
Tensor.le(other : number,
out : Tensor) -> Tensor
Tensor.le(other : Tensor,
out : Tensor) -> Tensor
Tensor.le_(other : number) -> Tensor
Tensor.le_(other : Tensor) -> Tensor
Tensor.lerp(end : Tensor,
weight : number) -> Tensor
Tensor.lerp(end : Tensor,
weight : Tensor) -> Tensor
Tensor.lerp(end : Tensor,
weight : number,
out : Tensor) -> Tensor
Tensor.lerp(end : Tensor,
weight : Tensor,
out : Tensor) -> Tensor
Tensor.lerp_(end : Tensor,
weight : number) -> Tensor
Tensor.lerp_(end : Tensor,
weight : Tensor) -> Tensor
Tensor.less(other : Tensor) -> Tensor
Tensor.less(other : number) -> Tensor
Tensor.less(other : number,
out : Tensor) -> Tensor
Tensor.less(other : Tensor,
out : Tensor) -> Tensor
Tensor.less_(other : number) -> Tensor
Tensor.less_(other : Tensor) -> Tensor
Tensor.less_equal(other : Tensor) -> Tensor
Tensor.less_equal(other : number) -> Tensor
Tensor.less_equal(other : number,
out : Tensor) -> Tensor
Tensor.less_equal(other : Tensor,
out : Tensor) -> Tensor
Tensor.less_equal_(other : number) -> Tensor
Tensor.less_equal_(other : Tensor) -> Tensor
Tensor.lgamma() -> Tensor
Tensor.lgamma(out : Tensor) -> Tensor
Tensor.lgamma_() -> Tensor
Tensor.log() -> Tensor
Tensor.log(out : Tensor) -> Tensor
Tensor.log10() -> Tensor
Tensor.log10(out : Tensor) -> Tensor
Tensor.log10_() -> Tensor
Tensor.log1p() -> Tensor
Tensor.log1p(out : Tensor) -> Tensor
Tensor.log1p_() -> Tensor
Tensor.log2() -> Tensor
Tensor.log2(out : Tensor) -> Tensor
Tensor.log2_() -> Tensor
Tensor.log_() -> Tensor
Tensor.log_normal_(mean : float=1.0,
std : float=2.0,
generator : Optional[Generator]) -> Tensor
Tensor.log_softmax(dim : int,
dtype : Optional[int]) -> Tensor
Tensor.log_softmax(dim : str,
dtype : Optional[int]) -> Tensor
Tensor.log_softmax(dim : int,
dtype : Optional[int],
out : Tensor) -> Tensor
Tensor.logaddexp(other : Tensor) -> Tensor
Tensor.logaddexp(other : Tensor,
out : Tensor) -> Tensor
Tensor.logaddexp2(other : Tensor) -> Tensor
Tensor.logaddexp2(other : Tensor,
out : Tensor) -> Tensor
Tensor.logcumsumexp(dim : int) -> Tensor
Tensor.logcumsumexp(dim : str) -> Tensor
Tensor.logcumsumexp(dim : str,
out : Tensor) -> Tensor
Tensor.logcumsumexp(dim : int,
out : Tensor) -> Tensor
Tensor.logdet() -> Tensor
Tensor.logical_and(other : Tensor) -> Tensor
Tensor.logical_and(other : Tensor,
out : Tensor) -> Tensor
Tensor.logical_and_(other : Tensor) -> Tensor
Tensor.logical_not() -> Tensor
Tensor.logical_not(out : Tensor) -> Tensor
Tensor.logical_not_() -> Tensor
Tensor.logical_or(other : Tensor) -> Tensor
Tensor.logical_or(other : Tensor,
out : Tensor) -> Tensor
Tensor.logical_or_(other : Tensor) -> Tensor
Tensor.logical_xor(other : Tensor) -> Tensor
Tensor.logical_xor(other : Tensor,
out : Tensor) -> Tensor
Tensor.logical_xor_(other : Tensor) -> Tensor
Tensor.logit(eps : Optional[float]) -> Tensor
Tensor.logit(eps : Optional[float],
out : Tensor) -> Tensor
Tensor.logit_(eps : Optional[float]) -> Tensor
Tensor.logsumexp(dim : List[int],
keepdim : bool=False) -> Tensor
Tensor.logsumexp(dim : List[str],
keepdim : bool=False) -> Tensor
Tensor.logsumexp(dim : List[str],
keepdim : bool=False,
out : Tensor) -> Tensor
Tensor.logsumexp(dim : List[int],
keepdim : bool=False,
out : Tensor) -> Tensor
Tensor.lt(other : Tensor) -> Tensor
Tensor.lt(other : number) -> Tensor
Tensor.lt(other : number,
out : Tensor) -> Tensor
Tensor.lt(other : Tensor,
out : Tensor) -> Tensor
Tensor.lt_(other : number) -> Tensor
Tensor.lt_(other : Tensor) -> Tensor
Tensor.lu_solve(LU_data : Tensor,
LU_pivots : Tensor) -> Tensor
Tensor.lu_solve(LU_data : Tensor,
LU_pivots : Tensor,
out : Tensor) -> Tensor
Tensor.mH() -> Tensor
Tensor.mT() -> Tensor
Tensor.masked_fill(mask : Tensor,
value : number) -> Tensor
Tensor.masked_fill(mask : Tensor,
value : Tensor) -> Tensor
Tensor.masked_fill(mask : Tensor,
value : number,
out : Tensor) -> Tensor
Tensor.masked_fill(mask : Tensor,
value : Tensor,
out : Tensor) -> Tensor
Tensor.masked_fill_(mask : Tensor,
value : number) -> Tensor
Tensor.masked_fill_(mask : Tensor,
value : Tensor) -> Tensor
Tensor.masked_scatter(mask : Tensor,
source : Tensor) -> Tensor
Tensor.masked_scatter(mask : Tensor,
source : Tensor,
out : Tensor) -> Tensor
Tensor.masked_scatter_(mask : Tensor,
source : Tensor) -> Tensor
Tensor.masked_select(mask : Tensor) -> Tensor
Tensor.masked_select(mask : Tensor,
out : Tensor) -> Tensor
Tensor.matmul(other : Tensor) -> Tensor
Tensor.matmul(other : Tensor,
out : Tensor) -> Tensor
Tensor.matrix_exp() -> Tensor
Tensor.matrix_power(n : int) -> Tensor
Tensor.matrix_power(n : int,
out : Tensor) -> Tensor
Tensor.max(other : Tensor) -> Tensor
Tensor.max() -> Tensor
Tensor.max(dim : int,
keepdim : bool=False) -> Tuple[Tensor, Tensor]
Tensor.max(dim : int,
keepdim : bool=False,
max : Tensor,
max_values : Tensor) -> Tuple[Tensor, Tensor]
Tensor.max(dim : str,
keepdim : bool=False) -> Tuple[Tensor, Tensor]
Tensor.max(dim : str,
keepdim : bool=False,
max : Tensor,
max_values : Tensor) -> Tuple[Tensor, Tensor]
Tensor.max(out : Tensor) -> Tensor
Tensor.max(other : Tensor,
out : Tensor) -> Tensor
Tensor.maximum(other : Tensor) -> Tensor
Tensor.maximum(other : Tensor,
out : Tensor) -> Tensor
Tensor.mean(dtype : Optional[int]) -> Tensor
Tensor.mean(dim : Optional[List[int]],
keepdim : bool=False,
dtype : Optional[int]) -> Tensor
Tensor.mean(dim : List[str],
keepdim : bool=False,
dtype : Optional[int]) -> Tensor
Tensor.mean(dim : List[str],
keepdim : bool=False,
dtype : Optional[int],
out : Tensor) -> Tensor
Tensor.mean(dim : Optional[List[int]],
keepdim : bool=False,
dtype : Optional[int],
out : Tensor) -> Tensor
Tensor.mean(dtype : Optional[int],
out : Tensor) -> Tensor
Tensor.median() -> Tensor
Tensor.median(dim : int,
keepdim : bool=False) -> Tuple[Tensor, Tensor]
Tensor.median(dim : int,
keepdim : bool=False,
values : Tensor,
indices : Tensor) -> Tuple[Tensor, Tensor]
Tensor.median(dim : str,
keepdim : bool=False) -> Tuple[Tensor, Tensor]
Tensor.median(dim : str,
keepdim : bool=False,
values : Tensor,
indices : Tensor) -> Tuple[Tensor, Tensor]
Tensor.median(out : Tensor) -> Tensor
Tensor.min(other : Tensor) -> Tensor
Tensor.min() -> Tensor
Tensor.min(dim : int,
keepdim : bool=False) -> Tuple[Tensor, Tensor]
Tensor.min(dim : int,
keepdim : bool=False,
min : Tensor,
min_indices : Tensor) -> Tuple[Tensor, Tensor]
Tensor.min(dim : str,
keepdim : bool=False) -> Tuple[Tensor, Tensor]
Tensor.min(dim : str,
keepdim : bool=False,
min : Tensor,
min_indices : Tensor) -> Tuple[Tensor, Tensor]
Tensor.min(out : Tensor) -> Tensor
Tensor.min(other : Tensor,
out : Tensor) -> Tensor
Tensor.minimum(other : Tensor) -> Tensor
Tensor.minimum(other : Tensor,
out : Tensor) -> Tensor
Tensor.mm(mat2 : Tensor) -> Tensor
Tensor.mm(mat2 : Tensor,
out : Tensor) -> Tensor
Tensor.mode(dim : int=-1,
keepdim : bool=False) -> Tuple[Tensor, Tensor]
Tensor.mode(dim : str,
keepdim : bool=False) -> Tuple[Tensor, Tensor]
Tensor.mode(dim : str,
keepdim : bool=False,
values : Tensor,
indices : Tensor) -> Tuple[Tensor, Tensor]
Tensor.mode(dim : int=-1,
keepdim : bool=False,
values : Tensor,
indices : Tensor) -> Tuple[Tensor, Tensor]
Tensor.moveaxis(source : List[int],
destination : List[int]) -> Tensor
Tensor.moveaxis(source : int,
destination : int) -> Tensor
Tensor.movedim(source : int,
destination : int) -> Tensor
Tensor.movedim(source : List[int],
destination : List[int]) -> Tensor
Tensor.msort() -> Tensor
Tensor.msort(out : Tensor) -> Tensor
Tensor.mul(other : Tensor) -> Tensor
Tensor.mul(other : number) -> Tensor
Tensor.mul(other : Tensor,
out : Tensor) -> Tensor
Tensor.mul(other : number,
out : Tensor) -> Tensor
Tensor.mul_(other : Tensor) -> Tensor
Tensor.mul_(other : number) -> Tensor
Tensor.multinomial(num_samples : int,
replacement : bool=False,
generator : Optional[Generator]) -> Tensor
Tensor.multinomial(num_samples : int,
replacement : bool=False,
generator : Optional[Generator],
out : Tensor) -> Tensor
Tensor.multiply(other : Tensor) -> Tensor
Tensor.multiply(other : number) -> Tensor
Tensor.multiply(other : Tensor,
out : Tensor) -> Tensor
Tensor.multiply_(other : Tensor) -> Tensor
Tensor.multiply_(other : number) -> Tensor
Tensor.mv(vec : Tensor) -> Tensor
Tensor.mv(vec : Tensor,
out : Tensor) -> Tensor
Tensor.mvlgamma(p : int) -> Tensor
Tensor.mvlgamma(p : int,
out : Tensor) -> Tensor
Tensor.mvlgamma_(p : int) -> Tensor
Tensor.nan_to_num(nan : Optional[float],
posinf : Optional[float],
neginf : Optional[float]) -> Tensor
Tensor.nan_to_num(nan : Optional[float],
posinf : Optional[float],
neginf : Optional[float],
out : Tensor) -> Tensor
Tensor.nan_to_num_(nan : Optional[float],
posinf : Optional[float],
neginf : Optional[float]) -> Tensor
Tensor.nanmean(dim : Optional[List[int]],
keepdim : bool=False,
dtype : Optional[int]) -> Tensor
Tensor.nanmean(dim : Optional[List[int]],
keepdim : bool=False,
dtype : Optional[int],
out : Tensor) -> Tensor
Tensor.nanmedian() -> Tensor
Tensor.nanmedian(dim : int,
keepdim : bool=False) -> Tuple[Tensor, Tensor]
Tensor.nanmedian(dim : int,
keepdim : bool=False,
values : Tensor,
indices : Tensor) -> Tuple[Tensor, Tensor]
Tensor.nanmedian(dim : str,
keepdim : bool=False) -> Tuple[Tensor, Tensor]
Tensor.nanmedian(dim : str,
keepdim : bool=False,
values : Tensor,
indices : Tensor) -> Tuple[Tensor, Tensor]
Tensor.nanmedian(out : Tensor) -> Tensor
Tensor.nanquantile(q : Tensor,
dim : Optional[int],
keepdim : bool=False,
interpolation : str="linear") -> Tensor
Tensor.nanquantile(q : float,
dim : Optional[int],
keepdim : bool=False,
interpolation : str="linear") -> Tensor
Tensor.nanquantile(q : Tensor,
dim : Optional[int],
keepdim : bool=False,
interpolation : str="linear",
out : Tensor) -> Tensor
Tensor.nanquantile(q : float,
dim : Optional[int],
keepdim : bool=False,
interpolation : str="linear",
out : Tensor) -> Tensor
Tensor.nansum(dim : Optional[List[int]],
keepdim : bool=False,
dtype : Optional[int]) -> Tensor
Tensor.nansum(dim : Optional[List[int]],
keepdim : bool=False,
dtype : Optional[int],
out : Tensor) -> Tensor
Tensor.narrow(dim : int,
start : int,
length : int) -> Tensor
Tensor.narrow(dim : int,
start : Tensor,
length : int) -> Tensor
Tensor.narrow_copy(dim : int,
start : int,
length : int) -> Tensor
Tensor.narrow_copy(dim : int,
start : int,
length : int,
out : Tensor) -> Tensor
Tensor.ne(other : Tensor) -> Tensor
Tensor.ne(other : number) -> Tensor
Tensor.ne(other : number,
out : Tensor) -> Tensor
Tensor.ne(other : Tensor,
out : Tensor) -> Tensor
Tensor.ne_(other : number) -> Tensor
Tensor.ne_(other : Tensor) -> Tensor
Tensor.neg() -> Tensor
Tensor.neg(out : Tensor) -> Tensor
Tensor.neg_() -> Tensor
Tensor.negative() -> Tensor
Tensor.negative(out : Tensor) -> Tensor
Tensor.negative_() -> Tensor
Tensor.new_empty(size : List[int],
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
Tensor.new_empty(size : List[int],
out : Tensor) -> Tensor
Tensor.new_empty_strided(size : List[int],
stride : List[int],
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
Tensor.new_empty_strided(size : List[int],
stride : List[int],
out : Tensor) -> Tensor
Tensor.new_full(size : List[int],
fill_value : number,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
Tensor.new_full(size : List[int],
fill_value : number,
out : Tensor) -> Tensor
Tensor.new_ones(size : List[int],
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
Tensor.new_ones(size : List[int],
out : Tensor) -> Tensor
Tensor.new_zeros(size : List[int],
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
Tensor.new_zeros(size : List[int],
out : Tensor) -> Tensor
Tensor.nextafter(other : Tensor) -> Tensor
Tensor.nextafter(other : Tensor,
out : Tensor) -> Tensor
Tensor.nextafter_(other : Tensor) -> Tensor
Tensor.nonzero() -> Tensor
Tensor.nonzero(out : Tensor) -> Tensor
Tensor.nonzero_static(size : int,
fill_value : int=-1) -> Tensor
Tensor.nonzero_static(size : int,
fill_value : int=-1,
out : Tensor) -> Tensor
Tensor.norm(p : number=2) -> Tensor
Tensor.norm(p : Optional[number],
dim : List[int],
keepdim : bool=False) -> Tensor
Tensor.norm(p : Optional[number],
dim : List[str],
keepdim : bool=False) -> Tensor
Tensor.norm(p : Optional[number],
dim : List[int],
keepdim : bool,
dtype : int) -> Tensor
Tensor.norm(p : Optional[number],
dim : List[int],
keepdim : bool,
dtype : int,
out : Tensor) -> Tensor
Tensor.norm(p : Optional[number],
dim : List[int],
keepdim : bool=False,
out : Tensor) -> Tensor
Tensor.norm(p : Optional[number],
dtype : int) -> Tensor
Tensor.norm(p : Optional[number],
dtype : int,
out : Tensor) -> Tensor
Tensor.norm(p : number=2,
out : Tensor) -> Tensor
Tensor.norm(p : Optional[number],
dim : List[str],
keepdim : bool,
dtype : int) -> Tensor
Tensor.norm(p : Optional[number],
dim : List[str],
keepdim : bool,
dtype : int,
out : Tensor) -> Tensor
Tensor.norm(p : Optional[number],
dim : List[str],
keepdim : bool=False,
out : Tensor) -> Tensor
Tensor.normal_(mean : float=0.0,
std : float=1.0,
generator : Optional[Generator]) -> Tensor
Tensor.not_equal(other : Tensor) -> Tensor
Tensor.not_equal(other : number) -> Tensor
Tensor.not_equal(other : number,
out : Tensor) -> Tensor
Tensor.not_equal(other : Tensor,
out : Tensor) -> Tensor
Tensor.not_equal_(other : number) -> Tensor
Tensor.not_equal_(other : Tensor) -> Tensor
Tensor.numel() -> int
Tensor.orgqr(input2 : Tensor) -> Tensor
Tensor.orgqr(input2 : Tensor,
out : Tensor) -> Tensor
Tensor.ormqr(input2 : Tensor,
input3 : Tensor,
left : bool=True,
transpose : bool=False) -> Tensor
Tensor.ormqr(input2 : Tensor,
input3 : Tensor,
left : bool=True,
transpose : bool=False,
out : Tensor) -> Tensor
Tensor.outer(vec2 : Tensor) -> Tensor
Tensor.outer(vec2 : Tensor,
out : Tensor) -> Tensor
Tensor.output_nr() -> int
Tensor.permute(dims : List[int]) -> Tensor
Tensor.pin_memory(device : Optional[Device]) -> Tensor
Tensor.pinverse(rcond : float=1e-15) -> Tensor
Tensor.polygamma_(n : int) -> Tensor
Tensor.positive() -> Tensor
Tensor.pow(exponent : Tensor) -> Tensor
Tensor.pow(exponent : number) -> Tensor
Tensor.pow(exponent : number,
out : Tensor) -> Tensor
Tensor.pow(exponent : Tensor,
out : Tensor) -> Tensor
Tensor.pow_(exponent : number) -> Tensor
Tensor.pow_(exponent : Tensor) -> Tensor
Tensor.prelu(weight : Tensor) -> Tensor
Tensor.prod(dtype : Optional[int]) -> Tensor
Tensor.prod(dim : int,
keepdim : bool=False,
dtype : Optional[int]) -> Tensor
Tensor.prod(dim : str,
keepdim : bool=False,
dtype : Optional[int]) -> Tensor
Tensor.prod(dim : str,
keepdim : bool=False,
dtype : Optional[int],
out : Tensor) -> Tensor
Tensor.prod(dim : int,
keepdim : bool=False,
dtype : Optional[int],
out : Tensor) -> Tensor
Tensor.prod(dtype : Optional[int],
out : Tensor) -> Tensor
Tensor.put(index : Tensor,
source : Tensor,
accumulate : bool=False) -> Tensor
Tensor.put(index : Tensor,
source : Tensor,
accumulate : bool=False,
out : Tensor) -> Tensor
Tensor.put_(index : Tensor,
source : Tensor,
accumulate : bool=False) -> Tensor
Tensor.q_per_channel_axis() -> int
Tensor.q_per_channel_scales(out : Tensor) -> Tensor
Tensor.q_per_channel_scales() -> Tensor
Tensor.q_per_channel_zero_points(out : Tensor) -> Tensor
Tensor.q_per_channel_zero_points() -> Tensor
Tensor.q_scale() -> float
Tensor.q_zero_point() -> int
Tensor.qr(some : bool=True) -> Tuple[Tensor, Tensor]
Tensor.qr(some : bool=True,
Q : Tensor,
R : Tensor) -> Tuple[Tensor, Tensor]
Tensor.qscheme() -> QScheme
Tensor.quantile(q : Tensor,
dim : Optional[int],
keepdim : bool=False,
interpolation : str="linear") -> Tensor
Tensor.quantile(q : float,
dim : Optional[int],
keepdim : bool=False,
interpolation : str="linear") -> Tensor
Tensor.quantile(q : Tensor,
dim : Optional[int],
keepdim : bool=False,
interpolation : str="linear",
out : Tensor) -> Tensor
Tensor.quantile(q : float,
dim : Optional[int],
keepdim : bool=False,
interpolation : str="linear",
out : Tensor) -> Tensor
Tensor.rad2deg() -> Tensor
Tensor.rad2deg(out : Tensor) -> Tensor
Tensor.rad2deg_() -> Tensor
Tensor.random_(from : int,
to : Optional[int],
generator : Optional[Generator]) -> Tensor
Tensor.random_(to : int,
generator : Optional[Generator]) -> Tensor
Tensor.random_(generator : Optional[Generator]) -> Tensor
Tensor.ravel() -> Tensor
Tensor.real() -> Tensor
Tensor.reciprocal() -> Tensor
Tensor.reciprocal(out : Tensor) -> Tensor
Tensor.reciprocal_() -> Tensor
Tensor.record_stream(s : Stream) -> Tuple[]
Tensor.refine_names(names : List[str]) -> Tensor
Tensor.relu() -> Tensor
Tensor.relu(out : Tensor) -> Tensor
Tensor.relu_() -> Tensor
Tensor.remainder(other : Tensor) -> Tensor
Tensor.remainder(other : number) -> Tensor
Tensor.remainder(other : Tensor,
out : Tensor) -> Tensor
Tensor.remainder(other : number,
out : Tensor) -> Tensor
Tensor.remainder_(other : Tensor) -> Tensor
Tensor.remainder_(other : number) -> Tensor
Tensor.rename(names : Optional[List[str]]) -> Tensor
Tensor.rename_(names : Optional[List[str]]) -> Tensor
Tensor.renorm(p : number,
dim : int,
maxnorm : number) -> Tensor
Tensor.renorm(p : number,
dim : int,
maxnorm : number,
out : Tensor) -> Tensor
Tensor.renorm_(p : number,
dim : int,
maxnorm : number) -> Tensor
Tensor.repeat(repeats : List[int]) -> Tensor
Tensor.repeat(repeats : List[int],
out : Tensor) -> Tensor
Tensor.repeat_interleave(repeats : Tensor,
dim : Optional[int],
output_size : Optional[int]) -> Tensor
Tensor.repeat_interleave(repeats : int,
dim : Optional[int],
output_size : Optional[int]) -> Tensor
Tensor.requires_grad_(requires_grad : bool=True) -> Tensor
Tensor.reshape(shape : List[int]) -> Tensor
Tensor.reshape_as(other : Tensor) -> Tensor
Tensor.resize(size : List[int],
memory_format : Optional[int]) -> Tensor
Tensor.resize(size : List[int],
memory_format : Optional[int],
out : Tensor) -> Tensor
Tensor.resize_(size : List[int],
memory_format : Optional[int]) -> Tensor
Tensor.resize_as(the_template : Tensor,
memory_format : Optional[int]) -> Tensor
Tensor.resize_as(the_template : Tensor,
memory_format : Optional[int],
out : Tensor) -> Tensor
Tensor.resize_as_(the_template : Tensor,
memory_format : Optional[int]) -> Tensor
Tensor.resize_as_sparse_(the_template : Tensor) -> Tensor
Tensor.resolve_conj() -> Tensor
Tensor.resolve_neg() -> Tensor
Tensor.retain_grad() -> Tuple[]
Tensor.retains_grad() -> bool
Tensor.roll(shifts : List[int],
dims : List[int]=[]) -> Tensor
Tensor.roll(shifts : List[int],
dims : List[int]=[],
out : Tensor) -> Tensor
Tensor.rot90(k : int=1,
dims : List[int]=[0, 1]) -> Tensor
Tensor.rot90(k : int=1,
dims : List[int]=[0, 1],
out : Tensor) -> Tensor
Tensor.round() -> Tensor
Tensor.round(decimals : int) -> Tensor
Tensor.round(out : Tensor) -> Tensor
Tensor.round(decimals : int,
out : Tensor) -> Tensor
Tensor.round_() -> Tensor
Tensor.round_(decimals : int) -> Tensor
Tensor.row_indices() -> Tensor
Tensor.rsqrt() -> Tensor
Tensor.rsqrt(out : Tensor) -> Tensor
Tensor.rsqrt_() -> Tensor
Tensor.scatter(dim : int,
index : Tensor,
value : number) -> Tensor
Tensor.scatter(dim : int,
index : Tensor,
src : Tensor) -> Tensor
Tensor.scatter(dim : int,
index : Tensor,
src : Tensor,
reduce : str) -> Tensor
Tensor.scatter(dim : int,
index : Tensor,
value : number,
reduce : str) -> Tensor
Tensor.scatter(dim : int,
index : Tensor,
src : Tensor,
out : Tensor) -> Tensor
Tensor.scatter(dim : int,
index : Tensor,
value : number,
out : Tensor) -> Tensor
Tensor.scatter(dim : int,
index : Tensor,
src : Tensor,
reduce : str,
out : Tensor) -> Tensor
Tensor.scatter(dim : int,
index : Tensor,
value : number,
reduce : str,
out : Tensor) -> Tensor
Tensor.scatter(dim : str,
index : Tensor,
src : Tensor) -> Tensor
Tensor.scatter(dim : str,
index : Tensor,
value : number) -> Tensor
Tensor.scatter_(dim : int,
index : Tensor,
src : Tensor) -> Tensor
Tensor.scatter_(dim : int,
index : Tensor,
value : number) -> Tensor
Tensor.scatter_(dim : int,
index : Tensor,
src : Tensor,
reduce : str) -> Tensor
Tensor.scatter_(dim : int,
index : Tensor,
value : number,
reduce : str) -> Tensor
Tensor.scatter_add(dim : int,
index : Tensor,
src : Tensor) -> Tensor
Tensor.scatter_add(dim : int,
index : Tensor,
src : Tensor,
out : Tensor) -> Tensor
Tensor.scatter_add(dim : str,
index : Tensor,
src : Tensor) -> Tensor
Tensor.scatter_add_(dim : int,
index : Tensor,
src : Tensor) -> Tensor
Tensor.scatter_reduce(dim : int,
index : Tensor,
src : Tensor,
reduce : str,
include_self : bool=True) -> Tensor
Tensor.scatter_reduce(dim : int,
index : Tensor,
src : Tensor,
reduce : str,
include_self : bool=True,
out : Tensor) -> Tensor
Tensor.scatter_reduce_(dim : int,
index : Tensor,
src : Tensor,
reduce : str,
include_self : bool=True) -> Tensor
Tensor.select(dim : str,
index : int) -> Tensor
Tensor.select(dim : int,
index : int) -> Tensor
Tensor.select_scatter(src : Tensor,
dim : int,
index : int) -> Tensor
Tensor.select_scatter(src : Tensor,
dim : int,
index : int,
out : Tensor) -> Tensor
Tensor.set_(source : Storage,
storage_offset : int,
size : List[int],
stride : List[int]=[]) -> Tensor
Tensor.set_(source : Tensor) -> Tensor
Tensor.set_() -> Tensor
Tensor.set_(source : Storage) -> Tensor
Tensor.set_(source : Tensor,
storage_offset : int,
size : List[int],
stride : List[int]=[]) -> Tensor
Tensor.sgn() -> Tensor
Tensor.sgn(out : Tensor) -> Tensor
Tensor.sgn_() -> Tensor
Tensor.sigmoid() -> Tensor
Tensor.sigmoid(out : Tensor) -> Tensor
Tensor.sigmoid_() -> Tensor
Tensor.sign() -> Tensor
Tensor.sign(out : Tensor) -> Tensor
Tensor.sign_() -> Tensor
Tensor.signbit() -> Tensor
Tensor.signbit(out : Tensor) -> Tensor
Tensor.sin() -> Tensor
Tensor.sin(out : Tensor) -> Tensor
Tensor.sin_() -> Tensor
Tensor.sinc() -> Tensor
Tensor.sinc(out : Tensor) -> Tensor
Tensor.sinc_() -> Tensor
Tensor.sinh() -> Tensor
Tensor.sinh(out : Tensor) -> Tensor
Tensor.sinh_() -> Tensor
Tensor.size(dim : int) -> int
Tensor.size(dim : str) -> int
Tensor.size() -> List[int]
Tensor.slice_inverse(src : Tensor,
dim : int=0,
start : Optional[int],
end : Optional[int],
step : int=1) -> Tensor
Tensor.slice_scatter(src : Tensor,
dim : int=0,
start : Optional[int],
end : Optional[int],
step : int=1) -> Tensor
Tensor.slice_scatter(src : Tensor,
dim : int=0,
start : Optional[int],
end : Optional[int],
step : int=1,
out : Tensor) -> Tensor
Tensor.slogdet() -> Tuple[Tensor, Tensor]
Tensor.slogdet(sign : Tensor,
logabsdet : Tensor) -> Tuple[Tensor, Tensor]
Tensor.smm(mat2 : Tensor) -> Tensor
Tensor.softmax(dim : int,
dtype : Optional[int]) -> Tensor
Tensor.softmax(dim : str,
dtype : Optional[int]) -> Tensor
Tensor.softmax(dim : int,
dtype : Optional[int],
out : Tensor) -> Tensor
Tensor.sort(dim : int=-1,
descending : bool=False) -> Tuple[Tensor, Tensor]
Tensor.sort(stable : Optional[bool],
dim : int=-1,
descending : bool=False) -> Tuple[Tensor, Tensor]
Tensor.sort(stable : Optional[bool],
dim : int=-1,
descending : bool=False,
values : Tensor,
indices : Tensor) -> Tuple[Tensor, Tensor]
Tensor.sort(dim : int=-1,
descending : bool=False,
values : Tensor,
indices : Tensor) -> Tuple[Tensor, Tensor]
Tensor.sort(dim : str,
descending : bool=False) -> Tuple[Tensor, Tensor]
Tensor.sort(dim : str,
descending : bool=False,
values : Tensor,
indices : Tensor) -> Tuple[Tensor, Tensor]
Tensor.sort(stable : Optional[bool],
dim : str,
descending : bool=False) -> Tuple[Tensor, Tensor]
Tensor.sort(stable : Optional[bool],
dim : str,
descending : bool=False,
values : Tensor,
indices : Tensor) -> Tuple[Tensor, Tensor]
Tensor.sparse_dim() -> int
Tensor.sparse_mask(mask : Tensor,
out : Tensor) -> Tensor
Tensor.sparse_mask(mask : Tensor) -> Tensor
Tensor.sparse_resize_(size : List[int],
sparse_dim : int,
dense_dim : int) -> Tensor
Tensor.sparse_resize_and_clear_(size : List[int],
sparse_dim : int,
dense_dim : int) -> Tensor
Tensor.split(split_size : int,
dim : int=0) -> List[Tensor]
Tensor.split(split_size : List[int],
dim : int=0) -> List[Tensor]
Tensor.split(split_sizes : List[int],
dim : int=0) -> List[Tensor]
Tensor.split_with_sizes(split_sizes : List[int],
dim : int=0) -> List[Tensor]
Tensor.sqrt() -> Tensor
Tensor.sqrt(out : Tensor) -> Tensor
Tensor.sqrt_() -> Tensor
Tensor.square() -> Tensor
Tensor.square(out : Tensor) -> Tensor
Tensor.square_() -> Tensor
Tensor.squeeze() -> Tensor
Tensor.squeeze(dim : int) -> Tensor
Tensor.squeeze(dim : List[int]) -> Tensor
Tensor.squeeze(dim : str) -> Tensor
Tensor.squeeze_() -> Tensor
Tensor.squeeze_(dim : int) -> Tensor
Tensor.squeeze_(dim : List[int]) -> Tensor
Tensor.squeeze_(dim : str) -> Tensor
Tensor.sspaddmm(mat1 : Tensor,
mat2 : Tensor,
beta : number=1,
alpha : number=1,
out : Tensor) -> Tensor
Tensor.sspaddmm(mat1 : Tensor,
mat2 : Tensor,
beta : number=1,
alpha : number=1) -> Tensor
Tensor.std(unbiased : bool=True) -> Tensor
Tensor.std(dim : Optional[List[int]],
unbiased : bool=True,
keepdim : bool=False) -> Tensor
Tensor.std(dim : Optional[List[int]],
correction : Optional[number],
keepdim : bool=False) -> Tensor
Tensor.std(dim : List[str],
unbiased : bool=True,
keepdim : bool=False) -> Tensor
Tensor.std(dim : List[str],
unbiased : bool=True,
keepdim : bool=False,
out : Tensor) -> Tensor
Tensor.std(dim : Optional[List[int]],
unbiased : bool=True,
keepdim : bool=False,
out : Tensor) -> Tensor
Tensor.std(dim : Optional[List[int]],
correction : Optional[number],
keepdim : bool=False,
out : Tensor) -> Tensor
Tensor.std(dim : List[str],
correction : Optional[number],
keepdim : bool=False) -> Tensor
Tensor.std(dim : List[str],
correction : Optional[number],
keepdim : bool=False,
out : Tensor) -> Tensor
Tensor.stft(n_fft : int,
hop_length : Optional[int],
win_length : Optional[int],
window : Optional[Tensor],
normalized : bool=False,
onesided : Optional[bool],
return_complex : Optional[bool]) -> Tensor
Tensor.stft(n_fft : int,
hop_length : Optional[int],
win_length : Optional[int],
window : Optional[Tensor],
center : bool=True,
pad_mode : str="reflect",
normalized : bool=False,
onesided : Optional[bool],
return_complex : Optional[bool]) -> Tensor
Tensor.storage_offset() -> int
Tensor.stride(dim : int) -> int
Tensor.stride(dim : str) -> int
Tensor.stride() -> List[int]
Tensor.sub(other : Tensor,
alpha : number=1) -> Tensor
Tensor.sub(other : number,
alpha : number=1) -> Tensor
Tensor.sub(other : Tensor,
alpha : number=1,
out : Tensor) -> Tensor
Tensor.sub(other : number,
alpha : number=1,
out : Tensor) -> Tensor
Tensor.sub_(other : Tensor,
alpha : number=1) -> Tensor
Tensor.sub_(other : number,
alpha : number=1) -> Tensor
Tensor.subtract(other : Tensor,
alpha : number=1) -> Tensor
Tensor.subtract(other : Tensor,
alpha : number=1,
out : Tensor) -> Tensor
Tensor.subtract(other : number,
alpha : number=1) -> Tensor
Tensor.subtract_(other : Tensor,
alpha : number=1) -> Tensor
Tensor.subtract_(other : number,
alpha : number=1) -> Tensor
Tensor.sum(dim : Optional[List[int]],
keepdim : bool=False,
dtype : Optional[int]) -> Tensor
Tensor.sum(dtype : Optional[int]) -> Tensor
Tensor.sum(dim : List[str],
keepdim : bool=False,
dtype : Optional[int]) -> Tensor
Tensor.sum(dim : List[str],
keepdim : bool=False,
dtype : Optional[int],
out : Tensor) -> Tensor
Tensor.sum(dim : Optional[List[int]],
keepdim : bool=False,
dtype : Optional[int],
out : Tensor) -> Tensor
Tensor.sum(dtype : Optional[int],
out : Tensor) -> Tensor
Tensor.sum_to_size(size : List[int]) -> Tensor
Tensor.svd(some : bool=True,
compute_uv : bool=True) -> Tuple[Tensor, Tensor, Tensor]
Tensor.svd(some : bool=True,
compute_uv : bool=True,
U : Tensor,
S : Tensor,
V : Tensor) -> Tuple[Tensor, Tensor, Tensor]
Tensor.swapaxes(axis0 : int,
axis1 : int) -> Tensor
Tensor.swapaxes_(axis0 : int,
axis1 : int) -> Tensor
Tensor.swapdims(dim0 : int,
dim1 : int) -> Tensor
Tensor.swapdims_(dim0 : int,
dim1 : int) -> Tensor
Tensor.t() -> Tensor
Tensor.t_() -> Tensor
Tensor.take(index : Tensor) -> Tensor
Tensor.take(index : Tensor,
out : Tensor) -> Tensor
Tensor.take_along_dim(indices : Tensor,
dim : Optional[int]) -> Tensor
Tensor.take_along_dim(indices : Tensor,
dim : Optional[int],
out : Tensor) -> Tensor
Tensor.tan() -> Tensor
Tensor.tan(out : Tensor) -> Tensor
Tensor.tan_() -> Tensor
Tensor.tanh() -> Tensor
Tensor.tanh(out : Tensor) -> Tensor
Tensor.tanh_() -> Tensor
Tensor.tensor_split(sections : int,
dim : int=0) -> List[Tensor]
Tensor.tensor_split(indices : List[int],
dim : int=0) -> List[Tensor]
Tensor.tensor_split(tensor_indices_or_sections : Tensor,
dim : int=0) -> List[Tensor]
Tensor.tile(dims : List[int]) -> Tensor
Tensor.to(device : Device,
dtype : int,
non_blocking : bool=False,
copy : bool=False,
memory_format : Optional[int]) -> Tensor
Tensor.to(dtype : int,
non_blocking : bool=False,
copy : bool=False,
memory_format : Optional[int]) -> Tensor
Tensor.to(other : Tensor,
non_blocking : bool=False,
copy : bool=False,
memory_format : Optional[int]) -> Tensor
Tensor.to(dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool],
non_blocking : bool=False,
copy : bool=False,
memory_format : Optional[int]) -> Tensor
Tensor.to(device : Optional[Device],
dtype : Optional[int],
non_blocking : bool=False,
copy : bool=False) -> Tensor
Tensor.to(dtype : Optional[int],
non_blocking : bool=False,
copy : bool=False) -> Tensor
Tensor.to(non_blocking : bool=False,
copy : bool=False) -> Tensor
Tensor.to_dense(dtype : Optional[int],
masked_grad : Optional[bool]) -> Tensor
Tensor.to_mkldnn(dtype : Optional[int]) -> Tensor
Tensor.to_mkldnn(dtype : Optional[int],
out : Tensor) -> Tensor
Tensor.to_padded_tensor(padding : float,
output_size : Optional[List[int]],
out : Tensor) -> Tensor
Tensor.to_padded_tensor(padding : float,
output_size : Optional[List[int]]) -> Tensor
Tensor.to_sparse(sparse_dim : int) -> Tensor
Tensor.to_sparse(layout : Optional[int],
blocksize : Optional[List[int]],
dense_dim : Optional[int]) -> Tensor
Tensor.to_sparse_bsc(blocksize : List[int],
dense_dim : Optional[int]) -> Tensor
Tensor.to_sparse_bsr(blocksize : List[int],
dense_dim : Optional[int]) -> Tensor
Tensor.to_sparse_csc(dense_dim : Optional[int]) -> Tensor
Tensor.to_sparse_csr(dense_dim : Optional[int]) -> Tensor
Tensor.topk(k : int,
dim : int=-1,
largest : bool=True,
sorted : bool=True) -> Tuple[Tensor, Tensor]
Tensor.topk(k : int,
dim : int=-1,
largest : bool=True,
sorted : bool=True,
values : Tensor,
indices : Tensor) -> Tuple[Tensor, Tensor]
Tensor.trace() -> Tensor
Tensor.trace(out : Tensor) -> Tensor
Tensor.transpose(dim0 : int,
dim1 : int) -> Tensor
Tensor.transpose(dim0 : str,
dim1 : str) -> Tensor
Tensor.transpose_(dim0 : int,
dim1 : int) -> Tensor
Tensor.triangular_solve(A : Tensor,
upper : bool=True,
transpose : bool=False,
unitriangular : bool=False) -> Tuple[Tensor, Tensor]
Tensor.triangular_solve(A : Tensor,
upper : bool=True,
transpose : bool=False,
unitriangular : bool=False,
X : Tensor,
M : Tensor) -> Tuple[Tensor, Tensor]
Tensor.tril(diagonal : int=0) -> Tensor
Tensor.tril(diagonal : int=0,
out : Tensor) -> Tensor
Tensor.tril_(diagonal : int=0) -> Tensor
Tensor.triu(diagonal : int=0) -> Tensor
Tensor.triu(diagonal : int=0,
out : Tensor) -> Tensor
Tensor.triu_(diagonal : int=0) -> Tensor
Tensor.true_divide(other : Tensor) -> Tensor
Tensor.true_divide(other : number) -> Tensor
Tensor.true_divide(other : Tensor,
out : Tensor) -> Tensor
Tensor.true_divide_(other : Tensor) -> Tensor
Tensor.true_divide_(other : number) -> Tensor
Tensor.trunc() -> Tensor
Tensor.trunc(out : Tensor) -> Tensor
Tensor.trunc_() -> Tensor
Tensor.type_as(other : Tensor) -> Tensor
Tensor.unbind(dim : int=0) -> List[Tensor]
Tensor.unbind(dim : str) -> List[Tensor]
Tensor.unflatten(dim : int,
sizes : List[int]) -> Tensor
Tensor.unflatten(dim : str,
sizes : List[int],
names : List[str]) -> Tensor
Tensor.unfold(dimension : int,
size : int,
step : int) -> Tensor
Tensor.uniform_(from : float=0.0,
to : float=1.0,
generator : Optional[Generator]) -> Tensor
Tensor.unique_consecutive(return_inverse : bool=False,
return_counts : bool=False,
dim : Optional[int]) -> Tuple[Tensor, Tensor, Tensor]
Tensor.unique_consecutive(return_inverse : bool=False,
return_counts : bool=False,
dim : Optional[int],
out0 : Tensor,
out1 : Tensor,
out2 : Tensor) -> Tuple[Tensor, Tensor, Tensor]
Tensor.unsafe_chunk(chunks : int,
dim : int=0) -> List[Tensor]
Tensor.unsafe_split(split_size : int,
dim : int=0) -> List[Tensor]
Tensor.unsafe_split(split_size : int,
dim : int=0,
out : List[Tensor]) -> Tuple[]
Tensor.unsafe_split_with_sizes(split_sizes : List[int],
dim : int=0) -> List[Tensor]
Tensor.unsafe_split_with_sizes(split_sizes : List[int],
dim : int=0,
out : List[Tensor]) -> Tuple[]
Tensor.unsqueeze(dim : int) -> Tensor
Tensor.unsqueeze_(dim : int) -> Tensor
Tensor.values() -> Tensor
Tensor.var(unbiased : bool=True) -> Tensor
Tensor.var(dim : Optional[List[int]],
unbiased : bool=True,
keepdim : bool=False) -> Tensor
Tensor.var(dim : Optional[List[int]],
correction : Optional[number],
keepdim : bool=False) -> Tensor
Tensor.var(dim : List[str],
unbiased : bool=True,
keepdim : bool=False) -> Tensor
Tensor.var(dim : List[str],
unbiased : bool=True,
keepdim : bool=False,
out : Tensor) -> Tensor
Tensor.var(dim : Optional[List[int]],
unbiased : bool=True,
keepdim : bool=False,
out : Tensor) -> Tensor
Tensor.var(dim : Optional[List[int]],
correction : Optional[number],
keepdim : bool=False,
out : Tensor) -> Tensor
Tensor.var(dim : List[str],
correction : Optional[number],
keepdim : bool=False) -> Tensor
Tensor.var(dim : List[str],
correction : Optional[number],
keepdim : bool=False,
out : Tensor) -> Tensor
Tensor.vdot(other : Tensor) -> Tensor
Tensor.vdot(other : Tensor,
out : Tensor) -> Tensor
Tensor.view(size : List[int]) -> Tensor
Tensor.view(dtype : int) -> Tensor
Tensor.view_as(other : Tensor) -> Tensor
Tensor.vsplit(sections : int) -> List[Tensor]
Tensor.vsplit(indices : List[int]) -> List[Tensor]
Tensor.xlogy(other : Tensor) -> Tensor
Tensor.xlogy(other : number) -> Tensor
Tensor.xlogy(other : Tensor,
out : Tensor) -> Tensor
Tensor.xlogy(other : number,
out : Tensor) -> Tensor
Tensor.xlogy_(other : Tensor) -> Tensor
Tensor.xlogy_(other : number) -> Tensor
Tensor.zero_() -> Tensor
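All of the Tensor methods listed above can be called from scripted code. The snippet below is a minimal, illustrative sketch exercising a few of them (Tensor.split, Tensor.to and Tensor.sum); the function name, shapes and values are assumptions made up for the example, not part of this reference.

import torch
from typing import List

@torch.jit.script
def summarize(x: torch.Tensor) -> List[torch.Tensor]:
    # Tensor.split(split_size : int, dim : int=0) -> List[Tensor]
    chunks = x.split(2, dim=0)
    out: List[torch.Tensor] = []
    for c in chunks:
        # Tensor.to(dtype : int, ...) followed by
        # Tensor.sum(dim : Optional[List[int]], keepdim : bool=False, dtype : Optional[int])
        out.append(c.to(torch.float64).sum(dim=[0], keepdim=False))
    return out

# Example call: a 6x2 tensor split into three 2x2 blocks, each reduced over dim 0.
print(summarize(torch.arange(12.0).reshape(6, 2)))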
Supported PyTorch Functions¶
torch.nn.functional.adaptive_avg_pool2d(input : Tensor,
output_size : List[int]) -> Tensor
torch.nn.functional.adaptive_avg_pool3d(input : Tensor,
output_size : List[int]) -> Tensor
torch.nn.functional.adaptive_max_pool1d_with_indices(input : Tensor,
output_size : List[int],
return_indices : bool=False) -> Tuple[Tensor, Tensor]
torch.nn.functional.adaptive_max_pool2d_with_indices(input : Tensor,
output_size : List[int],
return_indices : bool=False) -> Tuple[Tensor, Tensor]
torch.nn.functional.adaptive_max_pool3d_with_indices(input : Tensor,
output_size : List[int],
return_indices : bool=False) -> Tuple[Tensor, Tensor]
torch.nn.functional.affine_grid(theta : Tensor,
size : List[int],
align_corners : Optional[bool]) -> Tensor
torch.nn.functional.alpha_dropout(input : Tensor,
p : float=0.5,
training : bool=False,
inplace : bool=False) -> Tensor
torch.nn.functional.assert_int_or_pair(arg : List[int],
arg_name : str,
message : str) -> NoneType
torch.nn.functional.batch_norm(input : Tensor,
running_mean : Optional[Tensor],
running_var : Optional[Tensor],
weight : Optional[Tensor],
bias : Optional[Tensor],
training : bool=False,
momentum : float=0.1,
eps : float=1e-05) -> Tensor
torch.nn.functional.binary_cross_entropy(input : Tensor,
target : Tensor,
weight : Optional[Tensor],
size_average : Optional[bool],
reduce : Optional[bool],
reduction : str=mean) -> Tensor
torch.nn.functional.binary_cross_entropy_with_logits(input : Tensor,
target : Tensor,
weight : Optional[Tensor],
size_average : Optional[bool],
reduce : Optional[bool],
reduction : str=mean,
pos_weight : Optional[Tensor]) -> Tensor
torch.nn.functional.celu(input : Tensor,
alpha : float=1.0,
inplace : bool=False) -> Tensor
torch.nn.functional.cosine_embedding_loss(input1 : Tensor,
input2 : Tensor,
target : Tensor,
margin : float=0.0,
size_average : Optional[bool],
reduce : Optional[bool],
reduction : str=mean) -> Tensor
torch.nn.functional.cross_entropy(input : Tensor,
target : Tensor,
weight : Optional[Tensor],
size_average : Optional[bool],
ignore_index : int=-100,
reduce : Optional[bool],
reduction : str=mean,
label_smoothing : float=0.0) -> Tensor
torch.nn.functional.ctc_loss(log_probs : Tensor,
targets : Tensor,
input_lengths : Tensor,
target_lengths : Tensor,
blank : int=0,
reduction : str=mean,
zero_infinity : bool=False) -> Tensor
torch.nn.functional.dropout(input : Tensor,
p : float=0.5,
training : bool=True,
inplace : bool=False) -> Tensor
torch.nn.functional.dropout1d(input : Tensor,
p : float=0.5,
training : bool=True,
inplace : bool=False) -> Tensor
torch.nn.functional.dropout2d(input : Tensor,
p : float=0.5,
training : bool=True,
inplace : bool=False) -> Tensor
torch.nn.functional.dropout3d(input : Tensor,
p : float=0.5,
training : bool=True,
inplace : bool=False) -> Tensor
torch.nn.functional.elu(input : Tensor,
alpha : float=1.0,
inplace : bool=False) -> Tensor
torch.nn.functional.embedding(input : Tensor,
weight : Tensor,
padding_idx : Optional[int],
max_norm : Optional[float],
norm_type : float=2.0,
scale_grad_by_freq : bool=False,
sparse : bool=False) -> Tensor
torch.nn.functional.embedding_bag(input : Tensor,
weight : Tensor,
offsets : Optional[Tensor],
max_norm : Optional[float],
norm_type : float=2.0,
scale_grad_by_freq : bool=False,
mode : str=mean,
sparse : bool=False,
per_sample_weights : Optional[Tensor],
include_last_offset : bool=False,
padding_idx : Optional[int]) -> Tensor
torch.nn.functional.feature_alpha_dropout(input : Tensor,
p : float=0.5,
training : bool=False,
inplace : bool=False) -> Tensor
torch.nn.functional.fold(input : Tensor,
output_size : List[int],
kernel_size : List[int],
dilation : List[int]=1,
padding : List[int]=0,
stride : List[int]=1) -> Tensor
torch.nn.functional.fractional_max_pool2d_with_indices(input : Tensor,
kernel_size : List[int],
output_size : Optional[List[int]],
output_ratio : Optional[List[float]],
return_indices : bool=False,
_random_samples : Optional[Tensor]) -> Tuple[Tensor, Tensor]
torch.nn.functional.fractional_max_pool3d_with_indices(input : Tensor,
kernel_size : List[int],
output_size : Optional[List[int]],
output_ratio : Optional[List[float]],
return_indices : bool=False,
_random_samples : Optional[Tensor]) -> Tuple[Tensor, Tensor]
torch.nn.functional.gaussian_nll_loss(input : Tensor,
target : Tensor,
var : Union[Tensor, float],
full : bool=False,
eps : float=1e-06,
reduction : str=mean) -> Tensor
torch.nn.functional.glu(input : Tensor,
dim : int=-1) -> Tensor
torch.nn.functional.grid_sample(input : Tensor,
grid : Tensor,
mode : str=bilinear,
padding_mode : str=zeros,
align_corners : Optional[bool]) -> Tensor
torch.nn.functional.group_norm(input : Tensor,
num_groups : int,
weight : Optional[Tensor],
bias : Optional[Tensor],
eps : float=1e-05) -> Tensor
torch.nn.functional.gumbel_softmax(logits : Tensor,
tau : float=1.0,
hard : bool=False,
eps : float=1e-10,
dim : int=-1) -> Tensor
torch.nn.functional.hardsigmoid(input : Tensor,
inplace : bool=False) -> Tensor
torch.nn.functional.hardswish(input : Tensor,
inplace : bool=False) -> Tensor
torch.nn.functional.hardtanh(input : Tensor,
min_val : float=-1.0,
max_val : float=1.0,
inplace : bool=False) -> Tensor
torch.nn.functional.hinge_embedding_loss(input : Tensor,
target : Tensor,
margin : float=1.0,
size_average : Optional[bool],
reduce : Optional[bool],
reduction : str=mean) -> Tensor
torch.nn.functional.huber_loss(input : Tensor,
target : Tensor,
reduction : str=mean,
delta : float=1.0,
weight : Optional[Tensor]) -> Tensor
torch.nn.functional.instance_norm(input : Tensor,
running_mean : Optional[Tensor],
running_var : Optional[Tensor],
weight : Optional[Tensor],
bias : Optional[Tensor],
use_input_stats : bool=True,
momentum : float=0.1,
eps : float=1e-05) -> Tensor
torch.nn.functional.kl_div(input : Tensor,
target : Tensor,
size_average : Optional[bool],
reduce : Optional[bool],
reduction : str=mean,
log_target : bool=False) -> Tensor
torch.nn.functional.l1_loss(input : Tensor,
target : Tensor,
size_average : Optional[bool],
reduce : Optional[bool],
reduction : str=mean,
weight : Optional[Tensor]) -> Tensor
torch.nn.functional.layer_norm(input : Tensor,
normalized_shape : List[int],
weight : Optional[Tensor],
bias : Optional[Tensor],
eps : float=1e-05) -> Tensor
torch.nn.functional.leaky_relu(input : Tensor,
negative_slope : float=0.01,
inplace : bool=False) -> Tensor
torch.nn.functional.local_response_norm(input : Tensor,
size : int,
alpha : float=0.0001,
beta : float=0.75,
k : float=1.0) -> Tensor
torch.nn.functional.log_softmax(input : Tensor,
dim : Optional[int],
_stacklevel : int=3,
dtype : Optional[int]) -> Tensor
torch.nn.functional.lp_pool1d(input : Tensor,
norm_type : Union[float, int],
kernel_size : int,
stride : Optional[List[int]],
ceil_mode : bool=False) -> Tensor
torch.nn.functional.lp_pool2d(input : Tensor,
norm_type : Union[float, int],
kernel_size : List[int],
stride : Optional[List[int]],
ceil_mode : bool=False) -> Tensor
torch.nn.functional.lp_pool3d(input : Tensor,
norm_type : Union[float, int],
kernel_size : List[int],
stride : Optional[List[int]],
ceil_mode : bool=False) -> Tensor
torch.nn.functional.margin_ranking_loss(input1 : Tensor,
input2 : Tensor,
target : Tensor,
margin : float=0.0,
size_average : Optional[bool],
reduce : Optional[bool],
reduction : str=mean) -> Tensor
torch.nn.functional.max_pool1d_with_indices(input : Tensor,
kernel_size : List[int],
stride : Optional[List[int]],
padding : List[int]=0,
dilation : List[int]=1,
ceil_mode : bool=False,
return_indices : bool=False) -> Tuple[Tensor, Tensor]
torch.nn.functional.max_pool2d_with_indices(input : Tensor,
kernel_size : List[int],
stride : Optional[List[int]],
padding : List[int]=0,
dilation : List[int]=1,
ceil_mode : bool=False,
return_indices : bool=False) -> Tuple[Tensor, Tensor]
torch.nn.functional.max_pool3d_with_indices(input : Tensor,
kernel_size : List[int],
stride : Optional[List[int]],
padding : List[int]=0,
dilation : List[int]=1,
ceil_mode : bool=False,
return_indices : bool=False) -> Tuple[Tensor, Tensor]
torch.nn.functional.max_unpool1d(input : Tensor,
indices : Tensor,
kernel_size : List[int],
stride : Optional[List[int]],
padding : List[int]=0,
output_size : Optional[List[int]]) -> Tensor
torch.nn.functional.max_unpool2d(input : Tensor,
indices : Tensor,
kernel_size : List[int],
stride : Optional[List[int]],
padding : List[int]=0,
output_size : Optional[List[int]]) -> Tensor
torch.nn.functional.max_unpool3d(input : Tensor,
indices : Tensor,
kernel_size : List[int],
stride : Optional[List[int]],
padding : List[int]=0,
output_size : Optional[List[int]]) -> Tensor
torch.nn.functional.mish(input : Tensor,
inplace : bool=False) -> Tensor
torch.nn.functional.mse_loss(input : Tensor,
target : Tensor,
size_average : Optional[bool],
reduce : Optional[bool],
reduction : str=mean,
weight : Optional[Tensor]) -> Tensor
torch.nn.functional.multi_head_attention_forward(query : Tensor,
key : Tensor,
value : Tensor,
embed_dim_to_check : int,
num_heads : int,
in_proj_weight : Optional[Tensor],
in_proj_bias : Optional[Tensor],
bias_k : Optional[Tensor],
bias_v : Optional[Tensor],
add_zero_attn : bool,
dropout_p : float,
out_proj_weight : Tensor,
out_proj_bias : Optional[Tensor],
training : bool=True,
key_padding_mask : Optional[Tensor],
need_weights : bool=True,
attn_mask : Optional[Tensor],
use_separate_proj_weight : bool=False,
q_proj_weight : Optional[Tensor],
k_proj_weight : Optional[Tensor],
v_proj_weight : Optional[Tensor],
static_k : Optional[Tensor],
static_v : Optional[Tensor],
average_attn_weights : bool=True,
is_causal : bool=False) -> Tuple[Tensor, Optional[Tensor]]
torch.nn.functional.multi_margin_loss(input : Tensor,
target : Tensor,
p : int=1,
margin : float=1.0,
weight : Optional[Tensor],
size_average : Optional[bool],
reduce : Optional[bool],
reduction : str=mean) -> Tensor
torch.nn.functional.multilabel_margin_loss(input : Tensor,
target : Tensor,
size_average : Optional[bool],
reduce : Optional[bool],
reduction : str=mean) -> Tensor
torch.nn.functional.multilabel_soft_margin_loss(input : Tensor,
target : Tensor,
weight : Optional[Tensor],
size_average : Optional[bool],
reduce : Optional[bool],
reduction : str=mean) -> Tensor
torch.nn.functional.nll_loss(input : Tensor,
target : Tensor,
weight : Optional[Tensor],
size_average : Optional[bool],
ignore_index : int=-100,
reduce : Optional[bool],
reduction : str=mean) -> Tensor
torch.nn.functional.normalize(input : Tensor,
p : float=2.0,
dim : int=1,
eps : float=1e-12,
out : Optional[Tensor]) -> Tensor
torch.nn.functional.pad(input : Tensor,
pad : List[int],
mode : str=constant,
value : Optional[float]) -> Tensor
torch.nn.functional.poisson_nll_loss(input : Tensor,
target : Tensor,
log_input : bool=True,
full : bool=False,
size_average : Optional[bool],
eps : float=1e-08,
reduce : Optional[bool],
reduction : str=mean) -> Tensor
torch.nn.functional.relu(input : Tensor,
inplace : bool=False) -> Tensor
torch.nn.functional.relu6(input : Tensor,
inplace : bool=False) -> Tensor
torch.nn.functional.rms_norm(input : Tensor,
normalized_shape : List[int],
weight : Optional[Tensor],
eps : Optional[float]) -> Tensor
torch.nn.functional.rrelu(input : Tensor,
lower : float=0.125,
upper : float=0.3333333333333333,
training : bool=False,
inplace : bool=False) -> Tensor
torch.nn.functional.selu(input : Tensor,
inplace : bool=False) -> Tensor
torch.nn.functional.sigmoid(input : Tensor) -> Tensor
torch.nn.functional.silu(input : Tensor,
inplace : bool=False) -> Tensor
torch.nn.functional.smooth_l1_loss(input : Tensor,
target : Tensor,
size_average : Optional[bool],
reduce : Optional[bool],
reduction : str=mean,
beta : float=1.0) -> Tensor
torch.nn.functional.soft_margin_loss(input : Tensor,
target : Tensor,
size_average : Optional[bool],
reduce : Optional[bool],
reduction : str=mean) -> Tensor
torch.nn.functional.softmax(input : Tensor,
dim : Optional[int],
_stacklevel : int=3,
dtype : Optional[int]) -> Tensor
torch.nn.functional.softmin(input : Tensor,
dim : Optional[int],
_stacklevel : int=3,
dtype : Optional[int]) -> Tensor
torch.nn.functional.softsign(input : Tensor) -> Tensor
torch.nn.functional.tanh(input : Tensor) -> Tensor
torch.nn.functional.tanhshrink(input : Tensor) -> Tensor
torch.nn.functional.threshold(input : Tensor,
threshold : float,
value : float,
inplace : bool=False) -> Tensor
torch.nn.functional.triplet_margin_loss(anchor : Tensor,
positive : Tensor,
negative : Tensor,
margin : float=1.0,
p : float=2.0,
eps : float=1e-06,
swap : bool=False,
size_average : Optional[bool],
reduce : Optional[bool],
reduction : str=mean) -> Tensor
torch.nn.functional.unfold(input : Tensor,
kernel_size : List[int],
dilation : List[int]=1,
padding : List[int]=0,
stride : List[int]=1) -> Tensor
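The torch.nn.functional entries above are likewise usable from scripted code. The snippet below is a minimal sketch combining a few of them (relu, dropout and cross_entropy); the tensor shapes, the dropout probability and the tiny_classifier_loss name are assumptions invented for the example.

import torch
import torch.nn.functional as F

@torch.jit.script
def tiny_classifier_loss(x: torch.Tensor, weight: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
    # Illustrative only: relu -> dropout -> matmul -> cross_entropy.
    h = F.relu(x)
    h = F.dropout(h, p=0.1, training=False)
    logits = h.matmul(weight)
    # torch.nn.functional.cross_entropy(..., reduction : str=mean, label_smoothing : float=0.0)
    return F.cross_entropy(logits, target, reduction='mean')

x = torch.randn(4, 8)           # assumed batch of 4 samples with 8 features
w = torch.randn(8, 3)           # assumed projection onto 3 classes
t = torch.randint(0, 3, (4,))   # assumed integer class targets
print(tiny_classifier_loss(x, w, t))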
torch.Generator(device : Optional[Device],
seed : Optional[int]) -> Generator
torch.Size(sizes : List[int]) -> List[int]
torch.abs(self : Tensor) -> Tensor
torch.abs(self : Tensor,
out : Tensor) -> Tensor
torch.abs_(self : Tensor) -> Tensor
torch.absolute(self : Tensor) -> Tensor
torch.absolute(self : Tensor,
out : Tensor) -> Tensor
torch.acos(self : Tensor) -> Tensor
torch.acos(self : Tensor,
out : Tensor) -> Tensor
torch.acos(a : int) -> float
torch.acos(a : float) -> float
torch.acos(a : complex) -> complex
torch.acos(a : number) -> number
torch.acos_(self : Tensor) -> Tensor
torch.acosh(self : Tensor) -> Tensor
torch.acosh(self : Tensor,
out : Tensor) -> Tensor
torch.acosh(a : int) -> float
torch.acosh(a : float) -> float
torch.acosh(a : complex) -> complex
torch.acosh(a : number) -> number
torch.acosh_(self : Tensor) -> Tensor
torch.adaptive_avg_pool1d(self : Tensor,
output_size : List[int]) -> Tensor
torch.adaptive_avg_pool1d(self : Tensor,
output_size : List[int],
out : Tensor) -> Tensor
torch.adaptive_max_pool1d(self : Tensor,
output_size : List[int]) -> Tuple[Tensor, Tensor]
torch.add(self : Tensor,
other : Tensor,
alpha : number=1) -> Tensor
torch.add(self : Tensor,
other : number,
alpha : number=1) -> Tensor
torch.add(self : Tensor,
other : Tensor,
alpha : number=1,
out : Tensor) -> Tensor
torch.add(self : Tensor,
other : number,
alpha : number=1,
out : Tensor) -> Tensor
torch.add(a : List[t],
b : List[t]) -> List[t]
torch.add(a : str,
b : str) -> str
torch.add(a : int,
b : int) -> int
torch.add(a : complex,
b : complex) -> complex
torch.add(a : float,
b : float) -> float
torch.add(a : int,
b : complex) -> complex
torch.add(a : complex,
b : int) -> complex
torch.add(a : float,
b : complex) -> complex
torch.add(a : complex,
b : float) -> complex
torch.add(a : int,
b : float) -> float
torch.add(a : float,
b : int) -> float
torch.add(a : number,
b : number) -> number
torch.addbmm(self : Tensor,
batch1 : Tensor,
batch2 : Tensor,
beta : number=1,
alpha : number=1) -> Tensor
torch.addbmm(self : Tensor,
batch1 : Tensor,
batch2 : Tensor,
beta : number=1,
alpha : number=1,
out : Tensor) -> Tensor
torch.addcdiv(self : Tensor,
tensor1 : Tensor,
tensor2 : Tensor,
value : number=1) -> Tensor
torch.addcdiv(self : Tensor,
tensor1 : Tensor,
tensor2 : Tensor,
value : number=1,
out : Tensor) -> Tensor
torch.addcmul(self : Tensor,
tensor1 : Tensor,
tensor2 : Tensor,
value : number=1) -> Tensor
torch.addcmul(self : Tensor,
tensor1 : Tensor,
tensor2 : Tensor,
value : number=1,
out : Tensor) -> Tensor
torch.addmm(self : Tensor,
mat1 : Tensor,
mat2 : Tensor,
beta : number=1,
alpha : number=1) -> Tensor
torch.addmm(self : Tensor,
mat1 : Tensor,
mat2 : Tensor,
beta : number=1,
alpha : number=1,
out : Tensor) -> Tensor
torch.addmv(self : Tensor,
mat : Tensor,
vec : Tensor,
beta : number=1,
alpha : number=1) -> Tensor
torch.addmv(self : Tensor,
mat : Tensor,
vec : Tensor,
beta : number=1,
alpha : number=1,
out : Tensor) -> Tensor
torch.addmv_(self : Tensor,
mat : Tensor,
vec : Tensor,
beta : number=1,
alpha : number=1) -> Tensor
torch.addr(self : Tensor,
vec1 : Tensor,
vec2 : Tensor,
beta : number=1,
alpha : number=1) -> Tensor
torch.addr(self : Tensor,
vec1 : Tensor,
vec2 : Tensor,
beta : number=1,
alpha : number=1,
out : Tensor) -> Tensor
torch.adjoint(self : Tensor) -> Tensor
torch.affine_grid_generator(theta : Tensor,
size : List[int],
align_corners : bool) -> Tensor
torch.affine_grid_generator(theta : Tensor,
size : List[int],
align_corners : bool,
out : Tensor) -> Tensor
torch.alias_copy(self : Tensor) -> Tensor
torch.alias_copy(self : Tensor,
out : Tensor) -> Tensor
torch.align_tensors(tensors : List[Tensor]) -> List[Tensor]
torch.all(self : Tensor) -> Tensor
torch.all(self : Tensor,
dim : int,
keepdim : bool=False) -> Tensor
torch.all(self : Tensor,
dim : Optional[List[int]],
keepdim : bool=False) -> Tensor
torch.all(self : Tensor,
dim : int,
keepdim : bool=False,
out : Tensor) -> Tensor
torch.all(self : Tensor,
dim : Optional[List[int]],
keepdim : bool=False,
out : Tensor) -> Tensor
torch.all(self : Tensor,
out : Tensor) -> Tensor
torch.all(self : Tensor,
dim : str,
keepdim : bool=False) -> Tensor
torch.all(self : Tensor,
dim : str,
keepdim : bool=False,
out : Tensor) -> Tensor
torch.all(self : List[int]) -> bool
torch.all(self : List[float]) -> bool
torch.all(self : List[bool]) -> bool
torch.allclose(self : Tensor,
other : Tensor,
rtol : float=1e-05,
atol : float=1e-08,
equal_nan : bool=False) -> bool
torch.alpha_dropout(input : Tensor,
p : float,
train : bool) -> Tensor
torch.alpha_dropout_(self : Tensor,
p : float,
train : bool) -> Tensor
torch.amax(self : Tensor,
dim : List[int]=[],
keepdim : bool=False) -> Tensor
torch.amax(self : Tensor,
dim : List[int]=[],
keepdim : bool=False,
out : Tensor) -> Tensor
torch.amin(self : Tensor,
dim : List[int]=[],
keepdim : bool=False) -> Tensor
torch.amin(self : Tensor,
dim : List[int]=[],
keepdim : bool=False,
out : Tensor) -> Tensor
torch.aminmax(self : Tensor,
dim : Optional[int],
keepdim : bool=False) -> Tuple[Tensor, Tensor]
torch.aminmax(self : Tensor,
dim : Optional[int],
keepdim : bool=False,
min : Tensor,
max : Tensor) -> Tuple[Tensor, Tensor]
torch.angle(self : Tensor) -> Tensor
torch.angle(self : Tensor,
out : Tensor) -> Tensor
torch.angle(a : int) -> float
torch.angle(a : float) -> float
torch.angle(a : complex) -> float
torch.angle(a : number) -> number
torch.any(self : Tensor) -> Tensor
torch.any(self : Tensor,
dim : int,
keepdim : bool=False) -> Tensor
torch.any(self : Tensor,
dim : Optional[List[int]],
keepdim : bool=False) -> Tensor
torch.any(self : Tensor,
dim : int,
keepdim : bool=False,
out : Tensor) -> Tensor
torch.any(self : Tensor,
dim : Optional[List[int]],
keepdim : bool=False,
out : Tensor) -> Tensor
torch.any(self : Tensor,
out : Tensor) -> Tensor
torch.any(self : Tensor,
dim : str,
keepdim : bool=False) -> Tensor
torch.any(self : Tensor,
dim : str,
keepdim : bool=False,
out : Tensor) -> Tensor
torch.any(self : List[str]) -> bool
torch.any(self : List[int]) -> bool
torch.any(self : List[float]) -> bool
torch.any(self : List[bool]) -> bool
torch.arange(end : number,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.arange(start : number,
end : number,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.arange(start : number,
end : number,
step : number=1,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.arange(start : number,
end : number,
step : number=1,
out : Tensor) -> Tensor
torch.arange(end : number,
out : Tensor) -> Tensor
torch.arccos(self : Tensor) -> Tensor
torch.arccos(self : Tensor,
out : Tensor) -> Tensor
torch.arccos_(self : Tensor) -> Tensor
torch.arccosh(self : Tensor) -> Tensor
torch.arccosh(self : Tensor,
out : Tensor) -> Tensor
torch.arccosh_(self : Tensor) -> Tensor
torch.arcsin(self : Tensor) -> Tensor
torch.arcsin(self : Tensor,
out : Tensor) -> Tensor
torch.arcsin_(self : Tensor) -> Tensor
torch.arcsinh(self : Tensor) -> Tensor
torch.arcsinh(self : Tensor,
out : Tensor) -> Tensor
torch.arcsinh_(self : Tensor) -> Tensor
torch.arctan(self : Tensor) -> Tensor
torch.arctan(self : Tensor,
out : Tensor) -> Tensor
torch.arctan2(self : Tensor,
other : Tensor) -> Tensor
torch.arctan2(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.arctan_(self : Tensor) -> Tensor
torch.arctanh(self : Tensor) -> Tensor
torch.arctanh(self : Tensor,
out : Tensor) -> Tensor
torch.arctanh_(self : Tensor) -> Tensor
torch.argmax(self : Tensor,
dim : Optional[int],
keepdim : bool=False) -> Tensor
torch.argmax(self : Tensor,
dim : Optional[int],
keepdim : bool=False,
out : Tensor) -> Tensor
torch.argmin(self : Tensor,
dim : Optional[int],
keepdim : bool=False) -> Tensor
torch.argmin(self : Tensor,
dim : Optional[int],
keepdim : bool=False,
out : Tensor) -> Tensor
torch.argsort(self : Tensor,
dim : int=-1,
descending : bool=False) -> Tensor
torch.argsort(self : Tensor,
stable : bool,
dim : int=-1,
descending : bool=False) -> Tensor
torch.argsort(self : Tensor,
stable : bool,
dim : int=-1,
descending : bool=False,
out : Tensor) -> Tensor
torch.argsort(self : Tensor,
dim : str,
descending : bool=False) -> Tensor
torch.argwhere(self : Tensor) -> Tensor
torch.as_strided(self : Tensor,
size : List[int],
stride : List[int],
storage_offset : Optional[int]) -> Tensor
torch.as_strided_(self : Tensor,
size : List[int],
stride : List[int],
storage_offset : Optional[int]) -> Tensor
torch.as_strided_copy(self : Tensor,
size : List[int],
stride : List[int],
storage_offset : Optional[int]) -> Tensor
torch.as_strided_copy(self : Tensor,
size : List[int],
stride : List[int],
storage_offset : Optional[int],
out : Tensor) -> Tensor
torch.as_strided_scatter(self : Tensor,
src : Tensor,
size : List[int],
stride : List[int],
storage_offset : Optional[int]) -> Tensor
torch.as_strided_scatter(self : Tensor,
src : Tensor,
size : List[int],
stride : List[int],
storage_offset : Optional[int],
out : Tensor) -> Tensor
torch.as_tensor(t : bool,
dtype : Optional[int],
device : Optional[Device]) -> Tensor
torch.as_tensor(t : float,
dtype : Optional[int],
device : Optional[Device]) -> Tensor
torch.as_tensor(t : int,
dtype : Optional[int],
device : Optional[Device]) -> Tensor
torch.as_tensor(t : complex,
dtype : Optional[int],
device : Optional[Device]) -> Tensor
torch.as_tensor(data : Tensor,
dtype : Optional[int],
device : Optional[Device]) -> Tensor
torch.as_tensor(data : List[t],
dtype : Optional[int],
device : Optional[Device]) -> Tensor
torch.asin(self : Tensor) -> Tensor
torch.asin(self : Tensor,
out : Tensor) -> Tensor
torch.asin(a : int) -> float
torch.asin(a : float) -> float
torch.asin(a : complex) -> complex
torch.asin(a : number) -> number
torch.asin_(self : Tensor) -> Tensor
torch.asinh(self : Tensor) -> Tensor
torch.asinh(self : Tensor,
out : Tensor) -> Tensor
torch.asinh(a : int) -> float
torch.asinh(a : float) -> float
torch.asinh(a : complex) -> complex
torch.asinh(a : number) -> number
torch.asinh_(self : Tensor) -> Tensor
torch.atan(self : Tensor) -> Tensor
torch.atan(self : Tensor,
out : Tensor) -> Tensor
torch.atan(a : int) -> float
torch.atan(a : float) -> float
torch.atan(a : complex) -> complex
torch.atan(a : number) -> number
torch.atan2(self : Tensor,
other : Tensor) -> Tensor
torch.atan2(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.atan2(a : int,
b : int) -> float
torch.atan2(a : float,
b : float) -> float
torch.atan2(a : int,
b : float) -> float
torch.atan2(a : float,
b : int) -> float
torch.atan2(a : number,
b : number) -> float
torch.atan_(self : Tensor) -> Tensor
torch.atanh(self : Tensor) -> Tensor
torch.atanh(self : Tensor,
out : Tensor) -> Tensor
torch.atanh(a : int) -> float
torch.atanh(a : float) -> float
torch.atanh(a : complex) -> complex
torch.atanh(a : number) -> number
torch.atanh_(self : Tensor) -> Tensor
torch.atleast_1d(self : Tensor) -> Tensor
torch.atleast_1d(tensors : List[Tensor]) -> List[Tensor]
torch.atleast_2d(self : Tensor) -> Tensor
torch.atleast_2d(tensors : List[Tensor]) -> List[Tensor]
torch.atleast_3d(self : Tensor) -> Tensor
torch.atleast_3d(tensors : List[Tensor]) -> List[Tensor]
torch.avg_pool1d(self : Tensor,
kernel_size : List[int],
stride : List[int]=[],
padding : List[int]=[0],
ceil_mode : bool=False,
count_include_pad : bool=True) -> Tensor
torch.avg_pool1d(self : Tensor,
kernel_size : List[int],
stride : List[int]=[],
padding : List[int]=[0],
ceil_mode : bool=False,
count_include_pad : bool=True,
out : Tensor) -> Tensor
torch.baddbmm(self : Tensor,
batch1 : Tensor,
batch2 : Tensor,
beta : number=1,
alpha : number=1) -> Tensor
torch.baddbmm(self : Tensor,
batch1 : Tensor,
batch2 : Tensor,
beta : number=1,
alpha : number=1,
out : Tensor) -> Tensor
torch.bartlett_window(window_length : int,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.bartlett_window(window_length : int,
periodic : bool,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.bartlett_window(window_length : int,
out : Tensor) -> Tensor
torch.bartlett_window(window_length : int,
periodic : bool,
out : Tensor) -> Tensor
torch.batch_norm(input : Tensor,
weight : Optional[Tensor],
bias : Optional[Tensor],
running_mean : Optional[Tensor],
running_var : Optional[Tensor],
training : bool,
momentum : float,
eps : float,
cudnn_enabled : bool) -> Tensor
torch.batch_norm_backward_elemt(grad_out : Tensor,
input : Tensor,
mean : Tensor,
invstd : Tensor,
weight : Optional[Tensor],
sum_dy : Tensor,
sum_dy_xmu : Tensor,
count : Tensor,
out : Tensor) -> Tensor
torch.batch_norm_backward_elemt(grad_out : Tensor,
input : Tensor,
mean : Tensor,
invstd : Tensor,
weight : Optional[Tensor],
sum_dy : Tensor,
sum_dy_xmu : Tensor,
count : Tensor) -> Tensor
torch.batch_norm_backward_reduce(grad_out : Tensor,
input : Tensor,
mean : Tensor,
invstd : Tensor,
weight : Optional[Tensor],
input_g : bool,
weight_g : bool,
bias_g : bool,
out0 : Tensor,
out1 : Tensor,
out2 : Tensor,
out3 : Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor]
torch.batch_norm_backward_reduce(grad_out : Tensor,
input : Tensor,
mean : Tensor,
invstd : Tensor,
weight : Optional[Tensor],
input_g : bool,
weight_g : bool,
bias_g : bool) -> Tuple[Tensor, Tensor, Tensor, Tensor]
torch.batch_norm_elemt(input : Tensor,
weight : Optional[Tensor],
bias : Optional[Tensor],
mean : Tensor,
invstd : Tensor,
eps : float,
out : Tensor) -> Tensor
torch.batch_norm_elemt(input : Tensor,
weight : Optional[Tensor],
bias : Optional[Tensor],
mean : Tensor,
invstd : Tensor,
eps : float) -> Tensor
torch.batch_norm_gather_stats(input : Tensor,
mean : Tensor,
invstd : Tensor,
running_mean : Optional[Tensor],
running_var : Optional[Tensor],
momentum : float,
eps : float,
count : int,
out0 : Tensor,
out1 : Tensor) -> Tuple[Tensor, Tensor]
torch.batch_norm_gather_stats(input : Tensor,
mean : Tensor,
invstd : Tensor,
running_mean : Optional[Tensor],
running_var : Optional[Tensor],
momentum : float,
eps : float,
count : int) -> Tuple[Tensor, Tensor]
torch.batch_norm_gather_stats_with_counts(input : Tensor,
mean : Tensor,
invstd : Tensor,
running_mean : Optional[Tensor],
running_var : Optional[Tensor],
momentum : float,
eps : float,
counts : Tensor,
out0 : Tensor,
out1 : Tensor) -> Tuple[Tensor, Tensor]
torch.batch_norm_gather_stats_with_counts(input : Tensor,
mean : Tensor,
invstd : Tensor,
running_mean : Optional[Tensor],
running_var : Optional[Tensor],
momentum : float,
eps : float,
counts : Tensor) -> Tuple[Tensor, Tensor]
torch.batch_norm_stats(input : Tensor,
eps : float,
out0 : Tensor,
out1 : Tensor) -> Tuple[Tensor, Tensor]
torch.batch_norm_stats(input : Tensor,
eps : float) -> Tuple[Tensor, Tensor]
torch.batch_norm_update_stats(input : Tensor,
running_mean : Optional[Tensor],
running_var : Optional[Tensor],
momentum : float) -> Tuple[Tensor, Tensor]
torch.batch_norm_update_stats(input : Tensor,
running_mean : Optional[Tensor],
running_var : Optional[Tensor],
momentum : float,
out0 : Tensor,
out1 : Tensor) -> Tuple[Tensor, Tensor]
torch.bernoulli(self : Tensor,
generator : Optional[Generator]) -> Tensor
torch.bernoulli(self : Tensor,
generator : Optional[Generator],
out : Tensor) -> Tensor
torch.bernoulli(self : Tensor,
p : float,
generator : Optional[Generator]) -> Tensor
torch.bernoulli(self : Tensor,
p : Tensor,
generator : Optional[Generator]) -> Tensor
torch.bernoulli(self : Tensor,
p : Tensor,
generator : Optional[Generator],
out : Tensor) -> Tensor
torch.bernoulli(self : Tensor,
p : float=0.5,
generator : Optional[Generator],
out : Tensor) -> Tensor
torch.bilinear(input1 : Tensor,
input2 : Tensor,
weight : Tensor,
bias : Optional[Tensor]) -> Tensor
torch.binary_cross_entropy_with_logits(self : Tensor,
target : Tensor,
weight : Optional[Tensor],
pos_weight : Optional[Tensor],
reduction : int=1) -> Tensor
torch.binary_cross_entropy_with_logits(self : Tensor,
target : Tensor,
weight : Optional[Tensor],
pos_weight : Optional[Tensor],
reduction : int=1,
out : Tensor) -> Tensor
torch.bincount(self : Tensor,
weights : Optional[Tensor],
minlength : int=0) -> Tensor
torch.bincount(self : Tensor,
weights : Optional[Tensor],
minlength : int=0,
out : Tensor) -> Tensor
torch.binomial(count : Tensor,
prob : Tensor,
generator : Optional[Generator]) -> Tensor
torch.binomial(count : Tensor,
prob : Tensor,
generator : Optional[Generator],
out : Tensor) -> Tensor
torch.bitwise_and(self : Tensor,
other : Tensor) -> Tensor
torch.bitwise_and(self : Tensor,
other : number) -> Tensor
torch.bitwise_and(self : number,
other : Tensor) -> Tensor
torch.bitwise_and(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.bitwise_and(self : Tensor,
other : number,
out : Tensor) -> Tensor
torch.bitwise_and(self : number,
other : Tensor,
out : Tensor) -> Tensor
torch.bitwise_left_shift(self : Tensor,
other : Tensor) -> Tensor
torch.bitwise_left_shift(self : Tensor,
other : number) -> Tensor
torch.bitwise_left_shift(self : number,
other : Tensor) -> Tensor
torch.bitwise_left_shift(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.bitwise_left_shift(self : Tensor,
other : number,
out : Tensor) -> Tensor
torch.bitwise_left_shift(self : number,
other : Tensor,
out : Tensor) -> Tensor
torch.bitwise_not(self : Tensor) -> Tensor
torch.bitwise_not(self : Tensor,
out : Tensor) -> Tensor
torch.bitwise_or(self : Tensor,
other : Tensor) -> Tensor
torch.bitwise_or(self : Tensor,
other : number) -> Tensor
torch.bitwise_or(self : number,
other : Tensor) -> Tensor
torch.bitwise_or(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.bitwise_or(self : Tensor,
other : number,
out : Tensor) -> Tensor
torch.bitwise_or(self : number,
other : Tensor,
out : Tensor) -> Tensor
torch.bitwise_right_shift(self : Tensor,
other : Tensor) -> Tensor
torch.bitwise_right_shift(self : Tensor,
other : number) -> Tensor
torch.bitwise_right_shift(self : number,
other : Tensor) -> Tensor
torch.bitwise_right_shift(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.bitwise_right_shift(self : Tensor,
other : number,
out : Tensor) -> Tensor
torch.bitwise_right_shift(self : number,
other : Tensor,
out : Tensor) -> Tensor
torch.bitwise_xor(self : Tensor,
other : Tensor) -> Tensor
torch.bitwise_xor(self : Tensor,
other : number) -> Tensor
torch.bitwise_xor(self : number,
other : Tensor) -> Tensor
torch.bitwise_xor(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.bitwise_xor(self : Tensor,
other : number,
out : Tensor) -> Tensor
torch.bitwise_xor(self : number,
other : Tensor,
out : Tensor) -> Tensor
torch.blackman_window(window_length : int,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.blackman_window(window_length : int,
periodic : bool,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.blackman_window(window_length : int,
out : Tensor) -> Tensor
torch.blackman_window(window_length : int,
periodic : bool,
out : Tensor) -> Tensor
torch.block_diag(tensors : List[Tensor]) -> Tensor
torch.block_diag(tensors : List[Tensor],
out : Tensor) -> Tensor
torch.bmm(self : Tensor,
mat2 : Tensor) -> Tensor
torch.bmm(self : Tensor,
mat2 : Tensor,
out : Tensor) -> Tensor
torch.broadcast_tensors(tensors : List[Tensor]) -> List[Tensor]
torch.broadcast_to(self : Tensor,
size : List[int]) -> Tensor
torch.bucketize(self : Tensor,
boundaries : Tensor,
out_int32 : bool=False,
right : bool=False) -> Tensor
torch.bucketize(self : number,
boundaries : Tensor,
out_int32 : bool=False,
right : bool=False) -> Tensor
torch.bucketize(self : Tensor,
boundaries : Tensor,
out_int32 : bool=False,
right : bool=False,
out : Tensor) -> Tensor
torch.bucketize(self : number,
boundaries : Tensor,
out_int32 : bool=False,
right : bool=False,
out : Tensor) -> Tensor
torch.can_cast(from_ : int,
to : int) -> bool
torch.cartesian_prod(tensors : List[Tensor]) -> Tensor
torch.cat(tensors : List[Tensor],
dim : int=0) -> Tensor
torch.cat(tensors : List[Tensor],
dim : str) -> Tensor
torch.cat(tensors : List[Tensor],
dim : str,
out : Tensor) -> Tensor
torch.cat(tensors : List[Tensor],
dim : int=0,
out : Tensor) -> Tensor
torch.ccol_indices_copy(self : Tensor,
out : Tensor) -> Tensor
torch.ccol_indices_copy(self : Tensor) -> Tensor
torch.ceil(self : Tensor) -> Tensor
torch.ceil(self : Tensor,
out : Tensor) -> Tensor
torch.ceil(a : int) -> int
torch.ceil(a : float) -> int
torch.ceil(a : number) -> number
torch.ceil_(self : Tensor) -> Tensor
torch.celu(self : Tensor,
alpha : number=1.0) -> Tensor
torch.celu(self : Tensor,
alpha : number=1.0,
out : Tensor) -> Tensor
torch.celu_(self : Tensor,
alpha : number=1.0) -> Tensor
torch.chain_matmul(matrices : List[Tensor]) -> Tensor
torch.chain_matmul(matrices : List[Tensor],
out : Tensor) -> Tensor
torch.channel_shuffle(self : Tensor,
groups : int) -> Tensor
torch.channel_shuffle(self : Tensor,
groups : int,
out : Tensor) -> Tensor
torch.cholesky(self : Tensor,
upper : bool=False) -> Tensor
torch.cholesky(self : Tensor,
upper : bool=False,
out : Tensor) -> Tensor
torch.cholesky_inverse(self : Tensor,
upper : bool=False) -> Tensor
torch.cholesky_inverse(self : Tensor,
upper : bool=False,
out : Tensor) -> Tensor
torch.cholesky_solve(self : Tensor,
input2 : Tensor,
upper : bool=False) -> Tensor
torch.cholesky_solve(self : Tensor,
input2 : Tensor,
upper : bool=False,
out : Tensor) -> Tensor
torch.choose_qparams_optimized(input : Tensor,
numel : int,
n_bins : int,
ratio : float,
bit_width : int) -> Tuple[Tensor, Tensor]
torch.chunk(self : Tensor,
chunks : int,
dim : int=0) -> List[Tensor]
torch.clamp(self : Tensor,
min : Optional[number],
max : Optional[number]) -> Tensor
torch.clamp(self : Tensor,
min : Optional[Tensor],
max : Optional[Tensor]) -> Tensor
torch.clamp(self : Tensor,
min : Optional[number],
max : Optional[number],
out : Tensor) -> Tensor
torch.clamp(self : Tensor,
min : Optional[Tensor],
max : Optional[Tensor],
out : Tensor) -> Tensor
torch.clamp_(self : Tensor,
min : Optional[number],
max : Optional[number]) -> Tensor
torch.clamp_(self : Tensor,
min : Optional[Tensor],
max : Optional[Tensor]) -> Tensor
torch.clamp_max(self : Tensor,
max : number) -> Tensor
torch.clamp_max(self : Tensor,
max : Tensor) -> Tensor
torch.clamp_max(self : Tensor,
max : number,
out : Tensor) -> Tensor
torch.clamp_max(self : Tensor,
max : Tensor,
out : Tensor) -> Tensor
torch.clamp_max_(self : Tensor,
max : number) -> Tensor
torch.clamp_max_(self : Tensor,
max : Tensor) -> Tensor
torch.clamp_min(self : Tensor,
min : number) -> Tensor
torch.clamp_min(self : Tensor,
min : Tensor) -> Tensor
torch.clamp_min(self : Tensor,
min : number,
out : Tensor) -> Tensor
torch.clamp_min(self : Tensor,
min : Tensor,
out : Tensor) -> Tensor
torch.clamp_min_(self : Tensor,
min : number) -> Tensor
torch.clamp_min_(self : Tensor,
min : Tensor) -> Tensor
torch.clip(self : Tensor,
min : Optional[number],
max : Optional[number]) -> Tensor
torch.clip(self : Tensor,
min : Optional[Tensor],
max : Optional[Tensor]) -> Tensor
torch.clip(self : Tensor,
min : Optional[number],
max : Optional[number],
out : Tensor) -> Tensor
torch.clip(self : Tensor,
min : Optional[Tensor],
max : Optional[Tensor],
out : Tensor) -> Tensor
torch.clip_(self : Tensor,
min : Optional[number],
max : Optional[number]) -> Tensor
torch.clip_(self : Tensor,
min : Optional[Tensor],
max : Optional[Tensor]) -> Tensor
torch.clone(self : Tensor,
memory_format : Optional[int]) -> Tensor
torch.clone(self : Tensor,
memory_format : Optional[int],
out : Tensor) -> Tensor
torch.col_indices_copy(self : Tensor,
out : Tensor) -> Tensor
torch.col_indices_copy(self : Tensor) -> Tensor
torch.column_stack(tensors : List[Tensor]) -> Tensor
torch.column_stack(tensors : List[Tensor],
out : Tensor) -> Tensor
torch.combinations(self : Tensor,
r : int=2,
with_replacement : bool=False) -> Tensor
torch.complex(real : Tensor,
imag : Tensor) -> Tensor
torch.complex(real : Tensor,
imag : Tensor,
out : Tensor) -> Tensor
torch.concat(tensors : List[Tensor],
dim : int=0) -> Tensor
torch.concat(tensors : List[Tensor],
dim : int=0,
out : Tensor) -> Tensor
torch.concat(tensors : List[Tensor],
dim : str) -> Tensor
torch.concat(tensors : List[Tensor],
dim : str,
out : Tensor) -> Tensor
torch.concatenate(tensors : List[Tensor],
dim : int=0) -> Tensor
torch.concatenate(tensors : List[Tensor],
dim : int=0,
out : Tensor) -> Tensor
torch.concatenate(tensors : List[Tensor],
dim : str) -> Tensor
torch.concatenate(tensors : List[Tensor],
dim : str,
out : Tensor) -> Tensor
torch.conj(self : Tensor) -> Tensor
torch.conj_physical(self : Tensor) -> Tensor
torch.conj_physical(self : Tensor,
out : Tensor) -> Tensor
torch.conj_physical_(self : Tensor) -> Tensor
torch.constant_pad_nd(self : Tensor,
pad : List[int],
value : number=0) -> Tensor
torch.constant_pad_nd(self : Tensor,
pad : List[int],
value : number=0,
out : Tensor) -> Tensor
torch.conv1d(input : Tensor,
weight : Tensor,
bias : Optional[Tensor],
stride : List[int]=[1],
padding : List[int]=[0],
dilation : List[int]=[1],
groups : int=1) -> Tensor
torch.conv1d(input : Tensor,
weight : Tensor,
bias : Optional[Tensor],
stride : List[int]=[1],
padding : str=valid,
dilation : List[int]=[1],
groups : int=1) -> Tensor
torch.conv2d(input : Tensor,
weight : Tensor,
bias : Optional[Tensor],
stride : List[int]=[1, 1],
padding : List[int]=[0, 0],
dilation : List[int]=[1, 1],
groups : int=1) -> Tensor
torch.conv2d(input : Tensor,
weight : Tensor,
bias : Optional[Tensor],
stride : List[int]=[1, 1],
padding : str=valid,
dilation : List[int]=[1, 1],
groups : int=1) -> Tensor
torch.conv3d(input : Tensor,
weight : Tensor,
bias : Optional[Tensor],
stride : List[int]=[1, 1, 1],
padding : List[int]=[0, 0, 0],
dilation : List[int]=[1, 1, 1],
groups : int=1) -> Tensor
torch.conv3d(input : Tensor,
weight : Tensor,
bias : Optional[Tensor],
stride : List[int]=[1, 1, 1],
padding : str=valid,
dilation : List[int]=[1, 1, 1],
groups : int=1) -> Tensor
torch.conv_tbc(self : Tensor,
weight : Tensor,
bias : Tensor,
pad : int=0) -> Tensor
torch.conv_tbc(self : Tensor,
weight : Tensor,
bias : Tensor,
pad : int=0,
out : Tensor) -> Tensor
torch.conv_transpose1d(input : Tensor,
weight : Tensor,
bias : Optional[Tensor],
stride : List[int]=[1],
padding : List[int]=[0],
output_padding : List[int]=[0],
groups : int=1,
dilation : List[int]=[1]) -> Tensor
torch.conv_transpose2d(input : Tensor,
weight : Tensor,
bias : Optional[Tensor],
stride : List[int]=[1, 1],
padding : List[int]=[0, 0],
output_padding : List[int]=[0, 0],
groups : int=1,
dilation : List[int]=[1, 1]) -> Tensor
torch.conv_transpose3d(input : Tensor,
weight : Tensor,
bias : Optional[Tensor],
stride : List[int]=[1, 1, 1],
padding : List[int]=[0, 0, 0],
output_padding : List[int]=[0, 0, 0],
groups : int=1,
dilation : List[int]=[1, 1, 1]) -> Tensor
torch.convolution(input : Tensor,
weight : Tensor,
bias : Optional[Tensor],
stride : List[int],
padding : List[int],
dilation : List[int],
transposed : bool,
output_padding : List[int],
groups : int) -> Tensor
torch.convolution(input : Tensor,
weight : Tensor,
bias : Optional[Tensor],
stride : List[int],
padding : List[int],
dilation : List[int],
transposed : bool,
output_padding : List[int],
groups : int,
out : Tensor) -> Tensor
torch.copysign(self : Tensor,
other : Tensor) -> Tensor
torch.copysign(self : Tensor,
other : number) -> Tensor
torch.copysign(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.copysign(self : Tensor,
other : number,
out : Tensor) -> Tensor
torch.copysign(a : int,
b : int) -> float
torch.copysign(a : float,
b : float) -> float
torch.copysign(a : int,
b : float) -> float
torch.copysign(a : float,
b : int) -> float
torch.copysign(a : number,
b : number) -> float
torch.corrcoef(self : Tensor) -> Tensor
torch.cos(self : Tensor) -> Tensor
torch.cos(self : Tensor,
out : Tensor) -> Tensor
torch.cos(a : int) -> float
torch.cos(a : float) -> float
torch.cos(a : complex) -> complex
torch.cos(a : number) -> number
torch.cos_(self : Tensor) -> Tensor
torch.cosh(self : Tensor) -> Tensor
torch.cosh(self : Tensor,
out : Tensor) -> Tensor
torch.cosh(a : int) -> float
torch.cosh(a : float) -> float
torch.cosh(a : complex) -> complex
torch.cosh(a : number) -> number
torch.cosh_(self : Tensor) -> Tensor
torch.cosine_embedding_loss(input1 : Tensor,
input2 : Tensor,
target : Tensor,
margin : float=0.0,
reduction : int=1) -> Tensor
torch.cosine_similarity(x1 : Tensor,
x2 : Tensor,
dim : int=1,
eps : float=1e-08) -> Tensor
torch.count_nonzero(self : Tensor,
dim : List[int]) -> Tensor
torch.count_nonzero(self : Tensor,
dim : List[int],
out : Tensor) -> Tensor
torch.count_nonzero(self : Tensor,
dim : Optional[int]) -> Tensor
torch.count_nonzero(self : Tensor,
dim : Optional[int],
out : Tensor) -> Tensor
torch.cov(self : Tensor,
correction : int=1,
fweights : Optional[Tensor],
aweights : Optional[Tensor]) -> Tensor
torch.cross(self : Tensor,
other : Tensor,
dim : Optional[int]) -> Tensor
torch.cross(self : Tensor,
other : Tensor,
dim : Optional[int],
out : Tensor) -> Tensor
torch.crow_indices_copy(self : Tensor,
out : Tensor) -> Tensor
torch.crow_indices_copy(self : Tensor) -> Tensor
torch.ctc_loss(log_probs : Tensor,
targets : Tensor,
input_lengths : List[int],
target_lengths : List[int],
blank : int=0,
reduction : int=1,
zero_infinity : bool=False) -> Tensor
torch.ctc_loss(log_probs : Tensor,
targets : Tensor,
input_lengths : Tensor,
target_lengths : Tensor,
blank : int=0,
reduction : int=1,
zero_infinity : bool=False) -> Tensor
torch.cudnn_affine_grid_generator(theta : Tensor,
N : int,
C : int,
H : int,
W : int,
out : Tensor) -> Tensor
torch.cudnn_affine_grid_generator(theta : Tensor,
N : int,
C : int,
H : int,
W : int) -> Tensor
torch.cudnn_batch_norm(input : Tensor,
weight : Tensor,
bias : Optional[Tensor],
running_mean : Optional[Tensor],
running_var : Optional[Tensor],
training : bool,
exponential_average_factor : float,
epsilon : float) -> Tuple[Tensor, Tensor, Tensor, Tensor]
torch.cudnn_batch_norm(input : Tensor,
weight : Tensor,
bias : Optional[Tensor],
running_mean : Optional[Tensor],
running_var : Optional[Tensor],
training : bool,
exponential_average_factor : float,
epsilon : float,
out0 : Tensor,
out1 : Tensor,
out2 : Tensor,
out3 : Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor]
torch.cudnn_convolution(self : Tensor,
weight : Tensor,
padding : List[int],
stride : List[int],
dilation : List[int],
groups : int,
benchmark : bool,
deterministic : bool,
allow_tf32 : bool) -> Tensor
torch.cudnn_convolution(self : Tensor,
weight : Tensor,
padding : List[int],
stride : List[int],
dilation : List[int],
groups : int,
benchmark : bool,
deterministic : bool,
allow_tf32 : bool,
out : Tensor) -> Tensor
torch.cudnn_convolution_add_relu(self : Tensor,
weight : Tensor,
z : Tensor,
alpha : Optional[number],
bias : Optional[Tensor],
stride : List[int],
padding : List[int],
dilation : List[int],
groups : int,
out : Tensor) -> Tensor
torch.cudnn_convolution_add_relu(self : Tensor,
weight : Tensor,
z : Tensor,
alpha : Optional[number],
bias : Optional[Tensor],
stride : List[int],
padding : List[int],
dilation : List[int],
groups : int) -> Tensor
torch.cudnn_convolution_relu(self : Tensor,
weight : Tensor,
bias : Optional[Tensor],
stride : List[int],
padding : List[int],
dilation : List[int],
groups : int,
out : Tensor) -> Tensor
torch.cudnn_convolution_relu(self : Tensor,
weight : Tensor,
bias : Optional[Tensor],
stride : List[int],
padding : List[int],
dilation : List[int],
groups : int) -> Tensor
torch.cudnn_convolution_transpose(self : Tensor,
weight : Tensor,
padding : List[int],
output_padding : List[int],
stride : List[int],
dilation : List[int],
groups : int,
benchmark : bool,
deterministic : bool,
allow_tf32 : bool) -> Tensor
torch.cudnn_convolution_transpose(self : Tensor,
weight : Tensor,
padding : List[int],
output_padding : List[int],
stride : List[int],
dilation : List[int],
groups : int,
benchmark : bool,
deterministic : bool,
allow_tf32 : bool,
out : Tensor) -> Tensor
torch.cudnn_grid_sampler(self : Tensor,
grid : Tensor) -> Tensor
torch.cudnn_grid_sampler(self : Tensor,
grid : Tensor,
out : Tensor) -> Tensor
torch.cudnn_is_acceptable(self : Tensor) -> bool
torch.cummax(self : Tensor,
dim : int) -> Tuple[Tensor, Tensor]
torch.cummax(self : Tensor,
dim : str) -> Tuple[Tensor, Tensor]
torch.cummax(self : Tensor,
dim : str,
values : Tensor,
indices : Tensor) -> Tuple[Tensor, Tensor]
torch.cummax(self : Tensor,
dim : int,
values : Tensor,
indices : Tensor) -> Tuple[Tensor, Tensor]
torch.cummin(self : Tensor,
dim : int) -> Tuple[Tensor, Tensor]
torch.cummin(self : Tensor,
dim : str) -> Tuple[Tensor, Tensor]
torch.cummin(self : Tensor,
dim : str,
values : Tensor,
indices : Tensor) -> Tuple[Tensor, Tensor]
torch.cummin(self : Tensor,
dim : int,
values : Tensor,
indices : Tensor) -> Tuple[Tensor, Tensor]
torch.cumprod(self : Tensor,
dim : int,
dtype : Optional[int]) -> Tensor
torch.cumprod(self : Tensor,
dim : str,
dtype : Optional[int]) -> Tensor
torch.cumprod(self : Tensor,
dim : str,
dtype : Optional[int],
out : Tensor) -> Tensor
torch.cumprod(self : Tensor,
dim : int,
dtype : Optional[int],
out : Tensor) -> Tensor
torch.cumsum(self : Tensor,
dim : int,
dtype : Optional[int]) -> Tensor
torch.cumsum(self : Tensor,
dim : str,
dtype : Optional[int]) -> Tensor
torch.cumsum(self : Tensor,
dim : str,
dtype : Optional[int],
out : Tensor) -> Tensor
torch.cumsum(self : Tensor,
dim : int,
dtype : Optional[int],
out : Tensor) -> Tensor
torch.cumulative_trapezoid(y : Tensor,
x : Tensor,
dim : int=-1) -> Tensor
torch.cumulative_trapezoid(y : Tensor,
dx : number=1,
dim : int=-1) -> Tensor
torch.deg2rad(self : Tensor) -> Tensor
torch.deg2rad(self : Tensor,
out : Tensor) -> Tensor
torch.deg2rad_(self : Tensor) -> Tensor
torch.dequantize(self : Tensor) -> Tensor
torch.dequantize(self : Tensor,
out : Tensor) -> Tensor
torch.dequantize(tensors : List[Tensor],
out : List[Tensor]) -> Tuple[]
torch.dequantize(tensors : List[Tensor]) -> List[Tensor]
torch.dequantize(qtensor : Tensor) -> Tensor
torch.dequantize(qtensors : List[Tensor]) -> List[Tensor]
torch.dequantize(tensors : Any) -> Any
torch.det(self : Tensor) -> Tensor
torch.detach(self : Tensor) -> Tensor
torch.detach_(self : Tensor) -> Tensor
torch.detach_copy(self : Tensor,
out : Tensor) -> Tensor
torch.detach_copy(self : Tensor) -> Tensor
torch.device(a : str) -> Device
torch.device(type : str,
index : int) -> Device
torch.diag(self : Tensor,
diagonal : int=0) -> Tensor
torch.diag(self : Tensor,
diagonal : int=0,
out : Tensor) -> Tensor
torch.diag_embed(self : Tensor,
offset : int=0,
dim1 : int=-2,
dim2 : int=-1) -> Tensor
torch.diag_embed(self : Tensor,
offset : int=0,
dim1 : int=-2,
dim2 : int=-1,
out : Tensor) -> Tensor
torch.diagflat(self : Tensor,
offset : int=0) -> Tensor
torch.diagonal(self : Tensor,
offset : int=0,
dim1 : int=0,
dim2 : int=1) -> Tensor
torch.diagonal(self : Tensor,
outdim : str,
dim1 : str,
dim2 : str,
offset : int=0) -> Tensor
torch.diagonal_copy(self : Tensor,
offset : int=0,
dim1 : int=0,
dim2 : int=1) -> Tensor
torch.diagonal_copy(self : Tensor,
offset : int=0,
dim1 : int=0,
dim2 : int=1,
out : Tensor) -> Tensor
torch.diagonal_scatter(self : Tensor,
src : Tensor,
offset : int=0,
dim1 : int=0,
dim2 : int=1) -> Tensor
torch.diagonal_scatter(self : Tensor,
src : Tensor,
offset : int=0,
dim1 : int=0,
dim2 : int=1,
out : Tensor) -> Tensor
torch.diff(self : Tensor,
n : int=1,
dim : int=-1,
prepend : Optional[Tensor],
append : Optional[Tensor]) -> Tensor
torch.diff(self : Tensor,
n : int=1,
dim : int=-1,
prepend : Optional[Tensor],
append : Optional[Tensor],
out : Tensor) -> Tensor
torch.digamma(self : Tensor) -> Tensor
torch.digamma(self : Tensor,
out : Tensor) -> Tensor
torch.dist(self : Tensor,
other : Tensor,
p : number=2) -> Tensor
torch.dist(self : Tensor,
other : Tensor,
p : number=2,
out : Tensor) -> Tensor
torch.div(self : Tensor,
other : Tensor) -> Tensor
torch.div(self : Tensor,
other : number) -> Tensor
torch.div(self : Tensor,
other : Tensor,
rounding_mode : Optional[str]) -> Tensor
torch.div(self : Tensor,
other : number,
rounding_mode : Optional[str]) -> Tensor
torch.div(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.div(self : Tensor,
other : Tensor,
rounding_mode : Optional[str],
out : Tensor) -> Tensor
torch.div(self : Tensor,
other : number,
out : Tensor) -> Tensor
torch.div(self : Tensor,
other : number,
rounding_mode : Optional[str],
out : Tensor) -> Tensor
torch.div(a : int,
b : int) -> float
torch.div(a : complex,
b : complex) -> complex
torch.div(a : float,
b : float) -> float
torch.div(a : number,
b : number) -> float
torch.divide(self : Tensor,
other : Tensor) -> Tensor
torch.divide(self : Tensor,
other : number) -> Tensor
torch.divide(self : Tensor,
other : Tensor,
rounding_mode : Optional[str]) -> Tensor
torch.divide(self : Tensor,
other : number,
rounding_mode : Optional[str]) -> Tensor
torch.divide(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.divide(self : Tensor,
other : Tensor,
rounding_mode : Optional[str],
out : Tensor) -> Tensor
torch.dot(self : Tensor,
tensor : Tensor) -> Tensor
torch.dot(self : Tensor,
tensor : Tensor,
out : Tensor) -> Tensor
torch.dropout(input : Tensor,
p : float,
train : bool) -> Tensor
torch.dropout_(self : Tensor,
p : float,
train : bool) -> Tensor
torch.dsplit(self : Tensor,
sections : int) -> List[Tensor]
torch.dsplit(self : Tensor,
indices : List[int]) -> List[Tensor]
torch.dstack(tensors : List[Tensor]) -> Tensor
torch.dstack(tensors : List[Tensor],
out : Tensor) -> Tensor
torch.einsum(equation : str,
tensors : List[Tensor],
path : Optional[List[int]]) -> Tensor
torch.einsum(a : Tensor) -> Tensor
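In the scripted overload above the operands are passed as a single list; the same form is accepted in eager mode. An illustrative matrix-product sketch:

    import torch

    A = torch.randn(2, 3)
    B = torch.randn(3, 4)
    C = torch.einsum('ij,jk->ik', [A, B])    # matrix product, shape (2, 4)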
torch.embedding(weight : Tensor,
indices : Tensor,
padding_idx : int=-1,
scale_grad_by_freq : bool=False,
sparse : bool=False) -> Tensor
torch.embedding(weight : Tensor,
indices : Tensor,
padding_idx : int=-1,
scale_grad_by_freq : bool=False,
sparse : bool=False,
out : Tensor) -> Tensor
torch.embedding_bag(weight : Tensor,
indices : Tensor,
offsets : Tensor,
scale_grad_by_freq : bool=False,
mode : int=0,
sparse : bool=False,
per_sample_weights : Optional[Tensor],
include_last_offset : bool=False) -> Tuple[Tensor, Tensor, Tensor, Tensor]
torch.embedding_bag(weight : Tensor,
indices : Tensor,
offsets : Tensor,
scale_grad_by_freq : bool,
mode : int,
sparse : bool,
per_sample_weights : Optional[Tensor],
include_last_offset : bool,
padding_idx : Optional[int]) -> Tuple[Tensor, Tensor, Tensor, Tensor]
torch.embedding_renorm_(self : Tensor,
indices : Tensor,
max_norm : float,
norm_type : float) -> Tensor
torch.empty(size : List[int],
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool],
memory_format : Optional[int]) -> Tensor
torch.empty(size : List[int],
memory_format : Optional[int],
out : Tensor) -> Tensor
torch.empty(size : List[int],
names : Optional[List[str]],
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool],
memory_format : Optional[int]) -> Tensor
torch.empty(size : List[int],
names : Optional[List[str]],
memory_format : Optional[int],
out : Tensor) -> Tensor
torch.empty_like(self : Tensor,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool],
memory_format : Optional[int]) -> Tensor
torch.empty_like(self : Tensor,
memory_format : Optional[int],
out : Tensor) -> Tensor
torch.empty_permuted(size : List[int],
physical_layout : List[int],
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.empty_permuted(size : List[int],
physical_layout : List[int],
out : Tensor) -> Tensor
torch.empty_quantized(size : List[int],
qtensor : Tensor,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool],
memory_format : Optional[int]) -> Tensor
torch.empty_quantized(size : List[int],
qtensor : Tensor,
memory_format : Optional[int],
out : Tensor) -> Tensor
torch.empty_strided(size : List[int],
stride : List[int],
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.empty_strided(size : List[int],
stride : List[int],
out : Tensor) -> Tensor
torch.eq(self : Tensor,
other : Tensor) -> Tensor
torch.eq(self : Tensor,
other : number) -> Tensor
torch.eq(self : Tensor,
other : number,
out : Tensor) -> Tensor
torch.eq(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.eq(a : List[int],
b : List[int]) -> bool
torch.eq(a : Device,
b : Device) -> bool
torch.eq(a : bool,
b : bool) -> bool
torch.eq(a : AnyEnumType,
b : AnyEnumType) -> bool
torch.eq(a : int,
b : int) -> bool
torch.eq(a : complex,
b : complex) -> bool
torch.eq(a : float,
b : float) -> bool
torch.eq(a : int,
b : float) -> bool
torch.eq(a : float,
b : int) -> bool
torch.eq(a : float,
b : complex) -> bool
torch.eq(a : complex,
b : float) -> bool
torch.eq(a : number,
b : number) -> bool
torch.eq(a : str,
b : str) -> bool
torch.eq(a : List[float],
b : List[float]) -> bool
torch.eq(a : List[Tensor],
b : List[Tensor]) -> bool
torch.eq(a : List[bool],
b : List[bool]) -> bool
torch.eq(a : List[str],
b : List[str]) -> bool
torch.equal(self : Tensor,
other : Tensor) -> bool
torch.erf(self : Tensor) -> Tensor
torch.erf(self : Tensor,
out : Tensor) -> Tensor
torch.erf(a : int) -> float
torch.erf(a : float) -> float
torch.erf(a : number) -> number
torch.erf_(self : Tensor) -> Tensor
torch.erfc(self : Tensor) -> Tensor
torch.erfc(self : Tensor,
out : Tensor) -> Tensor
torch.erfc(a : int) -> float
torch.erfc(a : float) -> float
torch.erfc(a : number) -> number
torch.erfc_(self : Tensor) -> Tensor
torch.erfinv(self : Tensor) -> Tensor
torch.erfinv(self : Tensor,
out : Tensor) -> Tensor
torch.exp(self : Tensor) -> Tensor
torch.exp(self : Tensor,
out : Tensor) -> Tensor
torch.exp(a : int) -> float
torch.exp(a : float) -> float
torch.exp(a : complex) -> complex
torch.exp(a : number) -> number
torch.exp2(self : Tensor) -> Tensor
torch.exp2(self : Tensor,
out : Tensor) -> Tensor
torch.exp2_(self : Tensor) -> Tensor
torch.exp_(self : Tensor) -> Tensor
torch.expand_copy(self : Tensor,
size : List[int],
implicit : bool=False) -> Tensor
torch.expand_copy(self : Tensor,
size : List[int],
implicit : bool=False,
out : Tensor) -> Tensor
torch.expm1(self : Tensor) -> Tensor
torch.expm1(self : Tensor,
out : Tensor) -> Tensor
torch.expm1(a : int) -> float
torch.expm1(a : float) -> float
torch.expm1(a : number) -> number
torch.expm1_(self : Tensor) -> Tensor
torch.eye(n : int,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.eye(n : int,
m : int,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.eye(n : int,
out : Tensor) -> Tensor
torch.eye(n : int,
m : int,
out : Tensor) -> Tensor
torch.fake_quantize_per_channel_affine(self : Tensor,
scale : Tensor,
zero_point : Tensor,
axis : int,
quant_min : int,
quant_max : int) -> Tensor
torch.fake_quantize_per_tensor_affine(self : Tensor,
scale : float,
zero_point : int,
quant_min : int,
quant_max : int) -> Tensor
torch.fake_quantize_per_tensor_affine(self : Tensor,
scale : Tensor,
zero_point : Tensor,
quant_min : int,
quant_max : int) -> Tensor
torch.fbgemm_linear_fp16_weight(input : Tensor,
packed_weight : Tensor,
bias : Tensor) -> Tensor
torch.fbgemm_linear_fp16_weight_fp32_activation(input : Tensor,
packed_weight : Tensor,
bias : Tensor) -> Tensor
torch.fbgemm_linear_int8_weight(input : Tensor,
weight : Tensor,
packed : Tensor,
col_offsets : Tensor,
weight_scale : number,
weight_zero_point : number,
bias : Tensor) -> Tensor
torch.fbgemm_linear_int8_weight_fp32_activation(input : Tensor,
weight : Tensor,
packed : Tensor,
col_offsets : Tensor,
weight_scale : number,
weight_zero_point : number,
bias : Tensor) -> Tensor
torch.fbgemm_linear_quantize_weight(input : Tensor) -> Tuple[Tensor, Tensor, float, int]
torch.fbgemm_pack_gemm_matrix_fp16(input : Tensor) -> Tensor
torch.fbgemm_pack_quantized_matrix(input : Tensor) -> Tensor
torch.fbgemm_pack_quantized_matrix(input : Tensor,
K : int,
N : int) -> Tensor
torch.feature_alpha_dropout(input : Tensor,
p : float,
train : bool) -> Tensor
torch.feature_alpha_dropout_(self : Tensor,
p : float,
train : bool) -> Tensor
torch.feature_dropout(input : Tensor,
p : float,
train : bool) -> Tensor
torch.feature_dropout_(self : Tensor,
p : float,
train : bool) -> Tensor
torch.fill(self : Tensor,
value : number) -> Tensor
torch.fill(self : Tensor,
value : number,
out : Tensor) -> Tensor
torch.fill(self : Tensor,
value : Tensor) -> Tensor
torch.fill(self : Tensor,
value : Tensor,
out : Tensor) -> Tensor
torch.fill_(self : Tensor,
value : number) -> Tensor
torch.fill_(self : Tensor,
value : Tensor) -> Tensor
torch.fix(self : Tensor) -> Tensor
torch.fix(self : Tensor,
out : Tensor) -> Tensor
torch.fix_(self : Tensor) -> Tensor
torch.flatten(self : Tensor,
start_dim : int=0,
end_dim : int=-1) -> Tensor
torch.flatten(self : Tensor,
dims : List[str],
out_dim : str) -> Tensor
torch.flatten(self : Tensor,
start_dim : int,
end_dim : int,
out_dim : str) -> Tensor
torch.flatten(self : Tensor,
start_dim : str,
end_dim : str,
out_dim : str) -> Tensor
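A quick sketch of the start_dim/end_dim overload with illustrative shapes:

    import torch

    x = torch.randn(2, 3, 4)
    torch.flatten(x).shape      # torch.Size([24])     flatten everything
    torch.flatten(x, 1).shape   # torch.Size([2, 12])  keep the leading (batch) dim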
torch.flip(self : Tensor,
dims : List[int]) -> Tensor
torch.flip(self : Tensor,
dims : List[int],
out : Tensor) -> Tensor
torch.fliplr(self : Tensor) -> Tensor
torch.flipud(self : Tensor) -> Tensor
torch.float_power(self : Tensor,
exponent : Tensor) -> Tensor
torch.float_power(self : Tensor,
exponent : number) -> Tensor
torch.float_power(self : number,
exponent : Tensor) -> Tensor
torch.float_power(self : Tensor,
exponent : Tensor,
out : Tensor) -> Tensor
torch.float_power(self : number,
exponent : Tensor,
out : Tensor) -> Tensor
torch.float_power(self : Tensor,
exponent : number,
out : Tensor) -> Tensor
torch.floor(self : Tensor) -> Tensor
torch.floor(self : Tensor,
out : Tensor) -> Tensor
torch.floor(a : int) -> int
torch.floor(a : float) -> int
torch.floor(a : number) -> number
torch.floor_(self : Tensor) -> Tensor
torch.floor_divide(self : Tensor,
other : Tensor) -> Tensor
torch.floor_divide(self : Tensor,
other : number) -> Tensor
torch.floor_divide(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.floor_divide(self : Tensor,
other : number,
out : Tensor) -> Tensor
torch.fmax(self : Tensor,
other : Tensor) -> Tensor
torch.fmax(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.fmin(self : Tensor,
other : Tensor) -> Tensor
torch.fmin(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.fmod(self : Tensor,
other : Tensor) -> Tensor
torch.fmod(self : Tensor,
other : number) -> Tensor
torch.fmod(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.fmod(self : Tensor,
other : number,
out : Tensor) -> Tensor
torch.fmod(a : int,
b : int) -> float
torch.fmod(a : float,
b : float) -> float
torch.fmod(a : int,
b : float) -> float
torch.fmod(a : float,
b : int) -> float
torch.fmod(a : number,
b : number) -> float
torch.frac(self : Tensor) -> Tensor
torch.frac(self : Tensor,
out : Tensor) -> Tensor
torch.frac_(self : Tensor) -> Tensor
torch.frexp(self : Tensor) -> Tuple[Tensor, Tensor]
torch.frexp(self : Tensor,
mantissa : Tensor,
exponent : Tensor) -> Tuple[Tensor, Tensor]
torch.frexp(a : float) -> Tuple[float, int]
torch.frobenius_norm(self : Tensor,
dim : List[int],
keepdim : bool=False) -> Tensor
torch.frobenius_norm(self : Tensor,
dim : List[int],
keepdim : bool=False,
out : Tensor) -> Tensor
torch.from_file(filename : str,
shared : Optional[bool],
size : Optional[int]=0,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.from_file(filename : str,
shared : Optional[bool],
size : Optional[int]=0,
out : Tensor) -> Tensor
torch.full(size : List[int],
fill_value : number,
names : Optional[List[str]],
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.full(size : List[int],
fill_value : number,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.full(size : List[int],
fill_value : number,
names : Optional[List[str]],
out : Tensor) -> Tensor
torch.full(size : List[int],
fill_value : number,
out : Tensor) -> Tensor
torch.full_like(self : Tensor,
fill_value : number,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool],
memory_format : Optional[int]) -> Tensor
torch.full_like(self : Tensor,
fill_value : number,
memory_format : Optional[int],
out : Tensor) -> Tensor
torch.fused_moving_avg_obs_fake_quant(self : Tensor,
observer_on : Tensor,
fake_quant_on : Tensor,
running_min : Tensor,
running_max : Tensor,
scale : Tensor,
zero_point : Tensor,
averaging_const : float,
quant_min : int,
quant_max : int,
ch_axis : int,
per_row_fake_quant : bool=False,
symmetric_quant : bool=False) -> Tensor
torch.gather(self : Tensor,
dim : int,
index : Tensor,
sparse_grad : bool=False) -> Tensor
torch.gather(self : Tensor,
dim : int,
index : Tensor,
sparse_grad : bool=False,
out : Tensor) -> Tensor
torch.gather(self : Tensor,
dim : str,
index : Tensor,
sparse_grad : bool=False) -> Tensor
torch.gather(self : Tensor,
dim : str,
index : Tensor,
sparse_grad : bool=False,
out : Tensor) -> Tensor
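gather picks elements along dim using an index tensor of the same rank; for a 2-D input with dim=1, out[i][j] = self[i][index[i][j]]. An illustrative sketch:

    import torch

    src = torch.tensor([[1, 2, 3],
                        [4, 5, 6]])
    idx = torch.tensor([[2, 0],
                        [1, 1]])
    torch.gather(src, 1, idx)   # tensor([[3, 1],
                                #         [5, 5]])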
torch.gcd(self : Tensor,
other : Tensor) -> Tensor
torch.gcd(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.gcd(a : int,
b : int) -> int
torch.gcd_(self : Tensor,
other : Tensor) -> Tensor
torch.ge(self : Tensor,
other : Tensor) -> Tensor
torch.ge(self : Tensor,
other : number) -> Tensor
torch.ge(self : Tensor,
other : number,
out : Tensor) -> Tensor
torch.ge(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.ge(a : int,
b : int) -> bool
torch.ge(a : float,
b : float) -> bool
torch.ge(a : int,
b : float) -> bool
torch.ge(a : float,
b : int) -> bool
torch.ge(a : number,
b : number) -> bool
torch.ge(a : str,
b : str) -> bool
torch.geqrf(self : Tensor) -> Tuple[Tensor, Tensor]
torch.geqrf(self : Tensor,
a : Tensor,
tau : Tensor) -> Tuple[Tensor, Tensor]
torch.ger(self : Tensor,
vec2 : Tensor) -> Tensor
torch.ger(self : Tensor,
vec2 : Tensor,
out : Tensor) -> Tensor
torch.get_autocast_dtype(device_type : str) -> int
torch.get_device(self : Tensor) -> int
torch.gradient(self : Tensor,
spacing : Optional[number],
dim : Optional[int],
edge_order : int=1) -> List[Tensor]
torch.gradient(self : Tensor,
spacing : number,
dim : List[int],
edge_order : int=1) -> List[Tensor]
torch.gradient(self : Tensor,
dim : List[int],
edge_order : int=1) -> List[Tensor]
torch.gradient(self : Tensor,
spacing : List[number],
dim : Optional[int],
edge_order : int=1) -> List[Tensor]
torch.gradient(self : Tensor,
spacing : List[number],
dim : List[int],
edge_order : int=1) -> List[Tensor]
torch.gradient(self : Tensor,
spacing : List[Tensor],
dim : Optional[int],
edge_order : int=1) -> List[Tensor]
torch.gradient(self : Tensor,
spacing : List[Tensor],
dim : List[int],
edge_order : int=1) -> List[Tensor]
torch.greater(self : Tensor,
other : Tensor) -> Tensor
torch.greater(self : Tensor,
other : number) -> Tensor
torch.greater(self : Tensor,
other : number,
out : Tensor) -> Tensor
torch.greater(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.greater_equal(self : Tensor,
other : Tensor) -> Tensor
torch.greater_equal(self : Tensor,
other : number) -> Tensor
torch.greater_equal(self : Tensor,
other : number,
out : Tensor) -> Tensor
torch.greater_equal(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.grid_sampler(input : Tensor,
grid : Tensor,
interpolation_mode : int,
padding_mode : int,
align_corners : bool) -> Tensor
torch.grid_sampler_2d(input : Tensor,
grid : Tensor,
interpolation_mode : int,
padding_mode : int,
align_corners : bool) -> Tensor
torch.grid_sampler_2d(input : Tensor,
grid : Tensor,
interpolation_mode : int,
padding_mode : int,
align_corners : bool,
out : Tensor) -> Tensor
torch.grid_sampler_3d(input : Tensor,
grid : Tensor,
interpolation_mode : int,
padding_mode : int,
align_corners : bool) -> Tensor
torch.grid_sampler_3d(input : Tensor,
grid : Tensor,
interpolation_mode : int,
padding_mode : int,
align_corners : bool,
out : Tensor) -> Tensor
torch.group_norm(input : Tensor,
num_groups : int,
weight : Optional[Tensor],
bias : Optional[Tensor],
eps : float=1e-05,
cudnn_enabled : bool=True) -> Tensor
torch.gru(input : Tensor,
hx : Tensor,
params : List[Tensor],
has_biases : bool,
num_layers : int,
dropout : float,
train : bool,
bidirectional : bool,
batch_first : bool) -> Tuple[Tensor, Tensor]
torch.gru(data : Tensor,
batch_sizes : Tensor,
hx : Tensor,
params : List[Tensor],
has_biases : bool,
num_layers : int,
dropout : float,
train : bool,
bidirectional : bool) -> Tuple[Tensor, Tensor]
torch.gru_cell(input : Tensor,
hx : Tensor,
w_ih : Tensor,
w_hh : Tensor,
b_ih : Optional[Tensor],
b_hh : Optional[Tensor]) -> Tensor
torch.gt(self : Tensor,
other : Tensor) -> Tensor
torch.gt(self : Tensor,
other : number) -> Tensor
torch.gt(self : Tensor,
other : number,
out : Tensor) -> Tensor
torch.gt(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.gt(a : int,
b : int) -> bool
torch.gt(a : float,
b : float) -> bool
torch.gt(a : int,
b : float) -> bool
torch.gt(a : float,
b : int) -> bool
torch.gt(a : number,
b : number) -> bool
torch.gt(a : str,
b : str) -> bool
torch.hamming_window(window_length : int,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.hamming_window(window_length : int,
periodic : bool,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.hamming_window(window_length : int,
periodic : bool,
alpha : float,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.hamming_window(window_length : int,
periodic : bool,
alpha : float,
beta : float,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.hamming_window(window_length : int,
out : Tensor) -> Tensor
torch.hamming_window(window_length : int,
periodic : bool,
out : Tensor) -> Tensor
torch.hamming_window(window_length : int,
periodic : bool,
alpha : float,
out : Tensor) -> Tensor
torch.hamming_window(window_length : int,
periodic : bool,
alpha : float,
beta : float,
out : Tensor) -> Tensor
torch.hann_window(window_length : int,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.hann_window(window_length : int,
periodic : bool,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.hann_window(window_length : int,
out : Tensor) -> Tensor
torch.hann_window(window_length : int,
periodic : bool,
out : Tensor) -> Tensor
torch.hardshrink(self : Tensor,
lambd : number=0.5) -> Tensor
torch.hardshrink(self : Tensor,
lambd : number=0.5,
out : Tensor) -> Tensor
torch.heaviside(self : Tensor,
values : Tensor) -> Tensor
torch.heaviside(self : Tensor,
values : Tensor,
out : Tensor) -> Tensor
torch.hinge_embedding_loss(self : Tensor,
target : Tensor,
margin : float=1.0,
reduction : int=1) -> Tensor
torch.histc(self : Tensor,
bins : int=100,
min : number=0,
max : number=0) -> Tensor
torch.histc(self : Tensor,
bins : int=100,
min : number=0,
max : number=0,
out : Tensor) -> Tensor
torch.histogram(self : Tensor,
bins : Tensor,
weight : Optional[Tensor],
density : bool=False) -> Tuple[Tensor, Tensor]
torch.histogram(self : Tensor,
bins : Tensor,
weight : Optional[Tensor],
density : bool=False,
hist : Tensor,
bin_edges : Tensor) -> Tuple[Tensor, Tensor]
torch.histogram(self : Tensor,
bins : int=100,
range : Optional[List[float]],
weight : Optional[Tensor],
density : bool=False) -> Tuple[Tensor, Tensor]
torch.histogram(self : Tensor,
bins : int=100,
range : Optional[List[float]],
weight : Optional[Tensor],
density : bool=False,
hist : Tensor,
bin_edges : Tensor) -> Tuple[Tensor, Tensor]
torch.histogramdd(self : Tensor,
bins : List[int],
range : Optional[List[float]],
weight : Optional[Tensor],
density : bool=False) -> Tuple[Tensor, List[Tensor]]
torch.histogramdd(self : Tensor,
bins : int,
range : Optional[List[float]],
weight : Optional[Tensor],
density : bool=False) -> Tuple[Tensor, List[Tensor]]
torch.histogramdd(self : Tensor,
bins : List[Tensor],
range : Optional[List[float]],
weight : Optional[Tensor],
density : bool=False) -> Tuple[Tensor, List[Tensor]]
torch.hsplit(self : Tensor,
sections : int) -> List[Tensor]
torch.hsplit(self : Tensor,
indices : List[int]) -> List[Tensor]
torch.hspmm(mat1 : Tensor,
mat2 : Tensor,
out : Tensor) -> Tensor
torch.hspmm(mat1 : Tensor,
mat2 : Tensor) -> Tensor
torch.hstack(tensors : List[Tensor]) -> Tensor
torch.hstack(tensors : List[Tensor],
out : Tensor) -> Tensor
torch.hypot(self : Tensor,
other : Tensor) -> Tensor
torch.hypot(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.i0(self : Tensor) -> Tensor
torch.i0(self : Tensor,
out : Tensor) -> Tensor
torch.i0_(self : Tensor) -> Tensor
torch.igamma(self : Tensor,
other : Tensor) -> Tensor
torch.igamma(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.igammac(self : Tensor,
other : Tensor) -> Tensor
torch.igammac(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.imag(self : Tensor) -> Tensor
torch.index_add(self : Tensor,
dim : int,
index : Tensor,
source : Tensor,
alpha : number=1) -> Tensor
torch.index_add(self : Tensor,
dim : int,
index : Tensor,
source : Tensor,
alpha : number=1,
out : Tensor) -> Tensor
torch.index_add(self : Tensor,
dim : str,
index : Tensor,
source : Tensor,
alpha : number=1) -> Tensor
torch.index_copy(self : Tensor,
dim : int,
index : Tensor,
source : Tensor) -> Tensor
torch.index_copy(self : Tensor,
dim : str,
index : Tensor,
source : Tensor) -> Tensor
torch.index_copy(self : Tensor,
dim : int,
index : Tensor,
source : Tensor,
out : Tensor) -> Tensor
torch.index_fill(self : Tensor,
dim : int,
index : Tensor,
value : Tensor) -> Tensor
torch.index_fill(self : Tensor,
dim : int,
index : Tensor,
value : number) -> Tensor
torch.index_fill(self : Tensor,
dim : str,
index : Tensor,
value : number) -> Tensor
torch.index_fill(self : Tensor,
dim : str,
index : Tensor,
value : Tensor) -> Tensor
torch.index_fill(self : Tensor,
dim : int,
index : Tensor,
value : number,
out : Tensor) -> Tensor
torch.index_fill(self : Tensor,
dim : int,
index : Tensor,
value : Tensor,
out : Tensor) -> Tensor
torch.index_put(self : Tensor,
indices : List[Optional[Tensor]],
values : Tensor,
accumulate : bool=False) -> Tensor
torch.index_put(self : Tensor,
indices : List[Optional[Tensor]],
values : Tensor,
accumulate : bool=False,
out : Tensor) -> Tensor
torch.index_put(self : Tensor,
indices : List[Tensor],
values : Tensor,
accumulate : bool=False) -> Tensor
torch.index_put_(self : Tensor,
indices : List[Optional[Tensor]],
values : Tensor,
accumulate : bool=False) -> Tensor
torch.index_put_(self : Tensor,
indices : List[Tensor],
values : Tensor,
accumulate : bool=False) -> Tensor
torch.index_reduce(self : Tensor,
dim : int,
index : Tensor,
source : Tensor,
reduce : str,
include_self : bool=True) -> Tensor
torch.index_reduce(self : Tensor,
dim : int,
index : Tensor,
source : Tensor,
reduce : str,
include_self : bool=True,
out : Tensor) -> Tensor
torch.index_select(self : Tensor,
dim : int,
index : Tensor) -> Tensor
torch.index_select(self : Tensor,
dim : int,
index : Tensor,
out : Tensor) -> Tensor
torch.index_select(self : Tensor,
dim : str,
index : Tensor) -> Tensor
torch.index_select(self : Tensor,
dim : str,
index : Tensor,
out : Tensor) -> Tensor
torch.indices_copy(self : Tensor,
out : Tensor) -> Tensor
torch.indices_copy(self : Tensor) -> Tensor
torch.initial_seed(self : Generator) -> int
torch.inner(self : Tensor,
other : Tensor) -> Tensor
torch.inner(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.instance_norm(input : Tensor,
weight : Optional[Tensor],
bias : Optional[Tensor],
running_mean : Optional[Tensor],
running_var : Optional[Tensor],
use_input_stats : bool,
momentum : float,
eps : float,
cudnn_enabled : bool) -> Tensor
torch.int_repr(self : Tensor,
out : Tensor) -> Tensor
torch.int_repr(self : Tensor) -> Tensor
torch.inverse(self : Tensor) -> Tensor
torch.inverse(self : Tensor,
out : Tensor) -> Tensor
torch.is_autocast_cpu_enabled() -> bool
torch.is_autocast_enabled() -> bool
torch.is_complex(self : Tensor) -> bool
torch.is_conj(self : Tensor) -> bool
torch.is_distributed(self : Tensor) -> bool
torch.is_floating_point(self : Tensor) -> bool
torch.is_grad_enabled() -> bool
torch.is_inference(self : Tensor) -> bool
torch.is_neg(self : Tensor) -> bool
torch.is_nonzero(self : Tensor) -> bool
torch.is_same_size(self : Tensor,
other : Tensor) -> bool
torch.is_signed(self : Tensor) -> bool
torch.is_vulkan_available() -> bool
torch.isclose(self : Tensor,
other : Tensor,
rtol : float=1e-05,
atol : float=1e-08,
equal_nan : bool=False) -> Tensor
torch.isfinite(self : Tensor) -> Tensor
torch.isfinite(a : float) -> bool
torch.isfinite(a : complex) -> bool
torch.isin(elements : Tensor,
test_elements : Tensor,
assume_unique : bool=False,
invert : bool=False) -> Tensor
torch.isin(elements : Tensor,
test_elements : Tensor,
assume_unique : bool=False,
invert : bool=False,
out : Tensor) -> Tensor
torch.isin(elements : Tensor,
test_element : number,
assume_unique : bool=False,
invert : bool=False) -> Tensor
torch.isin(elements : Tensor,
test_element : number,
assume_unique : bool=False,
invert : bool=False,
out : Tensor) -> Tensor
torch.isin(element : number,
test_elements : Tensor,
assume_unique : bool=False,
invert : bool=False) -> Tensor
torch.isin(element : number,
test_elements : Tensor,
assume_unique : bool=False,
invert : bool=False,
out : Tensor) -> Tensor
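isin tests membership element-wise; invert flips the result. An illustrative sketch:

    import torch

    elements = torch.tensor([1, 2, 3, 4])
    test     = torch.tensor([2, 4])
    torch.isin(elements, test)                # tensor([False,  True, False,  True])
    torch.isin(elements, test, invert=True)   # tensor([ True, False,  True, False])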
torch.isinf(self : Tensor) -> Tensor
torch.isinf(self : Tensor,
out : Tensor) -> Tensor
torch.isinf(a : float) -> bool
torch.isinf(a : complex) -> bool
torch.isnan(self : Tensor) -> Tensor
torch.isnan(self : Tensor,
out : Tensor) -> Tensor
torch.isnan(a : float) -> bool
torch.isnan(a : complex) -> bool
torch.isneginf(self : Tensor) -> Tensor
torch.isneginf(self : Tensor,
out : Tensor) -> Tensor
torch.isposinf(self : Tensor) -> Tensor
torch.isposinf(self : Tensor,
out : Tensor) -> Tensor
torch.isreal(self : Tensor) -> Tensor
torch.istft(self : Tensor,
n_fft : int,
hop_length : Optional[int],
win_length : Optional[int],
window : Optional[Tensor],
center : bool=True,
normalized : bool=False,
onesided : Optional[bool],
length : Optional[int],
return_complex : bool=False) -> Tensor
torch.kaiser_window(window_length : int,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.kaiser_window(window_length : int,
periodic : bool,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.kaiser_window(window_length : int,
periodic : bool,
beta : float,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.kaiser_window(window_length : int,
out : Tensor) -> Tensor
torch.kaiser_window(window_length : int,
periodic : bool,
out : Tensor) -> Tensor
torch.kaiser_window(window_length : int,
periodic : bool,
beta : float,
out : Tensor) -> Tensor
torch.kl_div(self : Tensor,
target : Tensor,
reduction : int=1,
log_target : bool=False) -> Tensor
torch.kron(self : Tensor,
other : Tensor) -> Tensor
torch.kron(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.kthvalue(self : Tensor,
k : int,
dim : int=-1,
keepdim : bool=False) -> Tuple[Tensor, Tensor]
torch.kthvalue(self : Tensor,
k : int,
dim : str,
keepdim : bool=False) -> Tuple[Tensor, Tensor]
torch.kthvalue(self : Tensor,
k : int,
dim : str,
keepdim : bool=False,
values : Tensor,
indices : Tensor) -> Tuple[Tensor, Tensor]
torch.kthvalue(self : Tensor,
k : int,
dim : int=-1,
keepdim : bool=False,
values : Tensor,
indices : Tensor) -> Tuple[Tensor, Tensor]
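kthvalue returns the k-th smallest value along dim together with its index. An illustrative sketch:

    import torch

    x = torch.tensor([3., 1., 2.])
    values, indices = torch.kthvalue(x, 2)   # values=tensor(2.), indices=tensor(2): the 2nd smallest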
torch.layer_norm(input : Tensor,
normalized_shape : List[int],
weight : Optional[Tensor],
bias : Optional[Tensor],
eps : float=1e-05,
cudnn_enable : bool=True) -> Tensor
torch.lcm(self : Tensor,
other : Tensor) -> Tensor
torch.lcm(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.lcm_(self : Tensor,
other : Tensor) -> Tensor
torch.ldexp(self : Tensor,
other : Tensor) -> Tensor
torch.ldexp(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.ldexp(x : float,
i : int) -> float
torch.ldexp_(self : Tensor,
other : Tensor) -> Tensor
torch.le(self : Tensor,
other : Tensor) -> Tensor
torch.le(self : Tensor,
other : number) -> Tensor
torch.le(self : Tensor,
other : number,
out : Tensor) -> Tensor
torch.le(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.le(a : int,
b : int) -> bool
torch.le(a : float,
b : float) -> bool
torch.le(a : int,
b : float) -> bool
torch.le(a : float,
b : int) -> bool
torch.le(a : number,
b : number) -> bool
torch.le(a : str,
b : str) -> bool
torch.lerp(self : Tensor,
end : Tensor,
weight : number) -> Tensor
torch.lerp(self : Tensor,
end : Tensor,
weight : Tensor) -> Tensor
torch.lerp(self : Tensor,
end : Tensor,
weight : number,
out : Tensor) -> Tensor
torch.lerp(self : Tensor,
end : Tensor,
weight : Tensor,
out : Tensor) -> Tensor
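lerp computes self + weight * (end - self), with weight either a scalar or a tensor. An illustrative sketch:

    import torch

    start = torch.tensor([ 0., 10.])
    end   = torch.tensor([10., 20.])
    torch.lerp(start, end, 0.5)   # tensor([ 5., 15.])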
torch.less(self : Tensor,
other : Tensor) -> Tensor
torch.less(self : Tensor,
other : number) -> Tensor
torch.less(self : Tensor,
other : number,
out : Tensor) -> Tensor
torch.less(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.less_equal(self : Tensor,
other : Tensor) -> Tensor
torch.less_equal(self : Tensor,
other : number) -> Tensor
torch.less_equal(self : Tensor,
other : number,
out : Tensor) -> Tensor
torch.less_equal(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.lgamma(self : Tensor) -> Tensor
torch.lgamma(self : Tensor,
out : Tensor) -> Tensor
torch.lgamma(a : int) -> float
torch.lgamma(a : float) -> float
torch.lgamma(a : number) -> number
torch.linspace(start : Tensor,
end : Tensor,
steps : int,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.linspace(start : Tensor,
end : number,
steps : int,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.linspace(start : number,
end : Tensor,
steps : int,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.linspace(start : number,
end : number,
steps : int,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.linspace(start : number,
end : number,
steps : int,
out : Tensor) -> Tensor
torch.linspace(start : Tensor,
end : Tensor,
steps : int,
out : Tensor) -> Tensor
torch.linspace(start : Tensor,
end : number,
steps : int,
out : Tensor) -> Tensor
torch.linspace(start : number,
end : Tensor,
steps : int,
out : Tensor) -> Tensor
torch.log(self : Tensor) -> Tensor
torch.log(self : Tensor,
out : Tensor) -> Tensor
torch.log(a : int) -> float
torch.log(a : float) -> float
torch.log(a : complex) -> complex
torch.log(a : number) -> number
torch.log(a : int,
b : int) -> float
torch.log(a : float,
b : float) -> float
torch.log(a : complex,
b : complex) -> complex
torch.log(a : int,
b : float) -> float
torch.log(a : float,
b : int) -> float
torch.log(a : int,
b : complex) -> complex
torch.log(a : complex,
b : int) -> complex
torch.log(a : float,
b : complex) -> complex
torch.log(a : complex,
b : float) -> complex
torch.log(a : number,
b : number) -> float
torch.log10(self : Tensor) -> Tensor
torch.log10(self : Tensor,
out : Tensor) -> Tensor
torch.log10(a : int) -> float
torch.log10(a : float) -> float
torch.log10(a : complex) -> complex
torch.log10(a : number) -> number
torch.log10_(self : Tensor) -> Tensor
torch.log1p(self : Tensor) -> Tensor
torch.log1p(self : Tensor,
out : Tensor) -> Tensor
torch.log1p(a : int) -> float
torch.log1p(a : float) -> float
torch.log1p(a : number) -> number
torch.log1p_(self : Tensor) -> Tensor
torch.log2(self : Tensor) -> Tensor
torch.log2(self : Tensor,
out : Tensor) -> Tensor
torch.log2_(self : Tensor) -> Tensor
torch.log_(self : Tensor) -> Tensor
torch.log_softmax(self : Tensor,
dim : int,
dtype : Optional[int]) -> Tensor
torch.log_softmax(self : Tensor,
dim : str,
dtype : Optional[int]) -> Tensor
torch.log_softmax(self : Tensor,
dim : int,
dtype : Optional[int],
out : Tensor) -> Tensor
torch.logaddexp(self : Tensor,
other : Tensor) -> Tensor
torch.logaddexp(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.logaddexp2(self : Tensor,
other : Tensor) -> Tensor
torch.logaddexp2(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.logcumsumexp(self : Tensor,
dim : int) -> Tensor
torch.logcumsumexp(self : Tensor,
dim : str) -> Tensor
torch.logcumsumexp(self : Tensor,
dim : str,
out : Tensor) -> Tensor
torch.logcumsumexp(self : Tensor,
dim : int,
out : Tensor) -> Tensor
torch.logdet(self : Tensor) -> Tensor
torch.logical_and(self : Tensor,
other : Tensor) -> Tensor
torch.logical_and(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.logical_not(self : Tensor) -> Tensor
torch.logical_not(self : Tensor,
out : Tensor) -> Tensor
torch.logical_or(self : Tensor,
other : Tensor) -> Tensor
torch.logical_or(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.logical_xor(self : Tensor,
other : Tensor) -> Tensor
torch.logical_xor(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.logit(self : Tensor,
eps : Optional[float]) -> Tensor
torch.logit(self : Tensor,
eps : Optional[float],
out : Tensor) -> Tensor
torch.logit_(self : Tensor,
eps : Optional[float]) -> Tensor
torch.logspace(start : Tensor,
end : Tensor,
steps : int,
base : float=10.0,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.logspace(start : Tensor,
end : number,
steps : int,
base : float=10.0,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.logspace(start : number,
end : Tensor,
steps : int,
base : float=10.0,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.logspace(start : number,
end : number,
steps : int,
base : float=10.0,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.logspace(start : number,
end : number,
steps : int,
base : float=10.0,
out : Tensor) -> Tensor
torch.logspace(start : Tensor,
end : Tensor,
steps : int,
base : float=10.0,
out : Tensor) -> Tensor
torch.logspace(start : Tensor,
end : number,
steps : int,
base : float=10.0,
out : Tensor) -> Tensor
torch.logspace(start : number,
end : Tensor,
steps : int,
base : float=10.0,
out : Tensor) -> Tensor
torch.logsumexp(self : Tensor,
dim : List[int],
keepdim : bool=False) -> Tensor
torch.logsumexp(self : Tensor,
dim : List[str],
keepdim : bool=False) -> Tensor
torch.logsumexp(self : Tensor,
dim : List[str],
keepdim : bool=False,
out : Tensor) -> Tensor
torch.logsumexp(self : Tensor,
dim : List[int],
keepdim : bool=False,
out : Tensor) -> Tensor
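logsumexp evaluates log(sum(exp(self), dim)) in a numerically stable way; in the overloads above dim is a list of dimensions. An illustrative sketch:

    import torch

    x = torch.tensor([[0., 0.],
                      [1., 1.]])
    torch.logsumexp(x, dim=[1])   # tensor([0.6931, 1.6931]) == log(2), 1 + log(2)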
torch.lstm(input : Tensor,
hx : List[Tensor],
params : List[Tensor],
has_biases : bool,
num_layers : int,
dropout : float,
train : bool,
bidirectional : bool,
batch_first : bool) -> Tuple[Tensor, Tensor, Tensor]
torch.lstm(data : Tensor,
batch_sizes : Tensor,
hx : List[Tensor],
params : List[Tensor],
has_biases : bool,
num_layers : int,
dropout : float,
train : bool,
bidirectional : bool) -> Tuple[Tensor, Tensor, Tensor]
torch.lstm_cell(input : Tensor,
hx : List[Tensor],
w_ih : Tensor,
w_hh : Tensor,
b_ih : Optional[Tensor],
b_hh : Optional[Tensor]) -> Tuple[Tensor, Tensor]
torch.lt(self : Tensor,
other : Tensor) -> Tensor
torch.lt(self : Tensor,
other : number) -> Tensor
torch.lt(self : Tensor,
other : number,
out : Tensor) -> Tensor
torch.lt(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.lt(a : int,
b : int) -> bool
torch.lt(a : float,
b : float) -> bool
torch.lt(a : int,
b : float) -> bool
torch.lt(a : float,
b : int) -> bool
torch.lt(a : number,
b : number) -> bool
torch.lt(a : str,
b : str) -> bool
torch.lu_solve(self : Tensor,
LU_data : Tensor,
LU_pivots : Tensor) -> Tensor
torch.lu_solve(self : Tensor,
LU_data : Tensor,
LU_pivots : Tensor,
out : Tensor) -> Tensor
torch.lu_unpack(LU_data : Tensor,
LU_pivots : Tensor,
unpack_data : bool=True,
unpack_pivots : bool=True) -> Tuple[Tensor, Tensor, Tensor]
torch.lu_unpack(LU_data : Tensor,
LU_pivots : Tensor,
unpack_data : bool=True,
unpack_pivots : bool=True,
P : Tensor,
L : Tensor,
U : Tensor) -> Tuple[Tensor, Tensor, Tensor]
torch.manual_seed(seed : int) -> Tuple[]
torch.manual_seed(self : Generator,
seed : int) -> Generator
torch.margin_ranking_loss(input1 : Tensor,
input2 : Tensor,
target : Tensor,
margin : float=0.0,
reduction : int=1) -> Tensor
torch.masked_fill(self : Tensor,
mask : Tensor,
value : number) -> Tensor
torch.masked_fill(self : Tensor,
mask : Tensor,
value : Tensor) -> Tensor
torch.masked_fill(self : Tensor,
mask : Tensor,
value : number,
out : Tensor) -> Tensor
torch.masked_fill(self : Tensor,
mask : Tensor,
value : Tensor,
out : Tensor) -> Tensor
torch.masked_scatter(self : Tensor,
mask : Tensor,
source : Tensor) -> Tensor
torch.masked_scatter(self : Tensor,
mask : Tensor,
source : Tensor,
out : Tensor) -> Tensor
torch.masked_select(self : Tensor,
mask : Tensor) -> Tensor
torch.masked_select(self : Tensor,
mask : Tensor,
out : Tensor) -> Tensor
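masked_select returns a new 1-D tensor containing the elements where mask is True. An illustrative sketch:

    import torch

    x = torch.tensor([[1, 2],
                      [3, 4]])
    torch.masked_select(x, x > 2)   # tensor([3, 4])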
torch.matmul(self : Tensor,
other : Tensor) -> Tensor
torch.matmul(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.matrix_exp(self : Tensor) -> Tensor
torch.matrix_power(self : Tensor,
n : int) -> Tensor
torch.matrix_power(self : Tensor,
n : int,
out : Tensor) -> Tensor
torch.max(self : Tensor,
other : Tensor) -> Tensor
torch.max(self : Tensor) -> Tensor
torch.max(self : Tensor,
dim : int,
keepdim : bool=False) -> Tuple[Tensor, Tensor]
torch.max(self : Tensor,
dim : int,
keepdim : bool=False,
max : Tensor,
max_values : Tensor) -> Tuple[Tensor, Tensor]
torch.max(self : Tensor,
dim : str,
keepdim : bool=False) -> Tuple[Tensor, Tensor]
torch.max(self : Tensor,
dim : str,
keepdim : bool=False,
max : Tensor,
max_values : Tensor) -> Tuple[Tensor, Tensor]
torch.max(self : Tensor,
out : Tensor) -> Tensor
torch.max(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.max_pool1d(self : Tensor,
kernel_size : List[int],
stride : List[int]=[],
padding : List[int]=[0],
dilation : List[int]=[1],
ceil_mode : bool=False) -> Tensor
torch.max_pool1d_with_indices(self : Tensor,
kernel_size : List[int],
stride : List[int]=[],
padding : List[int]=[0],
dilation : List[int]=[1],
ceil_mode : bool=False) -> Tuple[Tensor, Tensor]
torch.max_pool2d(self : Tensor,
kernel_size : List[int],
stride : List[int]=[],
padding : List[int]=[0, 0],
dilation : List[int]=[1, 1],
ceil_mode : bool=False) -> Tensor
torch.max_pool3d(self : Tensor,
kernel_size : List[int],
stride : List[int]=[],
padding : List[int]=[0, 0, 0],
dilation : List[int]=[1, 1, 1],
ceil_mode : bool=False) -> Tensor
torch.maximum(self : Tensor,
other : Tensor) -> Tensor
torch.maximum(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.mean(self : Tensor,
dtype : Optional[int]) -> Tensor
torch.mean(self : Tensor,
dim : Optional[List[int]],
keepdim : bool=False,
dtype : Optional[int]) -> Tensor
torch.mean(self : Tensor,
dim : List[str],
keepdim : bool=False,
dtype : Optional[int]) -> Tensor
torch.mean(self : Tensor,
dim : List[str],
keepdim : bool=False,
dtype : Optional[int],
out : Tensor) -> Tensor
torch.mean(self : Tensor,
dim : Optional[List[int]],
keepdim : bool=False,
dtype : Optional[int],
out : Tensor) -> Tensor
torch.mean(self : Tensor,
dtype : Optional[int],
out : Tensor) -> Tensor
torch.median(self : Tensor) -> Tensor
torch.median(self : Tensor,
dim : int,
keepdim : bool=False) -> Tuple[Tensor, Tensor]
torch.median(self : Tensor,
dim : int,
keepdim : bool=False,
values : Tensor,
indices : Tensor) -> Tuple[Tensor, Tensor]
torch.median(self : Tensor,
dim : str,
keepdim : bool=False) -> Tuple[Tensor, Tensor]
torch.median(self : Tensor,
dim : str,
keepdim : bool=False,
values : Tensor,
indices : Tensor) -> Tuple[Tensor, Tensor]
torch.median(self : Tensor,
out : Tensor) -> Tensor
torch.meshgrid(tensors : List[Tensor]) -> List[Tensor]
torch.meshgrid(tensors : List[Tensor],
indexing : str) -> List[Tensor]
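In the overloads above the tensors are passed as a list; indexing selects 'ij' (matrix) or 'xy' (Cartesian) layout. An illustrative sketch:

    import torch

    xs = torch.tensor([1, 2, 3])
    ys = torch.tensor([4, 5])
    gx, gy = torch.meshgrid([xs, ys], indexing='ij')   # gx.shape == gy.shape == (3, 2)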
torch.min(self : Tensor,
other : Tensor) -> Tensor
torch.min(self : Tensor) -> Tensor
torch.min(self : Tensor,
dim : int,
keepdim : bool=False) -> Tuple[Tensor, Tensor]
torch.min(self : Tensor,
dim : int,
keepdim : bool=False,
min : Tensor,
min_indices : Tensor) -> Tuple[Tensor, Tensor]
torch.min(self : Tensor,
dim : str,
keepdim : bool=False) -> Tuple[Tensor, Tensor]
torch.min(self : Tensor,
dim : str,
keepdim : bool=False,
min : Tensor,
min_indices : Tensor) -> Tuple[Tensor, Tensor]
torch.min(self : Tensor,
out : Tensor) -> Tensor
torch.min(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.minimum(self : Tensor,
other : Tensor) -> Tensor
torch.minimum(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.miopen_batch_norm(input : Tensor,
weight : Tensor,
bias : Optional[Tensor],
running_mean : Optional[Tensor],
running_var : Optional[Tensor],
training : bool,
exponential_average_factor : float,
epsilon : float) -> Tuple[Tensor, Tensor, Tensor]
torch.miopen_batch_norm(input : Tensor,
weight : Tensor,
bias : Optional[Tensor],
running_mean : Optional[Tensor],
running_var : Optional[Tensor],
training : bool,
exponential_average_factor : float,
epsilon : float,
out0 : Tensor,
out1 : Tensor,
out2 : Tensor) -> Tuple[Tensor, Tensor, Tensor]
torch.miopen_convolution(self : Tensor,
weight : Tensor,
bias : Optional[Tensor],
padding : List[int],
stride : List[int],
dilation : List[int],
groups : int,
benchmark : bool,
deterministic : bool,
out : Tensor) -> Tensor
torch.miopen_convolution(self : Tensor,
weight : Tensor,
bias : Optional[Tensor],
padding : List[int],
stride : List[int],
dilation : List[int],
groups : int,
benchmark : bool,
deterministic : bool) -> Tensor
torch.miopen_convolution_add_relu(self : Tensor,
weight : Tensor,
z : Tensor,
alpha : Optional[number],
bias : Optional[Tensor],
stride : List[int],
padding : List[int],
dilation : List[int],
groups : int) -> Tensor
torch.miopen_convolution_relu(self : Tensor,
weight : Tensor,
bias : Optional[Tensor],
stride : List[int],
padding : List[int],
dilation : List[int],
groups : int) -> Tensor
torch.miopen_convolution_transpose(self : Tensor,
weight : Tensor,
bias : Optional[Tensor],
padding : List[int],
output_padding : List[int],
stride : List[int],
dilation : List[int],
groups : int,
benchmark : bool,
deterministic : bool,
out : Tensor) -> Tensor
torch.miopen_convolution_transpose(self : Tensor,
weight : Tensor,
bias : Optional[Tensor],
padding : List[int],
output_padding : List[int],
stride : List[int],
dilation : List[int],
groups : int,
benchmark : bool,
deterministic : bool) -> Tensor
torch.miopen_depthwise_convolution(self : Tensor,
weight : Tensor,
bias : Optional[Tensor],
padding : List[int],
stride : List[int],
dilation : List[int],
groups : int,
benchmark : bool,
deterministic : bool,
out : Tensor) -> Tensor
torch.miopen_depthwise_convolution(self : Tensor,
weight : Tensor,
bias : Optional[Tensor],
padding : List[int],
stride : List[int],
dilation : List[int],
groups : int,
benchmark : bool,
deterministic : bool) -> Tensor
torch.miopen_rnn(input : Tensor,
weight : List[Tensor],
weight_stride0 : int,
hx : Tensor,
cx : Optional[Tensor],
mode : int,
hidden_size : int,
num_layers : int,
batch_first : bool,
dropout : float,
train : bool,
bidirectional : bool,
batch_sizes : List[int],
dropout_state : Optional[Tensor],
out0 : Tensor,
out1 : Tensor,
out2 : Tensor,
out3 : Tensor,
out4 : Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]
torch.miopen_rnn(input : Tensor,
weight : List[Tensor],
weight_stride0 : int,
hx : Tensor,
cx : Optional[Tensor],
mode : int,
hidden_size : int,
num_layers : int,
batch_first : bool,
dropout : float,
train : bool,
bidirectional : bool,
batch_sizes : List[int],
dropout_state : Optional[Tensor]) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]
torch.mkldnn_adaptive_avg_pool2d(self : Tensor,
output_size : List[int],
out : Tensor) -> Tensor
torch.mkldnn_adaptive_avg_pool2d(self : Tensor,
output_size : List[int]) -> Tensor
torch.mkldnn_convolution(self : Tensor,
weight : Tensor,
bias : Optional[Tensor],
padding : List[int],
stride : List[int],
dilation : List[int],
groups : int) -> Tensor
torch.mkldnn_convolution(self : Tensor,
weight : Tensor,
bias : Optional[Tensor],
padding : List[int],
stride : List[int],
dilation : List[int],
groups : int,
out : Tensor) -> Tensor
torch.mkldnn_linear_backward_weights(grad_output : Tensor,
input : Tensor,
weight : Tensor,
bias_defined : bool,
out0 : Tensor,
out1 : Tensor) -> Tuple[Tensor, Tensor]
torch.mkldnn_linear_backward_weights(grad_output : Tensor,
input : Tensor,
weight : Tensor,
bias_defined : bool) -> Tuple[Tensor, Tensor]
torch.mkldnn_max_pool2d(self : Tensor,
kernel_size : List[int],
stride : List[int]=[],
padding : List[int]=[0, 0],
dilation : List[int]=[1, 1],
ceil_mode : bool=False,
out : Tensor) -> Tensor
torch.mkldnn_max_pool2d(self : Tensor,
kernel_size : List[int],
stride : List[int]=[],
padding : List[int]=[0, 0],
dilation : List[int]=[1, 1],
ceil_mode : bool=False) -> Tensor
torch.mkldnn_max_pool3d(self : Tensor,
kernel_size : List[int],
stride : List[int]=[],
padding : List[int]=[0, 0, 0],
dilation : List[int]=[1, 1, 1],
ceil_mode : bool=False,
out : Tensor) -> Tensor
torch.mkldnn_max_pool3d(self : Tensor,
kernel_size : List[int],
stride : List[int]=[],
padding : List[int]=[0, 0, 0],
dilation : List[int]=[1, 1, 1],
ceil_mode : bool=False) -> Tensor
torch.mkldnn_rnn_layer(input : Tensor,
weight0 : Tensor,
weight1 : Tensor,
weight2 : Tensor,
weight3 : Tensor,
hx_ : Tensor,
cx_ : Tensor,
reverse : bool,
batch_sizes : List[int],
mode : int,
hidden_size : int,
num_layers : int,
has_biases : bool,
bidirectional : bool,
batch_first : bool,
train : bool) -> Tuple[Tensor, Tensor, Tensor, Tensor]
torch.mkldnn_rnn_layer(input : Tensor,
weight0 : Tensor,
weight1 : Tensor,
weight2 : Tensor,
weight3 : Tensor,
hx_ : Tensor,
cx_ : Tensor,
reverse : bool,
batch_sizes : List[int],
mode : int,
hidden_size : int,
num_layers : int,
has_biases : bool,
bidirectional : bool,
batch_first : bool,
train : bool,
out0 : Tensor,
out1 : Tensor,
out2 : Tensor,
out3 : Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor]
torch.mm(self : Tensor,
mat2 : Tensor) -> Tensor
torch.mm(self : Tensor,
mat2 : Tensor,
out : Tensor) -> Tensor
torch.mode(self : Tensor,
dim : int=-1,
keepdim : bool=False) -> Tuple[Tensor, Tensor]
torch.mode(self : Tensor,
dim : str,
keepdim : bool=False) -> Tuple[Tensor, Tensor]
torch.mode(self : Tensor,
dim : str,
keepdim : bool=False,
values : Tensor,
indices : Tensor) -> Tuple[Tensor, Tensor]
torch.mode(self : Tensor,
dim : int=-1,
keepdim : bool=False,
values : Tensor,
indices : Tensor) -> Tuple[Tensor, Tensor]
torch.moveaxis(self : Tensor,
source : List[int],
destination : List[int]) -> Tensor
torch.moveaxis(self : Tensor,
source : int,
destination : int) -> Tensor
torch.movedim(self : Tensor,
source : int,
destination : int) -> Tensor
torch.movedim(self : Tensor,
source : List[int],
destination : List[int]) -> Tensor
torch.msort(self : Tensor) -> Tensor
torch.msort(self : Tensor,
out : Tensor) -> Tensor
torch.mul(self : Tensor,
other : Tensor) -> Tensor
torch.mul(self : Tensor,
other : number) -> Tensor
torch.mul(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.mul(self : Tensor,
other : number,
out : Tensor) -> Tensor
torch.mul(l : List[t],
n : int) -> List[t]
torch.mul(n : int,
l : List[t]) -> List[t]
torch.mul(a : int,
b : int) -> int
torch.mul(a : complex,
b : complex) -> complex
torch.mul(a : float,
b : float) -> float
torch.mul(a : int,
b : complex) -> complex
torch.mul(a : complex,
b : int) -> complex
torch.mul(a : float,
b : complex) -> complex
torch.mul(a : complex,
b : float) -> complex
torch.mul(a : int,
b : float) -> float
torch.mul(a : float,
b : int) -> float
torch.mul(a : number,
b : number) -> number
torch.multinomial(self : Tensor,
num_samples : int,
replacement : bool=False,
generator : Optional[Generator]) -> Tensor
torch.multinomial(self : Tensor,
num_samples : int,
replacement : bool=False,
generator : Optional[Generator],
out : Tensor) -> Tensor
torch.multiply(self : Tensor,
other : Tensor) -> Tensor
torch.multiply(self : Tensor,
other : number) -> Tensor
torch.multiply(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.mv(self : Tensor,
vec : Tensor) -> Tensor
torch.mv(self : Tensor,
vec : Tensor,
out : Tensor) -> Tensor
torch.mvlgamma(self : Tensor,
p : int) -> Tensor
torch.mvlgamma(self : Tensor,
p : int,
out : Tensor) -> Tensor
torch.nan_to_num(self : Tensor,
nan : Optional[float],
posinf : Optional[float],
neginf : Optional[float]) -> Tensor
torch.nan_to_num(self : Tensor,
nan : Optional[float],
posinf : Optional[float],
neginf : Optional[float],
out : Tensor) -> Tensor
torch.nan_to_num_(self : Tensor,
nan : Optional[float],
posinf : Optional[float],
neginf : Optional[float]) -> Tensor
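nan_to_num replaces NaN, +inf, and -inf with the given finite values; in eager mode nan defaults to 0.0 and posinf/neginf to the dtype's largest/smallest finite value. An illustrative sketch:

    import torch

    x = torch.tensor([float('nan'), float('inf'), -float('inf'), 1.0])
    torch.nan_to_num(x, nan=0.0, posinf=1e4, neginf=-1e4)
    # tensor([ 0.0000e+00,  1.0000e+04, -1.0000e+04,  1.0000e+00])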
torch.nanmean(self : Tensor,
dim : Optional[List[int]],
keepdim : bool=False,
dtype : Optional[int]) -> Tensor
torch.nanmean(self : Tensor,
dim : Optional[List[int]],
keepdim : bool=False,
dtype : Optional[int],
out : Tensor) -> Tensor
torch.nanmedian(self : Tensor) -> Tensor
torch.nanmedian(self : Tensor,
dim : int,
keepdim : bool=False) -> Tuple[Tensor, Tensor]
torch.nanmedian(self : Tensor,
dim : int,
keepdim : bool=False,
values : Tensor,
indices : Tensor) -> Tuple[Tensor, Tensor]
torch.nanmedian(self : Tensor,
dim : str,
keepdim : bool=False) -> Tuple[Tensor, Tensor]
torch.nanmedian(self : Tensor,
dim : str,
keepdim : bool=False,
values : Tensor,
indices : Tensor) -> Tuple[Tensor, Tensor]
torch.nanmedian(self : Tensor,
out : Tensor) -> Tensor
torch.nanquantile(self : Tensor,
q : Tensor,
dim : Optional[int],
keepdim : bool=False,
interpolation : str="linear") -> Tensor
torch.nanquantile(self : Tensor,
q : float,
dim : Optional[int],
keepdim : bool=False,
interpolation : str="linear") -> Tensor
torch.nanquantile(self : Tensor,
q : Tensor,
dim : Optional[int],
keepdim : bool=False,
interpolation : str="linear",
out : Tensor) -> Tensor
torch.nanquantile(self : Tensor,
q : float,
dim : Optional[int],
keepdim : bool=False,
interpolation : str="linear",
out : Tensor) -> Tensor
torch.nansum(self : Tensor,
dim : Optional[List[int]],
keepdim : bool=False,
dtype : Optional[int]) -> Tensor
torch.nansum(self : Tensor,
dim : Optional[List[int]],
keepdim : bool=False,
dtype : Optional[int],
out : Tensor) -> Tensor
torch.narrow(self : Tensor,
dim : int,
start : int,
length : int) -> Tensor
torch.narrow(self : Tensor,
dim : int,
start : Tensor,
length : int) -> Tensor
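narrow returns a view covering length elements of dim starting at start. An illustrative sketch:

    import torch

    x = torch.arange(12).reshape(3, 4)
    torch.narrow(x, 0, 1, 2)   # rows 1..2: tensor([[ 4,  5,  6,  7],
                               #                    [ 8,  9, 10, 11]])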
torch.narrow_copy(self : Tensor,
dim : int,
start : int,
length : int) -> Tensor
torch.narrow_copy(self : Tensor,
dim : int,
start : int,
length : int,
out : Tensor) -> Tensor
torch.native_batch_norm(input : Tensor,
weight : Optional[Tensor],
bias : Optional[Tensor],
running_mean : Optional[Tensor],
running_var : Optional[Tensor],
training : bool,
momentum : float,
eps : float) -> Tuple[Tensor, Tensor, Tensor]
torch.native_batch_norm(input : Tensor,
weight : Optional[Tensor],
bias : Optional[Tensor],
running_mean : Optional[Tensor],
running_var : Optional[Tensor],
training : bool,
momentum : float,
eps : float,
out : Tensor,
save_mean : Tensor,
save_invstd : Tensor) -> Tuple[Tensor, Tensor, Tensor]
torch.native_channel_shuffle(self : Tensor,
groups : int) -> Tensor
torch.native_dropout(input : Tensor,
p : float,
train : Optional[bool]) -> Tuple[Tensor, Tensor]
torch.native_dropout(input : Tensor,
p : float,
train : Optional[bool],
out0 : Tensor,
out1 : Tensor) -> Tuple[Tensor, Tensor]
torch.native_group_norm(input : Tensor,
weight : Optional[Tensor],
bias : Optional[Tensor],
N : int,
C : int,
HxW : int,
group : int,
eps : float) -> Tuple[Tensor, Tensor, Tensor]
torch.native_group_norm(input : Tensor,
weight : Optional[Tensor],
bias : Optional[Tensor],
N : int,
C : int,
HxW : int,
group : int,
eps : float,
out0 : Tensor,
out1 : Tensor,
out2 : Tensor) -> Tuple[Tensor, Tensor, Tensor]
torch.native_layer_norm(input : Tensor,
normalized_shape : List[int],
weight : Optional[Tensor],
bias : Optional[Tensor],
eps : float) -> Tuple[Tensor, Tensor, Tensor]
torch.native_layer_norm(input : Tensor,
normalized_shape : List[int],
weight : Optional[Tensor],
bias : Optional[Tensor],
eps : float,
out0 : Tensor,
out1 : Tensor,
out2 : Tensor) -> Tuple[Tensor, Tensor, Tensor]
torch.native_norm(self : Tensor,
p : number=2,
out : Tensor) -> Tensor
torch.native_norm(self : Tensor,
p : Optional[number],
dim : List[int],
keepdim : bool,
dtype : Optional[int],
out : Tensor) -> Tensor
torch.native_norm(self : Tensor,
p : number=2) -> Tensor
torch.native_norm(self : Tensor,
p : Optional[number],
dim : List[int],
keepdim : bool,
dtype : Optional[int]) -> Tensor
torch.ne(self : Tensor,
other : Tensor) -> Tensor
torch.ne(self : Tensor,
other : number) -> Tensor
torch.ne(self : Tensor,
other : number,
out : Tensor) -> Tensor
torch.ne(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.ne(a : List[int],
b : List[int]) -> bool
torch.ne(a : Device,
b : Device) -> bool
torch.ne(a : bool,
b : bool) -> bool
torch.ne(a : AnyEnumType,
b : AnyEnumType) -> bool
torch.ne(a : int,
b : int) -> bool
torch.ne(a : complex,
b : complex) -> bool
torch.ne(a : float,
b : float) -> bool
torch.ne(a : int,
b : float) -> bool
torch.ne(a : float,
b : int) -> bool
torch.ne(a : float,
b : complex) -> bool
torch.ne(a : complex,
b : float) -> bool
torch.ne(a : number,
b : number) -> bool
torch.ne(a : str,
b : str) -> bool
torch.ne(a : List[float],
b : List[float]) -> bool
torch.ne(a : List[Tensor],
b : List[Tensor]) -> bool
torch.ne(a : List[bool],
b : List[bool]) -> bool
torch.ne(a : List[str],
b : List[str]) -> bool
torch.neg(self : Tensor) -> Tensor
torch.neg(self : Tensor,
out : Tensor) -> Tensor
torch.neg(a : int) -> int
torch.neg(a : float) -> float
torch.neg(a : complex) -> complex
torch.neg(a : number) -> number
torch.neg_(self : Tensor) -> Tensor
torch.negative(self : Tensor) -> Tensor
torch.negative(self : Tensor,
out : Tensor) -> Tensor
torch.negative_(self : Tensor) -> Tensor
torch.nextafter(self : Tensor,
other : Tensor) -> Tensor
torch.nextafter(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.nonzero(self : Tensor) -> Tensor
torch.nonzero(self : Tensor,
out : Tensor) -> Tensor
torch.nonzero_static(self : Tensor,
size : int,
fill_value : int=-1) -> Tensor
torch.nonzero_static(self : Tensor,
size : int,
fill_value : int=-1,
out : Tensor) -> Tensor
torch.norm_except_dim(v : Tensor,
pow : int=2,
dim : int=0) -> Tensor
torch.normal(mean : Tensor,
std : float=1.0,
generator : Optional[Generator]) -> Tensor
torch.normal(mean : Tensor,
std : float=1.0,
generator : Optional[Generator],
out : Tensor) -> Tensor
torch.normal(mean : float,
std : Tensor,
generator : Optional[Generator],
out : Tensor) -> Tensor
torch.normal(mean : float,
std : Tensor,
generator : Optional[Generator]) -> Tensor
torch.normal(mean : Tensor,
std : Tensor,
generator : Optional[Generator]) -> Tensor
torch.normal(mean : Tensor,
std : Tensor,
generator : Optional[Generator],
out : Tensor) -> Tensor
torch.normal(mean : float,
std : float,
size : List[int],
generator : Optional[Generator],
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.normal(mean : float,
std : float,
size : List[int],
generator : Optional[Generator],
out : Tensor) -> Tensor
torch.normal(self : Tensor,
mean : float=0.0,
std : float=1.0,
generator : Optional[Generator],
out : Tensor) -> Tensor
torch.not_equal(self : Tensor,
other : Tensor) -> Tensor
torch.not_equal(self : Tensor,
other : number) -> Tensor
torch.not_equal(self : Tensor,
other : number,
out : Tensor) -> Tensor
torch.not_equal(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.nuclear_norm(self : Tensor,
keepdim : bool=False) -> Tensor
torch.nuclear_norm(self : Tensor,
dim : List[int],
keepdim : bool=False) -> Tensor
torch.nuclear_norm(self : Tensor,
keepdim : bool=False,
out : Tensor) -> Tensor
torch.nuclear_norm(self : Tensor,
dim : List[int],
keepdim : bool=False,
out : Tensor) -> Tensor
torch.numel(self : Tensor) -> int
torch.ones(size : List[int],
names : Optional[List[str]],
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.ones(size : List[int],
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.ones(size : List[int],
names : Optional[List[str]],
out : Tensor) -> Tensor
torch.ones(size : List[int],
out : Tensor) -> Tensor
torch.ones_like(self : Tensor,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool],
memory_format : Optional[int]) -> Tensor
torch.ones_like(self : Tensor,
memory_format : Optional[int],
out : Tensor) -> Tensor
torch.orgqr(self : Tensor,
input2 : Tensor) -> Tensor
torch.orgqr(self : Tensor,
input2 : Tensor,
out : Tensor) -> Tensor
torch.ormqr(self : Tensor,
input2 : Tensor,
input3 : Tensor,
left : bool=True,
transpose : bool=False) -> Tensor
torch.ormqr(self : Tensor,
input2 : Tensor,
input3 : Tensor,
left : bool=True,
transpose : bool=False,
out : Tensor) -> Tensor
torch.outer(self : Tensor,
vec2 : Tensor) -> Tensor
torch.outer(self : Tensor,
vec2 : Tensor,
out : Tensor) -> Tensor
torch.pairwise_distance(x1 : Tensor,
x2 : Tensor,
p : float=2.0,
eps : float=1e-06,
keepdim : bool=False) -> Tensor
torch.pdist(self : Tensor,
p : float=2.0) -> Tensor
torch.permute(self : Tensor,
dims : List[int]) -> Tensor
torch.permute_copy(self : Tensor,
dims : List[int],
out : Tensor) -> Tensor
torch.permute_copy(self : Tensor,
dims : List[int]) -> Tensor
torch.pinverse(self : Tensor,
rcond : float=1e-15) -> Tensor
torch.pixel_shuffle(self : Tensor,
upscale_factor : int) -> Tensor
torch.pixel_shuffle(self : Tensor,
upscale_factor : int,
out : Tensor) -> Tensor
torch.pixel_unshuffle(self : Tensor,
downscale_factor : int) -> Tensor
torch.pixel_unshuffle(self : Tensor,
downscale_factor : int,
out : Tensor) -> Tensor
torch.poisson(self : Tensor,
generator : Optional[Generator]) -> Tensor
torch.poisson(self : Tensor,
generator : Optional[Generator],
out : Tensor) -> Tensor
torch.poisson_nll_loss(input : Tensor,
target : Tensor,
log_input : bool,
full : bool,
eps : float,
reduction : int) -> Tensor
torch.polar(abs : Tensor,
angle : Tensor) -> Tensor
torch.polar(abs : Tensor,
angle : Tensor,
out : Tensor) -> Tensor
torch.polar(a : int,
b : int) -> complex
torch.polar(a : float,
b : float) -> complex
torch.polar(a : int,
b : float) -> complex
torch.polar(a : float,
b : int) -> complex
torch.polar(a : number,
b : number) -> number
torch.polygamma(n : int,
self : Tensor) -> Tensor
torch.polygamma(n : int,
self : Tensor,
out : Tensor) -> Tensor
torch.positive(self : Tensor) -> Tensor
torch.pow(self : Tensor,
exponent : Tensor) -> Tensor
torch.pow(self : Tensor,
exponent : number) -> Tensor
torch.pow(self : number,
exponent : Tensor) -> Tensor
torch.pow(self : number,
exponent : Tensor,
out : Tensor) -> Tensor
torch.pow(self : Tensor,
exponent : number,
out : Tensor) -> Tensor
torch.pow(self : Tensor,
exponent : Tensor,
out : Tensor) -> Tensor
torch.pow(a : int,
b : int) -> float
torch.pow(a : complex,
b : complex) -> complex
torch.pow(a : float,
b : float) -> float
torch.pow(a : int,
b : float) -> float
torch.pow(a : float,
b : int) -> float
torch.pow(a : float,
b : complex) -> complex
torch.pow(a : complex,
b : float) -> complex
torch.pow(a : number,
b : number) -> float
torch.pow(a : int,
b : int) -> int
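For example, both the Tensor-exponent and scalar-base overloads of torch.pow are scriptable (a minimal sketch; the input values are illustrative):

    import torch

    @torch.jit.script
    def powers(x: torch.Tensor) -> torch.Tensor:
        # Tensor ** scalar plus scalar ** Tensor, both via torch.pow overloads.
        return torch.pow(x, 2) + torch.pow(2.0, x)

    print(powers(torch.arange(3.0)))   # tensor([1., 3., 8.])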
torch.prelu(self : Tensor,
weight : Tensor) -> Tensor
torch.prod(self : Tensor,
dtype : Optional[int]) -> Tensor
torch.prod(self : Tensor,
dim : int,
keepdim : bool=False,
dtype : Optional[int]) -> Tensor
torch.prod(self : Tensor,
dim : str,
keepdim : bool=False,
dtype : Optional[int]) -> Tensor
torch.prod(self : Tensor,
dim : str,
keepdim : bool=False,
dtype : Optional[int],
out : Tensor) -> Tensor
torch.prod(self : Tensor,
dim : int,
keepdim : bool=False,
dtype : Optional[int],
out : Tensor) -> Tensor
torch.prod(self : Tensor,
dtype : Optional[int],
out : Tensor) -> Tensor
torch.promote_types(type1 : int,
type2 : int) -> int
torch.put(self : Tensor,
index : Tensor,
source : Tensor,
accumulate : bool=False) -> Tensor
torch.put(self : Tensor,
index : Tensor,
source : Tensor,
accumulate : bool=False,
out : Tensor) -> Tensor
torch.q_per_channel_axis(self : Tensor) -> int
torch.q_per_channel_scales(self : Tensor,
out : Tensor) -> Tensor
torch.q_per_channel_scales(self : Tensor) -> Tensor
torch.q_per_channel_zero_points(self : Tensor,
out : Tensor) -> Tensor
torch.q_per_channel_zero_points(self : Tensor) -> Tensor
torch.q_scale(self : Tensor) -> float
torch.q_zero_point(self : Tensor) -> int
torch.qr(self : Tensor,
some : bool=True) -> Tuple[Tensor, Tensor]
torch.qr(self : Tensor,
some : bool=True,
Q : Tensor,
R : Tensor) -> Tuple[Tensor, Tensor]
torch.qscheme(self : Tensor) -> QScheme
torch.quantile(self : Tensor,
q : Tensor,
dim : Optional[int],
keepdim : bool=False,
interpolation : str="linear") -> Tensor
torch.quantile(self : Tensor,
q : float,
dim : Optional[int],
keepdim : bool=False,
interpolation : str="linear") -> Tensor
torch.quantile(self : Tensor,
q : Tensor,
dim : Optional[int],
keepdim : bool=False,
interpolation : str="linear",
out : Tensor) -> Tensor
torch.quantile(self : Tensor,
q : float,
dim : Optional[int],
keepdim : bool=False,
interpolation : str="linear",
out : Tensor) -> Tensor
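For example, a float q returns a single quantile and a 1-D q tensor returns one value per entry (a minimal eager-mode sketch; values are illustrative):

    import torch

    x = torch.arange(5.0)                                        # tensor([0., 1., 2., 3., 4.])
    median = torch.quantile(x, 0.5)                              # tensor(2.)
    quartiles = torch.quantile(x, torch.tensor([0.25, 0.75]))    # tensor([1., 3.])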
torch.quantize_per_channel(self : Tensor,
scales : Tensor,
zero_points : Tensor,
axis : int,
dtype : int) -> Tensor
torch.quantize_per_channel(self : Tensor,
scales : Tensor,
zero_points : Tensor,
axis : int,
dtype : int,
out : Tensor) -> Tensor
torch.quantize_per_tensor(self : Tensor,
scale : float,
zero_point : int,
dtype : int) -> Tensor
torch.quantize_per_tensor(self : Tensor,
scale : Tensor,
zero_point : Tensor,
dtype : int) -> Tensor
torch.quantize_per_tensor(tensors : List[Tensor],
scales : Tensor,
zero_points : Tensor,
dtype : int) -> List[Tensor]
torch.quantize_per_tensor(self : Tensor,
scale : float,
zero_point : int,
dtype : int,
out : Tensor) -> Tensor
torch.quantize_per_tensor(self : Tensor,
scale : Tensor,
zero_point : Tensor,
dtype : int,
out : Tensor) -> Tensor
torch.quantize_per_tensor(tensors : List[Tensor],
scales : Tensor,
zero_points : Tensor,
dtype : int,
out : List[Tensor]) -> Tuple[]
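For example, a float tensor can be quantized to quint8 with a single scale and zero point, then inspected or dequantized (a minimal eager-mode sketch; the scale and zero point are illustrative):

    import torch

    x = torch.tensor([-1.0, 0.0, 1.0, 2.0])
    xq = torch.quantize_per_tensor(x, scale=0.1, zero_point=10, dtype=torch.quint8)
    print(xq.int_repr())     # underlying uint8 storage
    print(xq.dequantize())   # back to float, up to quantization error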
torch.quantize_per_tensor_dynamic(self : Tensor,
dtype : int,
reduce_range : bool) -> Tensor
torch.quantize_per_tensor_dynamic(self : Tensor,
dtype : int,
reduce_range : bool,
out : Tensor) -> Tensor
torch.quantized_batch_norm(input : Tensor,
weight : Optional[Tensor],
bias : Optional[Tensor],
mean : Tensor,
var : Tensor,
eps : float,
output_scale : float,
output_zero_point : int,
out : Tensor) -> Tensor
torch.quantized_batch_norm(input : Tensor,
weight : Optional[Tensor],
bias : Optional[Tensor],
mean : Tensor,
var : Tensor,
eps : float,
output_scale : float,
output_zero_point : int) -> Tensor
torch.quantized_gru_cell(input : Tensor,
hx : Tensor,
w_ih : Tensor,
w_hh : Tensor,
b_ih : Tensor,
b_hh : Tensor,
packed_ih : Tensor,
packed_hh : Tensor,
col_offsets_ih : Tensor,
col_offsets_hh : Tensor,
scale_ih : number,
scale_hh : number,
zero_point_ih : number,
zero_point_hh : number) -> Tensor
torch.quantized_lstm_cell(input : Tensor,
hx : List[Tensor],
w_ih : Tensor,
w_hh : Tensor,
b_ih : Tensor,
b_hh : Tensor,
packed_ih : Tensor,
packed_hh : Tensor,
col_offsets_ih : Tensor,
col_offsets_hh : Tensor,
scale_ih : number,
scale_hh : number,
zero_point_ih : number,
zero_point_hh : number) -> Tuple[Tensor, Tensor]
torch.quantized_max_pool1d(self : Tensor,
kernel_size : List[int],
stride : List[int]=[],
padding : List[int]=[0],
dilation : List[int]=[1],
ceil_mode : bool=False,
out : Tensor) -> Tensor
torch.quantized_max_pool1d(self : Tensor,
kernel_size : List[int],
stride : List[int]=[],
padding : List[int]=[0],
dilation : List[int]=[1],
ceil_mode : bool=False) -> Tensor
torch.quantized_max_pool2d(self : Tensor,
kernel_size : List[int],
stride : List[int]=[],
padding : List[int]=[0, 0],
dilation : List[int]=[1, 1],
ceil_mode : bool=False,
out : Tensor) -> Tensor
torch.quantized_max_pool2d(self : Tensor,
kernel_size : List[int],
stride : List[int]=[],
padding : List[int]=[0, 0],
dilation : List[int]=[1, 1],
ceil_mode : bool=False) -> Tensor
torch.quantized_max_pool3d(self : Tensor,
kernel_size : List[int],
stride : List[int]=[],
padding : List[int]=[0, 0, 0],
dilation : List[int]=[1, 1, 1],
ceil_mode : bool=False,
out : Tensor) -> Tensor
torch.quantized_max_pool3d(self : Tensor,
kernel_size : List[int],
stride : List[int]=[],
padding : List[int]=[0, 0, 0],
dilation : List[int]=[1, 1, 1],
ceil_mode : bool=False) -> Tensor
torch.quantized_rnn_relu_cell(input : Tensor,
hx : Tensor,
w_ih : Tensor,
w_hh : Tensor,
b_ih : Tensor,
b_hh : Tensor,
packed_ih : Tensor,
packed_hh : Tensor,
col_offsets_ih : Tensor,
col_offsets_hh : Tensor,
scale_ih : number,
scale_hh : number,
zero_point_ih : number,
zero_point_hh : number) -> Tensor
torch.quantized_rnn_tanh_cell(input : Tensor,
hx : Tensor,
w_ih : Tensor,
w_hh : Tensor,
b_ih : Tensor,
b_hh : Tensor,
packed_ih : Tensor,
packed_hh : Tensor,
col_offsets_ih : Tensor,
col_offsets_hh : Tensor,
scale_ih : number,
scale_hh : number,
zero_point_ih : number,
zero_point_hh : number) -> Tensor
torch.rad2deg(self : Tensor) -> Tensor
torch.rad2deg(self : Tensor,
out : Tensor) -> Tensor
torch.rad2deg_(self : Tensor) -> Tensor
torch.rand(size : List[int],
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.rand(size : List[int],
generator : Optional[Generator],
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.rand(size : List[int],
names : Optional[List[str]],
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.rand(size : List[int],
generator : Optional[Generator],
names : Optional[List[str]],
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.rand(size : List[int],
out : Tensor) -> Tensor
torch.rand(size : List[int],
generator : Optional[Generator],
out : Tensor) -> Tensor
torch.rand(size : List[int],
names : Optional[List[str]],
out : Tensor) -> Tensor
torch.rand(size : List[int],
generator : Optional[Generator],
names : Optional[List[str]],
out : Tensor) -> Tensor
torch.rand_like(self : Tensor,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool],
memory_format : Optional[int]) -> Tensor
torch.rand_like(self : Tensor,
memory_format : Optional[int],
out : Tensor) -> Tensor
torch.randint(high : int,
size : List[int],
dtype : Optional[int]=4,
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.randint(high : int,
size : List[int],
generator : Optional[Generator],
dtype : Optional[int]=4,
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.randint(low : int,
high : int,
size : List[int],
dtype : Optional[int]=4,
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.randint(low : int,
high : int,
size : List[int],
generator : Optional[Generator],
dtype : Optional[int]=4,
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.randint(high : int,
size : List[int],
out : Tensor) -> Tensor
torch.randint(high : int,
size : List[int],
generator : Optional[Generator],
out : Tensor) -> Tensor
torch.randint(low : int,
high : int,
size : List[int],
out : Tensor) -> Tensor
torch.randint(low : int,
high : int,
size : List[int],
generator : Optional[Generator],
out : Tensor) -> Tensor
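For example, the high-only and low/high overloads of torch.randint both produce int64 tensors by default (a minimal eager-mode sketch; the seed is illustrative):

    import torch

    a = torch.randint(10, (3,))                      # 3 integers drawn uniformly from [0, 10)
    g = torch.Generator().manual_seed(0)
    b = torch.randint(5, 15, (2, 2), generator=g)    # explicit low/high with a seeded generator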
torch.randint_like(self : Tensor,
high : int,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool],
memory_format : Optional[int]) -> Tensor
torch.randint_like(self : Tensor,
low : int,
high : int,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool],
memory_format : Optional[int]) -> Tensor
torch.randint_like(self : Tensor,
high : int,
memory_format : Optional[int],
out : Tensor) -> Tensor
torch.randint_like(self : Tensor,
low : int,
high : int,
memory_format : Optional[int],
out : Tensor) -> Tensor
torch.randn(size : List[int],
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.randn(size : List[int],
generator : Optional[Generator],
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.randn(size : List[int],
names : Optional[List[str]],
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.randn(size : List[int],
generator : Optional[Generator],
names : Optional[List[str]],
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.randn(size : List[int],
out : Tensor) -> Tensor
torch.randn(size : List[int],
generator : Optional[Generator],
out : Tensor) -> Tensor
torch.randn(size : List[int],
names : Optional[List[str]],
out : Tensor) -> Tensor
torch.randn(size : List[int],
generator : Optional[Generator],
names : Optional[List[str]],
out : Tensor) -> Tensor
torch.randn_like(self : Tensor,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool],
memory_format : Optional[int]) -> Tensor
torch.randn_like(self : Tensor,
memory_format : Optional[int],
out : Tensor) -> Tensor
torch.randperm(n : int,
dtype : Optional[int]=4,
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.randperm(n : int,
generator : Optional[Generator],
dtype : Optional[int]=4,
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.randperm(n : int,
out : Tensor) -> Tensor
torch.randperm(n : int,
generator : Optional[Generator],
out : Tensor) -> Tensor
torch.range(start : number,
end : number,
step : number=1,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.range(start : number,
end : number,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.range(start : number,
end : number,
step : number=1,
out : Tensor) -> Tensor
torch.range(start : number,
end : number,
out : Tensor) -> Tensor
torch.ravel(self : Tensor) -> Tensor
torch.real(self : Tensor) -> Tensor
torch.reciprocal(self : Tensor) -> Tensor
torch.reciprocal(self : Tensor,
out : Tensor) -> Tensor
torch.reciprocal_(self : Tensor) -> Tensor
torch.relu(self : Tensor) -> Tensor
torch.relu(self : Tensor,
out : Tensor) -> Tensor
torch.relu_(self : Tensor) -> Tensor
torch.remainder(self : Tensor,
other : Tensor) -> Tensor
torch.remainder(self : Tensor,
other : number) -> Tensor
torch.remainder(self : number,
other : Tensor) -> Tensor
torch.remainder(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.remainder(self : Tensor,
other : number,
out : Tensor) -> Tensor
torch.remainder(self : number,
other : Tensor,
out : Tensor) -> Tensor
torch.remainder(a : int,
b : int) -> int
torch.remainder(a : float,
b : float) -> float
torch.remainder(a : int,
b : float) -> float
torch.remainder(a : float,
b : int) -> float
torch.remainder(a : number,
b : number) -> number
torch.renorm(self : Tensor,
p : number,
dim : int,
maxnorm : number) -> Tensor
torch.renorm(self : Tensor,
p : number,
dim : int,
maxnorm : number,
out : Tensor) -> Tensor
torch.repeat_interleave(repeats : Tensor,
output_size : Optional[int]) -> Tensor
torch.repeat_interleave(self : Tensor,
repeats : Tensor,
dim : Optional[int],
output_size : Optional[int]) -> Tensor
torch.repeat_interleave(self : Tensor,
repeats : int,
dim : Optional[int],
output_size : Optional[int]) -> Tensor
torch.repeat_interleave(repeats : Tensor,
output_size : Optional[int],
out : Tensor) -> Tensor
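For example, an int repeat count repeats every element, while a repeats tensor with dim repeats whole slices (a minimal eager-mode sketch):

    import torch

    x = torch.tensor([1, 2, 3])
    print(torch.repeat_interleave(x, 2))   # tensor([1, 1, 2, 2, 3, 3])

    y = torch.tensor([[1, 2], [3, 4]])
    print(torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0))   # row 0 once, row 1 twice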
torch.reshape(self : Tensor,
shape : List[int]) -> Tensor
torch.resize_as_(self : Tensor,
the_template : Tensor,
memory_format : Optional[int]) -> Tensor
torch.resize_as_sparse_(self : Tensor,
the_template : Tensor) -> Tensor
torch.resolve_conj(self : Tensor) -> Tensor
torch.resolve_neg(self : Tensor) -> Tensor
torch.result_type(tensor : Tensor,
other : Tensor) -> int
torch.result_type(tensor : Tensor,
other : number) -> int
torch.result_type(scalar : number,
tensor : Tensor) -> int
torch.result_type(scalar1 : number,
scalar2 : number) -> int
torch.rms_norm(input : Tensor,
normalized_shape : List[int],
weight : Optional[Tensor],
eps : Optional[float]) -> Tensor
torch.rnn_relu(input : Tensor,
hx : Tensor,
params : List[Tensor],
has_biases : bool,
num_layers : int,
dropout : float,
train : bool,
bidirectional : bool,
batch_first : bool) -> Tuple[Tensor, Tensor]
torch.rnn_relu(data : Tensor,
batch_sizes : Tensor,
hx : Tensor,
params : List[Tensor],
has_biases : bool,
num_layers : int,
dropout : float,
train : bool,
bidirectional : bool) -> Tuple[Tensor, Tensor]
torch.rnn_relu_cell(input : Tensor,
hx : Tensor,
w_ih : Tensor,
w_hh : Tensor,
b_ih : Optional[Tensor],
b_hh : Optional[Tensor]) -> Tensor
torch.rnn_tanh(input : Tensor,
hx : Tensor,
params : List[Tensor],
has_biases : bool,
num_layers : int,
dropout : float,
train : bool,
bidirectional : bool,
batch_first : bool) -> Tuple[Tensor, Tensor]
torch.rnn_tanh(data : Tensor,
batch_sizes : Tensor,
hx : Tensor,
params : List[Tensor],
has_biases : bool,
num_layers : int,
dropout : float,
train : bool,
bidirectional : bool) -> Tuple[Tensor, Tensor]
torch.rnn_tanh_cell(input : Tensor,
hx : Tensor,
w_ih : Tensor,
w_hh : Tensor,
b_ih : Optional[Tensor],
b_hh : Optional[Tensor]) -> Tensor
torch.roll(self : Tensor,
shifts : List[int],
dims : List[int]=[]) -> Tensor
torch.roll(self : Tensor,
shifts : List[int],
dims : List[int]=[],
out : Tensor) -> Tensor
torch.rot90(self : Tensor,
k : int=1,
dims : List[int]=[0, 1]) -> Tensor
torch.rot90(self : Tensor,
k : int=1,
dims : List[int]=[0, 1],
out : Tensor) -> Tensor
torch.round(self : Tensor) -> Tensor
torch.round(self : Tensor,
decimals : int) -> Tensor
torch.round(self : Tensor,
out : Tensor) -> Tensor
torch.round(self : Tensor,
decimals : int,
out : Tensor) -> Tensor
torch.round(a : int) -> float
torch.round(a : float) -> float
torch.round(a : number) -> number
torch.round_(self : Tensor) -> Tensor
torch.round_(self : Tensor,
decimals : int) -> Tensor
torch.row_indices_copy(self : Tensor,
out : Tensor) -> Tensor
torch.row_indices_copy(self : Tensor) -> Tensor
torch.row_stack(tensors : List[Tensor]) -> Tensor
torch.row_stack(tensors : List[Tensor],
out : Tensor) -> Tensor
torch.rrelu(self : Tensor,
lower : number=0.125,
upper : number=0.3333333333333333,
training : bool=False,
generator : Optional[Generator]) -> Tensor
torch.rrelu_(self : Tensor,
lower : number=0.125,
upper : number=0.3333333333333333,
training : bool=False,
generator : Optional[Generator]) -> Tensor
torch.rsqrt(self : Tensor) -> Tensor
torch.rsqrt(self : Tensor,
out : Tensor) -> Tensor
torch.rsqrt_(self : Tensor) -> Tensor
torch.rsub(self : Tensor,
other : Tensor,
alpha : number=1) -> Tensor
torch.rsub(self : Tensor,
other : number,
alpha : number=1) -> Tensor
torch.rsub(self : Tensor,
other : Tensor,
alpha : number=1,
out : Tensor) -> Tensor
torch.rsub(self : Tensor,
other : number,
alpha : number=1,
out : Tensor) -> Tensor
torch.save(item : t,
filename : str) -> Tuple[]
torch.scalar_tensor(s : number,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.scalar_tensor(s : number,
out : Tensor) -> Tensor
torch.scatter(self : Tensor,
dim : int,
index : Tensor,
value : number) -> Tensor
torch.scatter(self : Tensor,
dim : int,
index : Tensor,
src : Tensor) -> Tensor
torch.scatter(self : Tensor,
dim : int,
index : Tensor,
src : Tensor,
reduce : str) -> Tensor
torch.scatter(self : Tensor,
dim : int,
index : Tensor,
value : number,
reduce : str) -> Tensor
torch.scatter(self : Tensor,
dim : int,
index : Tensor,
src : Tensor,
out : Tensor) -> Tensor
torch.scatter(self : Tensor,
dim : int,
index : Tensor,
value : number,
out : Tensor) -> Tensor
torch.scatter(self : Tensor,
dim : int,
index : Tensor,
src : Tensor,
reduce : str,
out : Tensor) -> Tensor
torch.scatter(self : Tensor,
dim : int,
index : Tensor,
value : number,
reduce : str,
out : Tensor) -> Tensor
torch.scatter(self : Tensor,
dim : str,
index : Tensor,
src : Tensor) -> Tensor
torch.scatter(self : Tensor,
dim : str,
index : Tensor,
value : number) -> Tensor
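For example, the src overload writes values from src into a copy of self at the positions named by index along dim (a minimal eager-mode sketch; the index pattern is illustrative):

    import torch

    src = torch.tensor([[1., 2., 3.]])
    index = torch.tensor([[0, 1, 2]])
    out = torch.scatter(torch.zeros(3, 3), 0, index, src)
    # out[index[i][j]][j] = src[i][j] along dim 0, giving a diagonal here:
    # tensor([[1., 0., 0.],
    #         [0., 2., 0.],
    #         [0., 0., 3.]])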
torch.scatter_add(self : Tensor,
dim : int,
index : Tensor,
src : Tensor) -> Tensor
torch.scatter_add(self : Tensor,
dim : int,
index : Tensor,
src : Tensor,
out : Tensor) -> Tensor
torch.scatter_add(self : Tensor,
dim : str,
index : Tensor,
src : Tensor) -> Tensor
torch.scatter_reduce(self : Tensor,
dim : int,
index : Tensor,
src : Tensor,
reduce : str,
include_self : bool=True) -> Tensor
torch.scatter_reduce(self : Tensor,
dim : int,
index : Tensor,
src : Tensor,
reduce : str,
include_self : bool=True,
out : Tensor) -> Tensor
torch.searchsorted(sorted_sequence : Tensor,
self : Tensor,
out_int32 : bool=False,
right : bool=False,
side : Optional[str],
sorter : Optional[Tensor]) -> Tensor
torch.searchsorted(sorted_sequence : Tensor,
self : Tensor,
out_int32 : bool=False,
right : bool=False,
side : Optional[str],
sorter : Optional[Tensor],
out : Tensor) -> Tensor
torch.searchsorted(sorted_sequence : Tensor,
self : number,
out_int32 : bool=False,
right : bool=False,
side : Optional[str],
sorter : Optional[Tensor]) -> Tensor
torch.searchsorted(sorted_sequence : Tensor,
self : number,
out_int32 : bool=False,
right : bool=False,
side : Optional[str],
sorter : Optional[Tensor],
out : Tensor) -> Tensor
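For example, torch.searchsorted returns the insertion indices that keep sorted_sequence sorted; right controls how ties are placed (a minimal eager-mode sketch):

    import torch

    boundaries = torch.tensor([1, 3, 5, 7])
    values = torch.tensor([2, 6, 7])
    print(torch.searchsorted(boundaries, values))               # tensor([1, 3, 3])
    print(torch.searchsorted(boundaries, values, right=True))   # tensor([1, 3, 4])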
torch.seed(self : Generator) -> int
torch.segment_reduce(data : Tensor,
reduce : str,
lengths : Optional[Tensor],
indices : Optional[Tensor],
offsets : Optional[Tensor],
axis : int=0,
unsafe : bool=False,
initial : Optional[number]) -> Tensor
torch.segment_reduce(data : Tensor,
reduce : str,
lengths : Optional[Tensor],
indices : Optional[Tensor],
offsets : Optional[Tensor],
axis : int=0,
unsafe : bool=False,
initial : Optional[number],
out : Tensor) -> Tensor
torch.select(self : Tensor,
dim : str,
index : int) -> Tensor
torch.select(self : Tensor,
dim : int,
index : int) -> Tensor
torch.select(list : List[t],
idx : int) -> t
torch.select_copy(self : Tensor,
dim : int,
index : int,
out : Tensor) -> Tensor
torch.select_copy(self : Tensor,
dim : int,
index : int) -> Tensor
torch.select_scatter(self : Tensor,
src : Tensor,
dim : int,
index : int) -> Tensor
torch.select_scatter(self : Tensor,
src : Tensor,
dim : int,
index : int,
out : Tensor) -> Tensor
torch.selu(self : Tensor) -> Tensor
torch.selu_(self : Tensor) -> Tensor
torch.set_grad_enabled(val : bool) -> Tuple[]
torch.sgn(self : Tensor) -> Tensor
torch.sgn(self : Tensor,
out : Tensor) -> Tensor
torch.sigmoid(self : Tensor) -> Tensor
torch.sigmoid(self : Tensor,
out : Tensor) -> Tensor
torch.sigmoid_(self : Tensor) -> Tensor
torch.sign(self : Tensor) -> Tensor
torch.sign(self : Tensor,
out : Tensor) -> Tensor
torch.signbit(self : Tensor) -> Tensor
torch.signbit(self : Tensor,
out : Tensor) -> Tensor
torch.sin(self : Tensor) -> Tensor
torch.sin(self : Tensor,
out : Tensor) -> Tensor
torch.sin(a : int) -> float
torch.sin(a : float) -> float
torch.sin(a : complex) -> complex
torch.sin(a : number) -> number
torch.sin_(self : Tensor) -> Tensor
torch.sinc(self : Tensor) -> Tensor
torch.sinc(self : Tensor,
out : Tensor) -> Tensor
torch.sinc_(self : Tensor) -> Tensor
torch.sinh(self : Tensor) -> Tensor
torch.sinh(self : Tensor,
out : Tensor) -> Tensor
torch.sinh(a : int) -> float
torch.sinh(a : float) -> float
torch.sinh(a : complex) -> complex
torch.sinh(a : number) -> number
torch.sinh_(self : Tensor) -> Tensor
torch.slice_copy(self : Tensor,
dim : int=0,
start : Optional[int],
end : Optional[int],
step : int=1,
out : Tensor) -> Tensor
torch.slice_copy(self : Tensor,
dim : int=0,
start : Optional[int],
end : Optional[int],
step : int=1) -> Tensor
torch.slice_inverse(self : Tensor,
src : Tensor,
dim : int=0,
start : Optional[int],
end : Optional[int],
step : int=1) -> Tensor
torch.slice_scatter(self : Tensor,
src : Tensor,
dim : int=0,
start : Optional[int],
end : Optional[int],
step : int=1) -> Tensor
torch.slice_scatter(self : Tensor,
src : Tensor,
dim : int=0,
start : Optional[int],
end : Optional[int],
step : int=1,
out : Tensor) -> Tensor
torch.slogdet(self : Tensor) -> Tuple[Tensor, Tensor]
torch.slogdet(self : Tensor,
sign : Tensor,
logabsdet : Tensor) -> Tuple[Tensor, Tensor]
torch.smm(self : Tensor,
mat2 : Tensor) -> Tensor
torch.softmax(self : Tensor,
dim : int,
dtype : Optional[int]) -> Tensor
torch.softmax(self : Tensor,
dim : str,
dtype : Optional[int]) -> Tensor
torch.softmax(self : Tensor,
dim : int,
dtype : Optional[int],
out : Tensor) -> Tensor
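For example, torch.softmax normalizes along the given dim so that each slice sums to 1 (a minimal eager-mode sketch):

    import torch

    logits = torch.tensor([[1.0, 2.0, 3.0]])
    probs = torch.softmax(logits, dim=-1)   # each row sums to 1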
torch.sort(self : Tensor,
dim : int=-1,
descending : bool=False) -> Tuple[Tensor, Tensor]
torch.sort(self : Tensor,
stable : Optional[bool],
dim : int=-1,
descending : bool=False) -> Tuple[Tensor, Tensor]
torch.sort(self : Tensor,
stable : Optional[bool],
dim : int=-1,
descending : bool=False,
values : Tensor,
indices : Tensor) -> Tuple[Tensor, Tensor]
torch.sort(self : Tensor,
dim : int=-1,
descending : bool=False,
values : Tensor,
indices : Tensor) -> Tuple[Tensor, Tensor]
torch.sort(self : Tensor,
dim : str,
descending : bool=False) -> Tuple[Tensor, Tensor]
torch.sort(self : Tensor,
dim : str,
descending : bool=False,
values : Tensor,
indices : Tensor) -> Tuple[Tensor, Tensor]
torch.sort(self : Tensor,
stable : Optional[bool],
dim : str,
descending : bool=False) -> Tuple[Tensor, Tensor]
torch.sort(self : Tensor,
stable : Optional[bool],
dim : str,
descending : bool=False,
values : Tensor,
indices : Tensor) -> Tuple[Tensor, Tensor]
torch.sort(self : List[int],
reverse : bool=False) -> Tuple[]
torch.sort(self : List[float],
reverse : bool=False) -> Tuple[]
torch.sort(self : List[Tensor],
reverse : bool=False) -> Tuple[]
torch.sort(self : List[bool],
reverse : bool=False) -> Tuple[]
torch.sort(self : List[str],
reverse : bool=False) -> Tuple[]
torch.sort(self : List[t],
reverse : bool=False) -> Tuple[]
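For example, the Tensor overloads of torch.sort return both the sorted values and the original indices (a minimal eager-mode sketch):

    import torch

    x = torch.tensor([3., 1., 2.])
    values, indices = torch.sort(x)                    # tensor([1., 2., 3.]), tensor([1, 2, 0])
    values, indices = torch.sort(x, descending=True)   # largest first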
torch.sparse_bsc_tensor(ccol_indices : Tensor,
row_indices : Tensor,
values : Tensor,
size : List[int],
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]=False) -> Tensor
torch.sparse_bsc_tensor(ccol_indices : Tensor,
row_indices : Tensor,
values : Tensor,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]=False) -> Tensor
torch.sparse_bsr_tensor(crow_indices : Tensor,
col_indices : Tensor,
values : Tensor,
size : List[int],
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]=False) -> Tensor
torch.sparse_bsr_tensor(crow_indices : Tensor,
col_indices : Tensor,
values : Tensor,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]=False) -> Tensor
torch.sparse_compressed_tensor(compressed_indices : Tensor,
plain_indices : Tensor,
values : Tensor,
size : List[int],
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]=False) -> Tensor
torch.sparse_compressed_tensor(compressed_indices : Tensor,
plain_indices : Tensor,
values : Tensor,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]=False) -> Tensor
torch.sparse_coo_tensor(size : List[int],
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]=False) -> Tensor
torch.sparse_coo_tensor(indices : Tensor,
values : Tensor,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool],
is_coalesced : Optional[bool]) -> Tensor
torch.sparse_coo_tensor(indices : Tensor,
values : Tensor,
size : List[int],
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool],
is_coalesced : Optional[bool]) -> Tensor
torch.sparse_coo_tensor(size : List[int],
out : Tensor) -> Tensor
torch.sparse_csc_tensor(ccol_indices : Tensor,
row_indices : Tensor,
values : Tensor,
size : List[int],
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]=False) -> Tensor
torch.sparse_csc_tensor(ccol_indices : Tensor,
row_indices : Tensor,
values : Tensor,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]=False) -> Tensor
torch.sparse_csr_tensor(crow_indices : Tensor,
col_indices : Tensor,
values : Tensor,
size : List[int],
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]=False) -> Tensor
torch.sparse_csr_tensor(crow_indices : Tensor,
col_indices : Tensor,
values : Tensor,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]=False) -> Tensor
torch.split(self : Tensor,
split_size : int,
dim : int=0) -> List[Tensor]
torch.split(self : Tensor,
split_size : List[int],
dim : int=0) -> List[Tensor]
torch.split(self : str,
separator : Optional[str],
max : int=-1) -> List[str]
torch.split(self : Tensor,
split_sizes : List[int],
dim : int=0) -> List[Tensor]
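For example, an int split_size yields equal chunks (the last one may be smaller), while a list gives explicit sizes that must sum to the dimension length (a minimal eager-mode sketch):

    import torch

    x = torch.arange(10)
    chunks = torch.split(x, 3)          # sizes 3, 3, 3, 1
    parts = torch.split(x, [2, 3, 5])   # explicit sizes summing to 10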
torch.split_copy(self : Tensor,
split_size : int,
dim : int=0,
out : List[Tensor]) -> Tuple[]
torch.split_copy(self : Tensor,
split_size : int,
dim : int=0) -> List[Tensor]
torch.split_with_sizes(self : Tensor,
split_sizes : List[int],
dim : int=0) -> List[Tensor]
torch.split_with_sizes_copy(self : Tensor,
split_sizes : List[int],
dim : int=0) -> List[Tensor]
torch.split_with_sizes_copy(self : Tensor,
split_sizes : List[int],
dim : int=0,
out : List[Tensor]) -> Tuple[]
torch.sqrt(self : Tensor) -> Tensor
torch.sqrt(self : Tensor,
out : Tensor) -> Tensor
torch.sqrt(a : int) -> float
torch.sqrt(a : float) -> float
torch.sqrt(a : complex) -> complex
torch.sqrt(a : number) -> number
torch.sqrt_(self : Tensor) -> Tensor
torch.square(self : Tensor) -> Tensor
torch.square(self : Tensor,
out : Tensor) -> Tensor
torch.square_(self : Tensor) -> Tensor
torch.squeeze(self : Tensor) -> Tensor
torch.squeeze(self : Tensor,
dim : int) -> Tensor
torch.squeeze(self : Tensor,
dim : List[int]) -> Tensor
torch.squeeze(self : Tensor,
dim : str) -> Tensor
torch.squeeze_copy(self : Tensor,
out : Tensor) -> Tensor
torch.squeeze_copy(self : Tensor,
dim : int,
out : Tensor) -> Tensor
torch.squeeze_copy(self : Tensor,
dim : List[int],
out : Tensor) -> Tensor
torch.squeeze_copy(self : Tensor) -> Tensor
torch.squeeze_copy(self : Tensor,
dim : int) -> Tensor
torch.squeeze_copy(self : Tensor,
dim : List[int]) -> Tensor
torch.sspaddmm(self : Tensor,
mat1 : Tensor,
mat2 : Tensor,
beta : number=1,
alpha : number=1,
out : Tensor) -> Tensor
torch.sspaddmm(self : Tensor,
mat1 : Tensor,
mat2 : Tensor,
beta : number=1,
alpha : number=1) -> Tensor
torch.stack(tensors : List[Tensor],
dim : int=0) -> Tensor
torch.stack(tensors : List[Tensor],
dim : int=0,
out : Tensor) -> Tensor
torch.std(self : Tensor,
unbiased : bool=True) -> Tensor
torch.std(self : Tensor,
dim : Optional[List[int]],
unbiased : bool=True,
keepdim : bool=False) -> Tensor
torch.std(self : Tensor,
dim : Optional[List[int]],
correction : Optional[number],
keepdim : bool=False) -> Tensor
torch.std(self : Tensor,
dim : List[str],
unbiased : bool=True,
keepdim : bool=False) -> Tensor
torch.std(self : Tensor,
dim : List[str],
unbiased : bool=True,
keepdim : bool=False,
out : Tensor) -> Tensor
torch.std(self : Tensor,
dim : Optional[List[int]],
unbiased : bool=True,
keepdim : bool=False,
out : Tensor) -> Tensor
torch.std(self : Tensor,
dim : Optional[List[int]],
correction : Optional[number],
keepdim : bool=False,
out : Tensor) -> Tensor
torch.std(self : Tensor,
dim : List[str],
correction : Optional[number],
keepdim : bool=False) -> Tensor
torch.std(self : Tensor,
dim : List[str],
correction : Optional[number],
keepdim : bool=False,
out : Tensor) -> Tensor
torch.std_mean(self : Tensor,
unbiased : bool=True) -> Tuple[Tensor, Tensor]
torch.std_mean(self : Tensor,
dim : Optional[List[int]],
unbiased : bool=True,
keepdim : bool=False) -> Tuple[Tensor, Tensor]
torch.std_mean(self : Tensor,
dim : Optional[List[int]],
correction : Optional[number],
keepdim : bool=False) -> Tuple[Tensor, Tensor]
torch.std_mean(self : Tensor,
dim : List[str],
unbiased : bool=True,
keepdim : bool=False) -> Tuple[Tensor, Tensor]
torch.std_mean(self : Tensor,
dim : List[str],
correction : Optional[number],
keepdim : bool=False) -> Tuple[Tensor, Tensor]
torch.std_mean(self : Tensor,
dim : Optional[List[int]],
correction : Optional[number],
keepdim : bool=False,
out0 : Tensor,
out1 : Tensor) -> Tuple[Tensor, Tensor]
torch.sub(self : Tensor,
other : Tensor,
alpha : number=1) -> Tensor
torch.sub(self : Tensor,
other : number,
alpha : number=1) -> Tensor
torch.sub(self : Tensor,
other : Tensor,
alpha : number=1,
out : Tensor) -> Tensor
torch.sub(self : Tensor,
other : number,
alpha : number=1,
out : Tensor) -> Tensor
torch.sub(a : int,
b : int) -> int
torch.sub(a : complex,
b : complex) -> complex
torch.sub(a : float,
b : float) -> float
torch.sub(a : int,
b : complex) -> complex
torch.sub(a : complex,
b : int) -> complex
torch.sub(a : float,
b : complex) -> complex
torch.sub(a : complex,
b : float) -> complex
torch.sub(a : int,
b : float) -> float
torch.sub(a : float,
b : int) -> float
torch.sub(a : number,
b : number) -> number
torch.subtract(self : Tensor,
other : Tensor,
alpha : number=1) -> Tensor
torch.subtract(self : Tensor,
other : Tensor,
alpha : number=1,
out : Tensor) -> Tensor
torch.subtract(self : Tensor,
other : number,
alpha : number=1) -> Tensor
torch.sum(self : Tensor,
dim : Optional[List[int]],
keepdim : bool=False,
dtype : Optional[int]) -> Tensor
torch.sum(self : Tensor,
dtype : Optional[int]) -> Tensor
torch.sum(self : Tensor,
dim : List[str],
keepdim : bool=False,
dtype : Optional[int]) -> Tensor
torch.sum(self : Tensor,
dim : List[str],
keepdim : bool=False,
dtype : Optional[int],
out : Tensor) -> Tensor
torch.sum(self : Tensor,
dim : Optional[List[int]],
keepdim : bool=False,
dtype : Optional[int],
out : Tensor) -> Tensor
torch.sum(self : Tensor,
dtype : Optional[int],
out : Tensor) -> Tensor
torch.sum(self : List[int]) -> int
torch.sum(self : List[float]) -> float
torch.sum(self : List[complex]) -> complex
torch.sum(self : List[bool]) -> int
torch.svd(self : Tensor,
some : bool=True,
compute_uv : bool=True) -> Tuple[Tensor, Tensor, Tensor]
torch.svd(self : Tensor,
some : bool=True,
compute_uv : bool=True,
U : Tensor,
S : Tensor,
V : Tensor) -> Tuple[Tensor, Tensor, Tensor]
torch.swapaxes(self : Tensor,
axis0 : int,
axis1 : int) -> Tensor
torch.swapdims(self : Tensor,
dim0 : int,
dim1 : int) -> Tensor
torch.sym_constrain_range(size : number,
min : Optional[int],
max : Optional[int]) -> Tuple[]
torch.sym_constrain_range_for_size(size : number,
min : Optional[int],
max : Optional[int]) -> Tuple[]
torch.t(self : Tensor) -> Tensor
torch.t_copy(self : Tensor,
out : Tensor) -> Tensor
torch.t_copy(self : Tensor) -> Tensor
torch.take(self : Tensor,
index : Tensor) -> Tensor
torch.take(self : Tensor,
index : Tensor,
out : Tensor) -> Tensor
torch.take_along_dim(self : Tensor,
indices : Tensor,
dim : Optional[int]) -> Tensor
torch.take_along_dim(self : Tensor,
indices : Tensor,
dim : Optional[int],
out : Tensor) -> Tensor
torch.tan(self : Tensor) -> Tensor
torch.tan(self : Tensor,
out : Tensor) -> Tensor
torch.tan(a : int) -> float
torch.tan(a : float) -> float
torch.tan(a : complex) -> complex
torch.tan(a : number) -> number
torch.tan_(self : Tensor) -> Tensor
torch.tanh(self : Tensor) -> Tensor
torch.tanh(self : Tensor,
out : Tensor) -> Tensor
torch.tanh(a : int) -> float
torch.tanh(a : float) -> float
torch.tanh(a : complex) -> complex
torch.tanh(a : number) -> number
torch.tanh_(self : Tensor) -> Tensor
torch.tensor(t : bool,
dtype : Optional[int],
device : Optional[Device],
requires_grad : bool=False) -> Tensor
torch.tensor(t : float,
dtype : Optional[int],
device : Optional[Device],
requires_grad : bool=False) -> Tensor
torch.tensor(t : int,
dtype : Optional[int],
device : Optional[Device],
requires_grad : bool=False) -> Tensor
torch.tensor(t : complex,
dtype : Optional[int],
device : Optional[Device],
requires_grad : bool=False) -> Tensor
torch.tensor(data : List[t],
dtype : Optional[int],
device : Optional[Device],
requires_grad : bool=False) -> Tensor
torch.tensor_split(self : Tensor,
sections : int,
dim : int=0) -> List[Tensor]
torch.tensor_split(self : Tensor,
indices : List[int],
dim : int=0) -> List[Tensor]
torch.tensor_split(self : Tensor,
tensor_indices_or_sections : Tensor,
dim : int=0) -> List[Tensor]
torch.threshold(self : Tensor,
threshold : number,
value : number) -> Tensor
torch.threshold(self : Tensor,
threshold : number,
value : number,
out : Tensor) -> Tensor
torch.threshold_(self : Tensor,
threshold : number,
value : number) -> Tensor
torch.tile(self : Tensor,
dims : List[int]) -> Tensor
torch.topk(self : Tensor,
k : int,
dim : int=-1,
largest : bool=True,
sorted : bool=True) -> Tuple[Tensor, Tensor]
torch.topk(self : Tensor,
k : int,
dim : int=-1,
largest : bool=True,
sorted : bool=True,
values : Tensor,
indices : Tensor) -> Tuple[Tensor, Tensor]
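For example, torch.topk returns the k largest values (sorted by default) together with their indices (a minimal eager-mode sketch):

    import torch

    x = torch.tensor([1., 5., 3., 4.])
    values, indices = torch.topk(x, k=2)   # tensor([5., 4.]), tensor([1, 3])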
torch.trace(self : Tensor) -> Tensor
torch.trace(self : Tensor,
out : Tensor) -> Tensor
torch.transpose(self : Tensor,
dim0 : int,
dim1 : int) -> Tensor
torch.transpose(self : Tensor,
dim0 : str,
dim1 : str) -> Tensor
torch.transpose_copy(self : Tensor,
dim0 : int,
dim1 : int,
out : Tensor) -> Tensor
torch.transpose_copy(self : Tensor,
dim0 : int,
dim1 : int) -> Tensor
torch.trapezoid(y : Tensor,
x : Tensor,
dim : int=-1) -> Tensor
torch.trapezoid(y : Tensor,
dx : number=1,
dim : int=-1) -> Tensor
torch.trapz(y : Tensor,
x : Tensor,
dim : int=-1) -> Tensor
torch.trapz(y : Tensor,
dx : float=1.0,
dim : int=-1) -> Tensor
torch.triangular_solve(self : Tensor,
A : Tensor,
upper : bool=True,
transpose : bool=False,
unitriangular : bool=False) -> Tuple[Tensor, Tensor]
torch.triangular_solve(self : Tensor,
A : Tensor,
upper : bool=True,
transpose : bool=False,
unitriangular : bool=False,
X : Tensor,
M : Tensor) -> Tuple[Tensor, Tensor]
torch.tril(self : Tensor,
diagonal : int=0) -> Tensor
torch.tril(self : Tensor,
diagonal : int=0,
out : Tensor) -> Tensor
torch.tril_indices(row : int,
col : int,
offset : int=0,
dtype : Optional[int]=4,
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.tril_indices(row : int,
col : int,
offset : int=0,
out : Tensor) -> Tensor
torch.triplet_margin_loss(anchor : Tensor,
positive : Tensor,
negative : Tensor,
margin : float=1.0,
p : float=2.0,
eps : float=1e-06,
swap : bool=False,
reduction : int=1) -> Tensor
torch.triu(self : Tensor,
diagonal : int=0) -> Tensor
torch.triu(self : Tensor,
diagonal : int=0,
out : Tensor) -> Tensor
torch.triu_indices(row : int,
col : int,
offset : int=0,
dtype : Optional[int]=4,
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.triu_indices(row : int,
col : int,
offset : int=0,
out : Tensor) -> Tensor
torch.true_divide(self : Tensor,
other : Tensor) -> Tensor
torch.true_divide(self : Tensor,
other : number) -> Tensor
torch.true_divide(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.trunc(self : Tensor) -> Tensor
torch.trunc(self : Tensor,
out : Tensor) -> Tensor
torch.trunc_(self : Tensor) -> Tensor
torch.unbind(self : Tensor,
dim : int=0) -> List[Tensor]
torch.unbind(self : Tensor,
dim : str) -> List[Tensor]
torch.unbind_copy(self : Tensor,
dim : int=0,
out : List[Tensor]) -> Tuple[]
torch.unbind_copy(self : Tensor,
dim : int=0) -> List[Tensor]
torch.unflatten(self : Tensor,
dim : int,
sizes : List[int]) -> Tensor
torch.unflatten(self : Tensor,
dim : str,
sizes : List[int],
names : List[str]) -> Tensor
torch.unfold_copy(self : Tensor,
dimension : int,
size : int,
step : int) -> Tensor
torch.unfold_copy(self : Tensor,
dimension : int,
size : int,
step : int,
out : Tensor) -> Tensor
torch.unsafe_chunk(self : Tensor,
chunks : int,
dim : int=0) -> List[Tensor]
torch.unsafe_split(self : Tensor,
split_size : int,
dim : int=0) -> List[Tensor]
torch.unsafe_split(self : Tensor,
split_size : int,
dim : int=0,
out : List[Tensor]) -> Tuple[]
torch.unsafe_split_with_sizes(self : Tensor,
split_sizes : List[int],
dim : int=0) -> List[Tensor]
torch.unsafe_split_with_sizes(self : Tensor,
split_sizes : List[int],
dim : int=0,
out : List[Tensor]) -> Tuple[]
torch.unsqueeze(self : Tensor,
dim : int) -> Tensor
torch.unsqueeze_copy(self : Tensor,
dim : int,
out : Tensor) -> Tensor
torch.unsqueeze_copy(self : Tensor,
dim : int) -> Tensor
torch.values_copy(self : Tensor,
out : Tensor) -> Tensor
torch.values_copy(self : Tensor) -> Tensor
torch.vander(x : Tensor,
N : Optional[int],
increasing : bool=False) -> Tensor
torch.var(self : Tensor,
unbiased : bool=True) -> Tensor
torch.var(self : Tensor,
dim : Optional[List[int]],
unbiased : bool=True,
keepdim : bool=False) -> Tensor
torch.var(self : Tensor,
dim : Optional[List[int]],
correction : Optional[number],
keepdim : bool=False) -> Tensor
torch.var(self : Tensor,
dim : List[str],
unbiased : bool=True,
keepdim : bool=False) -> Tensor
torch.var(self : Tensor,
dim : List[str],
unbiased : bool=True,
keepdim : bool=False,
out : Tensor) -> Tensor
torch.var(self : Tensor,
dim : Optional[List[int]],
unbiased : bool=True,
keepdim : bool=False,
out : Tensor) -> Tensor
torch.var(self : Tensor,
dim : Optional[List[int]],
correction : Optional[number],
keepdim : bool=False,
out : Tensor) -> Tensor
torch.var(self : Tensor,
dim : List[str],
correction : Optional[number],
keepdim : bool=False) -> Tensor
torch.var(self : Tensor,
dim : List[str],
correction : Optional[number],
keepdim : bool=False,
out : Tensor) -> Tensor
torch.var_mean(self : Tensor,
unbiased : bool=True) -> Tuple[Tensor, Tensor]
torch.var_mean(self : Tensor,
dim : Optional[List[int]],
unbiased : bool=True,
keepdim : bool=False) -> Tuple[Tensor, Tensor]
torch.var_mean(self : Tensor,
dim : Optional[List[int]],
correction : Optional[number],
keepdim : bool=False) -> Tuple[Tensor, Tensor]
torch.var_mean(self : Tensor,
dim : List[str],
unbiased : bool=True,
keepdim : bool=False) -> Tuple[Tensor, Tensor]
torch.var_mean(self : Tensor,
dim : List[str],
correction : Optional[number],
keepdim : bool=False) -> Tuple[Tensor, Tensor]
torch.var_mean(self : Tensor,
dim : Optional[List[int]],
correction : Optional[number],
keepdim : bool=False,
out0 : Tensor,
out1 : Tensor) -> Tuple[Tensor, Tensor]
torch.vdot(self : Tensor,
other : Tensor) -> Tensor
torch.vdot(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.view_as_complex(self : Tensor) -> Tensor
torch.view_as_complex_copy(self : Tensor,
out : Tensor) -> Tensor
torch.view_as_complex_copy(self : Tensor) -> Tensor
torch.view_as_real(self : Tensor) -> Tensor
torch.view_as_real_copy(self : Tensor,
out : Tensor) -> Tensor
torch.view_as_real_copy(self : Tensor) -> Tensor
torch.view_copy(self : Tensor,
size : List[int]) -> Tensor
torch.view_copy(self : Tensor,
size : List[int],
out : Tensor) -> Tensor
torch.view_copy(self : Tensor,
dtype : int,
out : Tensor) -> Tensor
torch.view_copy(self : Tensor,
dtype : int) -> Tensor
torch.vsplit(self : Tensor,
sections : int) -> List[Tensor]
torch.vsplit(self : Tensor,
indices : List[int]) -> List[Tensor]
torch.vstack(tensors : List[Tensor]) -> Tensor
torch.vstack(tensors : List[Tensor],
out : Tensor) -> Tensor
torch.wait(self : Future[t]) -> t
torch.where(condition : Tensor,
self : Tensor,
other : Tensor) -> Tensor
torch.where(condition : Tensor,
self : Tensor,
other : number) -> Tensor
torch.where(condition : Tensor,
self : number,
other : Tensor) -> Tensor
torch.where(condition : Tensor,
self : number,
other : number) -> Tensor
torch.where(condition : Tensor) -> List[Tensor]
torch.where(condition : Tensor,
self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
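For example, the three-argument form of torch.where selects element-wise between two sources, and the single-argument form returns the indices of nonzero entries, one tensor per dimension (a minimal eager-mode sketch):

    import torch

    x = torch.tensor([-1.0, 0.5, 2.0])
    print(torch.where(x > 0, x, torch.zeros_like(x)))   # tensor([0.0000, 0.5000, 2.0000])
    print(torch.where(x > 0))                           # indices of the positive entries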
torch.xlogy(self : Tensor,
other : Tensor) -> Tensor
torch.xlogy(self : Tensor,
other : number) -> Tensor
torch.xlogy(self : number,
other : Tensor) -> Tensor
torch.xlogy(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch.xlogy(self : number,
other : Tensor,
out : Tensor) -> Tensor
torch.xlogy(self : Tensor,
other : number,
out : Tensor) -> Tensor
torch.xlogy_(self : Tensor,
other : Tensor) -> Tensor
torch.xlogy_(self : Tensor,
other : number) -> Tensor
torch.zero_(self : Tensor) -> Tensor
torch.zeros(size : List[int],
names : Optional[List[str]],
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.zeros(size : List[int],
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch.zeros(size : List[int],
names : Optional[List[str]],
out : Tensor) -> Tensor
torch.zeros(size : List[int],
out : Tensor) -> Tensor
torch.zeros_like(self : Tensor,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool],
memory_format : Optional[int]) -> Tensor
torch.zeros_like(self : Tensor,
memory_format : Optional[int],
out : Tensor) -> Tensor
torch._C._nn.adaptive_avg_pool2d(self : Tensor,
output_size : List[int]) -> Tensor
torch._C._nn.adaptive_avg_pool2d(self : Tensor,
output_size : List[int],
out : Tensor) -> Tensor
torch._C._nn.adaptive_avg_pool3d(self : Tensor,
output_size : List[int]) -> Tensor
torch._C._nn.adaptive_avg_pool3d(self : Tensor,
output_size : List[int],
out : Tensor) -> Tensor
torch._C._nn.adaptive_max_pool2d(self : Tensor,
output_size : List[int]) -> Tuple[Tensor, Tensor]
torch._C._nn.adaptive_max_pool2d(self : Tensor,
output_size : List[int],
out : Tensor,
indices : Tensor) -> Tuple[Tensor, Tensor]
torch._C._nn.adaptive_max_pool3d(self : Tensor,
output_size : List[int]) -> Tuple[Tensor, Tensor]
torch._C._nn.adaptive_max_pool3d(self : Tensor,
output_size : List[int],
out : Tensor,
indices : Tensor) -> Tuple[Tensor, Tensor]
torch._C._nn.avg_pool2d(self : Tensor,
kernel_size : List[int],
stride : List[int]=[],
padding : List[int]=[0, 0],
ceil_mode : bool=False,
count_include_pad : bool=True,
divisor_override : Optional[int]) -> Tensor
torch._C._nn.avg_pool2d(self : Tensor,
kernel_size : List[int],
stride : List[int]=[],
padding : List[int]=[0, 0],
ceil_mode : bool=False,
count_include_pad : bool=True,
divisor_override : Optional[int],
out : Tensor) -> Tensor
torch._C._nn.avg_pool3d(self : Tensor,
kernel_size : List[int],
stride : List[int]=[],
padding : List[int]=[0, 0, 0],
ceil_mode : bool=False,
count_include_pad : bool=True,
divisor_override : Optional[int]) -> Tensor
torch._C._nn.avg_pool3d(self : Tensor,
kernel_size : List[int],
stride : List[int]=[],
padding : List[int]=[0, 0, 0],
ceil_mode : bool=False,
count_include_pad : bool=True,
divisor_override : Optional[int],
out : Tensor) -> Tensor
torch._C._nn.binary_cross_entropy(self : Tensor,
target : Tensor,
weight : Optional[Tensor],
reduction : int=1) -> Tensor
torch._C._nn.binary_cross_entropy(self : Tensor,
target : Tensor,
weight : Optional[Tensor],
reduction : int=1,
out : Tensor) -> Tensor
torch._C._nn.col2im(self : Tensor,
output_size : List[int],
kernel_size : List[int],
dilation : List[int],
padding : List[int],
stride : List[int]) -> Tensor
torch._C._nn.col2im(self : Tensor,
output_size : List[int],
kernel_size : List[int],
dilation : List[int],
padding : List[int],
stride : List[int],
out : Tensor) -> Tensor
torch._C._nn.conv_depthwise3d(self : Tensor,
weight : Tensor,
kernel_size : List[int],
bias : Optional[Tensor],
stride : List[int],
padding : List[int],
dilation : List[int],
out : Tensor) -> Tensor
torch._C._nn.conv_depthwise3d(self : Tensor,
weight : Tensor,
kernel_size : List[int],
bias : Optional[Tensor],
stride : List[int],
padding : List[int],
dilation : List[int]) -> Tensor
torch._C._nn.cross_entropy_loss(self : Tensor,
target : Tensor,
weight : Optional[Tensor],
reduction : int=1,
ignore_index : int=-100,
label_smoothing : float=0.0) -> Tensor
torch._C._nn.elu(self : Tensor,
alpha : number=1,
scale : number=1,
input_scale : number=1) -> Tensor
torch._C._nn.elu(self : Tensor,
alpha : number=1,
scale : number=1,
input_scale : number=1,
out : Tensor) -> Tensor
torch._C._nn.elu_(self : Tensor,
alpha : number=1,
scale : number=1,
input_scale : number=1) -> Tensor
torch._C._nn.flatten_dense_tensors(tensors : List[Tensor]) -> Tensor
torch._C._nn.fractional_max_pool2d(self : Tensor,
kernel_size : List[int],
output_size : List[int],
random_samples : Tensor) -> Tuple[Tensor, Tensor]
torch._C._nn.fractional_max_pool2d(self : Tensor,
kernel_size : List[int],
output_size : List[int],
random_samples : Tensor,
output : Tensor,
indices : Tensor) -> Tuple[Tensor, Tensor]
torch._C._nn.fractional_max_pool3d(self : Tensor,
kernel_size : List[int],
output_size : List[int],
random_samples : Tensor) -> Tuple[Tensor, Tensor]
torch._C._nn.fractional_max_pool3d(self : Tensor,
kernel_size : List[int],
output_size : List[int],
random_samples : Tensor,
output : Tensor,
indices : Tensor) -> Tuple[Tensor, Tensor]
torch._C._nn.gelu(self : Tensor,
approximate : str="none") -> Tensor
torch._C._nn.gelu(self : Tensor,
approximate : str="none",
out : Tensor) -> Tensor
torch._C._nn.gelu_(self : Tensor,
approximate : str="none") -> Tensor
torch._C._nn.glu(self : Tensor,
dim : int=-1) -> Tensor
torch._C._nn.glu(self : Tensor,
dim : int=-1,
out : Tensor) -> Tensor
torch._C._nn.hardsigmoid(self : Tensor) -> Tensor
torch._C._nn.hardsigmoid(self : Tensor,
out : Tensor) -> Tensor
torch._C._nn.hardsigmoid_(self : Tensor) -> Tensor
torch._C._nn.hardswish(self : Tensor) -> Tensor
torch._C._nn.hardswish(self : Tensor,
out : Tensor) -> Tensor
torch._C._nn.hardswish_(self : Tensor) -> Tensor
torch._C._nn.hardtanh(self : Tensor,
min_val : number=-1,
max_val : number=1) -> Tensor
torch._C._nn.hardtanh(self : Tensor,
min_val : number=-1,
max_val : number=1,
out : Tensor) -> Tensor
torch._C._nn.hardtanh_(self : Tensor,
min_val : number=-1,
max_val : number=1) -> Tensor
torch._C._nn.huber_loss(self : Tensor,
target : Tensor,
reduction : int=1,
delta : float=1.0) -> Tensor
torch._C._nn.huber_loss(self : Tensor,
target : Tensor,
reduction : int=1,
delta : float=1.0,
out : Tensor) -> Tensor
torch._C._nn.im2col(self : Tensor,
kernel_size : List[int],
dilation : List[int],
padding : List[int],
stride : List[int]) -> Tensor
torch._C._nn.im2col(self : Tensor,
kernel_size : List[int],
dilation : List[int],
padding : List[int],
stride : List[int],
out : Tensor) -> Tensor
torch._C._nn.l1_loss(self : Tensor,
target : Tensor,
reduction : int=1) -> Tensor
torch._C._nn.leaky_relu(self : Tensor,
negative_slope : number=0.01) -> Tensor
torch._C._nn.leaky_relu(self : Tensor,
negative_slope : number=0.01,
out : Tensor) -> Tensor
torch._C._nn.leaky_relu_(self : Tensor,
negative_slope : number=0.01) -> Tensor
torch._C._nn.linear(input : Tensor,
weight : Tensor,
bias : Optional[Tensor]) -> Tensor
torch._C._nn.linear(input : Tensor,
weight : Tensor,
bias : Optional[Tensor],
out : Tensor) -> Tensor
torch._C._nn.log_sigmoid(self : Tensor) -> Tensor
torch._C._nn.log_sigmoid(self : Tensor,
out : Tensor) -> Tensor
torch._C._nn.max_pool2d_with_indices(self : Tensor,
kernel_size : List[int],
stride : List[int]=[],
padding : List[int]=[0, 0],
dilation : List[int]=[1, 1],
ceil_mode : bool=False) -> Tuple[Tensor, Tensor]
torch._C._nn.max_pool2d_with_indices(self : Tensor,
kernel_size : List[int],
stride : List[int]=[],
padding : List[int]=[0, 0],
dilation : List[int]=[1, 1],
ceil_mode : bool=False,
out : Tensor,
indices : Tensor) -> Tuple[Tensor, Tensor]
torch._C._nn.max_pool3d_with_indices(self : Tensor,
kernel_size : List[int],
stride : List[int]=[],
padding : List[int]=[0, 0, 0],
dilation : List[int]=[1, 1, 1],
ceil_mode : bool=False) -> Tuple[Tensor, Tensor]
torch._C._nn.max_pool3d_with_indices(self : Tensor,
kernel_size : List[int],
stride : List[int]=[],
padding : List[int]=[0, 0, 0],
dilation : List[int]=[1, 1, 1],
ceil_mode : bool=False,
out : Tensor,
indices : Tensor) -> Tuple[Tensor, Tensor]
torch._C._nn.max_unpool2d(self : Tensor,
indices : Tensor,
output_size : List[int]) -> Tensor
torch._C._nn.max_unpool2d(self : Tensor,
indices : Tensor,
output_size : List[int],
out : Tensor) -> Tensor
torch._C._nn.max_unpool3d(self : Tensor,
indices : Tensor,
output_size : List[int],
stride : List[int],
padding : List[int]) -> Tensor
torch._C._nn.max_unpool3d(self : Tensor,
indices : Tensor,
output_size : List[int],
stride : List[int],
padding : List[int],
out : Tensor) -> Tensor
torch._C._nn.mish(self : Tensor) -> Tensor
torch._C._nn.mish(self : Tensor,
out : Tensor) -> Tensor
torch._C._nn.mish_(self : Tensor) -> Tensor
torch._C._nn.mkldnn_linear(self : Tensor,
weight : Tensor,
bias : Optional[Tensor],
out : Tensor) -> Tensor
torch._C._nn.mkldnn_linear(self : Tensor,
weight : Tensor,
bias : Optional[Tensor]) -> Tensor
torch._C._nn.mkldnn_reorder_conv2d_weight(self : Tensor,
padding : List[int]=[0, 0],
stride : List[int]=[1, 1],
dilation : List[int]=[1, 1],
groups : int=1,
input_size : Optional[List[int]],
out : Tensor) -> Tensor
torch._C._nn.mkldnn_reorder_conv2d_weight(self : Tensor,
padding : List[int]=[0, 0],
stride : List[int]=[1, 1],
dilation : List[int]=[1, 1],
groups : int=1,
input_size : Optional[List[int]]) -> Tensor
torch._C._nn.mkldnn_reorder_conv3d_weight(self : Tensor,
padding : List[int]=[0, 0, 0],
stride : List[int]=[1, 1, 1],
dilation : List[int]=[1, 1, 1],
groups : int=1,
input_size : Optional[List[int]],
out : Tensor) -> Tensor
torch._C._nn.mkldnn_reorder_conv3d_weight(self : Tensor,
padding : List[int]=[0, 0, 0],
stride : List[int]=[1, 1, 1],
dilation : List[int]=[1, 1, 1],
groups : int=1,
input_size : Optional[List[int]]) -> Tensor
torch._C._nn.mse_loss(self : Tensor,
target : Tensor,
reduction : int=1) -> Tensor
torch._C._nn.mse_loss(self : Tensor,
target : Tensor,
reduction : int=1,
out : Tensor) -> Tensor
torch._C._nn.multi_margin_loss(self : Tensor,
target : Tensor,
p : number=1,
margin : number=1,
weight : Optional[Tensor],
reduction : int=1) -> Tensor
torch._C._nn.multi_margin_loss(self : Tensor,
target : Tensor,
p : number=1,
margin : number=1,
weight : Optional[Tensor],
reduction : int=1,
out : Tensor) -> Tensor
torch._C._nn.multilabel_margin_loss(self : Tensor,
target : Tensor,
reduction : int=1) -> Tensor
torch._C._nn.multilabel_margin_loss(self : Tensor,
target : Tensor,
reduction : int=1,
out : Tensor) -> Tensor
torch._C._nn.nll_loss(self : Tensor,
target : Tensor,
weight : Optional[Tensor],
reduction : int=1,
ignore_index : int=-100) -> Tensor
torch._C._nn.nll_loss(self : Tensor,
target : Tensor,
weight : Optional[Tensor],
reduction : int=1,
ignore_index : int=-100,
out : Tensor) -> Tensor
torch._C._nn.nll_loss2d(self : Tensor,
target : Tensor,
weight : Optional[Tensor],
reduction : int=1,
ignore_index : int=-100) -> Tensor
torch._C._nn.nll_loss2d(self : Tensor,
target : Tensor,
weight : Optional[Tensor],
reduction : int=1,
ignore_index : int=-100,
out : Tensor) -> Tensor
torch._C._nn.nll_loss_nd(self : Tensor,
target : Tensor,
weight : Optional[Tensor],
reduction : int=1,
ignore_index : int=-100) -> Tensor
torch._C._nn.one_hot(self : Tensor,
num_classes : int=-1) -> Tensor
torch._C._nn.pad(self : Tensor,
pad : List[int],
mode : str="constant",
value : Optional[float]) -> Tensor
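This entry backs the public torch.nn.functional.pad (a minimal eager-mode sketch; the padding amounts are illustrative):

    import torch
    import torch.nn.functional as F

    x = torch.ones(1, 1, 3)
    y = F.pad(x, [1, 1], mode="constant", value=0.0)   # pads the last dim to shape (1, 1, 5)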
torch._C._nn.pad_sequence(sequences : List[Tensor],
batch_first : bool=False,
padding_value : float=0.0,
padding_side : str="right") -> Tensor
torch._C._nn.reflection_pad1d(self : Tensor,
padding : List[int]) -> Tensor
torch._C._nn.reflection_pad1d(self : Tensor,
padding : List[int],
out : Tensor) -> Tensor
torch._C._nn.reflection_pad2d(self : Tensor,
padding : List[int]) -> Tensor
torch._C._nn.reflection_pad2d(self : Tensor,
padding : List[int],
out : Tensor) -> Tensor
torch._C._nn.reflection_pad3d(self : Tensor,
padding : List[int]) -> Tensor
torch._C._nn.reflection_pad3d(self : Tensor,
padding : List[int],
out : Tensor) -> Tensor
torch._C._nn.relu6(self : Tensor) -> Tensor
torch._C._nn.relu6_(self : Tensor) -> Tensor
torch._C._nn.replication_pad1d(self : Tensor,
padding : List[int]) -> Tensor
torch._C._nn.replication_pad1d(self : Tensor,
padding : List[int],
out : Tensor) -> Tensor
torch._C._nn.replication_pad2d(self : Tensor,
padding : List[int]) -> Tensor
torch._C._nn.replication_pad2d(self : Tensor,
padding : List[int],
out : Tensor) -> Tensor
torch._C._nn.replication_pad3d(self : Tensor,
padding : List[int]) -> Tensor
torch._C._nn.replication_pad3d(self : Tensor,
padding : List[int],
out : Tensor) -> Tensor
torch._C._nn.rrelu_with_noise(self : Tensor,
noise : Tensor,
lower : number=0.125,
upper : number=0.3333333333333333,
training : bool=False,
generator : Optional[Generator]) -> Tensor
torch._C._nn.rrelu_with_noise(self : Tensor,
noise : Tensor,
lower : number=0.125,
upper : number=0.3333333333333333,
training : bool=False,
generator : Optional[Generator],
out : Tensor) -> Tensor
torch._C._nn.rrelu_with_noise_(self : Tensor,
noise : Tensor,
lower : number=0.125,
upper : number=0.3333333333333333,
training : bool=False,
generator : Optional[Generator]) -> Tensor
torch._C._nn.scaled_dot_product_attention(query : Tensor,
key : Tensor,
value : Tensor,
attn_mask : Optional[Tensor],
dropout_p : float=0.0,
is_causal : bool=False,
scale : Optional[float],
enable_gqa : bool=False) -> Tensor
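This entry backs the public torch.nn.functional.scaled_dot_product_attention (a minimal eager-mode sketch; the (batch, heads, sequence, head_dim) shapes are illustrative):

    import torch
    import torch.nn.functional as F

    q = torch.randn(2, 4, 8, 16)
    k = torch.randn(2, 4, 8, 16)
    v = torch.randn(2, 4, 8, 16)
    out = F.scaled_dot_product_attention(q, k, v, is_causal=True)   # shape (2, 4, 8, 16)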
torch._C._nn.silu(self : Tensor) -> Tensor
torch._C._nn.silu(self : Tensor,
out : Tensor) -> Tensor
torch._C._nn.silu_(self : Tensor) -> Tensor
torch._C._nn.slow_conv3d(self : Tensor,
weight : Tensor,
kernel_size : List[int],
bias : Optional[Tensor],
stride : List[int]=[1, 1, 1],
padding : List[int]=[0, 0, 0]) -> Tensor
torch._C._nn.slow_conv3d(self : Tensor,
weight : Tensor,
kernel_size : List[int],
bias : Optional[Tensor],
stride : List[int]=[1, 1, 1],
padding : List[int]=[0, 0, 0],
out : Tensor) -> Tensor
torch._C._nn.slow_conv_dilated2d(self : Tensor,
weight : Tensor,
kernel_size : List[int],
bias : Optional[Tensor],
stride : List[int]=[1, 1],
padding : List[int]=[0, 0],
dilation : List[int]=[1, 1]) -> Tensor
torch._C._nn.slow_conv_dilated2d(self : Tensor,
weight : Tensor,
kernel_size : List[int],
bias : Optional[Tensor],
stride : List[int]=[1, 1],
padding : List[int]=[0, 0],
dilation : List[int]=[1, 1],
out : Tensor) -> Tensor
torch._C._nn.slow_conv_dilated3d(self : Tensor,
weight : Tensor,
kernel_size : List[int],
bias : Optional[Tensor],
stride : List[int]=[1, 1, 1],
padding : List[int]=[0, 0, 0],
dilation : List[int]=[1, 1, 1]) -> Tensor
torch._C._nn.slow_conv_dilated3d(self : Tensor,
weight : Tensor,
kernel_size : List[int],
bias : Optional[Tensor],
stride : List[int]=[1, 1, 1],
padding : List[int]=[0, 0, 0],
dilation : List[int]=[1, 1, 1],
out : Tensor) -> Tensor
torch._C._nn.slow_conv_transpose2d(self : Tensor,
weight : Tensor,
kernel_size : List[int],
bias : Optional[Tensor],
stride : List[int]=[1, 1],
padding : List[int]=[0, 0],
output_padding : List[int]=[0, 0],
dilation : List[int]=[1, 1]) -> Tensor
torch._C._nn.slow_conv_transpose2d(self : Tensor,
weight : Tensor,
kernel_size : List[int],
bias : Optional[Tensor],
stride : List[int]=[1, 1],
padding : List[int]=[0, 0],
output_padding : List[int]=[0, 0],
dilation : List[int]=[1, 1],
out : Tensor) -> Tensor
torch._C._nn.slow_conv_transpose3d(self : Tensor,
weight : Tensor,
kernel_size : List[int],
bias : Optional[Tensor],
stride : List[int]=[1, 1, 1],
padding : List[int]=[0, 0, 0],
output_padding : List[int]=[0, 0, 0],
dilation : List[int]=[1, 1, 1]) -> Tensor
torch._C._nn.slow_conv_transpose3d(self : Tensor,
weight : Tensor,
kernel_size : List[int],
bias : Optional[Tensor],
stride : List[int]=[1, 1, 1],
padding : List[int]=[0, 0, 0],
output_padding : List[int]=[0, 0, 0],
dilation : List[int]=[1, 1, 1],
out : Tensor) -> Tensor
torch._C._nn.smooth_l1_loss(self : Tensor,
target : Tensor,
reduction : int=1,
beta : float=1.0) -> Tensor
torch._C._nn.smooth_l1_loss(self : Tensor,
target : Tensor,
reduction : int=1,
beta : float=1.0,
out : Tensor) -> Tensor
torch._C._nn.soft_margin_loss(self : Tensor,
target : Tensor,
reduction : int=1) -> Tensor
torch._C._nn.soft_margin_loss(self : Tensor,
target : Tensor,
reduction : int=1,
out : Tensor) -> Tensor
torch._C._nn.softplus(self : Tensor,
beta : number=1,
threshold : number=20) -> Tensor
torch._C._nn.softplus(self : Tensor,
beta : number=1,
threshold : number=20,
out : Tensor) -> Tensor
torch._C._nn.softshrink(self : Tensor,
lambd : number=0.5) -> Tensor
torch._C._nn.softshrink(self : Tensor,
lambd : number=0.5,
out : Tensor) -> Tensor
torch._C._nn.thnn_conv2d(self : Tensor,
weight : Tensor,
kernel_size : List[int],
bias : Optional[Tensor],
stride : List[int]=[1, 1],
padding : List[int]=[0, 0]) -> Tensor
torch._C._nn.thnn_conv2d(self : Tensor,
weight : Tensor,
kernel_size : List[int],
bias : Optional[Tensor],
stride : List[int]=[1, 1],
padding : List[int]=[0, 0],
out : Tensor) -> Tensor
torch._C._nn.unflatten_dense_tensors(flat : Tensor,
tensors : List[Tensor]) -> List[Tensor]
torch._C._nn.upsample_bicubic2d(self : Tensor,
output_size : List[int],
align_corners : bool,
scales_h : Optional[float],
scales_w : Optional[float]) -> Tensor
torch._C._nn.upsample_bicubic2d(input : Tensor,
output_size : Optional[List[int]],
align_corners : bool,
scale_factors : Optional[List[float]]) -> Tensor
torch._C._nn.upsample_bicubic2d(self : Tensor,
output_size : List[int],
align_corners : bool,
scales_h : Optional[float],
scales_w : Optional[float],
out : Tensor) -> Tensor
torch._C._nn.upsample_bilinear2d(self : Tensor,
output_size : List[int],
align_corners : bool,
scales_h : Optional[float],
scales_w : Optional[float]) -> Tensor
torch._C._nn.upsample_bilinear2d(input : Tensor,
output_size : Optional[List[int]],
align_corners : bool,
scale_factors : Optional[List[float]]) -> Tensor
torch._C._nn.upsample_bilinear2d(self : Tensor,
output_size : List[int],
align_corners : bool,
scales_h : Optional[float],
scales_w : Optional[float],
out : Tensor) -> Tensor
torch._C._nn.upsample_bilinear2d(input : Tensor,
output_size : Optional[List[int]],
align_corners : bool,
scale_factors : Optional[List[float]],
out : Tensor) -> Tensor
torch._C._nn.upsample_linear1d(self : Tensor,
output_size : List[int],
align_corners : bool,
scales : Optional[float]) -> Tensor
torch._C._nn.upsample_linear1d(input : Tensor,
output_size : Optional[List[int]],
align_corners : bool,
scale_factors : Optional[List[float]]) -> Tensor
torch._C._nn.upsample_linear1d(self : Tensor,
output_size : List[int],
align_corners : bool,
scales : Optional[float],
out : Tensor) -> Tensor
torch._C._nn.upsample_nearest1d(self : Tensor,
output_size : List[int],
scales : Optional[float]) -> Tensor
torch._C._nn.upsample_nearest1d(input : Tensor,
output_size : Optional[List[int]],
scale_factors : Optional[List[float]]) -> Tensor
torch._C._nn.upsample_nearest1d(self : Tensor,
output_size : List[int],
scales : Optional[float],
out : Tensor) -> Tensor
torch._C._nn.upsample_nearest2d(self : Tensor,
output_size : List[int],
scales_h : Optional[float],
scales_w : Optional[float]) -> Tensor
torch._C._nn.upsample_nearest2d(input : Tensor,
output_size : Optional[List[int]],
scale_factors : Optional[List[float]]) -> Tensor
torch._C._nn.upsample_nearest2d(self : Tensor,
output_size : List[int],
scales_h : Optional[float],
scales_w : Optional[float],
out : Tensor) -> Tensor
torch._C._nn.upsample_nearest2d(input : Tensor,
output_size : Optional[List[int]],
scale_factors : Optional[List[float]],
out : Tensor) -> Tensor
torch._C._nn.upsample_nearest3d(self : Tensor,
output_size : List[int],
scales_d : Optional[float],
scales_h : Optional[float],
scales_w : Optional[float]) -> Tensor
torch._C._nn.upsample_nearest3d(input : Tensor,
output_size : Optional[List[int]],
scale_factors : Optional[List[float]]) -> Tensor
torch._C._nn.upsample_nearest3d(self : Tensor,
output_size : List[int],
scales_d : Optional[float],
scales_h : Optional[float],
scales_w : Optional[float],
out : Tensor) -> Tensor
torch._C._nn.upsample_trilinear3d(self : Tensor,
output_size : List[int],
align_corners : bool,
scales_d : Optional[float],
scales_h : Optional[float],
scales_w : Optional[float]) -> Tensor
torch._C._nn.upsample_trilinear3d(input : Tensor,
output_size : Optional[List[int]],
align_corners : bool,
scale_factors : Optional[List[float]]) -> Tensor
torch._C._nn.upsample_trilinear3d(self : Tensor,
output_size : List[int],
align_corners : bool,
scales_d : Optional[float],
scales_h : Optional[float],
scales_w : Optional[float],
out : Tensor) -> Tensor
torch._C._fft.fft_fft(self : Tensor,
n : Optional[int],
dim : int=-1,
norm : Optional[str]) -> Tensor
torch._C._fft.fft_fft(self : Tensor,
n : Optional[int],
dim : int=-1,
norm : Optional[str],
out : Tensor) -> Tensor
torch._C._fft.fft_fft2(self : Tensor,
s : Optional[List[int]],
dim : List[int]=[-2, -1],
norm : Optional[str]) -> Tensor
torch._C._fft.fft_fft2(self : Tensor,
s : Optional[List[int]],
dim : List[int]=[-2, -1],
norm : Optional[str],
out : Tensor) -> Tensor
torch._C._fft.fft_fftfreq(n : int,
d : float=1.0,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch._C._fft.fft_fftfreq(n : int,
d : float=1.0,
out : Tensor) -> Tensor
torch._C._fft.fft_fftn(self : Tensor,
s : Optional[List[int]],
dim : Optional[List[int]],
norm : Optional[str]) -> Tensor
torch._C._fft.fft_fftn(self : Tensor,
s : Optional[List[int]],
dim : Optional[List[int]],
norm : Optional[str],
out : Tensor) -> Tensor
torch._C._fft.fft_fftshift(self : Tensor,
dim : Optional[List[int]]) -> Tensor
torch._C._fft.fft_hfft(self : Tensor,
n : Optional[int],
dim : int=-1,
norm : Optional[str]) -> Tensor
torch._C._fft.fft_hfft(self : Tensor,
n : Optional[int],
dim : int=-1,
norm : Optional[str],
out : Tensor) -> Tensor
torch._C._fft.fft_hfft2(self : Tensor,
s : Optional[List[int]],
dim : List[int]=[-2, -1],
norm : Optional[str]) -> Tensor
torch._C._fft.fft_hfft2(self : Tensor,
s : Optional[List[int]],
dim : List[int]=[-2, -1],
norm : Optional[str],
out : Tensor) -> Tensor
torch._C._fft.fft_hfftn(self : Tensor,
s : Optional[List[int]],
dim : Optional[List[int]],
norm : Optional[str]) -> Tensor
torch._C._fft.fft_hfftn(self : Tensor,
s : Optional[List[int]],
dim : Optional[List[int]],
norm : Optional[str],
out : Tensor) -> Tensor
torch._C._fft.fft_ifft(self : Tensor,
n : Optional[int],
dim : int=-1,
norm : Optional[str]) -> Tensor
torch._C._fft.fft_ifft(self : Tensor,
n : Optional[int],
dim : int=-1,
norm : Optional[str],
out : Tensor) -> Tensor
torch._C._fft.fft_ifft2(self : Tensor,
s : Optional[List[int]],
dim : List[int]=[-2, -1],
norm : Optional[str]) -> Tensor
torch._C._fft.fft_ifft2(self : Tensor,
s : Optional[List[int]],
dim : List[int]=[-2, -1],
norm : Optional[str],
out : Tensor) -> Tensor
torch._C._fft.fft_ifftn(self : Tensor,
s : Optional[List[int]],
dim : Optional[List[int]],
norm : Optional[str]) -> Tensor
torch._C._fft.fft_ifftn(self : Tensor,
s : Optional[List[int]],
dim : Optional[List[int]],
norm : Optional[str],
out : Tensor) -> Tensor
torch._C._fft.fft_ifftshift(self : Tensor,
dim : Optional[List[int]]) -> Tensor
torch._C._fft.fft_ihfft(self : Tensor,
n : Optional[int],
dim : int=-1,
norm : Optional[str]) -> Tensor
torch._C._fft.fft_ihfft(self : Tensor,
n : Optional[int],
dim : int=-1,
norm : Optional[str],
out : Tensor) -> Tensor
torch._C._fft.fft_ihfft2(self : Tensor,
s : Optional[List[int]],
dim : List[int]=[-2, -1],
norm : Optional[str]) -> Tensor
torch._C._fft.fft_ihfft2(self : Tensor,
s : Optional[List[int]],
dim : List[int]=[-2, -1],
norm : Optional[str],
out : Tensor) -> Tensor
torch._C._fft.fft_ihfftn(self : Tensor,
s : Optional[List[int]],
dim : Optional[List[int]],
norm : Optional[str]) -> Tensor
torch._C._fft.fft_ihfftn(self : Tensor,
s : Optional[List[int]],
dim : Optional[List[int]],
norm : Optional[str],
out : Tensor) -> Tensor
torch._C._fft.fft_irfft(self : Tensor,
n : Optional[int],
dim : int=-1,
norm : Optional[str]) -> Tensor
torch._C._fft.fft_irfft(self : Tensor,
n : Optional[int],
dim : int=-1,
norm : Optional[str],
out : Tensor) -> Tensor
torch._C._fft.fft_irfft2(self : Tensor,
s : Optional[List[int]],
dim : List[int]=[-2, -1],
norm : Optional[str]) -> Tensor
torch._C._fft.fft_irfft2(self : Tensor,
s : Optional[List[int]],
dim : List[int]=[-2, -1],
norm : Optional[str],
out : Tensor) -> Tensor
torch._C._fft.fft_irfftn(self : Tensor,
s : Optional[List[int]],
dim : Optional[List[int]],
norm : Optional[str]) -> Tensor
torch._C._fft.fft_irfftn(self : Tensor,
s : Optional[List[int]],
dim : Optional[List[int]],
norm : Optional[str],
out : Tensor) -> Tensor
torch._C._fft.fft_rfft(self : Tensor,
n : Optional[int],
dim : int=-1,
norm : Optional[str]) -> Tensor
torch._C._fft.fft_rfft(self : Tensor,
n : Optional[int],
dim : int=-1,
norm : Optional[str],
out : Tensor) -> Tensor
torch._C._fft.fft_rfft2(self : Tensor,
s : Optional[List[int]],
dim : List[int]=[-2, -1],
norm : Optional[str]) -> Tensor
torch._C._fft.fft_rfft2(self : Tensor,
s : Optional[List[int]],
dim : List[int]=[-2, -1],
norm : Optional[str],
out : Tensor) -> Tensor
torch._C._fft.fft_rfftfreq(n : int,
d : float=1.0,
dtype : Optional[int],
layout : Optional[int],
device : Optional[Device],
pin_memory : Optional[bool]) -> Tensor
torch._C._fft.fft_rfftfreq(n : int,
d : float=1.0,
out : Tensor) -> Tensor
torch._C._fft.fft_rfftn(self : Tensor,
s : Optional[List[int]],
dim : Optional[List[int]],
norm : Optional[str]) -> Tensor
torch._C._fft.fft_rfftn(self : Tensor,
s : Optional[List[int]],
dim : Optional[List[int]],
norm : Optional[str],
out : Tensor) -> Tensor
torch._C._linalg.linalg_cholesky(self : Tensor,
upper : bool=False) -> Tensor
torch._C._linalg.linalg_cholesky(self : Tensor,
upper : bool=False,
out : Tensor) -> Tensor
torch._C._linalg.linalg_cholesky_ex(self : Tensor,
upper : bool=False,
check_errors : bool=False) -> Tuple[Tensor, Tensor]
torch._C._linalg.linalg_cholesky_ex(self : Tensor,
upper : bool=False,
check_errors : bool=False,
L : Tensor,
info : Tensor) -> Tuple[Tensor, Tensor]
torch._C._linalg.linalg_cond(self : Tensor,
p : Optional[number]) -> Tensor
torch._C._linalg.linalg_cond(self : Tensor,
p : str) -> Tensor
torch._C._linalg.linalg_cond(self : Tensor,
p : Optional[number],
out : Tensor) -> Tensor
torch._C._linalg.linalg_cond(self : Tensor,
p : str,
out : Tensor) -> Tensor
torch._C._linalg.linalg_cross(self : Tensor,
other : Tensor,
dim : int=-1) -> Tensor
torch._C._linalg.linalg_cross(self : Tensor,
other : Tensor,
dim : int=-1,
out : Tensor) -> Tensor
torch._C._linalg.linalg_det(A : Tensor) -> Tensor
torch._C._linalg.linalg_det(A : Tensor,
out : Tensor) -> Tensor
torch._C._linalg.linalg_diagonal(A : Tensor,
offset : int=0,
dim1 : int=-2,
dim2 : int=-1) -> Tensor
torch._C._linalg.linalg_eig(self : Tensor) -> Tuple[Tensor, Tensor]
torch._C._linalg.linalg_eig(self : Tensor,
eigenvalues : Tensor,
eigenvectors : Tensor) -> Tuple[Tensor, Tensor]
torch._C._linalg.linalg_eigh(self : Tensor,
UPLO : str=L) -> Tuple[Tensor, Tensor]
torch._C._linalg.linalg_eigh(self : Tensor,
UPLO : str=L,
eigvals : Tensor,
eigvecs : Tensor) -> Tuple[Tensor, Tensor]
torch._C._linalg.linalg_eigvals(self : Tensor) -> Tensor
torch._C._linalg.linalg_eigvals(self : Tensor,
out : Tensor) -> Tensor
torch._C._linalg.linalg_eigvalsh(self : Tensor,
UPLO : str=L) -> Tensor
torch._C._linalg.linalg_eigvalsh(self : Tensor,
UPLO : str=L,
out : Tensor) -> Tensor
torch._C._linalg.linalg_householder_product(input : Tensor,
tau : Tensor) -> Tensor
torch._C._linalg.linalg_householder_product(input : Tensor,
tau : Tensor,
out : Tensor) -> Tensor
torch._C._linalg.linalg_inv(A : Tensor) -> Tensor
torch._C._linalg.linalg_inv(A : Tensor,
out : Tensor) -> Tensor
torch._C._linalg.linalg_inv_ex(A : Tensor,
check_errors : bool=False) -> Tuple[Tensor, Tensor]
torch._C._linalg.linalg_inv_ex(A : Tensor,
check_errors : bool=False,
inverse : Tensor,
info : Tensor) -> Tuple[Tensor, Tensor]
torch._C._linalg.linalg_ldl_factor(self : Tensor,
hermitian : bool=False) -> Tuple[Tensor, Tensor]
torch._C._linalg.linalg_ldl_factor(self : Tensor,
hermitian : bool=False,
LD : Tensor,
pivots : Tensor) -> Tuple[Tensor, Tensor]
torch._C._linalg.linalg_ldl_factor_ex(self : Tensor,
hermitian : bool=False,
check_errors : bool=False) -> Tuple[Tensor, Tensor, Tensor]
torch._C._linalg.linalg_ldl_factor_ex(self : Tensor,
hermitian : bool=False,
check_errors : bool=False,
LD : Tensor,
pivots : Tensor,
info : Tensor) -> Tuple[Tensor, Tensor, Tensor]
torch._C._linalg.linalg_ldl_solve(LD : Tensor,
pivots : Tensor,
B : Tensor,
hermitian : bool=False) -> Tensor
torch._C._linalg.linalg_ldl_solve(LD : Tensor,
pivots : Tensor,
B : Tensor,
hermitian : bool=False,
out : Tensor) -> Tensor
torch._C._linalg.linalg_lstsq(self : Tensor,
b : Tensor,
rcond : Optional[float],
driver : Optional[str]) -> Tuple[Tensor, Tensor, Tensor, Tensor]
torch._C._linalg.linalg_lstsq(self : Tensor,
b : Tensor,
rcond : Optional[float],
driver : Optional[str],
solution : Tensor,
residuals : Tensor,
rank : Tensor,
singular_values : Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor]
torch._C._linalg.linalg_lu(A : Tensor,
pivot : bool=True) -> Tuple[Tensor, Tensor, Tensor]
torch._C._linalg.linalg_lu(A : Tensor,
pivot : bool=True,
P : Tensor,
L : Tensor,
U : Tensor) -> Tuple[Tensor, Tensor, Tensor]
torch._C._linalg.linalg_lu_factor(A : Tensor,
pivot : bool=True) -> Tuple[Tensor, Tensor]
torch._C._linalg.linalg_lu_factor(A : Tensor,
pivot : bool=True,
LU : Tensor,
pivots : Tensor) -> Tuple[Tensor, Tensor]
torch._C._linalg.linalg_lu_factor_ex(A : Tensor,
pivot : bool=True,
check_errors : bool=False) -> Tuple[Tensor, Tensor, Tensor]
torch._C._linalg.linalg_lu_factor_ex(A : Tensor,
pivot : bool=True,
check_errors : bool=False,
LU : Tensor,
pivots : Tensor,
info : Tensor) -> Tuple[Tensor, Tensor, Tensor]
torch._C._linalg.linalg_lu_solve(LU : Tensor,
pivots : Tensor,
B : Tensor,
left : bool=True,
adjoint : bool=False) -> Tensor
torch._C._linalg.linalg_lu_solve(LU : Tensor,
pivots : Tensor,
B : Tensor,
left : bool=True,
adjoint : bool=False,
out : Tensor) -> Tensor
torch._C._linalg.linalg_matmul(self : Tensor,
other : Tensor) -> Tensor
torch._C._linalg.linalg_matmul(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch._C._linalg.linalg_matrix_exp(self : Tensor) -> Tensor
torch._C._linalg.linalg_matrix_exp(self : Tensor,
out : Tensor) -> Tensor
torch._C._linalg.linalg_matrix_norm(self : Tensor,
ord : number,
dim : List[int]=[-2, -1],
keepdim : bool=False,
dtype : Optional[int]) -> Tensor
torch._C._linalg.linalg_matrix_norm(self : Tensor,
ord : str=fro,
dim : List[int]=[-2, -1],
keepdim : bool=False,
dtype : Optional[int]) -> Tensor
torch._C._linalg.linalg_matrix_norm(self : Tensor,
ord : number,
dim : List[int]=[-2, -1],
keepdim : bool=False,
dtype : Optional[int],
out : Tensor) -> Tensor
torch._C._linalg.linalg_matrix_norm(self : Tensor,
ord : str=fro,
dim : List[int]=[-2, -1],
keepdim : bool=False,
dtype : Optional[int],
out : Tensor) -> Tensor
torch._C._linalg.linalg_matrix_power(self : Tensor,
n : int) -> Tensor
torch._C._linalg.linalg_matrix_power(self : Tensor,
n : int,
out : Tensor) -> Tensor
torch._C._linalg.linalg_matrix_rank(self : Tensor,
tol : float,
hermitian : bool=False) -> Tensor
torch._C._linalg.linalg_matrix_rank(input : Tensor,
tol : Tensor,
hermitian : bool=False) -> Tensor
torch._C._linalg.linalg_matrix_rank(input : Tensor,
atol : Optional[Tensor],
rtol : Optional[Tensor],
hermitian : bool=False) -> Tensor
torch._C._linalg.linalg_matrix_rank(self : Tensor,
atol : Optional[float],
rtol : Optional[float],
hermitian : bool=False) -> Tensor
torch._C._linalg.linalg_matrix_rank(input : Tensor,
atol : Optional[Tensor],
rtol : Optional[Tensor],
hermitian : bool=False,
out : Tensor) -> Tensor
torch._C._linalg.linalg_matrix_rank(self : Tensor,
atol : Optional[float],
rtol : Optional[float],
hermitian : bool=False,
out : Tensor) -> Tensor
torch._C._linalg.linalg_matrix_rank(self : Tensor,
tol : float,
hermitian : bool=False,
out : Tensor) -> Tensor
torch._C._linalg.linalg_matrix_rank(input : Tensor,
tol : Tensor,
hermitian : bool=False,
out : Tensor) -> Tensor
torch._C._linalg.linalg_multi_dot(tensors : List[Tensor]) -> Tensor
torch._C._linalg.linalg_multi_dot(tensors : List[Tensor],
out : Tensor) -> Tensor
torch._C._linalg.linalg_norm(self : Tensor,
ord : Optional[number],
dim : Optional[List[int]],
keepdim : bool=False,
dtype : Optional[int]) -> Tensor
torch._C._linalg.linalg_norm(self : Tensor,
ord : str,
dim : Optional[List[int]],
keepdim : bool=False,
dtype : Optional[int]) -> Tensor
torch._C._linalg.linalg_norm(self : Tensor,
ord : Optional[number],
dim : Optional[List[int]],
keepdim : bool=False,
dtype : Optional[int],
out : Tensor) -> Tensor
torch._C._linalg.linalg_norm(self : Tensor,
ord : str,
dim : Optional[List[int]],
keepdim : bool=False,
dtype : Optional[int],
out : Tensor) -> Tensor
torch._C._linalg.linalg_pinv(self : Tensor,
rcond : float,
hermitian : bool=False) -> Tensor
torch._C._linalg.linalg_pinv(self : Tensor,
atol : Optional[float],
rtol : Optional[float],
hermitian : bool=False) -> Tensor
torch._C._linalg.linalg_pinv(self : Tensor,
atol : Optional[Tensor],
rtol : Optional[Tensor],
hermitian : bool=False) -> Tensor
torch._C._linalg.linalg_pinv(self : Tensor,
atol : Optional[Tensor],
rtol : Optional[Tensor],
hermitian : bool=False,
out : Tensor) -> Tensor
torch._C._linalg.linalg_pinv(self : Tensor,
atol : Optional[float],
rtol : Optional[float],
hermitian : bool=False,
out : Tensor) -> Tensor
torch._C._linalg.linalg_pinv(self : Tensor,
rcond : float,
hermitian : bool=False,
out : Tensor) -> Tensor
torch._C._linalg.linalg_pinv(self : Tensor,
rcond : Tensor,
hermitian : bool=False) -> Tensor
torch._C._linalg.linalg_pinv(self : Tensor,
rcond : Tensor,
hermitian : bool=False,
out : Tensor) -> Tensor
torch._C._linalg.linalg_qr(A : Tensor,
mode : str=reduced) -> Tuple[Tensor, Tensor]
torch._C._linalg.linalg_qr(A : Tensor,
mode : str=reduced,
Q : Tensor,
R : Tensor) -> Tuple[Tensor, Tensor]
torch._C._linalg.linalg_slogdet(A : Tensor) -> Tuple[Tensor, Tensor]
torch._C._linalg.linalg_slogdet(A : Tensor,
sign : Tensor,
logabsdet : Tensor) -> Tuple[Tensor, Tensor]
torch._C._linalg.linalg_solve(A : Tensor,
B : Tensor,
left : bool=True) -> Tensor
torch._C._linalg.linalg_solve(A : Tensor,
B : Tensor,
left : bool=True,
out : Tensor) -> Tensor
torch._C._linalg.linalg_solve_ex(A : Tensor,
B : Tensor,
left : bool=True,
check_errors : bool=False) -> Tuple[Tensor, Tensor]
torch._C._linalg.linalg_solve_ex(A : Tensor,
B : Tensor,
left : bool=True,
check_errors : bool=False,
result : Tensor,
info : Tensor) -> Tuple[Tensor, Tensor]
torch._C._linalg.linalg_solve_triangular(self : Tensor,
B : Tensor,
upper : bool,
left : bool=True,
unitriangular : bool=False) -> Tensor
torch._C._linalg.linalg_solve_triangular(self : Tensor,
B : Tensor,
upper : bool,
left : bool=True,
unitriangular : bool=False,
out : Tensor) -> Tensor
torch._C._linalg.linalg_svd(A : Tensor,
full_matrices : bool=True,
driver : Optional[str]) -> Tuple[Tensor, Tensor, Tensor]
torch._C._linalg.linalg_svd(A : Tensor,
full_matrices : bool=True,
driver : Optional[str],
U : Tensor,
S : Tensor,
Vh : Tensor) -> Tuple[Tensor, Tensor, Tensor]
torch._C._linalg.linalg_svdvals(A : Tensor,
driver : Optional[str]) -> Tensor
torch._C._linalg.linalg_svdvals(A : Tensor,
driver : Optional[str],
out : Tensor) -> Tensor
torch._C._linalg.linalg_tensorinv(self : Tensor,
ind : int=2) -> Tensor
torch._C._linalg.linalg_tensorinv(self : Tensor,
ind : int=2,
out : Tensor) -> Tensor
torch._C._linalg.linalg_tensorsolve(self : Tensor,
other : Tensor,
dims : Optional[List[int]]) -> Tensor
torch._C._linalg.linalg_tensorsolve(self : Tensor,
other : Tensor,
dims : Optional[List[int]],
out : Tensor) -> Tensor
torch._C._linalg.linalg_vander(x : Tensor,
N : Optional[int]) -> Tensor
torch._C._linalg.linalg_vecdot(x : Tensor,
y : Tensor,
dim : int=-1) -> Tensor
torch._C._linalg.linalg_vecdot(x : Tensor,
y : Tensor,
dim : int=-1,
out : Tensor) -> Tensor
torch._C._linalg.linalg_vector_norm(self : Tensor,
ord : number=2,
dim : Optional[List[int]],
keepdim : bool=False,
dtype : Optional[int]) -> Tensor
torch._C._linalg.linalg_vector_norm(self : Tensor,
ord : number=2,
dim : Optional[List[int]],
keepdim : bool=False,
dtype : Optional[int],
out : Tensor) -> Tensor
torch._C._nested.nested_to_padded_tensor(self : Tensor,
padding : float,
output_size : Optional[List[int]]) -> Tensor
torch._C._sparse.sparse_sampled_addmm(self : Tensor,
mat1 : Tensor,
mat2 : Tensor,
beta : number=1,
alpha : number=1,
out : Tensor) -> Tensor
torch._C._sparse.sparse_sampled_addmm(self : Tensor,
mat1 : Tensor,
mat2 : Tensor,
beta : number=1,
alpha : number=1) -> Tensor
torch._C._special.special_airy_ai(x : Tensor) -> Tensor
torch._C._special.special_airy_ai(x : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_bessel_j0(self : Tensor) -> Tensor
torch._C._special.special_bessel_j0(self : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_bessel_j1(self : Tensor) -> Tensor
torch._C._special.special_bessel_j1(self : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_bessel_y0(self : Tensor) -> Tensor
torch._C._special.special_bessel_y0(self : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_bessel_y1(self : Tensor) -> Tensor
torch._C._special.special_bessel_y1(self : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_chebyshev_polynomial_t(x : Tensor,
n : Tensor) -> Tensor
torch._C._special.special_chebyshev_polynomial_t(x : Tensor,
n : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_chebyshev_polynomial_t(x : number,
n : Tensor) -> Tensor
torch._C._special.special_chebyshev_polynomial_t(x : number,
n : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_chebyshev_polynomial_t(x : Tensor,
n : number) -> Tensor
torch._C._special.special_chebyshev_polynomial_t(x : Tensor,
n : number,
out : Tensor) -> Tensor
torch._C._special.special_chebyshev_polynomial_u(x : Tensor,
n : Tensor) -> Tensor
torch._C._special.special_chebyshev_polynomial_u(x : Tensor,
n : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_chebyshev_polynomial_u(x : number,
n : Tensor) -> Tensor
torch._C._special.special_chebyshev_polynomial_u(x : number,
n : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_chebyshev_polynomial_u(x : Tensor,
n : number) -> Tensor
torch._C._special.special_chebyshev_polynomial_u(x : Tensor,
n : number,
out : Tensor) -> Tensor
torch._C._special.special_chebyshev_polynomial_v(x : Tensor,
n : Tensor) -> Tensor
torch._C._special.special_chebyshev_polynomial_v(x : Tensor,
n : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_chebyshev_polynomial_v(x : number,
n : Tensor) -> Tensor
torch._C._special.special_chebyshev_polynomial_v(x : number,
n : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_chebyshev_polynomial_v(x : Tensor,
n : number) -> Tensor
torch._C._special.special_chebyshev_polynomial_v(x : Tensor,
n : number,
out : Tensor) -> Tensor
torch._C._special.special_chebyshev_polynomial_w(x : Tensor,
n : Tensor) -> Tensor
torch._C._special.special_chebyshev_polynomial_w(x : Tensor,
n : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_chebyshev_polynomial_w(x : number,
n : Tensor) -> Tensor
torch._C._special.special_chebyshev_polynomial_w(x : number,
n : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_chebyshev_polynomial_w(x : Tensor,
n : number) -> Tensor
torch._C._special.special_chebyshev_polynomial_w(x : Tensor,
n : number,
out : Tensor) -> Tensor
torch._C._special.special_digamma(self : Tensor) -> Tensor
torch._C._special.special_digamma(self : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_entr(self : Tensor) -> Tensor
torch._C._special.special_entr(self : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_erf(self : Tensor) -> Tensor
torch._C._special.special_erf(self : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_erfc(self : Tensor) -> Tensor
torch._C._special.special_erfc(self : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_erfcx(self : Tensor) -> Tensor
torch._C._special.special_erfcx(self : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_erfinv(self : Tensor) -> Tensor
torch._C._special.special_erfinv(self : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_exp2(self : Tensor) -> Tensor
torch._C._special.special_exp2(self : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_expit(self : Tensor) -> Tensor
torch._C._special.special_expit(self : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_expm1(self : Tensor) -> Tensor
torch._C._special.special_expm1(self : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_gammainc(self : Tensor,
other : Tensor) -> Tensor
torch._C._special.special_gammainc(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_gammaincc(self : Tensor,
other : Tensor) -> Tensor
torch._C._special.special_gammaincc(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_gammaln(self : Tensor) -> Tensor
torch._C._special.special_gammaln(self : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_hermite_polynomial_h(x : Tensor,
n : Tensor) -> Tensor
torch._C._special.special_hermite_polynomial_h(x : Tensor,
n : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_hermite_polynomial_h(x : number,
n : Tensor) -> Tensor
torch._C._special.special_hermite_polynomial_h(x : number,
n : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_hermite_polynomial_h(x : Tensor,
n : number) -> Tensor
torch._C._special.special_hermite_polynomial_h(x : Tensor,
n : number,
out : Tensor) -> Tensor
torch._C._special.special_hermite_polynomial_he(x : Tensor,
n : Tensor) -> Tensor
torch._C._special.special_hermite_polynomial_he(x : Tensor,
n : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_hermite_polynomial_he(x : number,
n : Tensor) -> Tensor
torch._C._special.special_hermite_polynomial_he(x : number,
n : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_hermite_polynomial_he(x : Tensor,
n : number) -> Tensor
torch._C._special.special_hermite_polynomial_he(x : Tensor,
n : number,
out : Tensor) -> Tensor
torch._C._special.special_i0(self : Tensor) -> Tensor
torch._C._special.special_i0(self : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_i0e(self : Tensor) -> Tensor
torch._C._special.special_i0e(self : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_i1(self : Tensor) -> Tensor
torch._C._special.special_i1(self : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_i1e(self : Tensor) -> Tensor
torch._C._special.special_i1e(self : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_laguerre_polynomial_l(x : Tensor,
n : Tensor) -> Tensor
torch._C._special.special_laguerre_polynomial_l(x : Tensor,
n : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_laguerre_polynomial_l(x : number,
n : Tensor) -> Tensor
torch._C._special.special_laguerre_polynomial_l(x : number,
n : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_laguerre_polynomial_l(x : Tensor,
n : number) -> Tensor
torch._C._special.special_laguerre_polynomial_l(x : Tensor,
n : number,
out : Tensor) -> Tensor
torch._C._special.special_legendre_polynomial_p(x : Tensor,
n : Tensor) -> Tensor
torch._C._special.special_legendre_polynomial_p(x : Tensor,
n : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_legendre_polynomial_p(x : number,
n : Tensor) -> Tensor
torch._C._special.special_legendre_polynomial_p(x : number,
n : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_legendre_polynomial_p(x : Tensor,
n : number) -> Tensor
torch._C._special.special_legendre_polynomial_p(x : Tensor,
n : number,
out : Tensor) -> Tensor
torch._C._special.special_log1p(self : Tensor) -> Tensor
torch._C._special.special_log1p(self : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_log_ndtr(self : Tensor) -> Tensor
torch._C._special.special_log_ndtr(self : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_log_softmax(self : Tensor,
dim : int,
dtype : Optional[int]) -> Tensor
torch._C._special.special_logit(self : Tensor,
eps : Optional[float]) -> Tensor
torch._C._special.special_logit(self : Tensor,
eps : Optional[float],
out : Tensor) -> Tensor
torch._C._special.special_logsumexp(self : Tensor,
dim : List[int],
keepdim : bool=False) -> Tensor
torch._C._special.special_logsumexp(self : Tensor,
dim : List[int],
keepdim : bool=False,
out : Tensor) -> Tensor
torch._C._special.special_modified_bessel_i0(self : Tensor) -> Tensor
torch._C._special.special_modified_bessel_i0(self : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_modified_bessel_i1(self : Tensor) -> Tensor
torch._C._special.special_modified_bessel_i1(self : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_modified_bessel_k0(self : Tensor) -> Tensor
torch._C._special.special_modified_bessel_k0(self : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_modified_bessel_k1(self : Tensor) -> Tensor
torch._C._special.special_modified_bessel_k1(self : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_multigammaln(self : Tensor,
p : int) -> Tensor
torch._C._special.special_multigammaln(self : Tensor,
p : int,
out : Tensor) -> Tensor
torch._C._special.special_ndtr(self : Tensor) -> Tensor
torch._C._special.special_ndtr(self : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_ndtri(self : Tensor) -> Tensor
torch._C._special.special_ndtri(self : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_polygamma(n : int,
self : Tensor) -> Tensor
torch._C._special.special_polygamma(n : int,
self : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_psi(self : Tensor) -> Tensor
torch._C._special.special_psi(self : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_round(self : Tensor,
decimals : int=0) -> Tensor
torch._C._special.special_round(self : Tensor,
decimals : int=0,
out : Tensor) -> Tensor
torch._C._special.special_scaled_modified_bessel_k0(x : Tensor) -> Tensor
torch._C._special.special_scaled_modified_bessel_k0(x : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_scaled_modified_bessel_k1(x : Tensor) -> Tensor
torch._C._special.special_scaled_modified_bessel_k1(x : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_shifted_chebyshev_polynomial_t(x : Tensor,
n : Tensor) -> Tensor
torch._C._special.special_shifted_chebyshev_polynomial_t(x : Tensor,
n : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_shifted_chebyshev_polynomial_t(x : number,
n : Tensor) -> Tensor
torch._C._special.special_shifted_chebyshev_polynomial_t(x : number,
n : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_shifted_chebyshev_polynomial_t(x : Tensor,
n : number) -> Tensor
torch._C._special.special_shifted_chebyshev_polynomial_t(x : Tensor,
n : number,
out : Tensor) -> Tensor
torch._C._special.special_shifted_chebyshev_polynomial_u(x : Tensor,
n : Tensor) -> Tensor
torch._C._special.special_shifted_chebyshev_polynomial_u(x : Tensor,
n : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_shifted_chebyshev_polynomial_u(x : number,
n : Tensor) -> Tensor
torch._C._special.special_shifted_chebyshev_polynomial_u(x : number,
n : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_shifted_chebyshev_polynomial_u(x : Tensor,
n : number) -> Tensor
torch._C._special.special_shifted_chebyshev_polynomial_u(x : Tensor,
n : number,
out : Tensor) -> Tensor
torch._C._special.special_shifted_chebyshev_polynomial_v(x : Tensor,
n : Tensor) -> Tensor
torch._C._special.special_shifted_chebyshev_polynomial_v(x : Tensor,
n : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_shifted_chebyshev_polynomial_v(x : number,
n : Tensor) -> Tensor
torch._C._special.special_shifted_chebyshev_polynomial_v(x : number,
n : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_shifted_chebyshev_polynomial_v(x : Tensor,
n : number) -> Tensor
torch._C._special.special_shifted_chebyshev_polynomial_v(x : Tensor,
n : number,
out : Tensor) -> Tensor
torch._C._special.special_shifted_chebyshev_polynomial_w(x : Tensor,
n : Tensor) -> Tensor
torch._C._special.special_shifted_chebyshev_polynomial_w(x : Tensor,
n : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_shifted_chebyshev_polynomial_w(x : number,
n : Tensor) -> Tensor
torch._C._special.special_shifted_chebyshev_polynomial_w(x : number,
n : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_shifted_chebyshev_polynomial_w(x : Tensor,
n : number) -> Tensor
torch._C._special.special_shifted_chebyshev_polynomial_w(x : Tensor,
n : number,
out : Tensor) -> Tensor
torch._C._special.special_sinc(self : Tensor) -> Tensor
torch._C._special.special_sinc(self : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_softmax(self : Tensor,
dim : int,
dtype : Optional[int]) -> Tensor
torch._C._special.special_spherical_bessel_j0(x : Tensor) -> Tensor
torch._C._special.special_spherical_bessel_j0(x : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_xlog1py(self : Tensor,
other : Tensor) -> Tensor
torch._C._special.special_xlog1py(self : Tensor,
other : number) -> Tensor
torch._C._special.special_xlog1py(self : number,
other : Tensor) -> Tensor
torch._C._special.special_xlog1py(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_xlog1py(self : number,
other : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_xlog1py(self : Tensor,
other : number,
out : Tensor) -> Tensor
torch._C._special.special_xlogy(self : Tensor,
other : Tensor) -> Tensor
torch._C._special.special_xlogy(self : Tensor,
other : number) -> Tensor
torch._C._special.special_xlogy(self : number,
other : Tensor) -> Tensor
torch._C._special.special_xlogy(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_xlogy(self : number,
other : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_xlogy(self : Tensor,
other : number,
out : Tensor) -> Tensor
torch._C._special.special_zeta(self : Tensor,
other : Tensor) -> Tensor
torch._C._special.special_zeta(self : Tensor,
other : number) -> Tensor
torch._C._special.special_zeta(self : number,
other : Tensor) -> Tensor
torch._C._special.special_zeta(self : Tensor,
other : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_zeta(self : number,
other : Tensor,
out : Tensor) -> Tensor
torch._C._special.special_zeta(self : Tensor,
other : number,
out : Tensor) -> Tensor
TorchScript Builtin Functions¶
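For orientation, a minimal sketch (the function name to_lookup is made up) of calling one of these builtins from scripted code; it relies on the builtins.dict overload over List[Tuple[str, tVal]] listed below.

```python
from typing import Dict, List, Tuple

import torch

@torch.jit.script
def to_lookup(pairs: List[Tuple[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:
    # builtins.dict(inputs : List[Tuple[str, tVal]]) -> Dict[str, tVal]
    return dict(pairs)
```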
collections.OrderedDict() -> Dict[str, Tensor]
collections.OrderedDict(inputs : List[Tuple[str, tVal]]) -> Dict[str, tVal]
collections.OrderedDict(self : Dict[str, t]) -> Dict[str, t]
collections.OrderedDict(inputs : List[Tuple[int, tVal]]) -> Dict[int, tVal]
collections.OrderedDict(self : Dict[int, t]) -> Dict[int, t]
collections.OrderedDict(inputs : List[Tuple[bool, tVal]]) -> Dict[bool, tVal]
collections.OrderedDict(self : Dict[bool, t]) -> Dict[bool, t]
collections.OrderedDict(inputs : List[Tuple[float, tVal]]) -> Dict[float, tVal]
collections.OrderedDict(self : Dict[float, t]) -> Dict[float, t]
collections.OrderedDict(inputs : List[Tuple[complex, tVal]]) -> Dict[complex, tVal]
collections.OrderedDict(self : Dict[complex, t]) -> Dict[complex, t]
collections.OrderedDict(inputs : List[Tuple[Tensor, tVal]]) -> Dict[Tensor, tVal]
collections.OrderedDict(self : Dict[Tensor, t]) -> Dict[Tensor, t]
builtins.dict() -> Dict[str, Tensor]
builtins.dict(inputs : List[Tuple[str, tVal]]) -> Dict[str, tVal]
builtins.dict(self : Dict[str, t]) -> Dict[str, t]
builtins.dict(inputs : List[Tuple[int, tVal]]) -> Dict[int, tVal]
builtins.dict(self : Dict[int, t]) -> Dict[int, t]
builtins.dict(inputs : List[Tuple[bool, tVal]]) -> Dict[bool, tVal]
builtins.dict(self : Dict[bool, t]) -> Dict[bool, t]
builtins.dict(inputs : List[Tuple[float, tVal]]) -> Dict[float, tVal]
builtins.dict(self : Dict[float, t]) -> Dict[float, t]
builtins.dict(inputs : List[Tuple[complex, tVal]]) -> Dict[complex, tVal]
builtins.dict(self : Dict[complex, t]) -> Dict[complex, t]
builtins.dict(inputs : List[Tuple[Tensor, tVal]]) -> Dict[Tensor, tVal]
builtins.dict(self : Dict[Tensor, t]) -> Dict[Tensor, t]
torch.backends.cudnn.is_acceptable(self : Tensor) -> bool
cmath.isnan(self : Tensor) -> Tensor
cmath.isnan(self : Tensor,
out : Tensor) -> Tensor
cmath.isnan(a : float) -> bool
cmath.isnan(a : complex) -> bool
cmath.isfinite(self : Tensor) -> Tensor
cmath.isfinite(a : float) -> bool
cmath.isfinite(a : complex) -> bool
cmath.isinf(self : Tensor) -> Tensor
cmath.isinf(self : Tensor,
out : Tensor) -> Tensor
cmath.isinf(a : float) -> bool
cmath.isinf(a : complex) -> bool
cmath.phase(self : Tensor) -> Tensor
cmath.phase(self : Tensor,
out : Tensor) -> Tensor
cmath.phase(a : int) -> float
cmath.phase(a : float) -> float
cmath.phase(a : complex) -> float
cmath.phase(a : number) -> number
cmath.rect(abs : Tensor,
angle : Tensor) -> Tensor
cmath.rect(abs : Tensor,
angle : Tensor,
out : Tensor) -> Tensor
cmath.rect(a : int,
b : int) -> complex
cmath.rect(a : float,
b : float) -> complex
cmath.rect(a : int,
b : float) -> complex
cmath.rect(a : float,
b : int) -> complex
cmath.rect(a : number,
b : number) -> number
cmath.log(self : Tensor) -> Tensor
cmath.log(self : Tensor,
out : Tensor) -> Tensor
cmath.log(a : int) -> float
cmath.log(a : float) -> float
cmath.log(a : complex) -> complex
cmath.log(a : number) -> number
cmath.log(a : int,
b : int) -> float
cmath.log(a : float,
b : float) -> float
cmath.log(a : complex,
b : complex) -> complex
cmath.log(a : int,
b : float) -> float
cmath.log(a : float,
b : int) -> float
cmath.log(a : int,
b : complex) -> complex
cmath.log(a : complex,
b : int) -> complex
cmath.log(a : float,
b : complex) -> complex
cmath.log(a : complex,
b : float) -> complex
cmath.log(a : number,
b : number) -> float
cmath.log10(self : Tensor) -> Tensor
cmath.log10(self : Tensor,
out : Tensor) -> Tensor
cmath.log10(a : int) -> float
cmath.log10(a : float) -> float
cmath.log10(a : complex) -> complex
cmath.log10(a : number) -> number
cmath.sqrt(self : Tensor) -> Tensor
cmath.sqrt(self : Tensor,
out : Tensor) -> Tensor
cmath.sqrt(a : int) -> float
cmath.sqrt(a : float) -> float
cmath.sqrt(a : complex) -> complex
cmath.sqrt(a : number) -> number
cmath.exp(self : Tensor) -> Tensor
cmath.exp(self : Tensor,
out : Tensor) -> Tensor
cmath.exp(a : int) -> float
cmath.exp(a : float) -> float
cmath.exp(a : complex) -> complex
cmath.exp(a : number) -> number
cmath.sin(self : Tensor) -> Tensor
cmath.sin(self : Tensor,
out : Tensor) -> Tensor
cmath.sin(a : int) -> float
cmath.sin(a : float) -> float
cmath.sin(a : complex) -> complex
cmath.sin(a : number) -> number
cmath.tan(self : Tensor) -> Tensor
cmath.tan(self : Tensor,
out : Tensor) -> Tensor
cmath.tan(a : int) -> float
cmath.tan(a : float) -> float
cmath.tan(a : complex) -> complex
cmath.tan(a : number) -> number
cmath.cos(self : Tensor) -> Tensor
cmath.cos(self : Tensor,
out : Tensor) -> Tensor
cmath.cos(a : int) -> float
cmath.cos(a : float) -> float
cmath.cos(a : complex) -> complex
cmath.cos(a : number) -> number
cmath.asin(self : Tensor) -> Tensor
cmath.asin(self : Tensor,
out : Tensor) -> Tensor
cmath.asin(a : int) -> float
cmath.asin(a : float) -> float
cmath.asin(a : complex) -> complex
cmath.asin(a : number) -> number
cmath.acos(self : Tensor) -> Tensor
cmath.acos(self : Tensor,
out : Tensor) -> Tensor
cmath.acos(a : int) -> float
cmath.acos(a : float) -> float
cmath.acos(a : complex) -> complex
cmath.acos(a : number) -> number
cmath.atan(self : Tensor) -> Tensor
cmath.atan(self : Tensor,
out : Tensor) -> Tensor
cmath.atan(a : int) -> float
cmath.atan(a : float) -> float
cmath.atan(a : complex) -> complex
cmath.atan(a : number) -> number
cmath.sinh(self : Tensor) -> Tensor
cmath.sinh(self : Tensor,
out : Tensor) -> Tensor
cmath.sinh(a : int) -> float
cmath.sinh(a : float) -> float
cmath.sinh(a : complex) -> complex
cmath.sinh(a : number) -> number
cmath.cosh(self : Tensor) -> Tensor
cmath.cosh(self : Tensor,
out : Tensor) -> Tensor
cmath.cosh(a : int) -> float
cmath.cosh(a : float) -> float
cmath.cosh(a : complex) -> complex
cmath.cosh(a : number) -> number
cmath.tanh(self : Tensor) -> Tensor
cmath.tanh(self : Tensor,
out : Tensor) -> Tensor
cmath.tanh(a : int) -> float
cmath.tanh(a : float) -> float
cmath.tanh(a : complex) -> complex
cmath.tanh(a : number) -> number
cmath.asinh(self : Tensor) -> Tensor
cmath.asinh(self : Tensor,
out : Tensor) -> Tensor
cmath.asinh(a : int) -> float
cmath.asinh(a : float) -> float
cmath.asinh(a : complex) -> complex
cmath.asinh(a : number) -> number
cmath.acosh(self : Tensor) -> Tensor
cmath.acosh(self : Tensor,
out : Tensor) -> Tensor
cmath.acosh(a : int) -> float
cmath.acosh(a : float) -> float
cmath.acosh(a : complex) -> complex
cmath.acosh(a : number) -> number
cmath.atanh(self : Tensor) -> Tensor
cmath.atanh(self : Tensor,
out : Tensor) -> Tensor
cmath.atanh(a : int) -> float
cmath.atanh(a : float) -> float
cmath.atanh(a : complex) -> complex
cmath.atanh(a : number) -> number
torch.autograd.grad(outputs : List[Tensor],
inputs : List[Tensor],
grad_outputs : Optional[List[Optional[Tensor]]],
retain_graph : Optional[bool],
create_graph : bool=False,
allow_unused : bool=False) -> List[Optional[Tensor]]
torch.autograd.backward(self : Tensor,
gradient : Optional[Tensor],
retain_graph : Optional[bool],
create_graph : bool=False) -> Tuple[]
torch.autograd.backward(tensors : List[Tensor],
grad_tensors : Optional[List[Optional[Tensor]]],
retain_graph : Optional[bool],
create_graph : bool=False) -> Tuple[]
torch.Size(sizes : List[int]) -> List[int]
torch.functional.align_tensors(tensors : List[Tensor]) -> List[Tensor]
torch.functional.atleast_1d(self : Tensor) -> Tensor
torch.functional.atleast_1d(tensors : List[Tensor]) -> List[Tensor]
torch.functional.atleast_2d(self : Tensor) -> Tensor
torch.functional.atleast_2d(tensors : List[Tensor]) -> List[Tensor]
torch.functional.atleast_3d(self : Tensor) -> Tensor
torch.functional.atleast_3d(tensors : List[Tensor]) -> List[Tensor]
torch.functional.block_diag(tensors : List[Tensor]) -> Tensor
torch.functional.block_diag(tensors : List[Tensor],
out : Tensor) -> Tensor
torch.functional.broadcast_tensors(tensors : List[Tensor]) -> List[Tensor]
torch.functional.cartesian_prod(tensors : List[Tensor]) -> Tensor
torch.functional.chain_matmul(matrices : List[Tensor]) -> Tensor
torch.functional.chain_matmul(matrices : List[Tensor],
out : Tensor) -> Tensor
torch.device(a : str) -> Device
torch.device(type : str,
index : int) -> Device
torch.functional.einsum(equation : str,
tensors : List[Tensor],
path : Optional[List[int]]) -> Tensor
torch.functional.einsum(a : Tensor) -> Tensor
torch.get_autocast_dtype(device_type : str) -> int
torch.random.initial_seed(self : Generator) -> int
torch.is_autocast_cpu_enabled() -> bool
torch.is_autocast_enabled() -> bool
torch.is_grad_enabled() -> bool
torch.random.manual_seed(seed : int) -> Tuple[]
torch.random.manual_seed(self : Generator,
seed : int) -> Generator
torch.functional.meshgrid(tensors : List[Tensor]) -> List[Tensor]
torch.functional.meshgrid(tensors : List[Tensor],
indexing : str) -> List[Tensor]
torch.qscheme(self : Tensor) -> QScheme
torch.serialization.save(item : t,
filename : str) -> Tuple[]
torch.random.seed(self : Generator) -> int
torch.autograd.grad_mode.set_grad_enabled(val : bool) -> Tuple[]
torch.functional.split(self : Tensor,
split_size : int,
dim : int=0) -> List[Tensor]
torch.functional.split(self : Tensor,
split_size : List[int],
dim : int=0) -> List[Tensor]
torch.functional.split(self : str,
separator : Optional[str],
max : int=-1) -> List[str]
torch.functional.split(self : Tensor,
split_sizes : List[int],
dim : int=0) -> List[Tensor]
torch.wait(self : Future[t]) -> t
Python Built-in Functions¶
The functions in the following table are supported but do not have a static schema
Function | Note
---|---
print | Print any value
tuple | Lists cannot be converted to tuples with this method since their size is not statically known
getattr | Attribute name must be a literal string
hasattr | Attribute name must be a literal string
isinstance | Result is static
range | Can only be used as an iterator in a for loop
zip | Arguments must be iterable. See Iterables for details.
enumerate | Arguments must be iterable. See Iterables for details.
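A minimal sketch (the function name total_elements is illustrative) of using two of these schemaless builtins, print and enumerate, inside a scripted function:

```python
from typing import List

import torch

@torch.jit.script
def total_elements(xs: List[torch.Tensor]) -> int:
    total = 0
    # print accepts any value; enumerate's argument must be iterable
    for i, x in enumerate(xs):
        print("tensor", i, "has", x.numel(), "elements")
        total += x.numel()
    return total
```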
The following functions will use the corresponding magic method on TorchScript Classes
Function | Magic Method
---|---
complex | __complex__
float | __float__
int | __int__
bool | __bool__
str | __str__
len | __len__
hex | __hex__
oct | __oct__
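A minimal sketch (the class name Ring and function name ring_size are illustrative) of this dispatch: calling len() on a TorchScript class invokes its __len__ method.

```python
import torch

@torch.jit.script
class Ring(object):
    def __init__(self, data: torch.Tensor):
        self.data = data

    def __len__(self) -> int:
        return self.data.size(0)

@torch.jit.script
def ring_size(t: torch.Tensor) -> int:
    r = Ring(t)
    return len(r)  # dispatches to Ring.__len__
```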
These built-in functions use the schema
float(a : Tensor) -> float
float(a : number) -> float
float(a : int) -> float
float(a : bool) -> float
float(a : str) -> float
complex(a : number) -> complex
complex(a : Tensor,
b : Tensor) -> complex
complex(x : int,
y : bool) -> complex
complex(x : bool,
y : int) -> complex
complex(x : float,
y : bool) -> complex
complex(x : bool,
y : float) -> complex
complex(x : float,
y : int) -> complex
complex(x : int,
y : float) -> complex
complex(x : int,
y : int) -> complex
complex(x : bool,
y : bool) -> complex
complex(x : float,
y : float) -> complex
complex(x : Tensor,
y : float) -> complex
complex(x : float,
y : Tensor) -> complex
complex(x : Tensor,
y : int) -> complex
complex(x : int,
y : Tensor) -> complex
complex(x : Tensor,
y : bool) -> complex
complex(x : bool,
y : Tensor) -> complex
int(a : Tensor) -> int
int(a : bool) -> int
int(a : float) -> int
int(a : number) -> int
int(a : str) -> int
bool(a : Tensor) -> bool
bool(a : int) -> bool
bool(a : float) -> bool
str(elem : t) -> str
len(a : List[t]) -> int
len(t : Tensor) -> int
len(s : str) -> int
len(self : Dict[str, t]) -> int
len(self : Dict[int, t]) -> int
len(self : Dict[bool, t]) -> int
len(self : Dict[float, t]) -> int
len(self : Dict[complex, t]) -> int
len(self : Dict[Tensor, t]) -> int
len(a : List[Any]) -> int
hex(i : int) -> str
oct(i : int) -> str
round(self : Tensor) -> Tensor
round(self : Tensor,
decimals : int) -> Tensor
round(self : Tensor,
out : Tensor) -> Tensor
round(self : Tensor,
decimals : int,
out : Tensor) -> Tensor
round(a : int) -> float
round(a : float) -> float
round(a : number) -> number
hash(value : t) -> int
min(a : int,
b : int) -> int
min(a : float,
b : float) -> float
min(a : int,
b : float) -> float
min(a : float,
b : int) -> float
min(a : number,
b : number) -> number
min(l : List[int],
r : List[int]) -> List[int]
min(self : List[int]) -> int
min(l : List[float],
r : List[float]) -> List[float]
min(self : List[float]) -> float
min(l : List[bool],
r : List[bool]) -> List[bool]
min(self : List[bool]) -> bool
max(a : int,
b : int) -> int
max(a : float,
b : float) -> float
max(a : int,
b : float) -> float
max(a : float,
b : int) -> float
max(a : number,
b : number) -> number
max(l : List[int],
r : List[int]) -> List[int]
max(self : List[int]) -> int
max(l : List[float],
r : List[float]) -> List[float]
max(self : List[float]) -> float
max(l : List[bool],
r : List[bool]) -> List[bool]
max(self : List[bool]) -> bool
abs(a : int) -> int
abs(a : float) -> float
abs(a : complex) -> float
abs(a : number) -> number
abs(x : Tensor) -> Tensor
all(self : Tensor) -> Tensor
all(self : Tensor,
dim : int,
keepdim : bool=False) -> Tensor
all(self : Tensor,
dim : Optional[List[int]],
keepdim : bool=False) -> Tensor
all(self : Tensor,
dim : int,
keepdim : bool=False,
out : Tensor) -> Tensor
all(self : Tensor,
dim : Optional[List[int]],
keepdim : bool=False,
out : Tensor) -> Tensor
all(self : Tensor,
out : Tensor) -> Tensor
all(self : Tensor,
dim : str,
keepdim : bool=False) -> Tensor
all(self : Tensor,
dim : str,
keepdim : bool=False,
out : Tensor) -> Tensor
all(self : List[int]) -> bool
all(self : List[float]) -> bool
all(self : List[bool]) -> bool
divmod(x : int,
y : int) -> Tuple[int, int]
divmod(x : float,
y : float) -> Tuple[float, float]
divmod(x : int,
y : float) -> Tuple[float, float]
divmod(x : float,
y : int) -> Tuple[float, float]
list(t : str) -> List[str]
list(l : List[t]) -> List[t]
ord(string : str) -> int
chr(i : int) -> str
bin(i : int) -> str
sorted(input : List[int]) -> List[int]
sorted(input : List[float]) -> List[float]
sorted(input : List[Tensor]) -> List[Tensor]
sorted(input : List[bool]) -> List[bool]
sorted(input : List[str]) -> List[str]
sorted(self : List[t]) -> List[t]
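A minimal sketch (the function name summarize is illustrative) exercising the List[float] overloads of min, max, and sorted from the schemas above:

```python
from typing import List, Tuple

import torch

@torch.jit.script
def summarize(values: List[float]) -> Tuple[float, float, List[float]]:
    # min(self : List[float]) -> float, max(self : List[float]) -> float,
    # sorted(input : List[float]) -> List[float]
    return min(values), max(values), sorted(values)
```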
math Module¶
aten::ceil.int(int a) -> int
aten::ceil.float(float a) -> int
aten::ceil.Scalar(Scalar a) -> Scalar
aten::copysign.int(int a, int b) -> float
aten::copysign.float(float a, float b) -> float
aten::copysign.int_float(int a, float b) -> float
aten::copysign.float_int(float a, int b) -> float
aten::copysign(Scalar a, Scalar b) -> float
aten::erf.int(int a) -> float
aten::erf.float(float a) -> float
aten::erf.Scalar(Scalar a) -> Scalar
aten::erfc.int(int a) -> float
aten::erfc.float(float a) -> float
aten::erfc.Scalar(Scalar a) -> Scalar
aten::exp.int(int a) -> float
aten::exp.float(float a) -> float
aten::exp.complex(complex a) -> complex
aten::exp.Scalar(Scalar a) -> Scalar
aten::expm1.int(int a) -> float
aten::expm1.float(float a) -> float
aten::expm1.Scalar(Scalar a) -> Scalar
aten::fabs.int(int a) -> float
aten::fabs.float(float a) -> float
aten::fabs.Scalar(Scalar a) -> Scalar
aten::floor.int(int a) -> int
aten::floor.float(float a) -> int
aten::floor.Scalar(Scalar a) -> Scalar
aten::gamma.int(int a) -> float
aten::gamma.float(float a) -> float
aten::gamma.Scalar(Scalar a) -> Scalar
aten::lgamma.int(int a) -> float
aten::lgamma.float(float a) -> float
aten::lgamma.Scalar(Scalar a) -> Scalar
aten::log.int(int a) -> float
aten::log.float(float a) -> float
aten::log.complex(complex a) -> complex
aten::log.Scalar(Scalar a) -> Scalar
aten::log.int_int(int a, int b) -> float
aten::log.float_float(float a, float b) -> float
aten::log.complex_complex(complex a, complex b) -> complex
aten::log.int_float(int a, float b) -> float
aten::log.float_int(float a, int b) -> float
aten::log.int_complex(int a, complex b) -> complex
aten::log.complex_int(complex a, int b) -> complex
aten::log.float_complex(float a, complex b) -> complex
aten::log.complex_float(complex a, float b) -> complex
aten::log.Scalar_Scalar(Scalar a, Scalar b) -> float
aten::log10.int(int a) -> float
aten::log10.float(float a) -> float
aten::log10.complex(complex a) -> complex
aten::log10.Scalar(Scalar a) -> Scalar
aten::log1p.int(int a) -> float
aten::log1p.float(float a) -> float
aten::log1p.Scalar(Scalar a) -> Scalar
aten::pow.int(int a, int b) -> float
aten::pow.complex(complex a, complex b) -> complex
aten::pow.float(float a, float b) -> float
aten::pow.int_float(int a, float b) -> float
aten::pow.float_int(float a, int b) -> float
aten::pow.float_complex(float a, complex b) -> complex
aten::pow.complex_float(complex a, float b) -> complex
aten::pow.Scalar_Scalar(Scalar a, Scalar b) -> float
aten::pow.int_to_int(int a, int b) -> int
aten::sqrt.int(int a) -> float
aten::sqrt.float(float a) -> float
aten::sqrt.complex(complex a) -> complex
aten::sqrt.Scalar(Scalar a) -> Scalar
aten::isnan.float(float a) -> bool
aten::isnan.complex(complex a) -> bool
aten::asinh.int(int a) -> float
aten::asinh.float(float a) -> float
aten::asinh.complex(complex a) -> complex
aten::asinh.Scalar(Scalar a) -> Scalar
aten::atanh.int(int a) -> float
aten::atanh.float(float a) -> float
aten::atanh.complex(complex a) -> complex
aten::atanh.Scalar(Scalar a) -> Scalar
aten::cosh.int(int a) -> float
aten::cosh.float(float a) -> float
aten::cosh.complex(complex a) -> complex
aten::cosh.Scalar(Scalar a) -> Scalar
aten::sinh.int(int a) -> float
aten::sinh.float(float a) -> float
aten::sinh.complex(complex a) -> complex
aten::sinh.Scalar(Scalar a) -> Scalar
aten::tanh.int(int a) -> float
aten::tanh.float(float a) -> float
aten::tanh.complex(complex a) -> complex
aten::tanh.Scalar(Scalar a) -> Scalar
aten::acos.int(int a) -> float
aten::acos.float(float a) -> float
aten::acos.complex(complex a) -> complex
aten::acos.Scalar(Scalar a) -> Scalar
aten::asin.int(int a) -> float
aten::asin.float(float a) -> float
aten::asin.complex(complex a) -> complex
aten::asin.Scalar(Scalar a) -> Scalar
aten::atan.int(int a) -> float
aten::atan.float(float a) -> float
aten::atan.complex(complex a) -> complex
aten::atan.Scalar(Scalar a) -> Scalar
aten::atan2.int(int a, int b) -> float
aten::atan2.float(float a, float b) -> float
aten::atan2.int_float(int a, float b) -> float
aten::atan2.float_int(float a, int b) -> float
aten::atan2.Scalar_Scalar(Scalar a, Scalar b) -> float
aten::cos.int(int a) -> float
aten::cos.float(float a) -> float
aten::cos.complex(complex a) -> complex
aten::cos.Scalar(Scalar a) -> Scalar
aten::sin.int(int a) -> float
aten::sin.float(float a) -> float
aten::sin.complex(complex a) -> complex
aten::sin.Scalar(Scalar a) -> Scalar
aten::tan.int(int a) -> float
aten::tan.float(float a) -> float
aten::tan.complex(complex a) -> complex
aten::tan.Scalar(Scalar a) -> Scalar
aten::asinh.int(int a) -> float
aten::asinh.float(float a) -> float
aten::asinh.complex(complex a) -> complex
aten::asinh.Scalar(Scalar a) -> Scalar
aten::atanh.int(int a) -> float
aten::atanh.float(float a) -> float
aten::atanh.complex(complex a) -> complex
aten::atanh.Scalar(Scalar a) -> Scalar
aten::acosh.int(int a) -> float
aten::acosh.float(float a) -> float
aten::acosh.complex(complex a) -> complex
aten::acosh.Scalar(Scalar a) -> Scalar
aten::fmod.int(int a, int b) -> float
aten::fmod.float(float a, float b) -> float
aten::fmod.int_float(int a, float b) -> float
aten::fmod.float_int(float a, int b) -> float
aten::fmod(Scalar a, Scalar b) -> float
aten::modf(float a) -> (float, float)
aten::factorial.int(int a) -> int
aten::frexp(float a) -> (float, int)
aten::isinf.float(float a) -> bool
aten::isinf.complex(complex a) -> bool
aten::degrees.int(int a) -> float
aten::degrees.float(float a) -> float
aten::degrees.Scalar(Scalar a) -> Scalar
aten::radians.int(int a) -> float
aten::radians.float(float a) -> float
aten::radians.Scalar(Scalar a) -> Scalar
aten::ldexp(float x, int i) -> float
aten::gcd.int(int a, int b) -> int
aten::isfinite.float(float a) -> bool
aten::isfinite.complex(complex a) -> bool
aten::mathremainder.int(int a, int b) -> float
aten::mathremainder.float(float a, float b) -> float
aten::mathremainder.int_float(int a, float b) -> float
aten::mathremainder.float_int(float a, int b) -> float
aten::mathremainder(Scalar a, Scalar b) -> float
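A minimal sketch (the function name polar_to_cartesian is illustrative) showing math-module calls on Python scalars inside a scripted function, which correspond to the aten::cos.float and aten::sin.float entries above:

```python
import math
from typing import Tuple

import torch

@torch.jit.script
def polar_to_cartesian(r: float, theta: float) -> Tuple[float, float]:
    # math.cos(float) and math.sin(float) on plain floats, per the list above
    return r * math.cos(theta), r * math.sin(theta)
```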