Shortcuts

Class Tensor

Page Contents

Class Documentation

class at::Tensor

Public Types

using hook_return_void_t = std::enable_if_t<std::is_void<typename std::result_of<T&(Tensor)>::type>::value, unsigned>
using hook_return_var_t = std::enable_if_t<std::is_same<typename std::result_of<T&(Tensor)>::type, Tensor>::value, unsigned>

Public Functions

Tensor()
Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl> tensor_impl)
Tensor(const Tensor&) = default
Tensor(Tensor&&) = default
int64_t dim() const
int64_t storage_offset() const
TensorImpl *unsafeGetTensorImpl() const
TensorImpl *unsafeReleaseTensorImpl()
const c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl> &getIntrusivePtr() const
bool defined() const
void reset()
Tensor &operator=(const Tensor &x) &
Tensor &operator=(Tensor &&x) &
Tensor &operator=(Scalar v) &&
Tensor &operator=(const Tensor&) &&
Tensor &operator=(Tensor&&) &&
bool is_same(const Tensor &other) const noexcept
size_t use_count() const noexcept
size_t weak_use_count() const noexcept
std::string toString() const
IntArrayRef sizes() const
IntArrayRef strides() const
c10::optional<DimnameList> opt_names() const
DimnameList names() const
int64_t ndimension() const
bool is_contiguous(at::MemoryFormat memory_format = at::MemoryFormat::Contiguous) const
bool is_non_overlapping_and_dense() const
at::MemoryFormat suggest_memory_format(bool channels_last_strides_exact_match = false) const
size_t nbytes() const
int64_t numel() const
size_t itemsize() const
int64_t element_size() const
C10_DEPRECATED_MESSAGE ("Tensor.type() is deprecated. Instead use Tensor.options(), which in many cases (e.g. in a constructor) is a drop-in replacement. If you were using data from type(), that is now available from Tensor itself, so instead of tensor.type().scalar_type(), use tensor.scalar_type() instead and instead of tensor.type().backend() use tensor.device().") DeprecatedTypeProperties &type() const
DispatchKeySet key_set() const
ScalarType scalar_type() const
bool has_storage() const
const Storage &storage() const
bool is_alias_of(const at::Tensor &other) const
Tensor toType(ScalarType t) const
Tensor toBackend(Backend b) const
C10_DEPRECATED_MESSAGE ("Tensor.is_variable() is deprecated; everything is a variable now. (If you want to assert that variable has been appropriately handled already, use at::impl::variable_excluded_from_dispatch())") bool is_variable() const noexcept
Layout layout() const noexcept

Returns a Tensor’s layout. Defined in Type.h.

caffe2::TypeMeta dtype() const noexcept

Returns a Tensor’s dtype (TypeMeta). Defined in TensorMethods.cpp.

Device device() const

Returns a Tensor’s device.

int64_t get_device() const

Returns a Tensor’s device index.

bool is_cuda() const

Returns if a Tensor has CUDA backend.

bool is_hip() const

Returns if a Tensor has HIP backend.

bool is_sparse() const

Returns if a Tensor has sparse backend.

bool is_mkldnn() const

Returns if a Tensor is mkldnn tensor.

bool is_vulkan() const

Returns if a Tensor is vulkan tensor.

bool is_metal() const

Returns if a Tensor is metal tensor.

bool is_quantized() const

Returns if a Tensor has quantized backend.

bool is_meta() const

Returns if a Tensor is a meta tensor.

Meta tensors can also have other designations.

QuantizerPtr quantizer() const

If a tensor is a quantized tensor, returns its quantizer. TODO: it’s not in native_functions.yaml yet as it’s not exposed to Python.

bool has_names() const

Returns if a Tensor has any dimension names.

const NamedTensorMeta *get_named_tensor_meta() const

Returns a Tensor’s dimension names data structure.

NamedTensorMeta *get_named_tensor_meta()
TensorOptions options() const

Returns the TensorOptions corresponding to this Tensor.

Defined in TensorOptions.h.

void *data_ptr() const
template<typename T>
T *data_ptr() const
template<typename T> C10_DEPRECATED_MESSAGE ("Tensor.data<T>() is deprecated. Please use Tensor.data_ptr<T>() instead.") T *data() const
template<typename T>
T item() const
void print() const
template<typename T, size_t N>
TensorAccessor<T, N> accessor() const &
template<typename T, size_t N>
TensorAccessor<T, N> accessor() && = delete
template<typename T, size_t N, template<typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
GenericPackedTensorAccessor<T, N, PtrTraits, index_t> generic_packed_accessor() const &
template<typename T, size_t N, template<typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
GenericPackedTensorAccessor<T, N> generic_packed_accessor() && = delete
template<typename T, size_t N, template<typename U> class PtrTraits = DefaultPtrTraits>
PackedTensorAccessor32<T, N, PtrTraits> packed_accessor32() const &
template<typename T, size_t N, template<typename U> class PtrTraits = DefaultPtrTraits>
PackedTensorAccessor32<T, N, PtrTraits> packed_accessor32() && = delete
template<typename T, size_t N, template<typename U> class PtrTraits = DefaultPtrTraits>
PackedTensorAccessor64<T, N, PtrTraits> packed_accessor64() const &
template<typename T, size_t N, template<typename U> class PtrTraits = DefaultPtrTraits>
PackedTensorAccessor64<T, N, PtrTraits> packed_accessor64() && = delete
template<typename T, size_t N, template<typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t> C10_DEPRECATED_MESSAGE ("packed_accessor is deprecated, use packed_accessor32 or packed_accessor64 instead") GenericPackedTensorAccessor<T, N, PtrTraits, index_t> packed_accessor() const &
template<typename T, size_t N, template<typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t> C10_DEPRECATED_MESSAGE ("packed_accessor is deprecated, use packed_accessor32 or packed_accessor64 instead") GenericPackedTensorAccessor<T, N, PtrTraits, index_t> packed_accessor() && = delete
Tensor operator~() const
Tensor operator-() const
Tensor &operator+=(const Tensor &other)
Tensor &operator+=(Scalar other)
Tensor &operator-=(const Tensor &other)
Tensor &operator-=(Scalar other)
Tensor &operator*=(const Tensor &other)
Tensor &operator*=(Scalar other)
Tensor &operator/=(const Tensor &other)
Tensor &operator/=(Scalar other)
Tensor &operator&=(const Tensor &other)
Tensor &operator|=(const Tensor &other)
Tensor &operator^=(const Tensor &other)
Tensor operator[](Scalar index) const
Tensor operator[](Tensor index) const
Tensor operator[](int64_t index) const
Tensor index(ArrayRef<at::indexing::TensorIndex> indices) const
Tensor index(std::initializer_list<at::indexing::TensorIndex> indices) const
Tensor &index_put_(ArrayRef<at::indexing::TensorIndex> indices, Tensor const &rhs)
Tensor &index_put_(ArrayRef<at::indexing::TensorIndex> indices, Scalar v)
Tensor &index_put_(std::initializer_list<at::indexing::TensorIndex> indices, Tensor const &rhs)
Tensor &index_put_(std::initializer_list<at::indexing::TensorIndex> indices, Scalar v)
Tensor cpu() const
Tensor cuda() const
Tensor hip() const
Tensor vulkan() const
Tensor metal() const
void backward(const Tensor &gradient = {}, c10::optional<bool> retain_graph = c10::nullopt, bool create_graph = false, c10::optional<TensorList> inputs = c10::nullopt) const

Computes the gradient of current tensor with respect to graph leaves.

The graph is differentiated using the chain rule. If the tensor is non-scalar (i.e. its data has more than one element) and requires gradient, the function additionally requires specifying gradient. It should be a tensor of matching type and location, that contains the gradient of the differentiated function w.r.t. this Tensor.

This function accumulates gradients in the leaves - you might need to zero them before calling it.

Parameters
  • gradient: Gradient w.r.t. the tensor. If it is a tensor, it will be automatically converted to a Tensor that does not require grad unless create_graph is True. None values can be specified for scalar Tensors or ones that don’t require grad. If a None value would be acceptable then this argument is optional.

  • retain_graph: If false, the graph used to compute the grads will be freed. Note that in nearly all cases setting this option to True is not needed and often can be worked around in a much more efficient way. Defaults to the value of create_graph.

  • create_graph: If true, graph of the derivative will be constructed, allowing to compute higher order derivative products. Defaults to false.

  • inputs: Inputs w.r.t. which the gradient will be accumulated into at::Tensor::grad. All other Tensors will be ignored. If not provided, the gradient is accumulated into all the leaf Tensors that were used to compute the current tensor. All the provided inputs must be leaf Tensors.

Tensor &set_requires_grad(bool requires_grad)
bool requires_grad() const
Tensor &mutable_grad()

Return a mutable reference to the gradient.

This is conventionally used as t.grad() = x to set a gradient to a completely new tensor. Note that this function works with a non-const Tensor and is not thread safe.

const Tensor &grad() const

This function returns an undefined tensor by default and returns a defined tensor the first time a call to backward() computes gradients for this Tensor.

The attribute will then contain the gradients computed and future calls to backward() will accumulate (add) gradients into it.

void _backward(TensorList inputs, const c10::optional<Tensor> &gradient = {}, c10::optional<bool> retain_graph = c10::nullopt, bool create_graph = false) const
void set_data(const Tensor &new_data) const
Tensor data() const
bool is_leaf() const

All Tensors that have requires_grad() which is false will be leaf Tensors by convention.

For Tensors that have requires_grad() which is true, they will be leaf Tensors if they were created by the user. This means that they are not the result of an operation and so grad_fn() is nullptr.

Only leaf Tensors will have their grad() populated during a call to backward(). To get grad() populated for non-leaf Tensors, you can use retain_grad().

Example:

auto a = torch::rand(10, torch::requires_grad());
std::cout << a.is_leaf() << std::endl; // prints `true`

auto b = torch::rand(10, torch::requires_grad()).to(torch::kCUDA);
std::cout << b.is_leaf() << std::endl; // prints `false`
// b was created by the operation that cast a cpu Tensor into a cuda Tensor

auto c = torch::rand(10, torch::requires_grad()) + 2;
std::cout << c.is_leaf() << std::endl; // prints `false`
// c was created by the addition operation

auto d = torch::rand(10).cuda();
std::cout << d.is_leaf() << std::endl; // prints `true`
// d does not require gradients and so has no operation creating it (that is tracked by the autograd engine)

auto e = torch::rand(10).cuda().requires_grad_();
std::cout << e.is_leaf() << std::endl; // prints `true`
// e requires gradients and has no operations creating it

auto f = torch::rand(10, torch::device(torch::kCUDA).requires_grad(true));
std::cout << f.is_leaf() << std::endl; // prints `true`
// f requires grad, has no operation creating it

int64_t output_nr() const
int64_t _version() const
Tensor &requires_grad_(bool requires_grad = true) const
void retain_grad() const

Enables .grad() for non-leaf Tensors.

Tensor &rename_(c10::optional<DimnameList> names) const
Tensor rename(c10::optional<DimnameList> names) const
Tensor align_to(DimnameList names) const
Tensor align_to(DimnameList order, int64_t ellipsis_idx) const
Tensor align_as(const Tensor &other) const
Tensor refine_names(DimnameList names) const
Tensor abs() const
Tensor &abs_() const
Tensor absolute() const
Tensor &absolute_() const
Tensor angle() const
Tensor sgn() const
Tensor &sgn_() const
Tensor conj() const
Tensor acos() const
Tensor &acos_() const
Tensor arccos() const
Tensor &arccos_() const
Tensor add(const Tensor &other, Scalar alpha = 1) const
Tensor &add_(const Tensor &other, Scalar alpha = 1) const
Tensor add(Scalar other, Scalar alpha = 1) const
Tensor &add_(Scalar other, Scalar alpha = 1) const
Tensor addmv(const Tensor &mat, const Tensor &vec, Scalar beta = 1, Scalar alpha = 1) const
Tensor &addmv_(const Tensor &mat, const Tensor &vec, Scalar beta = 1, Scalar alpha = 1) const
Tensor addr(const Tensor &vec1, const Tensor &vec2, Scalar beta = 1, Scalar alpha = 1) const
Tensor &addr_(const Tensor &vec1, const Tensor &vec2, Scalar beta = 1, Scalar alpha = 1) const
Tensor all(int64_t dim, bool keepdim = false) const
Tensor all(Dimname dim, bool keepdim = false) const
bool allclose(const Tensor &other, double rtol = 1e-05, double atol = 1e-08, bool equal_nan = false) const
Tensor any(int64_t dim, bool keepdim = false) const
Tensor any(Dimname dim, bool keepdim = false) const
Tensor argmax(c10::optional<int64_t> dim = c10::nullopt, bool keepdim = false) const
Tensor argmin(c10::optional<int64_t> dim = c10::nullopt, bool keepdim = false) const
Tensor acosh() const
Tensor &acosh_() const
Tensor arccosh() const
Tensor &arccosh_() const
Tensor asinh() const
Tensor &asinh_() const
Tensor arcsinh() const
Tensor &arcsinh_() const
Tensor atanh() const
Tensor &atanh_() const
Tensor arctanh() const
Tensor &arctanh_() const
Tensor as_strided(IntArrayRef size, IntArrayRef stride, c10::optional<int64_t> storage_offset = c10::nullopt) const
Tensor &as_strided_(IntArrayRef size, IntArrayRef stride, c10::optional<int64_t> storage_offset = c10::nullopt) const
Tensor asin() const
Tensor &asin_() const
Tensor arcsin() const
Tensor &arcsin_() const
Tensor atan() const
Tensor &atan_() const
Tensor arctan() const
Tensor &arctan_() const
Tensor baddbmm(const Tensor &batch1, const Tensor &batch2, Scalar beta = 1, Scalar alpha = 1) const
Tensor &baddbmm_(const Tensor &batch1, const Tensor &batch2, Scalar beta = 1, Scalar alpha = 1) const
Tensor bernoulli(c10::optional<Generator> generator = c10::nullopt) const
Tensor &bernoulli_(const Tensor &p, c10::optional<Generator> generator = c10::nullopt) const
Tensor &bernoulli_(double p = 0.5, c10::optional<Generator> generator = c10::nullopt) const
Tensor bernoulli(double p, c10::optional<Generator> generator = c10::nullopt) const
Tensor bincount(const c10::optional<Tensor> &weights = {}, int64_t minlength = 0) const
Tensor bitwise_not() const
Tensor &bitwise_not_() const
Tensor copysign(const Tensor &other) const
Tensor &copysign_(const Tensor &other) const
Tensor copysign(Scalar other) const
Tensor &copysign_(Scalar other) const
Tensor logical_not() const
Tensor &logical_not_() const
Tensor logical_xor(const Tensor &other) const
Tensor &logical_xor_(const Tensor &other) const
Tensor logical_and(const Tensor &other) const
Tensor &logical_and_(const Tensor &other) const
Tensor logical_or(const Tensor &other) const
Tensor &logical_or_(const Tensor &other) const
Tensor bmm(const Tensor &mat2) const
Tensor ceil() const
Tensor &ceil_() const
std::vector<Tensor> unsafe_chunk(int64_t chunks, int64_t dim = 0) const
std::vector<Tensor> chunk(int64_t chunks, int64_t dim = 0) const
std::vector<Tensor> tensor_split(int64_t sections, int64_t dim = 0) const
std::vector<Tensor> tensor_split(IntArrayRef indices, int64_t dim = 0) const
Tensor clamp(c10::optional<Scalar> min = c10::nullopt, c10::optional<Scalar> max = c10::nullopt) const
Tensor &clamp_(c10::optional<Scalar> min = c10::nullopt, c10::optional<Scalar> max = c10::nullopt) const
Tensor clamp_max(Scalar max) const
Tensor &clamp_max_(Scalar max) const
Tensor clamp_min(Scalar min) const
Tensor &clamp_min_(Scalar min) const
Tensor clip(c10::optional<Scalar> min = c10::nullopt, c10::optional<Scalar> max = c10::nullopt) const
Tensor &clip_(c10::optional<Scalar> min = c10::nullopt, c10::optional<Scalar> max = c10::nullopt) const
Tensor contiguous(MemoryFormat memory_format = MemoryFormat::Contiguous) const
Tensor &copy_(const Tensor &src, bool non_blocking = false) const
Tensor cos() const
Tensor &cos_() const
Tensor cosh() const
Tensor &cosh_() const
Tensor count_nonzero(IntArrayRef dim) const
Tensor count_nonzero(c10::optional<int64_t> dim = c10::nullopt) const
std::tuple<Tensor, Tensor> cummax(int64_t dim) const
std::tuple<Tensor, Tensor> cummax(Dimname dim) const
std::tuple<Tensor, Tensor> cummin(int64_t dim) const
std::tuple<Tensor, Tensor> cummin(Dimname dim) const
Tensor cumprod(int64_t dim, c10::optional<ScalarType> dtype = c10::nullopt) const
Tensor &cumprod_(int64_t dim, c10::optional<ScalarType> dtype = c10::nullopt) const
Tensor cumprod(Dimname dim, c10::optional<ScalarType> dtype = c10::nullopt) const
Tensor &cumprod_(Dimname dim, c10::optional<ScalarType> dtype = c10::nullopt) const
Tensor cumsum(int64_t dim, c10::optional<ScalarType> dtype = c10::nullopt) const
Tensor &cumsum_(int64_t dim, c10::optional<ScalarType> dtype = c10::nullopt) const
Tensor cumsum(Dimname dim, c10::optional<ScalarType> dtype = c10::nullopt) const
Tensor &cumsum_(Dimname dim, c10::optional<ScalarType> dtype = c10::nullopt) const
Tensor diag_embed(int64_t offset = 0, int64_t dim1 = -2, int64_t dim2 = -1) const
Tensor diagflat(int64_t offset = 0) const
Tensor diagonal(int64_t offset = 0, int64_t dim1 = 0, int64_t dim2 = 1) const
Tensor diagonal(Dimname outdim, Dimname dim1, Dimname dim2, int64_t offset = 0) const
Tensor &fill_diagonal_(Scalar fill_value, bool wrap = false) const
Tensor div(const Tensor &other) const
Tensor &div_(const Tensor &other) const
Tensor div(Scalar other) const
Tensor &div_(Scalar other) const
Tensor divide(const Tensor &other) const
Tensor &divide_(const Tensor &other) const
Tensor divide(Scalar other) const
Tensor &divide_(Scalar other) const
Tensor true_divide(const Tensor &other) const
Tensor &true_divide_(const Tensor &other) const
Tensor true_divide(Scalar other) const
Tensor &true_divide_(Scalar other) const
Tensor dot(const Tensor &tensor) const
Tensor vdot(const Tensor &other) const
Tensor new_empty(IntArrayRef size, const TensorOptions &options = {}) const
Tensor new_empty(IntArrayRef size, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory) const
Tensor new_empty_strided(IntArrayRef size, IntArrayRef stride, const TensorOptions &options = {}) const
Tensor new_empty_strided(IntArrayRef size, IntArrayRef stride, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory) const
Tensor new_full(IntArrayRef size, Scalar fill_value, const TensorOptions &options = {}) const
Tensor new_full(IntArrayRef size, Scalar fill_value, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory) const
Tensor new_zeros(IntArrayRef size, const TensorOptions &options = {}) const
Tensor new_zeros(IntArrayRef size, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory) const
Tensor &resize_(IntArrayRef size, c10::optional<MemoryFormat> memory_format = c10::nullopt) const
Tensor erf() const
Tensor &erf_() const
Tensor erfc() const
Tensor &erfc_() const
Tensor exp() const
Tensor &exp_() const
Tensor exp2() const
Tensor &exp2_() const
Tensor expm1() const
Tensor &expm1_() const
Tensor expand(IntArrayRef size, bool implicit = false) const
Tensor expand_as(const Tensor &other) const
Tensor flatten(int64_t start_dim = 0, int64_t end_dim = -1) const
Tensor flatten(int64_t start_dim, int64_t end_dim, Dimname out_dim) const
Tensor flatten(Dimname start_dim, Dimname end_dim, Dimname out_dim) const
Tensor flatten(DimnameList dims, Dimname out_dim) const
Tensor unflatten(int64_t dim, IntArrayRef sizes, c10::optional<DimnameList> names = c10::nullopt) const
Tensor unflatten(Dimname dim, IntArrayRef sizes, DimnameList names) const
Tensor &fill_(Scalar value) const
Tensor &fill_(const Tensor &value) const
Tensor floor() const
Tensor &floor_() const
Tensor floor_divide(const Tensor &other) const
Tensor &floor_divide_(const Tensor &other) const
Tensor floor_divide(Scalar other) const
Tensor &floor_divide_(Scalar other) const
Tensor frac() const
Tensor &frac_() const
Tensor gcd(const Tensor &other) const
Tensor &gcd_(const Tensor &other) const
Tensor lcm(const Tensor &other) const
Tensor &lcm_(const Tensor &other) const
Tensor ifft(int64_t signal_ndim, bool normalized = false) const
Tensor rfft(int64_t signal_ndim, bool normalized = false, bool onesided = true) const
Tensor irfft(int64_t signal_ndim, bool normalized = false, bool onesided = true, IntArrayRef signal_sizes = {}) const
Tensor index(TensorList indices) const
Tensor &index_copy_(int64_t dim, const Tensor &index, const Tensor &source) const
Tensor index_copy(int64_t dim, const Tensor &index, const Tensor &source) const
Tensor &index_copy_(Dimname dim, const Tensor &index, const Tensor &source) const
Tensor index_copy(Dimname dim, const Tensor &index, const Tensor &source) const
Tensor &index_put_(TensorList indices, const Tensor &values, bool accumulate = false) const
Tensor index_put(TensorList indices, const Tensor &values, bool accumulate = false) const
Tensor inverse() const
Tensor isclose(const Tensor &other, double rtol = 1e-05, double atol = 1e-08, bool equal_nan = false) const
Tensor isnan() const
bool is_distributed() const
bool is_floating_point() const
bool is_complex() const
Tensor isreal() const
bool is_nonzero() const
bool is_same_size(const Tensor &other) const
bool is_signed() const
Tensor kron(const Tensor &other) const
std::tuple<Tensor, Tensor> kthvalue(int64_t k, int64_t dim = -1, bool keepdim = false) const
std::tuple<Tensor, Tensor> kthvalue(int64_t k, Dimname dim, bool keepdim = false) const
Tensor nan_to_num(c10::optional<double> nan = c10::nullopt, c10::optional<double> posinf = c10::nullopt, c10::optional<double> neginf = c10::nullopt) const
Tensor &nan_to_num_(c10::optional<double> nan = c10::nullopt, c10::optional<double> posinf = c10::nullopt, c10::optional<double> neginf = c10::nullopt) const
Tensor ldexp(const Tensor &other) const
Tensor &ldexp_(const Tensor &other) const
Tensor log() const
Tensor &log_() const
Tensor log10() const
Tensor &log10_() const
Tensor log1p() const
Tensor &log1p_() const
Tensor log2() const
Tensor &log2_() const
Tensor logaddexp(const Tensor &other) const
Tensor logaddexp2(const Tensor &other) const
Tensor logdet() const
Tensor log_softmax(int64_t dim, c10::optional<ScalarType> dtype = c10::nullopt) const
Tensor log_softmax(Dimname dim, c10::optional<ScalarType> dtype = c10::nullopt) const
Tensor logcumsumexp(int64_t dim) const
Tensor logcumsumexp(Dimname dim) const
Tensor logsumexp(IntArrayRef dim, bool keepdim = false) const
Tensor logsumexp(DimnameList dim, bool keepdim = false) const
Tensor matmul(const Tensor &other) const
Tensor matrix_power(int64_t n) const
Tensor matrix_exp() const
std::tuple<Tensor, Tensor> max(int64_t dim, bool keepdim = false) const
std::tuple<Tensor, Tensor> max(Dimname dim, bool keepdim = false) const
Tensor amax(IntArrayRef dim = {}, bool keepdim = false) const
Tensor mean(c10::optional<ScalarType> dtype = c10::nullopt) const
Tensor mean(IntArrayRef dim, bool keepdim = false, c10::optional<ScalarType> dtype = c10::nullopt) const
Tensor mean(DimnameList dim, bool keepdim = false, c10::optional<ScalarType> dtype = c10::nullopt) const
Tensor median() const
std::tuple<Tensor, Tensor> median(int64_t dim, bool keepdim = false) const
std::tuple<Tensor, Tensor> median(Dimname dim, bool keepdim = false) const
Tensor nanmedian() const
std::tuple<Tensor, Tensor> nanmedian(int64_t dim, bool keepdim = false) const
std::tuple<Tensor, Tensor> nanmedian(Dimname dim, bool keepdim = false) const
std::tuple<Tensor, Tensor> min(int64_t dim, bool keepdim = false) const
std::tuple<Tensor, Tensor> min(Dimname dim, bool keepdim = false) const
Tensor amin(IntArrayRef dim = {}, bool keepdim = false) const
Tensor mm(const Tensor &mat2) const
std::tuple<Tensor, Tensor> mode(int64_t dim = -1, bool keepdim = false) const
std::tuple<Tensor, Tensor> mode(Dimname dim, bool keepdim = false) const
Tensor mul(const Tensor &other) const
Tensor &mul_(const Tensor &other) const
Tensor mul(Scalar other) const
Tensor &mul_(Scalar other) const
Tensor multiply(const Tensor &other) const
Tensor &multiply_(const Tensor &other) const
Tensor multiply(Scalar other) const
Tensor &multiply_(Scalar other) const
Tensor mv(const Tensor &vec) const
Tensor mvlgamma(int64_t p) const
Tensor &mvlgamma_(int64_t p) const
Tensor narrow_copy(int64_t dim, int64_t start, int64_t length) const
Tensor narrow(int64_t dim, int64_t start, int64_t length) const
Tensor narrow(int64_t dim, const Tensor &start, int64_t length) const
Tensor permute(IntArrayRef dims) const
Tensor movedim(IntArrayRef source, IntArrayRef destination) const
Tensor movedim(int64_t source, int64_t destination) const
Tensor numpy_T() const
bool is_pinned() const
Tensor pin_memory() const
Tensor pinverse(double rcond = 1e-15) const
Tensor rad2deg() const
Tensor &rad2deg_() const
Tensor deg2rad() const
Tensor &deg2rad_() const
Tensor ravel() const
Tensor reciprocal() const
Tensor &reciprocal_() const
Tensor neg() const
Tensor &neg_() const
Tensor negative() const
Tensor &negative_() const
Tensor repeat(IntArrayRef repeats) const
Tensor repeat_interleave(const Tensor &repeats, c10::optional<int64_t> dim = c10::nullopt) const
Tensor repeat_interleave(int64_t repeats, c10::optional<int64_t> dim = c10::nullopt) const
Tensor reshape(IntArrayRef shape) const
Tensor reshape_as(const Tensor &other) const
Tensor round() const
Tensor &round_() const
Tensor relu() const
Tensor &relu_() const
Tensor prelu(const Tensor &weight) const
std::tuple<Tensor, Tensor> prelu_backward(const Tensor &grad_output, const Tensor &weight) const
Tensor hardshrink(Scalar lambd = 0.5) const
Tensor hardshrink_backward(const Tensor &grad_out, Scalar lambd) const
Tensor rsqrt() const
Tensor &rsqrt_() const
Tensor select(Dimname dim, int64_t index) const
Tensor select(int64_t dim, int64_t index) const
Tensor sigmoid() const
Tensor &sigmoid_() const
Tensor logit(c10::optional<double> eps = c10::nullopt) const
Tensor &logit_(c10::optional<double> eps = c10::nullopt) const
Tensor sin() const
Tensor &sin_() const
Tensor sinh() const
Tensor &sinh_() const
Tensor detach() const

Returns a new Tensor, detached from the current graph.

The result will never require gradient.

Tensor &detach_() const

Detaches the Tensor from the graph that created it, making it a leaf.

Views cannot be detached in-place.

int64_t size(int64_t dim) const
int64_t size(Dimname dim) const
Tensor slice(int64_t dim = 0, int64_t start = 0, int64_t end = 9223372036854775807, int64_t step = 1) const
std::tuple<Tensor, Tensor> slogdet() const
Tensor smm(const Tensor &mat2) const
Tensor softmax(int64_t dim, c10::optional<ScalarType> dtype = c10::nullopt) const
Tensor softmax(Dimname dim, c10::optional<ScalarType> dtype = c10::nullopt) const
std::vector<Tensor> unsafe_split(int64_t split_size, int64_t dim = 0) const
std::vector<Tensor> split(int64_t split_size, int64_t dim = 0) const
std::vector<Tensor> unsafe_split_with_sizes(IntArrayRef split_sizes, int64_t dim = 0) const
std::vector<Tensor> split_with_sizes(IntArrayRef split_sizes, int64_t dim = 0) const
Tensor squeeze() const
Tensor squeeze(int64_t dim) const
Tensor squeeze(Dimname dim) const
Tensor &squeeze_() const
Tensor &squeeze_(int64_t dim) const
Tensor &squeeze_(Dimname dim) const
Tensor sspaddmm(const Tensor &mat1, const Tensor &mat2, Scalar beta = 1, Scalar alpha = 1) const
Tensor stft(int64_t n_fft, c10::optional<int64_t> hop_length = c10::nullopt, c10::optional<int64_t> win_length = c10::nullopt, const c10::optional<Tensor> &window = {}, bool normalized = false, c10::optional<bool> onesided = c10::nullopt, c10::optional<bool> return_complex = c10::nullopt) const
Tensor istft(int64_t n_fft, c10::optional<int64_t> hop_length = c10::nullopt, c10::optional<int64_t> win_length = c10::nullopt, const c10::optional<Tensor> &window = {}, bool center = true, bool normalized = false, c10::optional<bool> onesided = c10::nullopt, c10::optional<int64_t> length = c10::nullopt, bool return_complex = false) const
int64_t stride(int64_t dim) const
int64_t stride(Dimname dim) const
Tensor sum(c10::optional<ScalarType> dtype = c10::nullopt) const
Tensor sum(IntArrayRef dim, bool keepdim = false, c10::optional<ScalarType> dtype = c10::nullopt) const
Tensor sum(DimnameList dim, bool keepdim = false, c10::optional<ScalarType> dtype = c10::nullopt) const
Tensor nansum(c10::optional<ScalarType> dtype = c10::nullopt) const
Tensor nansum(IntArrayRef dim, bool keepdim = false, c10::optional<ScalarType> dtype = c10::nullopt) const
Tensor sum_to_size(IntArrayRef size) const
Tensor sqrt() const
Tensor &sqrt_() const
Tensor square() const
Tensor &square_() const
Tensor std(bool unbiased = true) const
Tensor std(IntArrayRef dim, bool unbiased = true, bool keepdim = false) const
Tensor std(DimnameList dim, bool unbiased = true, bool keepdim = false) const
Tensor prod(c10::optional<ScalarType> dtype = c10::nullopt) const
Tensor prod(int64_t dim, bool keepdim = false, c10::optional<ScalarType> dtype = c10::nullopt) const
Tensor prod(Dimname dim, bool keepdim = false, c10::optional<ScalarType> dtype = c10::nullopt) const
Tensor t() const
Tensor &t_() const
Tensor tan() const
Tensor &tan_() const
Tensor tanh() const
Tensor &tanh_() const
Tensor tile(IntArrayRef dims) const
Tensor transpose(int64_t dim0, int64_t dim1) const
Tensor transpose(Dimname dim0, Dimname dim1) const
Tensor &transpose_(int64_t dim0, int64_t dim1) const
Tensor flip(IntArrayRef dims) const
Tensor fliplr() const
Tensor flipud() const
Tensor roll(IntArrayRef shifts, IntArrayRef dims = {}) const
Tensor rot90(int64_t k = 1, IntArrayRef dims = {0, 1}) const
Tensor trunc() const
Tensor &trunc_() const
Tensor fix() const
Tensor &fix_() const
Tensor type_as(const Tensor &other) const
Tensor unsqueeze(int64_t dim) const
Tensor &unsqueeze_(int64_t dim) const
Tensor var(bool unbiased = true) const
Tensor var(IntArrayRef dim, bool unbiased = true, bool keepdim = false) const
Tensor var(DimnameList dim, bool unbiased = true, bool keepdim = false) const
Tensor view_as(const Tensor &other) const
Tensor where(const Tensor &condition, const Tensor &other) const
Tensor norm(c10::optional<Scalar> p, ScalarType dtype) const
Tensor norm(Scalar p = 2) const
Tensor norm(c10::optional<Scalar> p, IntArrayRef dim, bool keepdim, ScalarType dtype) const
Tensor norm(c10::optional<Scalar> p, IntArrayRef dim, bool keepdim = false) const
Tensor norm(c10::optional<Scalar> p, DimnameList dim, bool keepdim, ScalarType dtype) const
Tensor norm(c10::optional<Scalar> p, DimnameList dim, bool keepdim = false) const
Tensor clone(c10::optional<MemoryFormat> memory_format = c10::nullopt) const
Tensor &resize_as_(const Tensor &the_template, c10::optional<MemoryFormat> memory_format = c10::nullopt) const
Tensor &zero_() const
Tensor sub(const Tensor &other, Scalar alpha = 1) const
Tensor &sub_(const Tensor &other, Scalar alpha = 1) const
Tensor sub(Scalar other, Scalar alpha = 1) const
Tensor &sub_(Scalar other, Scalar alpha = 1) const
Tensor subtract(const Tensor &other, Scalar alpha = 1) const
Tensor &subtract_(const Tensor &other, Scalar alpha = 1) const
Tensor subtract(Scalar other, Scalar alpha = 1) const
Tensor &subtract_(Scalar other, Scalar alpha = 1) const
Tensor heaviside(const Tensor &values) const
Tensor &heaviside_(const Tensor &values) const
Tensor addmm(const Tensor &mat1, const Tensor &mat2, Scalar beta = 1, Scalar alpha = 1) const
Tensor &addmm_(const Tensor &mat1, const Tensor &mat2, Scalar beta = 1, Scalar alpha = 1) const
Tensor &sparse_resize_(IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) const
Tensor &sparse_resize_and_clear_(IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) const
Tensor sparse_mask(const Tensor &mask) const
Tensor to_dense() const
int64_t sparse_dim() const
int64_t _dimI() const
int64_t dense_dim() const
int64_t _dimV() const
int64_t _nnz() const
Tensor coalesce() const
bool is_coalesced() const
Tensor _indices() const
Tensor _values() const
Tensor &_coalesced_(bool coalesced) const
Tensor indices() const
Tensor values() const
std::vector<Tensor> unbind(int64_t dim = 0) const
std::vector<Tensor> unbind(Dimname dim) const
Tensor to_sparse(int64_t sparse_dim) const
Tensor to_sparse() const
Tensor to_mkldnn() const
Tensor dequantize() const
double q_scale() const
int64_t q_zero_point() const
Tensor q_per_channel_scales() const
Tensor q_per_channel_zero_points() const
int64_t q_per_channel_axis() const
Tensor int_repr() const
QScheme qscheme() const
Tensor to(const TensorOptions &options = {}, bool non_blocking = false, bool copy = false, c10::optional<MemoryFormat> memory_format = c10::nullopt) const
Tensor to(c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory, bool non_blocking, bool copy, c10::optional<MemoryFormat> memory_format) const
Tensor to(Device device, ScalarType dtype, bool non_blocking = false, bool copy = false, c10::optional<MemoryFormat> memory_format = c10::nullopt) const
Tensor to(ScalarType dtype, bool non_blocking = false, bool copy = false, c10::optional<MemoryFormat> memory_format = c10::nullopt) const
Tensor to(const Tensor &other, bool non_blocking = false, bool copy = false, c10::optional<MemoryFormat> memory_format = c10::nullopt) const
Scalar item() const
Tensor &set_(Storage source) const
Tensor &set_(Storage source, int64_t storage_offset, IntArrayRef size, IntArrayRef stride = {}) const
Tensor &set_(const Tensor &source) const
Tensor &set_() const
Tensor &set_quantizer_(ConstQuantizerPtr quantizer) const
bool is_set_to(const Tensor &tensor) const
Tensor &masked_fill_(const Tensor &mask, Scalar value) const
Tensor masked_fill(const Tensor &mask, Scalar value) const
Tensor &masked_fill_(const Tensor &mask, const Tensor &value) const
Tensor masked_fill(const Tensor &mask, const Tensor &value) const
Tensor &masked_scatter_(const Tensor &mask, const Tensor &source) const
Tensor masked_scatter(const Tensor &mask, const Tensor &source) const
Tensor view(IntArrayRef size) const
Tensor &put_(const Tensor &index, const Tensor &source, bool accumulate = false) const
Tensor &index_add_(int64_t dim, const Tensor &index, const Tensor &source) const
Tensor index_add(int64_t dim, const Tensor &index, const Tensor &source) const
Tensor index_add(Dimname dim, const Tensor &index, const Tensor &source) const
Tensor &index_fill_(int64_t dim, const Tensor &index, Scalar value) const
Tensor index_fill(int64_t dim, const Tensor &index, Scalar value) const
Tensor &index_fill_(int64_t dim, const Tensor &index, const Tensor &value) const
Tensor index_fill(int64_t dim, const Tensor &index, const Tensor &value) const
Tensor &index_fill_(Dimname dim, const Tensor &index, Scalar value) const
Tensor &index_fill_(Dimname dim, const Tensor &index, const Tensor &value) const
Tensor index_fill(Dimname dim, const Tensor &index, Scalar value) const
Tensor index_fill(Dimname dim, const Tensor &index, const Tensor &value) const
Tensor &scatter_(int64_t dim, const Tensor &index, const Tensor &src) const
Tensor scatter(int64_t dim, const Tensor &index, const Tensor &src) const
Tensor &scatter_(int64_t dim, const Tensor &index, Scalar value) const
Tensor scatter(int64_t dim, const Tensor &index, Scalar value) const
Tensor scatter(Dimname dim, const Tensor &index, const Tensor &src) const
Tensor scatter(Dimname dim, const Tensor &index, Scalar value) const
Tensor &scatter_(int64_t dim, const Tensor &index, const Tensor &src, std::string reduce) const
Tensor &scatter_(int64_t dim, const Tensor &index, Scalar value, std::string reduce) const
Tensor &scatter_add_(int64_t dim, const Tensor &index, const Tensor &src) const
Tensor scatter_add(int64_t dim, const Tensor &index, const Tensor &src) const
Tensor scatter_add(Dimname dim, const Tensor &index, const Tensor &src) const
Tensor &eq_(Scalar other) const
Tensor &eq_(const Tensor &other) const
Tensor bitwise_and(Scalar other) const
Tensor bitwise_and(const Tensor &other) const
Tensor &bitwise_and_(Scalar other) const
Tensor &bitwise_and_(const Tensor &other) const
Tensor __and__(Scalar other) const
Tensor __and__(const Tensor &other) const
Tensor &__iand__(Scalar other) const
Tensor &__iand__(const Tensor &other) const
Tensor bitwise_or(Scalar other) const
Tensor bitwise_or(const Tensor &other) const
Tensor &bitwise_or_(Scalar other) const
Tensor &bitwise_or_(const Tensor &other) const
Tensor __or__(Scalar other) const
Tensor __or__(const Tensor &other) const
Tensor &__ior__(Scalar other) const
Tensor &__ior__(const Tensor &other) const
Tensor bitwise_xor(Scalar other) const
Tensor bitwise_xor(const Tensor &other) const
Tensor &bitwise_xor_(Scalar other) const
Tensor &bitwise_xor_(const Tensor &other) const
Tensor __xor__(Scalar other) const
Tensor __xor__(const Tensor &other) const
Tensor &__ixor__(Scalar other) const
Tensor &__ixor__(const Tensor &other) const
Tensor __lshift__(Scalar other) const
Tensor __lshift__(const Tensor &other) const
Tensor &__ilshift__(Scalar other) const
Tensor &__ilshift__(const Tensor &other) const
Tensor __rshift__(Scalar other) const
Tensor __rshift__(const Tensor &other) const
Tensor &__irshift__(Scalar other) const
Tensor &__irshift__(const Tensor &other) const
Tensor &lgamma_() const
Tensor &atan2_(const Tensor &other) const
Tensor &tril_(int64_t diagonal = 0) const
Tensor &triu_(int64_t diagonal = 0) const
Tensor &digamma_() const
Tensor &polygamma_(int64_t n) const
Tensor &renorm_(Scalar p, int64_t dim, Scalar maxnorm) const
Tensor &pow_(Scalar exponent) const
Tensor &pow_(const Tensor &exponent) const
Tensor &lerp_(const Tensor &end, Scalar weight) const
Tensor &lerp_(const Tensor &end, const Tensor &weight) const
Tensor &fmod_(Scalar other) const
Tensor &fmod_(const Tensor &other) const
Tensor &remainder_(Scalar other) const
Tensor &remainder_(const Tensor &other) const
Tensor &addbmm_(const Tensor &batch1, const Tensor &batch2, Scalar beta = 1, Scalar alpha = 1) const
Tensor addbmm(const Tensor &batch1, const Tensor &batch2, Scalar beta = 1, Scalar alpha = 1) const
Tensor &addcdiv_(const Tensor &tensor1, const Tensor &tensor2, Scalar value = 1) const
Tensor &random_(int64_t from, c10::optional<int64_t> to, c10::optional<Generator> generator = c10::nullopt) const
Tensor &random_(int64_t to, c10::optional<Generator> generator = c10::nullopt) const
Tensor &random_(c10::optional<Generator> generator = c10::nullopt) const
Tensor &uniform_(double from = 0, double to = 1, c10::optional<Generator> generator = c10::nullopt) const
Tensor &cauchy_(double median = 0, double sigma = 1, c10::optional<Generator> generator = c10::nullopt) const
Tensor &log_normal_(double mean = 1, double std = 2, c10::optional<Generator> generator = c10::nullopt) const
Tensor &exponential_(double lambd = 1, c10::optional<Generator> generator = c10::nullopt) const
Tensor &geometric_(double p, c10::optional<Generator> generator = c10::nullopt) const
Tensor diag(int64_t diagonal = 0) const
Tensor cross(const Tensor &other, c10::optional<int64_t> dim = c10::nullopt) const
Tensor triu(int64_t diagonal = 0) const
Tensor tril(int64_t diagonal = 0) const
Tensor trace() const
Tensor ne(Scalar other) const
Tensor ne(const Tensor &other) const
Tensor &ne_(Scalar other) const
Tensor &ne_(const Tensor &other) const
Tensor not_equal(Scalar other) const
Tensor not_equal(const Tensor &other) const
Tensor &not_equal_(Scalar other) const
Tensor &not_equal_(const Tensor &other) const
Tensor eq(Scalar other) const
Tensor eq(const Tensor &other) const
Tensor ge(Scalar other) const
Tensor ge(const Tensor &other) const
Tensor &ge_(Scalar other) const
Tensor &ge_(const Tensor &other) const
Tensor greater_equal(Scalar other) const
Tensor greater_equal(const Tensor &other) const
Tensor &greater_equal_(Scalar other) const
Tensor &greater_equal_(const Tensor &other) const
Tensor le(Scalar other) const
Tensor le(const Tensor &other) const
Tensor &le_(Scalar other) const
Tensor &le_(const Tensor &other) const
Tensor less_equal(Scalar other) const
Tensor less_equal(const Tensor &other) const
Tensor &less_equal_(Scalar other) const
Tensor &less_equal_(const Tensor &other) const
Tensor gt(Scalar other) const
Tensor gt(const Tensor &other) const
Tensor &gt_(Scalar other) const
Tensor &gt_(const Tensor &other) const
Tensor greater(Scalar other) const
Tensor greater(const Tensor &other) const
Tensor &greater_(Scalar other) const
Tensor &greater_(const Tensor &other) const
Tensor lt(Scalar other) const
Tensor lt(const Tensor &other) const
Tensor &lt_(Scalar other) const
Tensor &lt_(const Tensor &other) const
Tensor less(Scalar other) const
Tensor less(const Tensor &other) const
Tensor &less_(Scalar other) const
Tensor &less_(const Tensor &other) const
Tensor take(const Tensor &index) const
Tensor index_select(int64_t dim, const Tensor &index) const
Tensor index_select(Dimname dim, const Tensor &index) const
Tensor masked_select(const Tensor &mask) const
Tensor nonzero() const
std::vector<Tensor> nonzero_numpy() const
Tensor gather(int64_t dim, const Tensor &index, bool sparse_grad = false) const
Tensor gather(Dimname dim, const Tensor &index, bool sparse_grad = false) const
Tensor addcmul(const Tensor &tensor1, const Tensor &tensor2, Scalar value = 1) const
Tensor &addcmul_(const Tensor &tensor1, const Tensor &tensor2, Scalar value = 1) const
Tensor addcdiv(const Tensor &tensor1, const Tensor &tensor2, Scalar value = 1) const
std::tuple<Tensor, Tensor> lstsq(const Tensor &A) const
std::tuple<Tensor, Tensor> triangular_solve(const Tensor &A, bool upper = true, bool transpose = false, bool unitriangular = false) const
std::tuple<Tensor, Tensor> symeig(bool eigenvectors = false, bool upper = true) const
std::tuple<Tensor, Tensor> eig(bool eigenvectors = false) const
std::tuple<Tensor, Tensor, Tensor> svd(bool some = true, bool compute_uv = true) const
Tensor swapaxes(int64_t axis0, int64_t axis1) const
Tensor &swapaxes_(int64_t axis0, int64_t axis1) const
Tensor swapdims(int64_t dim0, int64_t dim1) const
Tensor &swapdims_(int64_t dim0, int64_t dim1) const
Tensor cholesky(bool upper = false) const
Tensor cholesky_solve(const Tensor &input2, bool upper = false) const
std::tuple<Tensor, Tensor> solve(const Tensor &A) const
Tensor cholesky_inverse(bool upper = false) const
std::tuple<Tensor, Tensor> qr(bool some = true) const
std::tuple<Tensor, Tensor> geqrf() const
Tensor orgqr(const Tensor &input2) const
Tensor ormqr(const Tensor &input2, const Tensor &input3, bool left = true, bool transpose = false) const
Tensor lu_solve(const Tensor &LU_data, const Tensor &LU_pivots) const
Tensor multinomial(int64_t num_samples, bool replacement = false, c10::optional<Generator> generator = c10::nullopt) const
Tensor lgamma() const
Tensor digamma() const
Tensor polygamma(int64_t n) const
Tensor erfinv() const
Tensor &erfinv_() const
Tensor i0() const
Tensor &i0_() const
Tensor sign() const
Tensor &sign_() const
Tensor signbit() const
Tensor dist(const Tensor &other, Scalar p = 2) const
Tensor atan2(const Tensor &other) const
Tensor lerp(const Tensor &end, Scalar weight) const
Tensor lerp(const Tensor &end, const Tensor &weight) const
Tensor histc(int64_t bins = 100, Scalar min = 0, Scalar max = 0) const
Tensor fmod(Scalar other) const
Tensor fmod(const Tensor &other) const
Tensor hypot(const Tensor &other) const
Tensor &hypot_(const Tensor &other) const
Tensor igamma(const Tensor &other) const
Tensor &igamma_(const Tensor &other) const
Tensor igammac(const Tensor &other) const
Tensor &igammac_(const Tensor &other) const
Tensor nextafter(const Tensor &other) const
Tensor &nextafter_(const Tensor &other) const
Tensor remainder(Scalar other) const
Tensor remainder(const Tensor &other) const
Tensor min() const
Tensor max() const
Tensor maximum(const Tensor &other) const
Tensor max(const Tensor &other) const
Tensor minimum(const Tensor &other) const
Tensor min(const Tensor &other) const
Tensor quantile(double q, c10::optional<int64_t> dim = c10::nullopt, bool keepdim = false) const
Tensor quantile(const Tensor &q, c10::optional<int64_t> dim = c10::nullopt, bool keepdim = false) const
Tensor nanquantile(double q, c10::optional<int64_t> dim = c10::nullopt, bool keepdim = false) const
Tensor nanquantile(const Tensor &q, c10::optional<int64_t> dim = c10::nullopt, bool keepdim = false) const
std::tuple<Tensor, Tensor> sort(int64_t dim = -1, bool descending = false) const
std::tuple<Tensor, Tensor> sort(Dimname dim, bool descending = false) const
Tensor argsort(int64_t dim = -1, bool descending = false) const
Tensor argsort(Dimname dim, bool descending = false) const
std::tuple<Tensor, Tensor> topk(int64_t k, int64_t dim = -1, bool largest = true, bool sorted = true) const
Tensor all() const
Tensor any() const
Tensor renorm(Scalar p, int64_t dim, Scalar maxnorm) const
Tensor unfold(int64_t dimension, int64_t size, int64_t step) const
bool equal(const Tensor &other) const
Tensor pow(const Tensor &exponent) const
Tensor pow(Scalar exponent) const
Tensor &normal_(double mean = 0, double std = 1, c10::optional<Generator> generator = c10::nullopt) const
Tensor alias() const
Tensor isfinite() const
Tensor isinf() const
void record_stream(Stream s) const
Tensor isposinf() const
Tensor isneginf() const
Tensor fft(int64_t signal_ndim, bool normalized = false) const
Tensor det() const
Tensor outer(const Tensor &vec2) const
Tensor ger(const Tensor &vec2) const
Tensor var(int dim) const
Tensor std(int dim) const
Tensor to(caffe2::TypeMeta type_meta, bool non_blocking = false, bool copy = false) const
Tensor to(Device device, caffe2::TypeMeta type_meta, bool non_blocking = false, bool copy = false) const
template<typename F, typename ...Args>
decltype(auto) m(F func, Args&&... params) const
at::Tensor tensor_data() const

NOTE: This is similar to the legacy .data() function on Variable, and is intended to be used from functions that need to access the Variable’s equivalent Tensor (i.e. a Tensor that shares the same storage and tensor metadata with the Variable).

One notable difference with the legacy .data() function is that changes to the returned Tensor’s tensor metadata (e.g. sizes / strides / storage / storage_offset) will not update the original Variable, due to the fact that this function shallow-copies the Variable’s underlying TensorImpl.

at::Tensor variable_data() const

NOTE: var.variable_data() in C++ has the same semantics as tensor.data in Python, which creates a new Variable that shares the same storage and tensor metadata with the original Variable, but with a completely new autograd history.

NOTE: If we change the tensor metadata (e.g. sizes / strides / storage / storage_offset) of a variable created from var.variable_data(), those changes will not update the original variable var. In .variable_data(), we set allow_tensor_metadata_change_ to false to make such changes explicitly illegal, in order to prevent users from changing metadata of var.variable_data() and expecting the original variable var to also be updated.

const std::shared_ptr<torch::autograd::Node> &grad_fn() const

Gets the gradient function of the Variable.

If this is a leaf variable, the pointer returned will be null.

For View Variables: Gets the up-to-date grad_fn. If the shared data or base was modified, we re-create the grad_fn to express the up-to-date view relationship between this and the base Variable.

template<typename T>
hook_return_void_t<T> register_hook(T &&hook) const

Registers a backward hook.

The hook will be called every time a gradient with respect to the Tensor is computed. The hook should have one of the following signatures:

hook(Tensor grad) -> Tensor
hook(Tensor grad) -> void
The hook should not modify its argument, but it can optionally return a new gradient which will be used in place of grad.

This function returns the index of the hook in the list, which can be used to remove the hook.

Example:

auto v = torch::tensor({0., 0., 0.}, torch::requires_grad());
auto h = v.register_hook([](torch::Tensor grad){ return grad * 2; }); // double the gradient
v.backward(torch::tensor({1., 2., 3.}));
// This prints:
// ```
//  2
//  4
//  6
// [ CPUFloatType{3} ]
// ```
std::cout << v.grad() << std::endl;
v.remove_hook(h);  // removes the hook

template<typename T>
hook_return_var_t<T> register_hook(T &&hook) const
void remove_hook(unsigned pos) const

Removes the hook at the given position.

bool is_view() const

Returns true if this Variable is a view of another Variable.

const Tensor &_base() const

Returns the Variable that this Variable is a view of.

If this Variable is not a view, throw a std::runtime_error.

const std::string &name() const
template<typename T>
auto register_hook(T &&hook) const -> Tensor::hook_return_void_t<T>

Public Members

N
PtrTraits

Public Static Functions

Tensor wrap_tensor_impl(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl> tensor_impl)

Protected Functions

void enforce_invariants()

Protected Attributes

c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl> impl_

Docs

Access comprehensive developer documentation for PyTorch

View Docs

Tutorials

Get in-depth tutorials for beginners and advanced developers

View Tutorials

Resources

Find development resources and get your questions answered

View Resources