Class Tensor

Inheritance Relationships

Base Type

  • public TensorBase

Class Documentation

class Tensor : public TensorBase

Public Types

template<typename T>
using hook_return_void_t = std::enable_if_t<std::is_void<typename c10::invoke_result_t<T&, Tensor>>::value, unsigned>
template<typename T>
using hook_return_var_t = std::enable_if_t<std::is_same<typename c10::invoke_result_t<T&, Tensor>, Tensor>::value, unsigned>

Public Functions

Tensor() = default
inline explicit Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl> tensor_impl)
Tensor(const Tensor &tensor) = default
Tensor(Tensor &&tensor) = default
inline explicit Tensor(const TensorBase &base)
inline Tensor(TensorBase &&base)
inline Tensor contiguous(MemoryFormat memory_format = MemoryFormat::Contiguous) const
inline Tensor conj() const
inline c10::MaybeOwned<Tensor> expect_contiguous(MemoryFormat memory_format = MemoryFormat::Contiguous) const &

Should be used if *this can reasonably be expected to be contiguous and performance is important.

Compared to contiguous, it saves a reference count increment/decrement if *this is already contiguous, at the cost in all cases of an extra pointer of stack usage, an extra branch to access, and an extra branch at destruction time.
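
A minimal sketch of the intended pattern, assuming float data; the function name and loop are illustrative, not part of the API above:

    #include <ATen/ATen.h>

    // Sums a float tensor through a raw pointer. expect_contiguous() borrows `t`
    // when it is already contiguous and only materializes a contiguous copy otherwise.
    float sum_elements(const at::Tensor& t) {
      c10::MaybeOwned<at::Tensor> contig = t.expect_contiguous();
      const float* data = contig->data_ptr<float>();
      float acc = 0.f;
      for (int64_t i = 0; i < contig->numel(); ++i) {
        acc += data[i];
      }
      return acc;
    }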

c10::MaybeOwned<Tensor> expect_contiguous(MemoryFormat memory_format = MemoryFormat::Contiguous) && = delete
inline Tensor &operator=(const TensorBase &x) &
inline Tensor &operator=(TensorBase &&x) & noexcept
inline Tensor &operator=(const Tensor &x) &
inline Tensor &operator=(Tensor &&x) & noexcept
inline Tensor &operator=(const Scalar &v) &&
inline Tensor &operator=(const Tensor &rhs) &&
inline Tensor &operator=(Tensor &&rhs) &&
inline  C10_DEPRECATED_MESSAGE ("Tensor.type() is deprecated. Instead use Tensor.options(), which in many cases (e.g. in a constructor) is a drop-in replacement. If you were using data from type(), that is now available from Tensor itself, so instead of tensor.type().scalar_type(), use tensor.scalar_type() instead and instead of tensor.type().backend() use tensor.device().") DeprecatedTypeProperties &type() const
inline Tensor toType(ScalarType t) const
inline Tensor toBackend(Backend b) const
inline  C10_DEPRECATED_MESSAGE ("Tensor.is_variable() is deprecated; everything is a variable now. (If you want to assert that variable has been appropriately handled already, use at::impl::variable_excluded_from_dispatch())") bool is_variable() const noexcept
template<typename T> inline  C10_DEPRECATED_MESSAGE ("Tensor.data<T>() is deprecated. Please use Tensor.data_ptr<T>() instead.") T *data() const
template<typename T>
T item() const
template<typename T, size_t N, template<typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
inline C10_DEPRECATED_MESSAGE ("packed_accessor is deprecated, use packed_accessor32 or packed_accessor64 instead") GenericPackedTensorAccessor<T, N, PtrTraits, index_t> packed_accessor() const &
template<typename T, size_t N, template<typename U> class PtrTraits = DefaultPtrTraits, typename index_t = int64_t>
C10_DEPRECATED_MESSAGE ("packed_accessor is deprecated, use packed_accessor32 or packed_accessor64 instead") GenericPackedTensorAccessor<T, N, PtrTraits, index_t> packed_accessor() && = delete
inline Tensor operator~() const
inline Tensor operator-() const
inline Tensor &operator+=(const Tensor &other)
inline Tensor &operator+=(const Scalar &other)
inline Tensor &operator-=(const Tensor &other)
inline Tensor &operator-=(const Scalar &other)
inline Tensor &operator*=(const Tensor &other)
inline Tensor &operator*=(const Scalar &other)
inline Tensor &operator/=(const Tensor &other)
inline Tensor &operator/=(const Scalar &other)
inline Tensor &operator&=(const Tensor &other)
inline Tensor &operator|=(const Tensor &other)
inline Tensor &operator^=(const Tensor &other)
inline Tensor operator[](const Scalar &index) const
inline Tensor operator[](const Tensor &index) const
inline Tensor operator[](int64_t index) const
Tensor index(ArrayRef<at::indexing::TensorIndex> indices) const
Tensor index(std::initializer_list<at::indexing::TensorIndex> indices) const
Tensor &index_put_(ArrayRef<at::indexing::TensorIndex> indices, Tensor const &rhs)
Tensor &index_put_(ArrayRef<at::indexing::TensorIndex> indices, const Scalar &v)
Tensor &index_put_(std::initializer_list<at::indexing::TensorIndex> indices, Tensor const &rhs)
Tensor &index_put_(std::initializer_list<at::indexing::TensorIndex> indices, const Scalar &v)
inline Tensor cpu() const
inline Tensor cuda() const
inline Tensor hip() const
inline Tensor ve() const
inline Tensor vulkan() const
inline Tensor metal() const
inline Tensor meta() const
inline void backward(const Tensor &gradient = {}, c10::optional<bool> retain_graph = c10::nullopt, bool create_graph = false, c10::optional<TensorList> inputs = c10::nullopt) const

Computes the gradient of the current tensor with respect to graph leaves.

The graph is differentiated using the chain rule. If the tensor is non-scalar (i.e. its data has more than one element) and requires gradient, the function additionally requires specifying gradient. It should be a tensor of matching type and location that contains the gradient of the differentiated function w.r.t. this Tensor.

This function accumulates gradients in the leaves; you might need to zero them before calling it. A brief usage sketch is shown after the parameter list below.

Parameters
  • gradient – Gradient w.r.t. the tensor. If it is a tensor, it will be automatically converted to a Tensor that does not require grad unless create_graph is True. None values can be specified for scalar Tensors or ones that don’t require grad. If a None value would be acceptable then this argument is optional.

  • retain_graph – If false, the graph used to compute the grads will be freed. Note that in nearly all cases setting this option to True is not needed and often can be worked around in a much more efficient way. Defaults to the value of create_graph.

  • create_graph – If true, the graph of the derivative will be constructed, allowing higher-order derivative products to be computed. Defaults to false.

  • inputs – Inputs w.r.t. which the gradient will be accumulated into at::Tensor::grad. All other Tensors will be ignored. If not provided, the gradient is accumulated into all the leaf Tensors that were used to compute the current tensor. When inputs are provided and a given input is not a leaf, the current implementation will call its grad_fn (even though it is not strictly needed to get these gradients). This is an implementation detail on which the user should not rely. See https://github.com/pytorch/pytorch/pull/60521#issuecomment-867061780 for more details.
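
A minimal usage sketch of backward() via the C++ frontend (torch::Tensor wraps at::Tensor; the variable names are illustrative):

    #include <torch/torch.h>

    int main() {
      // Leaf tensor that requires grad.
      torch::Tensor x = torch::randn({3}, torch::requires_grad());

      torch::Tensor y = (x * x).sum();   // scalar output: no explicit `gradient` argument needed
      y.backward();                      // accumulates dy/dx (= 2 * x) into x.grad()

      torch::Tensor z = x * 2;           // non-scalar output: pass a gradient of matching shape
      z.backward(torch::ones_like(z));   // adds another 2 per element into x.grad()
      return 0;
    }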

inline const Tensor &set_requires_grad(bool requires_grad) const
inline Tensor &mutable_grad() const

Return a mutable reference to the gradient.

This is conventionally used as t.mutable_grad() = x to set the gradient to a completely new tensor. Note that this function works with a non-const Tensor and is not thread safe.

inline const Tensor &grad() const

This function returns an undefined tensor by default and returns a defined tensor the first time a call to backward() computes gradients for this Tensor.

The attribute will then contain the gradients computed and future calls to backward() will accumulate (add) gradients into it.
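
A brief sketch of the grad()/mutable_grad() behavior described above (illustrative names, torch C++ frontend):

    #include <torch/torch.h>

    void grad_sketch() {
      torch::Tensor w = torch::randn({4}, torch::requires_grad());
      bool before = w.grad().defined();        // false: backward() has not run yet
      (w * 3).sum().backward();
      bool after = w.grad().defined();         // true: grad() now holds 3 for every element
      // mutable_grad() returns a non-const reference, so the gradient can be replaced wholesale.
      w.mutable_grad() = torch::zeros_like(w);
      (void)before; (void)after;
    }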

inline const Tensor &_fw_grad(uint64_t level) const

This function returns the forward gradient for this Tensor at the given level.

inline void _set_fw_grad(const TensorBase &new_grad, uint64_t level, bool is_inplace_op) const

This function can be used to set the value of the forward grad.

Note that the given new_grad might not be used directly if it has different metadata (size/stride/storage offset) compared to this Tensor. In that case, the content of new_grad will be copied into a new Tensor.

inline void __dispatch__backward(at::TensorList inputs, const c10::optional<at::Tensor> &gradient = {}, c10::optional<bool> retain_graph = c10::nullopt, bool create_graph = false) const
inline void __dispatch_set_data(const at::Tensor &new_data) const
inline at::Tensor __dispatch_data() const
inline bool __dispatch_is_leaf() const
inline int64_t __dispatch_output_nr() const
inline int64_t __dispatch__version() const
inline at::Tensor &__dispatch_requires_grad_(bool requires_grad = true) const
inline void __dispatch_retain_grad() const
inline bool __dispatch_retains_grad() const
inline at::Tensor _fw_primal(int64_t level) const
inline at::Tensor &rename_(c10::optional<at::DimnameList> names) const
inline at::Tensor rename(c10::optional<at::DimnameList> names) const
inline at::Tensor align_to(at::DimnameList names) const
inline at::Tensor align_to(at::DimnameList order, int64_t ellipsis_idx) const
inline at::Tensor align_as(const at::Tensor &other) const
inline at::Tensor refine_names(at::DimnameList names) const
inline at::Tensor abs() const
inline at::Tensor &abs_() const
inline at::Tensor absolute() const
inline at::Tensor &absolute_() const
inline at::Tensor angle() const
inline at::Tensor sgn() const
inline at::Tensor &sgn_() const
inline at::Tensor chalf(c10::optional<at::MemoryFormat> memory_format = c10::nullopt) const
inline at::Tensor _conj() const
inline at::Tensor __dispatch_conj() const
inline at::Tensor _conj_physical() const
inline at::Tensor conj_physical() const
inline at::Tensor &conj_physical_() const
inline at::Tensor resolve_conj() const
inline at::Tensor resolve_neg() const
inline at::Tensor _neg_view() const
inline at::Tensor acos() const
inline at::Tensor &acos_() const
inline at::Tensor arccos() const
inline at::Tensor &arccos_() const
inline at::Tensor add(const at::Tensor &other, const at::Scalar &alpha = 1) const
inline at::Tensor &add_(const at::Tensor &other, const at::Scalar &alpha = 1) const
inline at::Tensor add(const at::Scalar &other, const at::Scalar &alpha = 1) const
inline at::Tensor &add_(const at::Scalar &other, const at::Scalar &alpha = 1) const
inline at::Tensor addmv(const at::Tensor &mat, const at::Tensor &vec, const at::Scalar &beta = 1, const at::Scalar &alpha = 1) const
inline at::Tensor &addmv_(const at::Tensor &mat, const at::Tensor &vec, const at::Scalar &beta = 1, const at::Scalar &alpha = 1) const
inline at::Tensor addr(const at::Tensor &vec1, const at::Tensor &vec2, const at::Scalar &beta = 1, const at::Scalar &alpha = 1) const
inline at::Tensor &addr_(const at::Tensor &vec1, const at::Tensor &vec2, const at::Scalar &beta = 1, const at::Scalar &alpha = 1) const
inline at::Tensor _is_all_true() const
inline at::Tensor _is_any_true() const
inline at::Tensor all(int64_t dim, bool keepdim = false) const
inline at::Tensor all(at::Dimname dim, bool keepdim = false) const
inline bool allclose(const at::Tensor &other, double rtol = 1e-05, double atol = 1e-08, bool equal_nan = false) const
inline at::Tensor any(int64_t dim, bool keepdim = false) const
inline at::Tensor any(at::Dimname dim, bool keepdim = false) const
inline at::Tensor argmax(c10::optional<int64_t> dim = c10::nullopt, bool keepdim = false) const
inline at::Tensor argmin(c10::optional<int64_t> dim = c10::nullopt, bool keepdim = false) const
inline at::Tensor acosh() const
inline at::Tensor &acosh_() const
inline at::Tensor arccosh() const
inline at::Tensor &arccosh_() const
inline at::Tensor asinh() const
inline at::Tensor &asinh_() const
inline at::Tensor arcsinh() const
inline at::Tensor &arcsinh_() const
inline at::Tensor atanh() const
inline at::Tensor &atanh_() const
inline at::Tensor arctanh() const
inline at::Tensor &arctanh_() const
inline at::Tensor as_strided(at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset = c10::nullopt) const
inline at::Tensor as_strided_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset = c10::nullopt) const
inline const at::Tensor &as_strided_(at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset = c10::nullopt) const
inline const at::Tensor &as_strided__symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset = c10::nullopt) const
inline at::Tensor asin() const
inline at::Tensor &asin_() const
inline at::Tensor arcsin() const
inline at::Tensor &arcsin_() const
inline at::Tensor atan() const
inline at::Tensor &atan_() const
inline at::Tensor arctan() const
inline at::Tensor &arctan_() const
inline at::Tensor baddbmm(const at::Tensor &batch1, const at::Tensor &batch2, const at::Scalar &beta = 1, const at::Scalar &alpha = 1) const
inline at::Tensor &baddbmm_(const at::Tensor &batch1, const at::Tensor &batch2, const at::Scalar &beta = 1, const at::Scalar &alpha = 1) const
inline at::Tensor bernoulli(c10::optional<at::Generator> generator = c10::nullopt) const
inline at::Tensor &bernoulli_(const at::Tensor &p, c10::optional<at::Generator> generator = c10::nullopt) const
inline at::Tensor &bernoulli_(double p = 0.5, c10::optional<at::Generator> generator = c10::nullopt) const
inline at::Tensor bernoulli(double p, c10::optional<at::Generator> generator = c10::nullopt) const
inline at::Tensor bincount(const c10::optional<at::Tensor> &weights = {}, int64_t minlength = 0) const
inline at::Tensor bitwise_not() const
inline at::Tensor &bitwise_not_() const
inline at::Tensor copysign(const at::Tensor &other) const
inline at::Tensor &copysign_(const at::Tensor &other) const
inline at::Tensor copysign(const at::Scalar &other) const
inline at::Tensor &copysign_(const at::Scalar &other) const
inline at::Tensor logical_not() const
inline at::Tensor &logical_not_() const
inline at::Tensor logical_xor(const at::Tensor &other) const
inline at::Tensor &logical_xor_(const at::Tensor &other) const
inline at::Tensor logical_and(const at::Tensor &other) const
inline at::Tensor &logical_and_(const at::Tensor &other) const
inline at::Tensor logical_or(const at::Tensor &other) const
inline at::Tensor &logical_or_(const at::Tensor &other) const
inline at::Tensor bmm(const at::Tensor &mat2) const
inline at::Tensor broadcast_to(at::IntArrayRef size) const
inline at::Tensor broadcast_to_symint(c10::SymIntArrayRef size) const
inline at::Tensor ceil() const
inline at::Tensor &ceil_() const
inline ::std::vector<at::Tensor> unsafe_chunk(int64_t chunks, int64_t dim = 0) const
inline ::std::vector<at::Tensor> chunk(int64_t chunks, int64_t dim = 0) const
inline ::std::vector<at::Tensor> tensor_split(int64_t sections, int64_t dim = 0) const
inline ::std::vector<at::Tensor> tensor_split_symint(c10::SymInt sections, int64_t dim = 0) const
inline ::std::vector<at::Tensor> tensor_split(at::IntArrayRef indices, int64_t dim = 0) const
inline ::std::vector<at::Tensor> tensor_split_symint(c10::SymIntArrayRef indices, int64_t dim = 0) const
inline ::std::vector<at::Tensor> tensor_split(const at::Tensor &tensor_indices_or_sections, int64_t dim = 0) const
inline at::Tensor clamp(const c10::optional<at::Scalar> &min, const c10::optional<at::Scalar> &max = c10::nullopt) const
inline at::Tensor clamp(const c10::optional<at::Tensor> &min = {}, const c10::optional<at::Tensor> &max = {}) const
inline at::Tensor &clamp_(const c10::optional<at::Scalar> &min, const c10::optional<at::Scalar> &max = c10::nullopt) const
inline at::Tensor &clamp_(const c10::optional<at::Tensor> &min = {}, const c10::optional<at::Tensor> &max = {}) const
inline at::Tensor clamp_max(const at::Scalar &max) const
inline at::Tensor clamp_max(const at::Tensor &max) const
inline at::Tensor &clamp_max_(const at::Scalar &max) const
inline at::Tensor &clamp_max_(const at::Tensor &max) const
inline at::Tensor clamp_min(const at::Scalar &min) const
inline at::Tensor clamp_min(const at::Tensor &min) const
inline at::Tensor &clamp_min_(const at::Scalar &min) const
inline at::Tensor &clamp_min_(const at::Tensor &min) const
inline at::Tensor clip(const c10::optional<at::Scalar> &min, const c10::optional<at::Scalar> &max = c10::nullopt) const
inline at::Tensor clip(const c10::optional<at::Tensor> &min = {}, const c10::optional<at::Tensor> &max = {}) const
inline at::Tensor &clip_(const c10::optional<at::Scalar> &min, const c10::optional<at::Scalar> &max = c10::nullopt) const
inline at::Tensor &clip_(const c10::optional<at::Tensor> &min = {}, const c10::optional<at::Tensor> &max = {}) const
inline at::Tensor __dispatch_contiguous(at::MemoryFormat memory_format = MemoryFormat::Contiguous) const
inline at::Tensor &copy_(const at::Tensor &src, bool non_blocking = false) const
inline at::Tensor cos() const
inline at::Tensor &cos_() const
inline at::Tensor cosh() const
inline at::Tensor &cosh_() const
inline at::Tensor count_nonzero(at::IntArrayRef dim) const
inline at::Tensor count_nonzero(c10::optional<int64_t> dim = c10::nullopt) const
inline at::Tensor cov(int64_t correction = 1, const c10::optional<at::Tensor> &fweights = {}, const c10::optional<at::Tensor> &aweights = {}) const
inline at::Tensor corrcoef() const
inline ::std::tuple<at::Tensor, at::Tensor> cummax(int64_t dim) const
inline ::std::tuple<at::Tensor, at::Tensor> cummax(at::Dimname dim) const
inline ::std::tuple<at::Tensor, at::Tensor> cummin(int64_t dim) const
inline ::std::tuple<at::Tensor, at::Tensor> cummin(at::Dimname dim) const
inline at::Tensor cumprod(int64_t dim, c10::optional<at::ScalarType> dtype = c10::nullopt) const
inline at::Tensor &cumprod_(int64_t dim, c10::optional<at::ScalarType> dtype = c10::nullopt) const
inline at::Tensor cumprod(at::Dimname dim, c10::optional<at::ScalarType> dtype = c10::nullopt) const
inline at::Tensor &cumprod_(at::Dimname dim, c10::optional<at::ScalarType> dtype = c10::nullopt) const
inline at::Tensor cumsum(int64_t dim, c10::optional<at::ScalarType> dtype = c10::nullopt) const
inline at::Tensor &cumsum_(int64_t dim, c10::optional<at::ScalarType> dtype = c10::nullopt) const
inline at::Tensor cumsum(at::Dimname dim, c10::optional<at::ScalarType> dtype = c10::nullopt) const
inline at::Tensor &cumsum_(at::Dimname dim, c10::optional<at::ScalarType> dtype = c10::nullopt) const
inline at::Tensor diag_embed(int64_t offset = 0, int64_t dim1 = -2, int64_t dim2 = -1) const
inline at::Tensor diagflat(int64_t offset = 0) const
inline at::Tensor diagonal(int64_t offset = 0, int64_t dim1 = 0, int64_t dim2 = 1) const
inline at::Tensor diagonal(at::Dimname outdim, at::Dimname dim1, at::Dimname dim2, int64_t offset = 0) const
inline at::Tensor &fill_diagonal_(const at::Scalar &fill_value, bool wrap = false) const
inline at::Tensor diff(int64_t n = 1, int64_t dim = -1, const c10::optional<at::Tensor> &prepend = {}, const c10::optional<at::Tensor> &append = {}) const
inline at::Tensor div(const at::Tensor &other) const
inline at::Tensor &div_(const at::Tensor &other) const
inline at::Tensor div(const at::Tensor &other, c10::optional<c10::string_view> rounding_mode) const
inline at::Tensor &div_(const at::Tensor &other, c10::optional<c10::string_view> rounding_mode) const
inline at::Tensor div(const at::Scalar &other) const
inline at::Tensor &div_(const at::Scalar &other) const
inline at::Tensor div(const at::Scalar &other, c10::optional<c10::string_view> rounding_mode) const
inline at::Tensor &div_(const at::Scalar &other, c10::optional<c10::string_view> rounding_mode) const
inline at::Tensor divide(const at::Tensor &other) const
inline at::Tensor &divide_(const at::Tensor &other) const
inline at::Tensor divide(const at::Scalar &other) const
inline at::Tensor &divide_(const at::Scalar &other) const
inline at::Tensor divide(const at::Tensor &other, c10::optional<c10::string_view> rounding_mode) const
inline at::Tensor &divide_(const at::Tensor &other, c10::optional<c10::string_view> rounding_mode) const
inline at::Tensor divide(const at::Scalar &other, c10::optional<c10::string_view> rounding_mode) const
inline at::Tensor &divide_(const at::Scalar &other, c10::optional<c10::string_view> rounding_mode) const
inline at::Tensor true_divide(const at::Tensor &other) const
inline at::Tensor &true_divide_(const at::Tensor &other) const
inline at::Tensor true_divide(const at::Scalar &other) const
inline at::Tensor &true_divide_(const at::Scalar &other) const
inline at::Tensor dot(const at::Tensor &tensor) const
inline at::Tensor vdot(const at::Tensor &other) const
inline at::Tensor new_empty(at::IntArrayRef size, at::TensorOptions options = {}) const
inline at::Tensor new_empty(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) const
inline at::Tensor new_empty_symint(c10::SymIntArrayRef size, at::TensorOptions options = {}) const
inline at::Tensor new_empty_symint(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) const
inline at::Tensor new_empty_strided(at::IntArrayRef size, at::IntArrayRef stride, at::TensorOptions options = {}) const
inline at::Tensor new_empty_strided(at::IntArrayRef size, at::IntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) const
inline at::Tensor new_empty_strided_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, at::TensorOptions options = {}) const
inline at::Tensor new_empty_strided_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) const
inline at::Tensor new_full(at::IntArrayRef size, const at::Scalar &fill_value, at::TensorOptions options = {}) const
inline at::Tensor new_full(at::IntArrayRef size, const at::Scalar &fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) const
inline at::Tensor new_full_symint(c10::SymIntArrayRef size, const at::Scalar &fill_value, at::TensorOptions options = {}) const
inline at::Tensor new_full_symint(c10::SymIntArrayRef size, const at::Scalar &fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) const
inline at::Tensor new_zeros(at::IntArrayRef size, at::TensorOptions options = {}) const
inline at::Tensor new_zeros(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) const
inline at::Tensor new_zeros_symint(c10::SymIntArrayRef size, at::TensorOptions options = {}) const
inline at::Tensor new_zeros_symint(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) const
inline at::Tensor new_ones(at::IntArrayRef size, at::TensorOptions options = {}) const
inline at::Tensor new_ones(at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) const
inline at::Tensor new_ones_symint(c10::SymIntArrayRef size, at::TensorOptions options = {}) const
inline at::Tensor new_ones_symint(c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) const
inline const at::Tensor &resize_(at::IntArrayRef size, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) const
inline const at::Tensor &resize__symint(c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) const
inline at::Tensor erf() const
inline at::Tensor &erf_() const
inline at::Tensor erfc() const
inline at::Tensor &erfc_() const
inline at::Tensor exp() const
inline at::Tensor &exp_() const
inline at::Tensor exp2() const
inline at::Tensor &exp2_() const
inline at::Tensor expm1() const
inline at::Tensor &expm1_() const
inline at::Tensor expand(at::IntArrayRef size, bool implicit = false) const
inline at::Tensor expand_symint(c10::SymIntArrayRef size, bool implicit = false) const
inline at::Tensor expand_as(const at::Tensor &other) const
inline at::Tensor flatten(int64_t start_dim = 0, int64_t end_dim = -1) const
inline at::Tensor flatten(int64_t start_dim, int64_t end_dim, at::Dimname out_dim) const
inline at::Tensor flatten(at::Dimname start_dim, at::Dimname end_dim, at::Dimname out_dim) const
inline at::Tensor flatten(at::DimnameList dims, at::Dimname out_dim) const
inline at::Tensor unflatten(int64_t dim, at::IntArrayRef sizes) const
inline at::Tensor unflatten_symint(int64_t dim, c10::SymIntArrayRef sizes) const
inline at::Tensor unflatten(at::Dimname dim, at::IntArrayRef sizes, at::DimnameList names) const
inline at::Tensor unflatten_symint(at::Dimname dim, c10::SymIntArrayRef sizes, at::DimnameList names) const
inline at::Tensor &fill_(const at::Scalar &value) const
inline at::Tensor &fill_(const at::Tensor &value) const
inline at::Tensor floor() const
inline at::Tensor &floor_() const
inline at::Tensor floor_divide(const at::Tensor &other) const
inline at::Tensor &floor_divide_(const at::Tensor &other) const
inline at::Tensor floor_divide(const at::Scalar &other) const
inline at::Tensor &floor_divide_(const at::Scalar &other) const
inline at::Tensor frac() const
inline at::Tensor &frac_() const
inline at::Tensor gcd(const at::Tensor &other) const
inline at::Tensor &gcd_(const at::Tensor &other) const
inline at::Tensor lcm(const at::Tensor &other) const
inline at::Tensor &lcm_(const at::Tensor &other) const
inline at::Tensor index(const c10::List<c10::optional<at::Tensor>> &indices) const
inline at::Tensor &index_copy_(int64_t dim, const at::Tensor &index, const at::Tensor &source) const
inline at::Tensor index_copy(int64_t dim, const at::Tensor &index, const at::Tensor &source) const
inline at::Tensor &index_copy_(at::Dimname dim, const at::Tensor &index, const at::Tensor &source) const
inline at::Tensor index_copy(at::Dimname dim, const at::Tensor &index, const at::Tensor &source) const
inline at::Tensor &index_put_(const c10::List<c10::optional<at::Tensor>> &indices, const at::Tensor &values, bool accumulate = false) const
inline at::Tensor index_put(const c10::List<c10::optional<at::Tensor>> &indices, const at::Tensor &values, bool accumulate = false) const
inline at::Tensor isclose(const at::Tensor &other, double rtol = 1e-05, double atol = 1e-08, bool equal_nan = false) const
inline at::Tensor isnan() const
inline bool is_distributed() const
inline bool __dispatch_is_floating_point() const
inline bool __dispatch_is_complex() const
inline bool __dispatch_is_conj() const
inline bool __dispatch__is_zerotensor() const
inline bool __dispatch_is_neg() const
inline at::Tensor isreal() const
inline bool is_nonzero() const
inline bool is_same_size(const at::Tensor &other) const
inline bool __dispatch_is_signed() const
inline bool __dispatch_is_inference() const
inline at::Tensor kron(const at::Tensor &other) const
inline ::std::tuple<at::Tensor, at::Tensor> kthvalue(int64_t k, int64_t dim = -1, bool keepdim = false) const
inline ::std::tuple<at::Tensor, at::Tensor> kthvalue(int64_t k, at::Dimname dim, bool keepdim = false) const
inline at::Tensor nan_to_num(c10::optional<double> nan = c10::nullopt, c10::optional<double> posinf = c10::nullopt, c10::optional<double> neginf = c10::nullopt) const
inline at::Tensor &nan_to_num_(c10::optional<double> nan = c10::nullopt, c10::optional<double> posinf = c10::nullopt, c10::optional<double> neginf = c10::nullopt) const
inline at::Tensor ldexp(const at::Tensor &other) const
inline at::Tensor &ldexp_(const at::Tensor &other) const
inline at::Tensor log() const
inline at::Tensor &log_() const
inline at::Tensor log10() const
inline at::Tensor &log10_() const
inline at::Tensor log1p() const
inline at::Tensor &log1p_() const
inline at::Tensor log2() const
inline at::Tensor &log2_() const
inline at::Tensor logaddexp(const at::Tensor &other) const
inline at::Tensor logaddexp2(const at::Tensor &other) const
inline at::Tensor xlogy(const at::Tensor &other) const
inline at::Tensor xlogy(const at::Scalar &other) const
inline at::Tensor &xlogy_(const at::Tensor &other) const
inline at::Tensor &xlogy_(const at::Scalar &other) const
inline at::Tensor log_softmax(int64_t dim, c10::optional<at::ScalarType> dtype = c10::nullopt) const
inline at::Tensor log_softmax(at::Dimname dim, c10::optional<at::ScalarType> dtype = c10::nullopt) const
inline at::Tensor logcumsumexp(int64_t dim) const
inline at::Tensor logcumsumexp(at::Dimname dim) const
inline at::Tensor logsumexp(at::IntArrayRef dim, bool keepdim = false) const
inline at::Tensor logsumexp(at::DimnameList dim, bool keepdim = false) const
inline at::Tensor matmul(const at::Tensor &other) const
inline at::Tensor matrix_power(int64_t n) const
inline at::Tensor matrix_exp() const
inline ::std::tuple<at::Tensor, at::Tensor> aminmax(c10::optional<int64_t> dim = c10::nullopt, bool keepdim = false) const
inline ::std::tuple<at::Tensor, at::Tensor> max(int64_t dim, bool keepdim = false) const
inline ::std::tuple<at::Tensor, at::Tensor> max(at::Dimname dim, bool keepdim = false) const
inline at::Tensor amax(at::IntArrayRef dim = {}, bool keepdim = false) const
inline at::Tensor mean(c10::optional<at::ScalarType> dtype = c10::nullopt) const
inline at::Tensor mean(at::OptionalIntArrayRef dim, bool keepdim = false, c10::optional<at::ScalarType> dtype = c10::nullopt) const
inline at::Tensor mean(at::DimnameList dim, bool keepdim = false, c10::optional<at::ScalarType> dtype = c10::nullopt) const
inline at::Tensor nanmean(at::OptionalIntArrayRef dim = c10::nullopt, bool keepdim = false, c10::optional<at::ScalarType> dtype = c10::nullopt) const
inline at::Tensor median() const
inline ::std::tuple<at::Tensor, at::Tensor> median(int64_t dim, bool keepdim = false) const
inline ::std::tuple<at::Tensor, at::Tensor> median(at::Dimname dim, bool keepdim = false) const
inline at::Tensor nanmedian() const
inline ::std::tuple<at::Tensor, at::Tensor> nanmedian(int64_t dim, bool keepdim = false) const
inline ::std::tuple<at::Tensor, at::Tensor> nanmedian(at::Dimname dim, bool keepdim = false) const
inline ::std::tuple<at::Tensor, at::Tensor> min(int64_t dim, bool keepdim = false) const
inline ::std::tuple<at::Tensor, at::Tensor> min(at::Dimname dim, bool keepdim = false) const
inline at::Tensor amin(at::IntArrayRef dim = {}, bool keepdim = false) const
inline at::Tensor mm(const at::Tensor &mat2) const
inline ::std::tuple<at::Tensor, at::Tensor> mode(int64_t dim = -1, bool keepdim = false) const
inline ::std::tuple<at::Tensor, at::Tensor> mode(at::Dimname dim, bool keepdim = false) const
inline at::Tensor mul(const at::Tensor &other) const
inline at::Tensor &mul_(const at::Tensor &other) const
inline at::Tensor mul(const at::Scalar &other) const
inline at::Tensor &mul_(const at::Scalar &other) const
inline at::Tensor multiply(const at::Tensor &other) const
inline at::Tensor &multiply_(const at::Tensor &other) const
inline at::Tensor multiply(const at::Scalar &other) const
inline at::Tensor &multiply_(const at::Scalar &other) const
inline at::Tensor mv(const at::Tensor &vec) const
inline at::Tensor mvlgamma(int64_t p) const
inline at::Tensor &mvlgamma_(int64_t p) const
inline at::Tensor narrow_copy(int64_t dim, int64_t start, int64_t length) const
inline at::Tensor narrow_copy_symint(int64_t dim, c10::SymInt start, c10::SymInt length) const
inline at::Tensor narrow(int64_t dim, int64_t start, int64_t length) const
inline at::Tensor narrow_symint(int64_t dim, c10::SymInt start, c10::SymInt length) const
inline at::Tensor narrow(int64_t dim, const at::Tensor &start, int64_t length) const
inline at::Tensor narrow_symint(int64_t dim, const at::Tensor &start, c10::SymInt length) const
inline at::Tensor permute(at::IntArrayRef dims) const
inline at::Tensor movedim(at::IntArrayRef source, at::IntArrayRef destination) const
inline at::Tensor movedim(int64_t source, int64_t destination) const
inline at::Tensor moveaxis(at::IntArrayRef source, at::IntArrayRef destination) const
inline at::Tensor moveaxis(int64_t source, int64_t destination) const
inline at::Tensor numpy_T() const
inline at::Tensor matrix_H() const
inline at::Tensor mT() const
inline at::Tensor mH() const
inline at::Tensor adjoint() const
inline bool is_pinned(c10::optional<at::Device> device = c10::nullopt) const
inline at::Tensor pin_memory(c10::optional<at::Device> device = c10::nullopt) const
inline at::Tensor pinverse(double rcond = 1e-15) const
inline at::Tensor rad2deg() const
inline at::Tensor &rad2deg_() const
inline at::Tensor deg2rad() const
inline at::Tensor &deg2rad_() const
inline at::Tensor ravel() const
inline at::Tensor reciprocal() const
inline at::Tensor &reciprocal_() const
inline at::Tensor neg() const
inline at::Tensor &neg_() const
inline at::Tensor negative() const
inline at::Tensor &negative_() const
inline at::Tensor repeat(at::IntArrayRef repeats) const
inline at::Tensor repeat_symint(c10::SymIntArrayRef repeats) const
inline at::Tensor repeat_interleave(const at::Tensor &repeats, c10::optional<int64_t> dim = c10::nullopt, c10::optional<int64_t> output_size = c10::nullopt) const
inline at::Tensor repeat_interleave(int64_t repeats, c10::optional<int64_t> dim = c10::nullopt, c10::optional<int64_t> output_size = c10::nullopt) const
inline at::Tensor repeat_interleave_symint(c10::SymInt repeats, c10::optional<int64_t> dim = c10::nullopt, c10::optional<int64_t> output_size = c10::nullopt) const
inline at::Tensor reshape(at::IntArrayRef shape) const
inline at::Tensor reshape_symint(c10::SymIntArrayRef shape) const
inline at::Tensor _reshape_alias(at::IntArrayRef size, at::IntArrayRef stride) const
inline at::Tensor _reshape_alias_symint(c10::SymIntArrayRef size, c10::SymIntArrayRef stride) const
inline at::Tensor reshape_as(const at::Tensor &other) const
inline at::Tensor round() const
inline at::Tensor &round_() const
inline at::Tensor round(int64_t decimals) const
inline at::Tensor &round_(int64_t decimals) const
inline at::Tensor relu() const
inline at::Tensor &relu_() const
inline at::Tensor prelu(const at::Tensor &weight) const
inline at::Tensor hardshrink(const at::Scalar &lambd = 0.5) const
inline at::Tensor hardshrink_backward(const at::Tensor &grad_out, const at::Scalar &lambd) const
inline at::Tensor rsqrt() const
inline at::Tensor &rsqrt_() const
inline at::Tensor select(at::Dimname dim, int64_t index) const
inline at::Tensor select(int64_t dim, int64_t index) const
inline at::Tensor select_symint(int64_t dim, c10::SymInt index) const
inline at::Tensor sigmoid() const
inline at::Tensor &sigmoid_() const
inline at::Tensor logit(c10::optional<double> eps = c10::nullopt) const
inline at::Tensor &logit_(c10::optional<double> eps = c10::nullopt) const
inline at::Tensor sin() const
inline at::Tensor &sin_() const
inline at::Tensor sinc() const
inline at::Tensor &sinc_() const
inline at::Tensor sinh() const
inline at::Tensor &sinh_() const
inline at::Tensor detach() const

Returns a new Tensor, detached from the current graph.

The result will never require gradient.

inline at::Tensor &detach_() const

Detaches the Tensor from the graph that created it, making it a leaf.

Views cannot be detached in-place.
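
A short illustrative sketch of detach() versus detach_() (the variable names are not from the listing above):

    #include <torch/torch.h>

    void detach_sketch() {
      torch::Tensor a = torch::randn({2, 2}, torch::requires_grad());
      torch::Tensor b = a * 2;              // non-leaf node in the autograd graph

      torch::Tensor c = b.detach();         // shares data with b; never requires grad
      b.detach_();                          // detaches b itself in place; b is now a leaf
      // Per the note above, a view (e.g. the result of b.view({4})) cannot be detached in place.
      (void)c;
    }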

inline int64_t size(at::Dimname dim) const
inline at::Tensor slice(int64_t dim = 0, c10::optional<int64_t> start = c10::nullopt, c10::optional<int64_t> end = c10::nullopt, int64_t step = 1) const
inline at::Tensor slice_symint(int64_t dim = 0, c10::optional<c10::SymInt> start = c10::nullopt, c10::optional<c10::SymInt> end = c10::nullopt, c10::SymInt step = 1) const
inline at::Tensor slice_scatter(const at::Tensor &src, int64_t dim = 0, c10::optional<int64_t> start = c10::nullopt, c10::optional<int64_t> end = c10::nullopt, int64_t step = 1) const
inline at::Tensor slice_scatter_symint(const at::Tensor &src, int64_t dim = 0, c10::optional<c10::SymInt> start = c10::nullopt, c10::optional<c10::SymInt> end = c10::nullopt, c10::SymInt step = 1) const
inline at::Tensor select_scatter(const at::Tensor &src, int64_t dim, int64_t index) const
inline at::Tensor select_scatter_symint(const at::Tensor &src, int64_t dim, c10::SymInt index) const
inline at::Tensor diagonal_scatter(const at::Tensor &src, int64_t offset = 0, int64_t dim1 = 0, int64_t dim2 = 1) const
inline at::Tensor as_strided_scatter(const at::Tensor &src, at::IntArrayRef size, at::IntArrayRef stride, c10::optional<int64_t> storage_offset = c10::nullopt) const
inline at::Tensor as_strided_scatter_symint(const at::Tensor &src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset = c10::nullopt) const
inline at::Tensor smm(const at::Tensor &mat2) const
inline at::Tensor softmax(int64_t dim, c10::optional<at::ScalarType> dtype = c10::nullopt) const
inline at::Tensor softmax(at::Dimname dim, c10::optional<at::ScalarType> dtype = c10::nullopt) const
inline ::std::vector<at::Tensor> unsafe_split(int64_t split_size, int64_t dim = 0) const
inline ::std::vector<at::Tensor> unsafe_split_symint(c10::SymInt split_size, int64_t dim = 0) const
inline ::std::vector<at::Tensor> split(int64_t split_size, int64_t dim = 0) const
inline ::std::vector<at::Tensor> split_symint(c10::SymInt split_size, int64_t dim = 0) const
inline ::std::vector<at::Tensor> split(at::IntArrayRef split_size, int64_t dim = 0) const
inline ::std::vector<at::Tensor> split_symint(c10::SymIntArrayRef split_size, int64_t dim = 0) const
inline ::std::vector<at::Tensor> unsafe_split_with_sizes(at::IntArrayRef split_sizes, int64_t dim = 0) const
inline ::std::vector<at::Tensor> unsafe_split_with_sizes_symint(c10::SymIntArrayRef split_sizes, int64_t dim = 0) const
inline ::std::vector<at::Tensor> split_with_sizes(at::IntArrayRef split_sizes, int64_t dim = 0) const
inline ::std::vector<at::Tensor> split_with_sizes_symint(c10::SymIntArrayRef split_sizes, int64_t dim = 0) const
inline ::std::vector<at::Tensor> hsplit(int64_t sections) const
inline ::std::vector<at::Tensor> hsplit(at::IntArrayRef indices) const
inline ::std::vector<at::Tensor> vsplit(int64_t sections) const
inline ::std::vector<at::Tensor> vsplit(at::IntArrayRef indices) const
inline ::std::vector<at::Tensor> dsplit(int64_t sections) const
inline ::std::vector<at::Tensor> dsplit(at::IntArrayRef indices) const
inline at::Tensor squeeze() const
inline at::Tensor squeeze(int64_t dim) const
inline at::Tensor squeeze(at::Dimname dim) const
inline at::Tensor squeeze(at::IntArrayRef dim) const
inline at::Tensor &squeeze_() const
inline at::Tensor &squeeze_(int64_t dim) const
inline at::Tensor &squeeze_(at::IntArrayRef dim) const
inline at::Tensor &squeeze_(at::Dimname dim) const
inline at::Tensor sspaddmm(const at::Tensor &mat1, const at::Tensor &mat2, const at::Scalar &beta = 1, const at::Scalar &alpha = 1) const
inline at::Tensor stft(int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<at::Tensor> &window, bool normalized, c10::optional<bool> onesided = c10::nullopt, c10::optional<bool> return_complex = c10::nullopt) const
inline at::Tensor stft(int64_t n_fft, c10::optional<int64_t> hop_length = c10::nullopt, c10::optional<int64_t> win_length = c10::nullopt, const c10::optional<at::Tensor> &window = {}, bool center = true, c10::string_view pad_mode = "reflect", bool normalized = false, c10::optional<bool> onesided = c10::nullopt, c10::optional<bool> return_complex = c10::nullopt) const
inline at::Tensor istft(int64_t n_fft, c10::optional<int64_t> hop_length = c10::nullopt, c10::optional<int64_t> win_length = c10::nullopt, const c10::optional<at::Tensor> &window = {}, bool center = true, bool normalized = false, c10::optional<bool> onesided = c10::nullopt, c10::optional<int64_t> length = c10::nullopt, bool return_complex = false) const
inline int64_t stride(at::Dimname dim) const
inline at::Tensor sum(c10::optional<at::ScalarType> dtype = c10::nullopt) const
inline at::Tensor sum(at::OptionalIntArrayRef dim, bool keepdim = false, c10::optional<at::ScalarType> dtype = c10::nullopt) const
inline at::Tensor sum(at::DimnameList dim, bool keepdim = false, c10::optional<at::ScalarType> dtype = c10::nullopt) const
inline at::Tensor nansum(at::OptionalIntArrayRef dim = c10::nullopt, bool keepdim = false, c10::optional<at::ScalarType> dtype = c10::nullopt) const
inline at::Tensor sum_to_size(at::IntArrayRef size) const
inline at::Tensor sum_to_size_symint(c10::SymIntArrayRef size) const
inline at::Tensor sqrt() const
inline at::Tensor &sqrt_() const
inline at::Tensor square() const
inline at::Tensor &square_() const
inline at::Tensor std(bool unbiased) const
inline at::Tensor std(at::OptionalIntArrayRef dim, bool unbiased, bool keepdim = false) const
inline at::Tensor std(at::OptionalIntArrayRef dim = c10::nullopt, const c10::optional<at::Scalar> &correction = c10::nullopt, bool keepdim = false) const
inline at::Tensor std(at::DimnameList dim, bool unbiased, bool keepdim = false) const
inline at::Tensor std(at::DimnameList dim, const c10::optional<at::Scalar> &correction = c10::nullopt, bool keepdim = false) const
inline at::Tensor prod(c10::optional<at::ScalarType> dtype = c10::nullopt) const
inline at::Tensor prod(int64_t dim, bool keepdim = false, c10::optional<at::ScalarType> dtype = c10::nullopt) const
inline at::Tensor prod(at::Dimname dim, bool keepdim = false, c10::optional<at::ScalarType> dtype = c10::nullopt) const
inline at::Tensor t() const
inline at::Tensor &t_() const
inline at::Tensor tan() const
inline at::Tensor &tan_() const
inline at::Tensor tanh() const
inline at::Tensor &tanh_() const
inline at::Tensor tile(at::IntArrayRef dims) const
inline at::Tensor tile_symint(c10::SymIntArrayRef dims) const
inline at::Tensor transpose(int64_t dim0, int64_t dim1) const
inline at::Tensor transpose(at::Dimname dim0, at::Dimname dim1) const
inline at::Tensor &transpose_(int64_t dim0, int64_t dim1) const
inline at::Tensor flip(at::IntArrayRef dims) const
inline at::Tensor fliplr() const
inline at::Tensor flipud() const
inline at::Tensor roll(at::IntArrayRef shifts, at::IntArrayRef dims = {}) const
inline at::Tensor roll_symint(c10::SymIntArrayRef shifts, at::IntArrayRef dims = {}) const
inline at::Tensor rot90(int64_t k = 1, at::IntArrayRef dims = {0, 1}) const
inline at::Tensor _nested_tensor_size() const
inline at::Tensor _nested_tensor_strides() const
inline at::Tensor _nested_tensor_storage_offsets() const
inline at::Tensor trunc() const
inline at::Tensor &trunc_() const
inline at::Tensor fix() const
inline at::Tensor &fix_() const
inline at::Tensor type_as(const at::Tensor &other) const
inline at::Tensor unsqueeze(int64_t dim) const
inline at::Tensor &unsqueeze_(int64_t dim) const
inline at::Tensor var(bool unbiased) const
inline at::Tensor var(at::OptionalIntArrayRef dim, bool unbiased, bool keepdim = false) const
inline at::Tensor var(at::OptionalIntArrayRef dim = c10::nullopt, const c10::optional<at::Scalar> &correction = c10::nullopt, bool keepdim = false) const
inline at::Tensor var(at::DimnameList dim, bool unbiased, bool keepdim = false) const
inline at::Tensor var(at::DimnameList dim, const c10::optional<at::Scalar> &correction = c10::nullopt, bool keepdim = false) const
inline at::Tensor view_as(const at::Tensor &other) const
inline at::Tensor where(const at::Tensor &condition, const at::Tensor &other) const
inline at::Tensor where(const at::Tensor &condition, const at::Scalar &other) const
inline at::Tensor norm(const c10::optional<at::Scalar> &p, at::ScalarType dtype) const
inline at::Tensor norm(const at::Scalar &p = 2) const
inline at::Tensor norm(const c10::optional<at::Scalar> &p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype) const
inline at::Tensor norm(const c10::optional<at::Scalar> &p, at::IntArrayRef dim, bool keepdim = false) const
inline at::Tensor norm(const c10::optional<at::Scalar> &p, at::DimnameList dim, bool keepdim, at::ScalarType dtype) const
inline at::Tensor norm(const c10::optional<at::Scalar> &p, at::DimnameList dim, bool keepdim = false) const
inline ::std::tuple<at::Tensor, at::Tensor> frexp() const
inline at::Tensor clone(c10::optional<at::MemoryFormat> memory_format = c10::nullopt) const
inline at::Tensor positive() const
inline const at::Tensor &resize_as_(const at::Tensor &the_template, c10::optional<at::MemoryFormat> memory_format = c10::nullopt) const
inline const at::Tensor &resize_as_sparse_(const at::Tensor &the_template) const
inline at::Tensor &zero_() const
inline at::Tensor sub(const at::Tensor &other, const at::Scalar &alpha = 1) const
inline at::Tensor &sub_(const at::Tensor &other, const at::Scalar &alpha = 1) const
inline at::Tensor sub(const at::Scalar &other, const at::Scalar &alpha = 1) const
inline at::Tensor &sub_(const at::Scalar &other, const at::Scalar &alpha = 1) const
inline at::Tensor subtract(const at::Tensor &other, const at::Scalar &alpha = 1) const
inline at::Tensor &subtract_(const at::Tensor &other, const at::Scalar &alpha = 1) const
inline at::Tensor subtract(const at::Scalar &other, const at::Scalar &alpha = 1) const
inline at::Tensor &subtract_(const at::Scalar &other, const at::Scalar &alpha = 1) const
inline at::Tensor heaviside(const at::Tensor &values) const
inline at::Tensor &heaviside_(const at::Tensor &values) const
inline at::Tensor addmm(const at::Tensor &mat1, const at::Tensor &mat2, const at::Scalar &beta = 1, const at::Scalar &alpha = 1) const
inline at::Tensor &addmm_(const at::Tensor &mat1, const at::Tensor &mat2, const at::Scalar &beta = 1, const at::Scalar &alpha = 1) const
inline at::Tensor _addmm_activation(const at::Tensor &mat1, const at::Tensor &mat2, const at::Scalar &beta = 1, const at::Scalar &alpha = 1, bool use_gelu = false) const
inline const at::Tensor &sparse_resize_(at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) const
inline const at::Tensor &sparse_resize_and_clear_(at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) const
inline at::Tensor sparse_mask(const at::Tensor &mask) const
inline at::Tensor _sparse_mask_projection(const at::Tensor &mask, bool accumulate_matches = false) const
inline at::Tensor to_dense(c10::optional<at::ScalarType> dtype = c10::nullopt, c10::optional<bool> masked_grad = c10::nullopt) const
inline at::Tensor _to_dense(c10::optional<at::ScalarType> dtype = c10::nullopt, c10::optional<bool> masked_grad = c10::nullopt) const
inline int64_t sparse_dim() const
inline int64_t _dimI() const
inline int64_t dense_dim() const
inline int64_t _dimV() const
inline int64_t _nnz() const
inline at::Tensor coalesce() const
inline bool is_coalesced() const
inline at::Tensor _indices() const
inline at::Tensor _values() const
inline at::Tensor &_coalesced_(bool coalesced) const
inline at::Tensor indices() const
inline at::Tensor values() const
inline at::Tensor crow_indices() const
inline at::Tensor col_indices() const
inline at::Tensor ccol_indices() const
inline at::Tensor row_indices() const
inline ::std::vector<at::Tensor> unbind(int64_t dim = 0) const
inline ::std::vector<at::Tensor> unbind(at::Dimname dim) const