Struct EmbeddingOptions¶
Defined in File embedding.h
Page Contents
Struct Documentation¶
-
struct EmbeddingOptions¶
Options for the
Embedding
module. Example:
Embedding model(EmbeddingOptions(10, 2).padding_idx(3).max_norm(2).norm_type(2.5).scale_grad_by_freq(true).sparse(true));
Public Functions
-
EmbeddingOptions(int64_t num_embeddings, int64_t embedding_dim)¶
-
inline auto num_embeddings(const int64_t &new_num_embeddings) -> decltype(*this)¶
The size of the dictionary of embeddings.
-
inline auto num_embeddings(int64_t &&new_num_embeddings) -> decltype(*this)¶
-
inline const int64_t &num_embeddings() const noexcept¶
-
inline int64_t &num_embeddings() noexcept¶
-
inline auto embedding_dim(const int64_t &new_embedding_dim) -> decltype(*this)¶
The size of each embedding vector.
-
inline auto embedding_dim(int64_t &&new_embedding_dim) -> decltype(*this)¶
-
inline const int64_t &embedding_dim() const noexcept¶
-
inline int64_t &embedding_dim() noexcept¶
-
inline auto padding_idx(const std::optional<int64_t> &new_padding_idx) -> decltype(*this)¶
If specified, the entries at
padding_idx
do not contribute to the gradient; therefore, the embedding vector at padding_idx
is not updated during training, i.e. it remains as a fixed “pad”. For a newly constructed Embedding, the embedding vector at
padding_idx
will default to all zeros, but can be updated to another value to be used as the padding vector.
-
inline auto padding_idx(std::optional<int64_t> &&new_padding_idx) -> decltype(*this)¶
-
inline const std::optional<int64_t> &padding_idx() const noexcept¶
-
inline std::optional<int64_t> &padding_idx() noexcept¶
-
inline auto max_norm(const std::optional<double> &new_max_norm) -> decltype(*this)¶
If given, each embedding vector with norm larger than
max_norm
is renormalized to have norm max_norm
.
-
inline auto max_norm(std::optional<double> &&new_max_norm) -> decltype(*this)¶
-
inline const std::optional<double> &max_norm() const noexcept¶
-
inline std::optional<double> &max_norm() noexcept¶
-
inline auto norm_type(const double &new_norm_type) -> decltype(*this)¶
The p of the p-norm to compute for the
max_norm
option. Default 2
.
-
inline auto norm_type(double &&new_norm_type) -> decltype(*this)¶
-
inline const double &norm_type() const noexcept¶
-
inline double &norm_type() noexcept¶
-
inline auto scale_grad_by_freq(const bool &new_scale_grad_by_freq) -> decltype(*this)¶
If given, this will scale gradients by the inverse of frequency of the words in the mini-batch.
Default
false
.
-
inline auto scale_grad_by_freq(bool &&new_scale_grad_by_freq) -> decltype(*this)¶
-
inline const bool &scale_grad_by_freq() const noexcept¶
-
inline bool &scale_grad_by_freq() noexcept¶
-
inline auto sparse(const bool &new_sparse) -> decltype(*this)¶
If
true
, gradient w.r.t. weight
matrix will be a sparse tensor.
-
inline auto sparse(bool &&new_sparse) -> decltype(*this)¶
-
inline const bool &sparse() const noexcept¶
-
inline bool &sparse() noexcept¶
-
inline auto _weight(const torch::Tensor &new__weight) -> decltype(*this)¶
The learnable weights of the module of shape (num_embeddings, embedding_dim)
-
inline auto _weight(torch::Tensor &&new__weight) -> decltype(*this)¶
-
inline const torch::Tensor &_weight() const noexcept¶
-
inline torch::Tensor &_weight() noexcept¶
-
EmbeddingOptions(int64_t num_embeddings, int64_t embedding_dim)¶