Template Function at::symint::_efficient_attention_backward(const at::Tensor&, const at::Tensor&, const at::Tensor&, const at::Tensor&, const ::std::optional<at::Tensor>&, const at::Tensor&, const ::std::optional<at::Tensor>&, const ::std::optional<at::Tensor>&, int64_t, int64_t, const at::Tensor&, double, const at::Tensor&, const at::Tensor&, int64_t, bool, ::std::optional<double>, ::std::optional<int64_t>, ::std::optional<int64_t>, bool)
Defined in File Functions.h
Function Documentation
template<typename T, typename = std::enable_if_t<std::is_same_v<T, int64_t>>>
::std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor> at::symint::_efficient_attention_backward(const at::Tensor &grad_out_, const at::Tensor &query, const at::Tensor &key, const at::Tensor &value, const ::std::optional<at::Tensor> &bias, const at::Tensor &out, const ::std::optional<at::Tensor> &cu_seqlens_q, const ::std::optional<at::Tensor> &cu_seqlens_k, int64_t max_seqlen_q, int64_t max_seqlen_k, const at::Tensor &logsumexp, double dropout_p, const at::Tensor &philox_seed, const at::Tensor &philox_offset, int64_t custom_mask_type, bool bias_requires_grad, ::std::optional<double> scale = ::std::nullopt, ::std::optional<int64_t> num_splits_key = ::std::nullopt, ::std::optional<int64_t> window_size = ::std::nullopt, bool shared_storage_dqdkdv = false)
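The sketch below shows how the int64_t instantiation of this overload might be invoked, leaving the trailing optional arguments (scale, num_splits_key, window_size, shared_storage_dqdkdv) at their defaults. It is a minimal illustration only: the [batch, seq_len, num_heads, head_dim] layout, the logsumexp shape, and the placeholder philox_seed/philox_offset tensors are assumptions; in real use, out, logsumexp, and the philox state would come from the matching memory-efficient attention forward call, and a CUDA build with those kernels is assumed.

#include <ATen/ATen.h>
#include <ATen/Functions.h>
#include <tuple>

int main() {
  // Hypothetical sizes; layout assumed to be [batch, seq_len, num_heads, head_dim].
  const int64_t B = 2, M = 128, N = 128, H = 8, K = 64;
  auto opts = at::TensorOptions().dtype(at::kHalf).device(at::kCUDA);

  auto query = at::randn({B, M, H, K}, opts);
  auto key   = at::randn({B, N, H, K}, opts);
  auto value = at::randn({B, N, H, K}, opts);
  auto out      = at::randn({B, M, H, K}, opts);  // stands in for the forward output
  auto grad_out = at::randn_like(out);            // upstream gradient w.r.t. out

  // Placeholders: normally produced by the forward pass (assumed shape/dtype).
  auto logsumexp     = at::randn({B, H, M}, opts.dtype(at::kFloat));
  auto philox_seed   = at::zeros({}, at::kLong);
  auto philox_offset = at::zeros({}, at::kLong);

  // int64_t instantiation; no attention bias, dense (non-varlen) inputs, no dropout.
  auto grads = at::symint::_efficient_attention_backward<int64_t>(
      grad_out, query, key, value,
      /*bias=*/::std::nullopt, out,
      /*cu_seqlens_q=*/::std::nullopt, /*cu_seqlens_k=*/::std::nullopt,
      /*max_seqlen_q=*/M, /*max_seqlen_k=*/N,
      logsumexp, /*dropout_p=*/0.0,
      philox_seed, philox_offset,
      /*custom_mask_type=*/0, /*bias_requires_grad=*/false);

  // Returned tuple: gradients for query, key, value, and (optionally) bias.
  at::Tensor grad_query = std::get<0>(grads);
  at::Tensor grad_key   = std::get<1>(grads);
  at::Tensor grad_value = std::get<2>(grads);
  at::Tensor grad_bias  = std::get<3>(grads);
  (void)grad_query; (void)grad_key; (void)grad_value; (void)grad_bias;
  return 0;
}

Passing the explicit <int64_t> template argument selects this concrete-integer overload; a parallel overload exists for c10::SymInt when symbolic shapes are being traced.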