#pragma once
// @generated by aten/src/ATen/gen.py
#include <c10/core/Scalar.h>
#include <ATen/Tensor.h>
#include <c10/core/Storage.h>
#include <ATen/core/Generator.h>
#include <c10/util/Deprecated.h>
#include <ATen/NativeFunctions.h>
#include <ATen/DeviceGuard.h>
#include <c10/core/TensorOptions.h>
#include <ATen/core/Reduction.h>
#include <c10/util/Optional.h>
#include <ATen/TensorUtils.h>
#include <ATen/core/ATenDispatch.h>
#include <ATen/Context.h>
#include <ATen/core/EnableNamedTensor.h>
namespace at {
using native::tensor;
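// Note (added annotation): `using native::tensor;` re-exports the
// tensor-from-values factory so it is callable as at::tensor(...) alongside
// the generated free functions declared below.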
static inline Tensor _cast_Byte(const Tensor & self, bool non_blocking=false);
static inline Tensor _cast_Char(const Tensor & self, bool non_blocking=false);
static inline Tensor _cast_Double(const Tensor & self, bool non_blocking=false);
static inline Tensor _cast_Float(const Tensor & self, bool non_blocking=false);
static inline Tensor _cast_Int(const Tensor & self, bool non_blocking=false);
static inline Tensor _cast_Long(const Tensor & self, bool non_blocking=false);
static inline Tensor _cast_Short(const Tensor & self, bool non_blocking=false);
static inline Tensor _cast_Half(const Tensor & self, bool non_blocking=false);
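// Usage sketch (illustrative only, not emitted by the generator): the
// underscore-prefixed _cast_* helpers are legacy dtype-casting entry points.
// Assuming a CPU float tensor `t`:
//   at::Tensor t = at::rand({2, 2});
//   at::Tensor d = at::_cast_Double(t);              // same values, double dtype
//   at::Tensor h = at::_cast_Half(t, /*non_blocking=*/true);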
#ifdef BUILD_NAMEDTENSOR
static inline std::vector<Tensor> align_tensors(TensorList tensors);
#endif
static inline std::tuple<Tensor,Tensor> _cudnn_ctc_loss(const Tensor & log_probs, const Tensor & targets, IntArrayRef input_lengths, IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity);
static inline Tensor _cudnn_rnn_flatten_weight(TensorList weight_arr, int64_t weight_stride0, int64_t input_size, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, bool bidirectional);
static inline std::tuple<Tensor,Tensor,Tensor,Tensor,Tensor> _cudnn_rnn(const Tensor & input, TensorList weight, int64_t weight_stride0, const Tensor & weight_buf, const Tensor & hx, const Tensor & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const Tensor & dropout_state);
static inline std::tuple<Tensor,Tensor,Tensor,std::vector<Tensor>> _cudnn_rnn_backward(const Tensor & input, TensorList weight, int64_t weight_stride0, const Tensor & weight_buf, const Tensor & hx, const Tensor & cx, const Tensor & output, const Tensor & grad_output, const Tensor & grad_hy, const Tensor & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, const Tensor & dropout_state, const Tensor & reserve, std::array<bool,4> output_mask);
static inline Tensor _cudnn_init_dropout_state(double dropout, bool train, int64_t dropout_seed, const TensorOptions & options);
static inline int64_t _debug_has_internal_overlap(const Tensor & self);
static inline std::tuple<Tensor,Tensor> _fused_dropout(const Tensor & self, double p, Generator * generator=nullptr);
static inline Tensor _masked_scale(const Tensor & self, const Tensor & mask, double scale);
static inline std::tuple<Tensor,Tensor> _sobol_engine_draw(const Tensor & quasi, int64_t n, const Tensor & sobolstate, int64_t dimension, int64_t num_generated, c10::optional<ScalarType> dtype);
static inline Tensor & _sobol_engine_ff_(Tensor & self, int64_t n, const Tensor & sobolstate, int64_t dimension, int64_t num_generated);
static inline Tensor & _sobol_engine_scramble_(Tensor & self, const Tensor & ltm, int64_t dimension);
static inline Tensor & _sobol_engine_initialize_state_(Tensor & self, int64_t dimension);
static inline Tensor _reshape_from_tensor(const Tensor & self, const Tensor & shape);
static inline Tensor _shape_as_tensor(const Tensor & self);
static inline Tensor dropout(const Tensor & input, double p, bool train);
static inline Tensor & dropout_(Tensor & self, double p, bool train);
static inline Tensor feature_dropout(const Tensor & input, double p, bool train);
static inline Tensor & feature_dropout_(Tensor & self, double p, bool train);
static inline Tensor alpha_dropout(const Tensor & input, double p, bool train);
static inline Tensor & alpha_dropout_(Tensor & self, double p, bool train);
static inline Tensor feature_alpha_dropout(const Tensor & input, double p, bool train);
static inline Tensor & feature_alpha_dropout_(Tensor & self, double p, bool train);
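// Usage sketch (illustrative only): with train=true, dropout zeroes elements
// with probability p and rescales the survivors by 1/(1-p); with train=false
// it is the identity. The trailing-underscore variants mutate their argument:
//   at::Tensor x = at::rand({2, 3});
//   at::Tensor y = at::dropout(x, /*p=*/0.5, /*train=*/true);
//   at::dropout_(x, /*p=*/0.5, /*train=*/true);      // modifies x in place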
static inline Tensor abs(const Tensor & self);
static inline Tensor & abs_(Tensor & self);
static inline Tensor & abs_out(Tensor & out, const Tensor & self);
static inline Tensor acos(const Tensor & self);
static inline Tensor & acos_(Tensor & self);
static inline Tensor & acos_out(Tensor & out, const Tensor & self);
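// Usage sketch (illustrative only): each unary op comes in three generated
// forms: functional, in-place (`_` suffix), and `_out` writing into a
// caller-provided tensor:
//   at::Tensor x = at::rand({4}) - 0.5;
//   at::Tensor y = at::abs(x);                       // allocates a new tensor
//   at::Tensor out = at::empty_like(x);
//   at::abs_out(out, x);                             // reuses out's storage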
static inline Tensor avg_pool1d(const Tensor & self, IntArrayRef kernel_size, IntArrayRef stride={}, IntArrayRef padding=0, bool ceil_mode=false, bool count_include_pad=true);
static inline Tensor adaptive_avg_pool1d(const Tensor & self, IntArrayRef output_size);
static inline std::tuple<Tensor,Tensor> adaptive_max_pool1d(const Tensor & self, IntArrayRef output_size);
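// Usage sketch (illustrative only): the 1-d pooling ops expect (N, C, L)
// input; an empty `stride` defaults to `kernel_size`:
//   at::Tensor x = at::rand({1, 1, 8});
//   at::Tensor y = at::avg_pool1d(x, /*kernel_size=*/{2});   // shape (1, 1, 4)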
static inline Tensor add(const Tensor & self, const Tensor & other, Scalar alpha=1);
static inline Tensor & add_out(Tensor & out, const Tensor & self, const Tensor & other, Scalar alpha=1);
static inline Tensor add(const Tensor & self, Scalar other, Scalar alpha=1);
static inline Tensor addmv(const Tensor & self, const Tensor & mat, const Tensor & vec, Scalar beta=1, Scalar alpha=1);
static inline Tensor & addmv_(Tensor & self, const Tensor & mat, const Tensor & vec, Scalar beta=1, Scalar alpha=1);
static inline Tensor & addmv_out(Tensor & out, const Tensor & self, const Tensor & mat, const Tensor & vec, Scalar beta=1, Scalar alpha=1);
static inline Tensor addr(const Tensor & self, const Tensor & vec1, const Tensor & vec2, Scalar beta=1, Scalar alpha=1);
static inline Tensor & addr_out(Tensor & out, const Tensor & self, const Tensor & vec1, const Tensor & vec2, Scalar beta=1, Scalar alpha=1);
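// Usage sketch (illustrative only): `alpha`/`beta` scale the operands:
// add(self, other, alpha) computes self + alpha * other, and
// addmv(self, mat, vec, beta, alpha) computes beta * self + alpha * (mat @ vec):
//   at::Tensor mat = at::rand({3, 4});
//   at::Tensor vec = at::rand({4});
//   at::Tensor bias = at::rand({3});
//   at::Tensor r = at::addmv(bias, mat, vec);        // shape (3)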
static inline Tensor affine_grid_generator(const Tensor & theta, IntArrayRef size, bool align_corners);
static inline Tensor affine_grid_generator_backward(const Tensor & grad, IntArrayRef size, bool align_corners);
static inline Tensor all(const Tensor & self, int64_t dim, bool keepdim=false);
static inline Tensor & all_out(Tensor & out, const Tensor & self, int64_t dim, bool keepdim=false);
#ifdef BUILD_NAMEDTENSOR
static inline Tensor all(const Tensor & self, Dimname dim, bool keepdim=false);
static inline Tensor & all_out(Tensor & out, const Tensor & self, Dimname dim, bool keepdim=false);
#endif
static inline bool allclose(const Tensor & self, const Tensor & other, double rtol=1e-05, double atol=1e-08, bool equal_nan=false);
static inline Tensor any(const Tensor & self, int64_t dim, bool keepdim=false);
static inline Tensor & any_out(Tensor & out, const Tensor & self, int64_t dim, bool keepdim=false);
#ifdef BUILD_NAMEDTENSOR
static inline Tensor any(const Tensor & self, Dimname dim, bool keepdim=false);
static inline Tensor & any_out(Tensor & out, const Tensor & self, Dimname dim, bool keepdim=false);
#endif
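// Usage sketch (illustrative only): the dim overloads of all/any reduce along
// a single dimension of a boolean (or byte) tensor:
//   at::Tensor m = at::rand({2, 3}).gt(0.5);
//   at::Tensor row_all = at::all(m, /*dim=*/1);                     // shape (2)
//   at::Tensor row_any = at::any(m, /*dim=*/1, /*keepdim=*/true);   // shape (2, 1)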
static inline Tensor arange(Scalar end, const TensorOptions & options={});
static inline Tensor arange(Scalar start, Scalar end, const TensorOptions & options={});
static inline Tensor arange(Scalar start, Scalar end, Scalar step, const TensorOptions & options={});
static inline Tensor & arange_out(Tensor & out, Scalar end);
static inline Tensor & arange_out(Tensor & out, Scalar start, Scalar end, Scalar step=1);
static inline Tensor _dim_arange(const Tensor & like, int64_t dim);
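// Usage sketch (illustrative only): arange fills the half-open interval
// [start, end) with the given step; the dtype is pinned here via `options`
// rather than relying on the default:
//   at::Tensor r = at::arange(/*start=*/0, /*end=*/10, /*step=*/2, at::kLong);
//   // r = [0, 2, 4, 6, 8]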
static inline Tensor argmax(const Tensor & self, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false);
static inline Tensor argmin(const Tensor & self, c10::optional<int64_t> dim=c10::nullopt, bool keepdim=false);
static inline Tensor as_strided(const Tensor & self, IntArrayRef size, IntArrayRef stride, c10::optional<int64_t> storage_offset=c10::nullopt);
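// Usage sketch (illustrative only): with dim unset, argmax/argmin reduce over
// the flattened tensor; as_strided reinterprets the same storage with new
// sizes and strides:
//   at::Tensor x = at::rand({2, 3});
//   at::Tensor i = at::argmax(x);                        // 0-dim index tensor
//   at::Tensor j = at::argmin(x, /*dim=*/1);             // shape (2)
//   at::Tensor v = at::as_strided(x, {3, 2}, {1, 3});    // transposed view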
static inline Te