#pragma once
// @generated by torchgen/gen.py from RedispatchFunctions.h
#ifdef TORCH_ASSERT_ONLY_METHOD_OPERATORS
#error This change adds a dependency on all pytorch operators, meaning the \
file will need to be re-compiled every time an operator is changed or added. \
Consider using the at::_ops::{name}::redispatch() interface by including \
the specific operator from <ATen/ops/{my_operator}_ops.h>
#endif
#include <c10/core/Scalar.h>
#include <ATen/Tensor.h>
#include <c10/core/Storage.h>
#include <ATen/core/Generator.h>
#include <c10/util/Deprecated.h>
#include <ATen/DeviceGuard.h>
#include <c10/core/TensorOptions.h>
#include <ATen/core/Reduction.h>
#include <c10/util/Optional.h>
#include <ATen/TensorUtils.h>
#include <ATen/Context.h>
#include <ATen/TracerMode.h>
#include <ATen/Operators.h>
namespace at {
namespace redispatch {
// aten::_cast_Byte(Tensor self, bool non_blocking=False) -> Tensor
// Forwarding shim: hands the call straight to the operator's redispatch
// entry point with the caller-provided dispatch key set.
TORCH_API inline auto _cast_Byte(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking=false) -> at::Tensor {
  return at::_ops::_cast_Byte::redispatch(dispatchKeySet, self, non_blocking);
}
// aten::_cast_Char(Tensor self, bool non_blocking=False) -> Tensor
// Forwarding shim: delegates directly to the operator's redispatch entry
// point, passing the supplied dispatch key set through unchanged.
TORCH_API inline auto _cast_Char(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking=false) -> at::Tensor {
  return at::_ops::_cast_Char::redispatch(dispatchKeySet, self, non_blocking);
}
// aten::_cast_Double(Tensor self, bool non_blocking=False) -> Tensor
// Forwarding shim: hands the call straight to the operator's redispatch
// entry point with the caller-provided dispatch key set.
TORCH_API inline auto _cast_Double(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking=false) -> at::Tensor {
  return at::_ops::_cast_Double::redispatch(dispatchKeySet, self, non_blocking);
}
// aten::_cast_Float(Tensor self, bool non_blocking=False) -> Tensor
// Forwarding shim: delegates directly to the operator's redispatch entry
// point, passing the supplied dispatch key set through unchanged.
TORCH_API inline auto _cast_Float(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking=false) -> at::Tensor {
  return at::_ops::_cast_Float::redispatch(dispatchKeySet, self, non_blocking);
}
// aten::_cast_Int(Tensor self, bool non_blocking=False) -> Tensor
// Forwarding shim: hands the call straight to the operator's redispatch
// entry point with the caller-provided dispatch key set.
TORCH_API inline auto _cast_Int(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking=false) -> at::Tensor {
  return at::_ops::_cast_Int::redispatch(dispatchKeySet, self, non_blocking);
}
// aten::_cast_Long(Tensor self, bool non_blocking=False) -> Tensor
// Forwarding shim: delegates directly to the operator's redispatch entry
// point, passing the supplied dispatch key set through unchanged.
TORCH_API inline auto _cast_Long(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking=false) -> at::Tensor {
  return at::_ops::_cast_Long::redispatch(dispatchKeySet, self, non_blocking);
}
// aten::_cast_Short(Tensor self, bool non_blocking=False) -> Tensor
// Forwarding shim: hands the call straight to the operator's redispatch
// entry point with the caller-provided dispatch key set.
TORCH_API inline auto _cast_Short(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking=false) -> at::Tensor {
  return at::_ops::_cast_Short::redispatch(dispatchKeySet, self, non_blocking);
}
// aten::_cast_Half(Tensor self, bool non_blocking=False) -> Tensor
// Forwarding shim: delegates directly to the operator's redispatch entry
// point, passing the supplied dispatch key set through unchanged.
TORCH_API inline auto _cast_Half(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking=false) -> at::Tensor {
  return at::_ops::_cast_Half::redispatch(dispatchKeySet, self, non_blocking);
}
// aten::_backward(Tensor self, Tensor[] inputs, Tensor? gradient=None, bool? retain_graph=None, bool create_graph=False) -> ()
// Forwarding shim: passes every argument, including the three defaults,
// straight through to the operator's redispatch entry point.
TORCH_API inline void __dispatch__backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorList inputs, const c10::optional<at::Tensor> & gradient={}, c10::optional<bool> retain_graph=c10::nullopt, bool create_graph=false) {
  // Void operator: nothing to return, so the redundant `return` is dropped.
  at::_ops::_backward::redispatch(dispatchKeySet, self, inputs, gradient, retain_graph, create_graph);
}
// aten::set_data(Tensor(a!) self, Tensor new_data) -> ()
// Forwarding shim: hands the call straight to the operator's redispatch
// entry point with the caller-provided dispatch key set.
TORCH_API inline void __dispatch_set_data(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & new_data) {
  // Void operator: nothing to return, so the redundant `return` is dropped.
  at::_ops::set_data::redispatch(dispatchKeySet, self, new_data);
}
// aten::data(Tensor self) -> Tensor
// Forwarding shim: delegates directly to the operator's redispatch entry
// point, passing the supplied dispatch key set through unchanged.
TORCH_API inline auto __dispatch_data(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) -> at::Tensor {
  return at::_ops::data::redispatch(dispatchKeySet, self);
}
// aten::is_leaf(Tensor self) -> bool
// Forwarding shim: hands the call straight to the operator's redispatch
// entry point with the caller-provided dispatch key set.
TORCH_API inline auto __dispatch_is_leaf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) -> bool {
  return at::_ops::is_leaf::redispatch(dispatchKeySet, self);
}
// aten::output_nr(Tensor self) -> int
// Forwarding shim: delegates directly to the operator's redispatch entry
// point, passing the supplied dispatch key set through unchanged.
TORCH_API inline auto __dispatch_output_nr(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) -> int64_t {
  return at::_ops::output_nr::redispatch(dispatchKeySet, self);
}
// aten::_version(Tensor self) -> int
// Forwarding shim: hands the call straight to the operator's redispatch
// entry point with the caller-provided dispatch key set.
TORCH_API inline auto __dispatch__version(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) -> int64_t {
  return at::_ops::_version::redispatch(dispatchKeySet, self);
}
// aten::requires_grad_(Tensor(a!) self, bool requires_grad=True) -> Tensor(a!)
// Forwarding shim: delegates to the operator's redispatch entry point and
// returns the same self reference the operator hands back (in-place op).
TORCH_API inline auto __dispatch_requires_grad_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, bool requires_grad=true) -> at::Tensor & {
  return at::_ops::requires_grad_::redispatch(dispatchKeySet, self, requires_grad);
}
// aten::retain_grad(Tensor(a!) self) -> ()
// Forwarding shim: hands the call straight to the operator's redispatch
// entry point with the caller-provided dispatch key set.
TORCH_API inline void __dispatch_retain_grad(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
  // Void operator: nothing to return, so the redundant `return` is dropped.
  at::_ops::retain_grad::redispatch(dispatchKeySet, self);
}
// aten::retains_grad(Tensor self) -> bool
// Forwarding shim: delegates directly to the operator's redispatch entry
// point, passing the supplied dispatch key set through unchanged.
TORCH_API inline auto __dispatch_retains_grad(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) -> bool {
  return at::_ops::retains_grad::redispatch(dispatchKeySet, self);
}
// aten::_fw_primal(Tensor(a) self, int level) -> Tensor(a)
// Forwarding shim: hands the call straight to the operator's redispatch
// entry point with the caller-provided dispatch key set.
TORCH_API inline auto _fw_primal(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t level) -> at::Tensor {
  return at::_ops::_fw_primal::redispatch(dispatchKeySet, self, level);
}
// aten::_make_dual(Tensor(a) primal, Tensor tangent, int level) -> Tensor(a)
// Forwarding shim: delegates directly to the operator's redispatch entry
// point, passing the supplied dispatch key set through unchanged.
TORCH_API inline auto _make_dual(c10::DispatchKeySet dispatchKeySet, const at::Tensor & primal, const at::Tensor & tangent, int64_t level) -> at::Tensor {
  return at::_ops::_make_dual::redispatch(dispatchKeySet, primal, tangent, level);
}
// aten::_unpack_dual(Tensor(a) dual, int level) -> (Tensor(a) primal, Tensor tangent)
// Forwarding shim: hands the call straight to the operator's redispatch
// entry point; the (primal, tangent) pair comes back as a std::tuple.
TORCH_API inline auto _unpack_dual(c10::DispatchKeySet dispatchKeySet, const at::Tensor & dual, int64_t level) -> ::std::tuple<at::Tensor,at::Tensor> {
  return at::_ops::_unpack_dual::redispatch(dispatchKeySet, dual, level);
}
// aten::_new_zeros_with_same_feature_meta(Tensor self, Tensor other, *, int self_num_batch_dims=0) -> Tensor
// Forwarding shim: delegates directly to the operator's redispatch entry
// point, passing the supplied dispatch key set through unchanged.
TORCH_API inline auto _new_zeros_with_same_feature_meta(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims=0) -> at::Tensor {
  return at::_ops::_new_zeros_with_same_feature_meta::redispatch(dispatchKeySet, self, other, self_num_batch_dims);
}
// aten::_has_same_storage_numel(Tensor self, Tensor other) -> bool
// Forwarding shim: hands the call straight to the operator's redispatch
// entry point with the caller-provided dispatch key set.
TORCH_API inline auto _has_same_storage_numel(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) -> bool {
  return at::_ops::_has_same_storage_numel::redispatch(dispatchKeySet, self, other);
}
// aten::rename_(Tensor(a!) self, Dimname[]? names) -> Tensor(a!)
// Forwarding shim: delegates to the operator's redispatch entry point and
// returns the same self reference the operator hands back (in-place op).
TORCH_API inline auto rename_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, c10::optional<at::DimnameList> names) -> at::Tensor & {
  return at::_ops::rename_::redispatch(dispatchKeySet, self, names);
}
// aten::rename(Tensor(a) self, Dimname[]? names) -> Tensor(a)
TORCH_API inline at::Tensor rename(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::DimnameList> names) {
return at::_