#pragma once
// @generated by torchgen/gen.py from RedispatchFunctions.h
#ifdef TORCH_ASSERT_ONLY_METHOD_OPERATORS
#error This change adds a dependency on all pytorch operators, meaning the \
file will need to be re-compiled every time an operator is changed or added. \
Consider using the at::_ops::{name}::redispatch() interface by including \
the specific operator from <ATen/ops/{my_operator}_ops.h>
#endif
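// Usage sketch for the alternative named in the error above (assumes the
// aten::add.Tensor operator as an example; any operator header works the
// same way). Including only the per-operator header keeps this translation
// unit from depending on every operator:
//
//   #include <ATen/ops/add_ops.h>
//   // ks is a c10::DispatchKeySet you already hold:
//   at::Tensor out = at::_ops::add_Tensor::redispatch(ks, a, b, /*alpha=*/1);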
#include <c10/core/Scalar.h>
#include <ATen/Tensor.h>
#include <c10/core/Storage.h>
#include <ATen/core/Generator.h>
#include <c10/util/Deprecated.h>
#include <ATen/DeviceGuard.h>
#include <c10/core/TensorOptions.h>
#include <ATen/core/Reduction.h>
#include <c10/util/Optional.h>
#include <ATen/TensorUtils.h>
#include <ATen/Context.h>
#include <ATen/TracerMode.h>
#include <ATen/Operators.h>
namespace at {
namespace redispatch {
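// Every function in this namespace is a thin inline wrapper around the
// matching at::_ops::<name>::redispatch() entry point. Unlike the at::
// functions, these take an explicit c10::DispatchKeySet as the first
// argument, so a caller that is already inside the dispatcher (a kernel or
// boxed fallback) controls exactly which keys remain eligible for dispatch.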
// aten::_cast_Byte(Tensor self, bool non_blocking=False) -> Tensor
inline at::Tensor _cast_Byte(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking=false) {
return at::_ops::_cast_Byte::redispatch(dispatchKeySet, self, non_blocking);
}
// aten::_cast_Char(Tensor self, bool non_blocking=False) -> Tensor
inline at::Tensor _cast_Char(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking=false) {
return at::_ops::_cast_Char::redispatch(dispatchKeySet, self, non_blocking);
}
// aten::_cast_Double(Tensor self, bool non_blocking=False) -> Tensor
inline at::Tensor _cast_Double(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking=false) {
return at::_ops::_cast_Double::redispatch(dispatchKeySet, self, non_blocking);
}
// aten::_cast_Float(Tensor self, bool non_blocking=False) -> Tensor
inline at::Tensor _cast_Float(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking=false) {
return at::_ops::_cast_Float::redispatch(dispatchKeySet, self, non_blocking);
}
// aten::_cast_Int(Tensor self, bool non_blocking=False) -> Tensor
inline at::Tensor _cast_Int(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking=false) {
return at::_ops::_cast_Int::redispatch(dispatchKeySet, self, non_blocking);
}
// aten::_cast_Long(Tensor self, bool non_blocking=False) -> Tensor
inline at::Tensor _cast_Long(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking=false) {
return at::_ops::_cast_Long::redispatch(dispatchKeySet, self, non_blocking);
}
// aten::_cast_Short(Tensor self, bool non_blocking=False) -> Tensor
inline at::Tensor _cast_Short(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking=false) {
return at::_ops::_cast_Short::redispatch(dispatchKeySet, self, non_blocking);
}
// aten::_cast_Half(Tensor self, bool non_blocking=False) -> Tensor
inline at::Tensor _cast_Half(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool non_blocking=false) {
return at::_ops::_cast_Half::redispatch(dispatchKeySet, self, non_blocking);
}
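// Usage sketch for the wrappers above (assumes `ks` is the DispatchKeySet
// handed to your kernel and `self` is an at::Tensor in scope): masking with
// c10::after_autograd_keyset re-enters the dispatcher below the autograd
// keys, the pattern generated autograd kernels use to avoid dispatching back
// into themselves:
//
//   at::Tensor b = at::redispatch::_cast_Byte(
//       ks & c10::after_autograd_keyset, self, /*non_blocking=*/false);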
// aten::_backward(Tensor self, Tensor[] inputs, Tensor? gradient=None, bool? retain_graph=None, bool create_graph=False) -> ()
inline void __dispatch__backward(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::TensorList inputs, const c10::optional<at::Tensor> & gradient={}, c10::optional<bool> retain_graph=c10::nullopt, bool create_graph=false) {
return at::_ops::_backward::redispatch(dispatchKeySet, self, inputs, gradient, retain_graph, create_graph);
}
// aten::set_data(Tensor(a!) self, Tensor new_data) -> ()
inline void __dispatch_set_data(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & new_data) {
return at::_ops::set_data::redispatch(dispatchKeySet, self, new_data);
}
// aten::data(Tensor self) -> Tensor
inline at::Tensor __dispatch_data(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
return at::_ops::data::redispatch(dispatchKeySet, self);
}
// aten::is_leaf(Tensor self) -> bool
inline bool __dispatch_is_leaf(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
return at::_ops::is_leaf::redispatch(dispatchKeySet, self);
}
// aten::output_nr(Tensor self) -> int
inline int64_t __dispatch_output_nr(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
return at::_ops::output_nr::redispatch(dispatchKeySet, self);
}
// aten::_version(Tensor self) -> int
inline int64_t __dispatch__version(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
return at::_ops::_version::redispatch(dispatchKeySet, self);
}
// aten::requires_grad_(Tensor(a!) self, bool requires_grad=True) -> Tensor(a!)
inline at::Tensor & __dispatch_requires_grad_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, bool requires_grad=true) {
return at::_ops::requires_grad_::redispatch(dispatchKeySet, self, requires_grad);
}
// aten::retain_grad(Tensor(a!) self) -> ()
inline void __dispatch_retain_grad(c10::DispatchKeySet dispatchKeySet, at::Tensor & self) {
return at::_ops::retain_grad::redispatch(dispatchKeySet, self);
}
// aten::retains_grad(Tensor self) -> bool
inline bool __dispatch_retains_grad(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self) {
return at::_ops::retains_grad::redispatch(dispatchKeySet, self);
}
// aten::_fw_primal(Tensor(a) self, int level) -> Tensor(a)
inline at::Tensor _fw_primal(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t level) {
return at::_ops::_fw_primal::redispatch(dispatchKeySet, self, level);
}
// aten::_make_dual(Tensor(a) primal, Tensor tangent, int level) -> Tensor(a)
inline at::Tensor _make_dual(c10::DispatchKeySet dispatchKeySet, const at::Tensor & primal, const at::Tensor & tangent, int64_t level) {
return at::_ops::_make_dual::redispatch(dispatchKeySet, primal, tangent, level);
}
// aten::_unpack_dual(Tensor(a) dual, int level) -> (Tensor(a) primal, Tensor tangent)
inline ::std::tuple<at::Tensor,at::Tensor> _unpack_dual(c10::DispatchKeySet dispatchKeySet, const at::Tensor & dual, int64_t level) {
return at::_ops::_unpack_dual::redispatch(dispatchKeySet, dual, level);
}
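// Sketch (assumes `ks`, `dual`, and `level` are in scope): the tuple return
// of _unpack_dual unpacks with C++17 structured bindings into the forward-AD
// primal and tangent:
//
//   auto [primal, tangent] = at::redispatch::_unpack_dual(ks, dual, level);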
// aten::_new_zeros_with_same_feature_meta(Tensor self, Tensor other, *, int self_num_batch_dims=0) -> Tensor
inline at::Tensor _new_zeros_with_same_feature_meta(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims=0) {
return at::_ops::_new_zeros_with_same_feature_meta::redispatch(dispatchKeySet, self, other, self_num_batch_dims);
}
// aten::_has_same_storage_numel(Tensor self, Tensor other) -> bool
inline bool _has_same_storage_numel(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other) {
return at::_ops::_has_same_storage_numel::redispatch(dispatchKeySet, self, other);
}
// aten::rename_(Tensor(a!) self, Dimname[]? names) -> Tensor(a!)
inline at::Tensor & rename_(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, c10::optional<at::DimnameList> names) {
return at::_ops::rename_::redispatch(dispatchKeySet, self, names);
}
// aten::rename(Tensor(a) self, Dimname[]? names) -> Tensor(a)
inline at::Tensor rename(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional<at::DimnameList> names) {
return at::_ops::rename::redispatch(dispatchKeySet, self, names);
}
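// Sketch (assumes `ks` and a tensor `t` are in scope): because `names` is an
// optional Dimname list, passing c10::nullopt removes all dimension names,
// mirroring Tensor.rename(None) in Python:
//
//   at::Tensor unnamed = at::redispatch::rename(ks, t, c10::nullopt);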
// aten::align_to(Tensor(a) self, Dimname[] names) -> Tensor(a)
inline at::Tensor align_to(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::DimnameList names) {
return at::_ops::align_to::redispatch(dispatchKeySet, self, names);
}